music-kraken-core/src/music_kraken/pages/abstract.py


import logging
import random
from copy import copy
from typing import Optional, Union, Type, Dict, Set, List, Tuple
from string import Formatter

import requests
from bs4 import BeautifulSoup

from ..connection import Connection
from ..objects import (
    Song,
    Source,
    Album,
    Artist,
    Target,
    DatabaseObject,
    Options,
    Collection,
    Label,
)
from ..utils.enums.source import SourcePages
from ..utils.enums.album import AlbumType
from ..audio import write_metadata_to_target, correct_codec
from ..utils.config import main_settings
from ..utils.support_classes import Query, DownloadResult


INDEPENDENT_DB_OBJECTS = Union[Label, Album, Artist, Song]
INDEPENDENT_DB_TYPES = Union[Type[Song], Type[Album], Type[Artist], Type[Label]]


class NamingDict(dict):
    CUSTOM_KEYS: Dict[str, str] = {
        "label": "label.name",
        "artist": "artist.name",
        "song": "song.title",
        "isrc": "song.isrc",
        "album": "album.title",
        "album_type": "album.album_type_string"
    }

    def __init__(self, values: dict, object_mappings: Dict[str, DatabaseObject] = None):
        self.object_mappings: Dict[str, DatabaseObject] = object_mappings or dict()

        super().__init__(values)
        self["audio_format"] = main_settings["audio_format"]

    def add_object(self, music_object: DatabaseObject):
        self.object_mappings[type(music_object).__name__.lower()] = music_object

    def copy(self) -> dict:
        return type(self)(super().copy(), self.object_mappings.copy())

    def __getitem__(self, key: str) -> str:
        return super().__getitem__(key)

    def default_value_for_name(self, name: str) -> str:
        return f'Various {name.replace("_", " ").title()}'

    def __missing__(self, key: str) -> str:
        """
        TODO
        add proper logging
        """
        if "." not in key:
            if key not in self.CUSTOM_KEYS:
                return self.default_value_for_name(key)

            key = self.CUSTOM_KEYS[key]

        frag_list = key.split(".")

        object_name = frag_list[0].strip().lower()
        attribute_name = frag_list[-1].strip().lower()

        if object_name not in self.object_mappings:
            return self.default_value_for_name(attribute_name)

        music_object = self.object_mappings[object_name]
        try:
            value = getattr(music_object, attribute_name)
            if value is None:
                return self.default_value_for_name(attribute_name)

            return str(value)
        except AttributeError:
            return self.default_value_for_name(attribute_name)
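

# Illustrative sketch (not part of the upstream code): how NamingDict resolves
# template fields.  Plain keys come straight from the dict, CUSTOM_KEYS aliases
# and dotted keys are resolved against the registered objects, and anything
# unknown falls back to a "Various ..." placeholder.  The Artist(name=...)
# constructor call below is assumed for brevity.
#
#   naming = NamingDict({"genre": "Rock"})
#   naming.add_object(Artist(name="Some Artist"))
#   naming["genre"]          # -> "Rock"           (plain key)
#   naming["artist"]         # -> "Some Artist"    (CUSTOM_KEYS alias for "artist.name")
#   naming["album.title"]    # -> "Various Title"  (no album registered yet)

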
def _clean_music_object(music_object: INDEPENDENT_DB_OBJECTS, collections: Dict[INDEPENDENT_DB_TYPES, Collection]):
    if type(music_object) == Label:
        return _clean_label(label=music_object, collections=collections)
    if type(music_object) == Artist:
        return _clean_artist(artist=music_object, collections=collections)
    if type(music_object) == Album:
        return _clean_album(album=music_object, collections=collections)
    if type(music_object) == Song:
        return _clean_song(song=music_object, collections=collections)


def _clean_collection(collection: Collection, collection_dict: Dict[INDEPENDENT_DB_TYPES, Collection]):
    if collection.element_type not in collection_dict:
        return

    for i, element in enumerate(collection):
        r = collection_dict[collection.element_type].append(element, merge_into_existing=True)
        collection[i] = r.current_element

        if not r.was_the_same:
            _clean_music_object(r.current_element, collection_dict)


def _clean_label(label: Label, collections: Dict[INDEPENDENT_DB_TYPES, Collection]):
    _clean_collection(label.current_artist_collection, collections)
    _clean_collection(label.album_collection, collections)


def _clean_artist(artist: Artist, collections: Dict[INDEPENDENT_DB_TYPES, Collection]):
    _clean_collection(artist.main_album_collection, collections)
    _clean_collection(artist.feature_song_collection, collections)
    _clean_collection(artist.label_collection, collections)


def _clean_album(album: Album, collections: Dict[INDEPENDENT_DB_TYPES, Collection]):
    _clean_collection(album.label_collection, collections)
    _clean_collection(album.song_collection, collections)
    _clean_collection(album.artist_collection, collections)


def _clean_song(song: Song, collections: Dict[INDEPENDENT_DB_TYPES, Collection]):
    _clean_collection(song.album_collection, collections)
    _clean_collection(song.feature_artist_collection, collections)
    _clean_collection(song.main_artist_collection, collections)


def clean_object(dirty_object: DatabaseObject) -> DatabaseObject:
    if isinstance(dirty_object, INDEPENDENT_DB_OBJECTS):
        collections = {
            Label: Collection(element_type=Label),
            Artist: Collection(element_type=Artist),
            Album: Collection(element_type=Album),
            Song: Collection(element_type=Song)
        }

        _clean_music_object(dirty_object, collections)
    return dirty_object


def build_new_object(new_object: DatabaseObject) -> DatabaseObject:
    new_object = clean_object(new_object)
    new_object.compile(merge_into=False)

    return new_object


def merge_together(old_object: DatabaseObject, new_object: DatabaseObject) -> DatabaseObject:
    new_object = clean_object(new_object)

    old_object.merge(new_object)
    old_object.compile(merge_into=False)

    return old_object
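

# Rough usage sketch (illustrative, not part of the upstream code): a freshly
# scraped object is deduplicated and compiled via build_new_object(), while
# merge_together() folds newly scraped data into an object the caller already
# holds, modifying it in place:
#
#   scraped = build_new_object(scraped)            # dedupe nested objects + compile
#   existing = merge_together(existing, scraped)   # merge into the existing object

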
class Page:
    """
    This is an abstract class, laying out the
    functionality for every other class fetching something.
    """

    SOURCE_TYPE: SourcePages
    LOGGER = logging.getLogger("this shouldn't be used")

    # Set this to True if all song details can also be fetched by fetching the album details.
    NO_ADDITIONAL_DATA_FROM_SONG = False

    def __init__(self):
        super().__init__()

    """
    CODE I NEED WHEN I START WITH MULTITHREADING

    def __init__(self, end_event: EndThread, search_queue: Queue, search_result_queue: Queue):
        self.end_event = end_event

        self.search_queue = search_queue
        self.search_result_queue = search_result_queue

        super().__init__()

    @property
    def _empty_working_queues(self):
        return self.search_queue.empty()

    def run(self) -> None:
        while bool(self.end_event) and self._empty_working_queues:
            if not self.search_queue.empty():
                self.search(self.search_queue.get())
                self.search_result_queue.put(FinishedSearch())
                continue
    """

    def get_source_type(self, source: Source) -> Optional[Type[DatabaseObject]]:
        return None

    def get_soup_from_response(self, r: requests.Response) -> BeautifulSoup:
        return BeautifulSoup(r.content, "html.parser")

    # to search stuff
    def search(self, query: Query) -> List[DatabaseObject]:
        music_object = query.music_object

        search_functions = {
            Song: self.song_search,
            Album: self.album_search,
            Artist: self.artist_search,
            Label: self.label_search
        }

        if type(music_object) in search_functions:
            r = search_functions[type(music_object)](music_object)
            if r is not None and len(r) > 0:
                return r

        r = []
        for default_query in query.default_search:
            for single_option in self.general_search(default_query):
                r.append(single_option)

        return r
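
    # Illustrative sketch (not part of the upstream code): search() dispatches on
    # the concrete type of query.music_object and falls back to general_search()
    # over the query's default search strings if the typed search returns nothing:
    #
    #   query: Query = ...                      # built by the caller
    #   for option in page.search(query):
    #       print(option.option_string)         # option_string assumed on the result objects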

    def general_search(self, search_query: str) -> List[DatabaseObject]:
        return []

    def label_search(self, label: Label) -> List[Label]:
        return []

    def artist_search(self, artist: Artist) -> List[Artist]:
        return []

    def album_search(self, album: Album) -> List[Album]:
        return []

    def song_search(self, song: Song) -> List[Song]:
        return []

    def fetch_details(self, music_object: DatabaseObject, stop_at_level: int = 1) -> DatabaseObject:
        """
        when a music object with lacking data is passed in, it returns
        the SAME object **(no copy)** with more detailed data.
        If you, for example, put in an album, it fetches the tracklist.

        :param music_object:
        :param stop_at_level:
        This sets the depth to which the scraper will recurse.
        If it is set to 2, for example, the levels could be:
        1. Level: the album
        2. Level: every song of the album + every artist of the album
        If no additional requests are needed to get the data one level below the supposed stop level,
        this gets ignored.

        :return detailed_music_object: IT MODIFIES THE INPUT OBJ
        """

        # create a new, empty object of the same type
        new_music_object: DatabaseObject = type(music_object)()

        # only certain database objects have a source list
        if isinstance(music_object, INDEPENDENT_DB_OBJECTS):
            source: Source
            for source in music_object.source_collection.get_sources_from_page(self.SOURCE_TYPE):
                new_music_object.merge(
                    self.fetch_object_from_source(
                        source=source,
                        enforce_type=type(music_object),
                        stop_at_level=stop_at_level,
                        post_process=False
                    )
                )

        return merge_together(music_object, new_music_object)
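
    # Illustrative call (sketch, not part of the upstream code): filling an
    # album's tracklist in place, assuming the album already carries a source
    # belonging to this page's SOURCE_TYPE:
    #
    #   page.fetch_details(album, stop_at_level=1)
    #   # album.song_collection is now populated from this page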

    def fetch_object_from_source(self, source: Source, stop_at_level: int = 2, enforce_type: Type[DatabaseObject] = None, post_process: bool = True) -> Optional[DatabaseObject]:
        obj_type = self.get_source_type(source)

        if obj_type is None:
            return None

        if enforce_type != obj_type and enforce_type is not None:
            self.LOGGER.warning(f"Object type isn't type to enforce: {enforce_type}, {obj_type}")
            return None

        music_object: DatabaseObject = None

        fetch_map = {
            Song: self.fetch_song,
            Album: self.fetch_album,
            Artist: self.fetch_artist,
            Label: self.fetch_label
        }

        if obj_type in fetch_map:
            music_object = fetch_map[obj_type](source, stop_at_level)
        else:
            self.LOGGER.warning(f"Can't fetch details of type: {obj_type}")
            return None

        if post_process and music_object:
            return build_new_object(music_object)

        return music_object

    def fetch_song(self, source: Source, stop_at_level: int = 1) -> Song:
        return Song()

    def fetch_album(self, source: Source, stop_at_level: int = 1) -> Album:
        return Album()

    def fetch_artist(self, source: Source, stop_at_level: int = 1) -> Artist:
        return Artist()

    def fetch_label(self, source: Source, stop_at_level: int = 1) -> Label:
        return Label()

    def download(self, music_object: DatabaseObject, genre: str, download_all: bool = False, process_metadata_anyway: bool = False) -> DownloadResult:
        naming_dict: NamingDict = NamingDict({"genre": genre})

        def fill_naming_objects(naming_music_object: DatabaseObject):
            nonlocal naming_dict

            for collection_name in naming_music_object.UPWARDS_COLLECTION_ATTRIBUTES:
                collection: Collection = getattr(naming_music_object, collection_name)

                if collection.empty:
                    continue

                dom_ordered_music_object: DatabaseObject = collection[0]
                naming_dict.add_object(dom_ordered_music_object)
                return fill_naming_objects(dom_ordered_music_object)

        fill_naming_objects(music_object)

        return self._download(music_object, naming_dict, download_all, process_metadata_anyway=process_metadata_anyway)
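
    # Illustrative sketch (not part of the upstream code): download() first seeds
    # the NamingDict by walking the object's UPWARDS_COLLECTION_ATTRIBUTES
    # (e.g. song -> album -> artist -> label), then recurses via _download():
    #
    #   result: DownloadResult = page.download(album, genre="Rock")
    #   # result summarises the whole run (targets written, errors, files found on disk)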

    def _download(self, music_object: DatabaseObject, naming_dict: NamingDict, download_all: bool = False, skip_details: bool = False, process_metadata_anyway: bool = False) -> DownloadResult:
        skip_next_details = skip_details

        # Skips all releases whose album type is in main_settings["album_type_blacklist"], unless download_all is True.
        if isinstance(music_object, Album):
            if self.NO_ADDITIONAL_DATA_FROM_SONG:
                skip_next_details = True

            if not download_all and music_object.album_type.value in main_settings["album_type_blacklist"]:
                return DownloadResult()

        if not isinstance(music_object, Song) or not self.NO_ADDITIONAL_DATA_FROM_SONG:
            self.fetch_details(music_object=music_object, stop_at_level=2)

        naming_dict.add_object(music_object)

        if isinstance(music_object, Song):
            return self._download_song(music_object, naming_dict, process_metadata_anyway=process_metadata_anyway)

        download_result: DownloadResult = DownloadResult()

        for collection_name in music_object.DOWNWARDS_COLLECTION_ATTRIBUTES:
            collection: Collection = getattr(music_object, collection_name)

            sub_ordered_music_object: DatabaseObject
            for sub_ordered_music_object in collection:
                download_result.merge(self._download(sub_ordered_music_object, naming_dict.copy(), download_all, skip_details=skip_next_details, process_metadata_anyway=process_metadata_anyway))

        return download_result

    def _download_song(self, song: Song, naming_dict: NamingDict, process_metadata_anyway: bool = False):
        if "genre" not in naming_dict and song.genre is not None:
            naming_dict["genre"] = song.genre

        if song.genre is None:
            song.genre = naming_dict["genre"]

        path_parts = Formatter().parse(main_settings["download_path"])
        file_parts = Formatter().parse(main_settings["download_file"])

        new_target = Target(
            relative_to_music_dir=True,
            path=main_settings["download_path"].format(**{part[1]: naming_dict[part[1]] for part in path_parts}),
            file=main_settings["download_file"].format(**{part[1]: naming_dict[part[1]] for part in file_parts})
        )
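
        # Illustrative note (sketch, not part of the upstream code): Formatter().parse
        # extracts the field names of the configured templates, which are then
        # pre-resolved through the NamingDict before formatting, e.g.:
        #
        #   [p[1] for p in Formatter().parse("{genre}/{artist}/{song}")]
        #   # -> ['genre', 'artist', 'song']   (template string assumed for illustration)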

        if song.target_collection.empty:
            song.target_collection.append(new_target)

        sources = song.source_collection.get_sources_from_page(self.SOURCE_TYPE)
        if len(sources) == 0:
            return DownloadResult(error_message=f"No source found for {song.title} as {self.__class__.__name__}.")

        temp_target: Target = Target(
            path=main_settings["temp_directory"],
            file=str(random.randint(0, 999999))
        )

        r = DownloadResult(1)

        found_on_disc = False
        target: Target
        for target in song.target_collection:
            if target.exists:
                if process_metadata_anyway:
                    target.copy_content(temp_target)
                found_on_disc = True

                r.found_on_disk += 1
                r.add_target(target)

        if found_on_disc and not process_metadata_anyway:
            self.LOGGER.info(f"{song.option_string} already exists, thus not downloading again.")
            return r

        source = sources[0]

        if not found_on_disc:
            r = self.download_song_to_target(source=source, target=temp_target, desc=song.title)

        if not r.is_fatal_error:
            r.merge(self._post_process_targets(song, temp_target, [] if found_on_disc else self.get_skip_intervals(song, source)))

        return r

    def _post_process_targets(self, song: Song, temp_target: Target, interval_list: List) -> DownloadResult:
        correct_codec(temp_target, interval_list=interval_list)

        self.post_process_hook(song, temp_target)

        write_metadata_to_target(song.metadata, temp_target)

        r = DownloadResult()

        target: Target
        for target in song.target_collection:
            if temp_target is not target:
                temp_target.copy_content(target)
            r.add_target(target)

        temp_target.delete()
        r.sponsor_segments += len(interval_list)

        return r

    def get_skip_intervals(self, song: Song, source: Source) -> List[Tuple[float, float]]:
        return []

    def post_process_hook(self, song: Song, temp_target: Target, **kwargs):
        pass

    def download_song_to_target(self, source: Source, target: Target, desc: str = None) -> DownloadResult:
        return DownloadResult()
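

# Illustrative sketch (not part of the upstream code): a minimal concrete page.
# Real pages in this package override SOURCE_TYPE plus the search/fetch/download
# hooks; the names below are placeholders and the SourcePages member is assumed
# purely for the example.
#
#   class DummyPage(Page):
#       SOURCE_TYPE = SourcePages.MANUAL           # assumed enum member
#       LOGGER = logging.getLogger("dummy")
#
#       def get_source_type(self, source: Source) -> Optional[Type[DatabaseObject]]:
#           return Song                            # every URL is treated as a song
#
#       def fetch_song(self, source: Source, stop_at_level: int = 1) -> Song:
#           return Song(title="Placeholder", source_list=[source])   # kwargs assumed
#
#       def download_song_to_target(self, source: Source, target: Target, desc: str = None) -> DownloadResult:
#           return DownloadResult(error_message="downloading is not implemented here")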