from typing import Tuple, Type, Dict, Set, Optional, List
from collections import defaultdict
from pathlib import Path
import re

from . import FetchOptions, DownloadOptions
from .results import SearchResults
from ..objects import (
    DatabaseObject as DataObject,
    Collection,
    Target,
    Source,
    Options,
    Song,
    Album,
    Artist,
    Label,
)
from ..utils.string_processing import fit_to_file_system
from ..utils.config import youtube_settings, main_settings
from ..utils.path_manager import LOCATIONS
from ..utils.enums import SourceType
from ..utils.support_classes.download_result import DownloadResult
from ..utils.support_classes.query import Query
from ..utils.exception import MKMissingNameException
from ..utils.exception.download import UrlNotFoundException
from ..utils.shared import DEBUG_PAGES
# assumed module paths for the console helpers used in _download_song;
# adjust if output/BColors live elsewhere in this code base
from ..utils import output
from ..utils.enums.colors import BColors

from ..pages import Page, EncyclopaediaMetallum, Musify, YouTube, YoutubeMusic, Bandcamp, INDEPENDENT_DB_OBJECTS

ALL_PAGES: Set[Type[Page]] = {
    # EncyclopaediaMetallum,
    Musify,
    YoutubeMusic,
    Bandcamp,
}

if youtube_settings["use_youtube_alongside_youtube_music"]:
    ALL_PAGES.add(YouTube)

AUDIO_PAGES: Set[Type[Page]] = {
    Musify,
    YouTube,
    YoutubeMusic,
    Bandcamp,
}

SHADY_PAGES: Set[Type[Page]] = {
    Musify,
}

# maps the object type returned by Page.get_source_type to the page method that fetches it
fetch_map = {
    Song: "fetch_song",
    Album: "fetch_album",
    Artist: "fetch_artist",
    Label: "fetch_label",
}

if DEBUG_PAGES:
    DEBUGGING_PAGE = Bandcamp
    print(f"Only downloading from page {DEBUGGING_PAGE}.")

    ALL_PAGES = {DEBUGGING_PAGE}
    AUDIO_PAGES = ALL_PAGES.union(AUDIO_PAGES)


class Pages:
    def __init__(self, exclude_pages: Set[Type[Page]] = None, exclude_shady: bool = False, download_options: DownloadOptions = None, fetch_options: FetchOptions = None):
        self.download_options: DownloadOptions = download_options or DownloadOptions()
        self.fetch_options: FetchOptions = fetch_options or FetchOptions()

        # initialize all page instances
        self._page_instances: Dict[Type[Page], Page] = dict()
        self._source_to_page: Dict[SourceType, Type[Page]] = dict()

        exclude_pages = exclude_pages if exclude_pages is not None else set()
        if exclude_shady:
            exclude_pages = exclude_pages.union(SHADY_PAGES)

        if not exclude_pages.issubset(ALL_PAGES):
            raise ValueError(f"The excluded pages have to be a subset of all pages: {exclude_pages} | {ALL_PAGES}")

        def _set_to_tuple(page_set: Set[Type[Page]]) -> Tuple[Type[Page], ...]:
            return tuple(sorted(page_set, key=lambda page: page.__name__))

        self._pages_set: Set[Type[Page]] = ALL_PAGES.difference(exclude_pages)
        self.pages: Tuple[Type[Page], ...] = _set_to_tuple(self._pages_set)

        self._audio_pages_set: Set[Type[Page]] = self._pages_set.intersection(AUDIO_PAGES)
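        # the audio pages are the subset of the enabled pages that can actually
        # deliver audio; like self.pages they are exposed as a name-sorted tuple
        # so that iteration order stays deterministic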
        self.audio_pages: Tuple[Type[Page], ...] = _set_to_tuple(self._audio_pages_set)

        for page_type in self.pages:
            self._page_instances[page_type] = page_type(fetch_options=self.fetch_options, download_options=self.download_options)
            self._source_to_page[page_type.SOURCE_TYPE] = page_type

    def _get_page_from_enum(self, source_page: SourceType) -> Optional[Page]:
        if source_page not in self._source_to_page:
            return None
        return self._page_instances[self._source_to_page[source_page]]

    def search(self, query: Query) -> SearchResults:
        result = SearchResults()

        for page_type in self.pages:
            result.add(
                page=page_type,
                search_result=self._page_instances[page_type].search(query=query)
            )

        return result

    def fetch_details(self, data_object: DataObject, stop_at_level: int = 1, **kwargs) -> DataObject:
        if not isinstance(data_object, INDEPENDENT_DB_OBJECTS):
            return data_object

        source: Source
        for source in data_object.source_collection.get_sources():
            new_data_object = self.fetch_from_source(source=source, stop_at_level=stop_at_level)
            if new_data_object is not None:
                data_object.merge(new_data_object)

        return data_object

    def fetch_from_source(self, source: Source, **kwargs) -> Optional[DataObject]:
        page: Page = self._get_page_from_enum(source.source_type)
        if page is None:
            return None

        # get the appropriate fetch function for the page and the object type
        source_type = page.get_source_type(source)
        if not hasattr(page, fetch_map[source_type]):
            return None
        func = getattr(page, fetch_map[source_type])

        # fetch the data object and mark it as fetched
        data_object: DataObject = func(source=source, **kwargs)
        data_object.mark_as_fetched(source.hash_url)
        return data_object

    def fetch_from_url(self, url: str) -> Optional[DataObject]:
        source = Source.match_url(url, SourceType.MANUAL)
        if source is None:
            return None

        return self.fetch_from_source(source=source)

    def is_downloadable(self, music_object: DataObject) -> bool:
        _page_types: Set[Type[Page]] = set()
        for src in music_object.source_collection.source_pages:
            if src in self._source_to_page:
                _page_types.add(self._source_to_page[src])

        audio_pages = self._audio_pages_set.intersection(_page_types)
        return len(audio_pages) > 0

    def _skip_object(self, data_object: DataObject) -> bool:
        if isinstance(data_object, Album):
            if not self.download_options.download_all and data_object.album_type in self.download_options.album_type_blacklist:
                return True

        return False

    def download(self, data_object: DataObject, genre: str, **kwargs) -> DownloadResult:
        # fetch the given object
        self.fetch_details(data_object)

        # fetch all parent objects (e.g. if you only download a song)
        if not kwargs.get("fetched_upwards", False):
            to_fetch: List[DataObject] = [data_object]

            while len(to_fetch) > 0:
                new_to_fetch = []
                for d in to_fetch:
                    if self._skip_object(d):
                        continue

                    self.fetch_details(d)

                    for c in d.get_parent_collections():
                        new_to_fetch.extend(c)

                to_fetch = new_to_fetch

            kwargs["fetched_upwards"] = True

        # download all children
        download_result: DownloadResult = DownloadResult()
        for c in data_object.get_children():
            for d in c:
                if self._skip_object(d):
                    continue

                download_result.merge(self.download(d, genre, **kwargs))

        # actually download if the object is a song
        if isinstance(data_object, Song):
            """
            TODO
            Add the traced artist and album to the naming.
            I am able to do that, because duplicate values are removed later on.
            """
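            # seed the naming mapping with the values that are only known at this
            # level (the requested genre and the configured audio format);
            # _download_song extends it with song/album/artist metadata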
""" self._download_song(data_object, naming={ "genre": [genre], "audio_format": main_settings["audio_format"], }) return download_result def _extract_fields_from_template(self, path_template: str) -> Set[str]: return set(re.findall(r"{([^}]+)}", path_template)) def _parse_path_template(self, path_template: str, naming: Dict[str, List[str]]) -> str: field_names: Set[str] = self._extract_fields_from_template(path_template) for field in field_names: if len(naming[field]) == 0: raise MKMissingNameException(f"Missing field for {field}.") path_template = path_template.replace(f"{{{field}}}", naming[field][0]) return possible_parts def _get_pages_with_source(self, data_object: DataObject, sort_by_attribute: str = "DOWNLOAD_PRIORITY") -> List[Page]: pages = [self._get_page_from_enum(s.source_type) for s in data_object.source_collection.get_sources()] pages.sort(key=lambda p: getattr(p, sort_by_attribute), reverse=True) return list(pages) def _download_song(self, song: Song, naming: dict) -> DownloadOptions: """ TODO Search the song in the file system. """ r = DownloadResult(total=1) # pre process the data recursively song.compile() # manage the naming naming: Dict[str, List[str]] = defaultdict(list, naming) naming["song"].append(song.title_string) naming["genre"].append(song.genre) naming["isrc"].append(song.isrc) naming["album"].extend(a.title_string for a in song.album_collection) naming["album_type"].extend(a.album_type.value for a in song.album_collection) naming["artist"].extend(a.name for a in song.main_artist_collection) naming["artist"].extend(a.name for a in song.feature_artist_collection) for a in song.album_collection: naming["label"].extend([l.title_string for l in a.label_collection]) # removing duplicates from the naming, and process the strings for key, value in naming.items(): # https://stackoverflow.com/a/17016257 naming[key] = list(dict.fromkeys(items)) # manage the targets tmp: Target = Target.temp(file_extension=main_settings["audio_format"]) found_on_disc = False song.target_collection.append(Target( relative_to_music_dir=True, file_path=Path( self._parse_path_template(main_settings["download_path"], naming=naming), self._parse_path_template(main_settings["download_file"], naming=naming), ) )) for target in song.target_collection: if target.exists(): output(f'- {target.file_path} {BColors.OKGREEN.value}[already exists]', color=BColors.GREY) found_on_disc = True r.found_on_disk += 1 target.copy_content(tmp) else: target.create_parent_directories() output(f'- {target.file_path}', color=BColors.GREY) # actually download for page in self._get_pages_with_source(song, sort_by_attribute="DOWNLOAD_PRIORITY"): r = page.download_song_to_target(song, tmp, r) return r def fetch_url(self, url: str, stop_at_level: int = 2) -> Tuple[Type[Page], DataObject]: source = Source.match_url(url, SourceType.MANUAL) if source is None: raise UrlNotFoundException(url=url) _actual_page = self._source_to_page[source.source_type] return _actual_page, self._page_instances[_actual_page].fetch_object_from_source(source=source, stop_at_level=stop_at_level)