feat: build
This commit is contained in:
25
music_kraken/objects/__init__.py
Normal file
25
music_kraken/objects/__init__.py
Normal file
@@ -0,0 +1,25 @@
|
||||
from typing_extensions import TypeVar
|
||||
from .option import Options
|
||||
|
||||
from .metadata import Metadata, Mapping as ID3Mapping, ID3Timestamp
|
||||
|
||||
from .source import Source, SourcePages, SourceTypes
|
||||
|
||||
from .song import (
|
||||
Song,
|
||||
Album,
|
||||
Artist,
|
||||
Target,
|
||||
Lyrics,
|
||||
Label
|
||||
)
|
||||
|
||||
from .formatted_text import FormattedText
|
||||
from .collection import Collection
|
||||
|
||||
from .country import Country
|
||||
from .contact import Contact
|
||||
|
||||
from .parents import OuterProxy
|
||||
|
||||
# Type alias for any object deriving from OuterProxy.
# BUGFIX: the TypeVar's string name must equal the variable it is assigned to
# ("DatabaseObject", not 'T'), otherwise type checkers reject the declaration.
DatabaseObject = TypeVar('DatabaseObject', bound=OuterProxy)
|
||||
110
music_kraken/objects/cache.py
Normal file
110
music_kraken/objects/cache.py
Normal file
@@ -0,0 +1,110 @@
|
||||
from collections import defaultdict
|
||||
from typing import Dict, List, Optional
|
||||
import weakref
|
||||
|
||||
from .parents import DatabaseObject
|
||||
|
||||
"""
|
||||
This is a cache for the objects, that et pulled out of the database.
|
||||
This is necessary, to not have duplicate objects with the same id.
|
||||
|
||||
Using a cache that maps the ojects to their id has multiple benefits:
|
||||
- if you modify the object at any point, all objects with the same id get modified *(copy by reference)*
|
||||
- less ram usage
|
||||
- to further decrease ram usage I only store weak refs and not a strong reference, for the gc to still work
|
||||
"""
|
||||
|
||||
|
||||
class ObjectCache:
    """
    ObjectCache is a cache for the objects retrieved from a database.
    It maps each object to its id and uses weak references to manage its memory usage.
    Using a cache for these objects provides several benefits:

    - Modifying an object updates all objects with the same id (due to copy by reference)
    - Reduced memory usage

    :attr object_to_id: Dictionary that maps an id to its DatabaseObject.
    :attr weakref_map: Dictionary that uses weak references to DatabaseObjects as keys and their id as values.

    :method exists: Check if a DatabaseObject already exists in the cache.
    :method append: Add a DatabaseObject to the cache if it does not already exist.
    :method extent: Add a list of DatabaseObjects to the cache.
    :method remove: Remove a DatabaseObject from the cache by its id.
    :method get: Retrieve a DatabaseObject from the cache by its id.
    """
    object_to_id: Dict[str, DatabaseObject]
    weakref_map: Dict[weakref.ref, str]

    def __init__(self) -> None:
        # plain dicts suffice: the previous defaultdict() had no default
        # factory and therefore behaved exactly like dict() anyway
        self.object_to_id = dict()
        self.weakref_map = dict()

    def exists(self, database_object: DatabaseObject) -> bool:
        """
        Check if a DatabaseObject with the same id already exists in the cache.

        :param database_object: The DatabaseObject to check for.
        :return: True if the DatabaseObject exists, False otherwise.
        """
        # dynamic objects are never cached, so they are reported as existing
        if database_object.dynamic:
            return True
        return database_object.id in self.object_to_id

    def on_death(self, weakref_: weakref.ref) -> None:
        """
        Callback triggered when the reference count of a DatabaseObject drops to 0.
        Removes the DatabaseObject from both cache mappings.

        :param weakref_: The weak reference of the DatabaseObject that has been garbage collected.
        """
        data_id = self.weakref_map.pop(weakref_)
        self.object_to_id.pop(data_id)

    def get_weakref(self, database_object: DatabaseObject) -> weakref.ref:
        """Return a weak reference to *database_object* that cleans up the cache on collection."""
        return weakref.ref(database_object, self.on_death)

    def append(self, database_object: DatabaseObject) -> bool:
        """
        Add a DatabaseObject to the cache.

        :param database_object: The DatabaseObject to add to the cache.
        :return: True if the DatabaseObject already exists in the cache, False otherwise.
        """
        if self.exists(database_object):
            return True

        # only a weak reference is stored, so the gc can still collect the
        # object; get_weakref wires up the on_death cleanup callback
        self.weakref_map[self.get_weakref(database_object)] = database_object.id
        self.object_to_id[database_object.id] = database_object

        return False

    def extent(self, database_object_list: List[DatabaseObject]):
        """
        Analogous to list.extend, this appends every object of the iterable.
        (The name keeps the historical spelling "extent" for compatibility.)
        """
        for database_object in database_object_list:
            self.append(database_object)

    def remove(self, _id: str):
        """
        Remove a DatabaseObject from the cache.

        :param _id: The id of the DatabaseObject to remove from the cache.
        """
        data = self.object_to_id.get(_id)
        if data:
            # weak references to the same live object compare (and hash)
            # equal, so a fresh ref is a valid key for the stored one
            self.weakref_map.pop(weakref.ref(data))
            self.object_to_id.pop(_id)

    def __getitem__(self, item) -> Optional[DatabaseObject]:
        """
        Return the cached DatabaseObject for the given id.

        :param item: the id of the music object
        :return: the cached object, or None if it is not cached
        """
        return self.object_to_id.get(item)

    def get(self, _id: str) -> Optional[DatabaseObject]:
        """Alias for ``self[_id]``."""
        return self.__getitem__(_id)
|
||||
331
music_kraken/objects/collection.py
Normal file
331
music_kraken/objects/collection.py
Normal file
@@ -0,0 +1,331 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from typing import TypeVar, Generic, Dict, Optional, Iterable, List, Iterator, Tuple
|
||||
from .parents import OuterProxy
|
||||
|
||||
T = TypeVar('T', bound=OuterProxy)
|
||||
|
||||
|
||||
class Collection(Generic[T]):
    """
    A deduplicating, tree-structured container for OuterProxy objects.

    Elements are indexed by their ``indexing_values``; appending an object
    whose indexing values match an already-contained one merges the two
    instead of storing a duplicate.  A collection may contain child
    collections, whose elements count as part of this collection as well
    (see :attr:`data`, :meth:`__iter__`, :meth:`__len__`).
    """

    __is_collection__ = True

    _data: List[T]

    # index name -> set of values currently indexed under that name
    _indexed_values: Dict[str, set]
    # indexed value -> list of objects carrying that value
    _indexed_to_objects: Dict[any, list]

    # flat, read-only view over this collection and all of its children
    shallow_list = property(fget=lambda self: self.data)

    def __init__(
            self,
            data: Optional[Iterable[T]] = None,
            sync_on_append: Dict[str, Collection] = None,
            contain_given_in_attribute: Dict[str, Collection] = None,
            contain_attribute_in_given: Dict[str, Collection] = None,
            append_object_to_attribute: Dict[str, T] = None
    ) -> None:
        self._contains_ids = set()
        self._data = []

        self.parents: List[Collection[T]] = []
        self.children: List[Collection[T]] = []

        # List of collection attributes that should be modified on append
        # Key: collection attribute (str) of appended element
        # Value: main collection to sync to
        self.contain_given_in_attribute: Dict[str, Collection] = contain_given_in_attribute or {}
        self.contain_attribute_in_given: Dict[str, Collection] = contain_attribute_in_given or {}
        self.append_object_to_attribute: Dict[str, T] = append_object_to_attribute or {}

        self.contain_self_on_append: List[str] = []

        self._indexed_values = defaultdict(set)
        self._indexed_to_objects = defaultdict(list)

        self.extend(data)

    def _map_element(self, __object: T, from_map: bool = False):
        """Index *__object* and (unless remapping) run the configured attribute syncing."""
        if __object.id in self._contains_ids:
            return

        self._contains_ids.add(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue

            self._indexed_values[name].add(value)
            self._indexed_to_objects[value].append(__object)

        if not from_map:
            for attribute, new_object in self.contain_given_in_attribute.items():
                __object.__getattribute__(attribute).contain_collection_inside(new_object)

            for attribute, new_object in self.contain_attribute_in_given.items():
                new_object.contain_collection_inside(__object.__getattribute__(attribute))

            for attribute, new_object in self.append_object_to_attribute.items():
                __object.__getattribute__(attribute).append(new_object)

    def _unmap_element(self, __object: T):
        """Remove *__object* from all index structures (inverse of _map_element)."""
        if __object.id in self._contains_ids:
            self._contains_ids.remove(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value not in self._indexed_values[name]:
                continue

            try:
                self._indexed_to_objects[value].remove(__object)
            except ValueError:
                # another object with an equal indexing value was stored here
                continue

            if not len(self._indexed_to_objects[value]):
                self._indexed_values[name].remove(value)

    def _contained_in_self(self, __object: T) -> bool:
        """True if this collection itself (children excluded) holds a matching object."""
        if __object.id in self._contains_ids:
            return True

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                return True
        return False

    def _contained_in_sub(self, __object: T, break_at_first: bool = True) -> List[Collection]:
        """
        Gets the collections this object is found in, if it is found in any.

        :param __object: object to look up
        :param break_at_first: stop after the first collection level that matches
        :return: list of matching collections (possibly empty)
        """
        results = []

        if self._contained_in_self(__object):
            return [self]

        for collection in self.children:
            results.extend(collection._contained_in_sub(__object, break_at_first=break_at_first))

        return results

    def _get_root_collections(self) -> List[Collection]:
        """Return every ancestor collection that itself has no parents."""
        if not len(self.parents):
            return [self]

        root_collections = []
        for upper_collection in self.parents:
            root_collections.extend(upper_collection._get_root_collections())
        return root_collections

    @property
    def _is_root(self) -> bool:
        return len(self.parents) <= 0

    def _get_parents_of_multiple_contained_children(self, __object: T):
        """Return collections under which *__object* is found in two or more child branches."""
        results = []
        if len(self.children) < 2 or self._contained_in_self(__object):
            return results

        count = 0

        for collection in self.children:
            sub_results = collection._get_parents_of_multiple_contained_children(__object)

            if len(sub_results) > 0:
                count += 1
                results.extend(sub_results)

        if count >= 2:
            results.append(self)

        return results

    def merge_into_self(self, __object: T, from_map: bool = False):
        """
        1. find existing objects
        2. merge into existing object
        3. remap existing object
        """
        if __object.id in self._contains_ids:
            return

        existing_object: T = None

        for name, value in __object.indexing_values:
            if value is None:
                continue

            if value in self._indexed_values[name]:
                existing_object = self._indexed_to_objects[value][0]
                if existing_object.id == __object.id:
                    return None

                break

        if existing_object is None:
            return None

        existing_object.merge(__object)

        # just a check if it really worked
        if existing_object.id != __object.id:
            raise ValueError("This should NEVER happen. Merging doesn't work.")

        self._map_element(existing_object, from_map=from_map)

    def contains(self, __object: T) -> bool:
        """True if this collection or any descendant holds a matching object."""
        return len(self._contained_in_sub(__object)) > 0

    def _append(self, __object: T, from_map: bool = False):
        # BUGFIX: removed leftover debug ``print(self, __object)``
        self._map_element(__object, from_map=from_map)
        self._data.append(__object)

    def _find_object_in_self(self, __object: T) -> Optional[T]:
        """Return the first object of *this* collection sharing an indexing value, else None."""
        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                return self._indexed_to_objects[value][0]

    def _find_object(self, __object: T) -> Tuple[Collection[T], Optional[T]]:
        """
        Depth-first search for an equivalent object.

        :return: (collection to operate on, matching object or None)
        """
        other_object = self._find_object_in_self(__object)
        if other_object is not None:
            return self, other_object

        for c in self.children:
            o, other_object = c._find_object(__object)
            if other_object is not None:
                return o, other_object

        return self, None

    def append(self, __object: Optional[T], already_is_parent: bool = False, from_map: bool = False):
        """
        If an object that represents the same entity exists in a relevant collection,
        merge into this object (and remap). Else append to this collection.

        :param __object: object to add; ignored when None or already contained
        :param already_is_parent: kept for backward compatibility (currently unused)
        :param from_map: suppress attribute-sync side effects while remapping
        :return:
        """

        if __object is None or __object.id in self._contains_ids:
            return

        append_to, existing_object = self._find_object(__object)

        if existing_object is None:
            # no equivalent found anywhere in the tree: plain append
            append_to._data.append(__object)
        else:
            # merge with the object already contained in ``append_to``
            append_to._unmap_element(existing_object)
            existing_object.merge(__object)

        # NOTE(review): the *incoming* object is mapped after a merge, not the
        # surviving one — correct only if merge() makes the two share state;
        # confirm against OuterProxy.merge before changing.
        append_to._map_element(__object, from_map=from_map)

    def extend(self, __iterable: Optional[Iterable[T]], from_map: bool = False):
        """Append every element of *__iterable* (no-op when None)."""
        if __iterable is None:
            return

        for __object in __iterable:
            self.append(__object, from_map=from_map)

    def sync_with_other_collection(self, equal_collection: Collection):
        """
        If two collections always need to have the same values, this can be used.

        Internally:
        1. import the data from other to self
            - _data
            - contained_collections
        2. replace all refs from the other object, with refs from this object
        """
        if equal_collection is self:
            return

        # don't add the elements from the subelements from the other collection.
        # this will be done in the next step.
        self.extend(equal_collection._data)
        # add all submodules
        for equal_sub_collection in equal_collection.children:
            self.contain_collection_inside(equal_sub_collection)

    def contain_collection_inside(self, sub_collection: Collection):
        """
        This collection will ALWAYS contain everything from the passed in collection
        """
        if self is sub_collection or sub_collection in self.children:
            return

        self.children.append(sub_collection)
        sub_collection.parents.append(self)

    @property
    def data(self) -> List[T]:
        """Flattened list of own elements followed by every child's elements."""
        return [*self._data,
                *(__object for collection in self.children for __object in collection.shallow_list)]

    def __len__(self) -> int:
        return len(self._data) + sum(len(collection) for collection in self.children)

    @property
    def empty(self) -> bool:
        return self.__len__() <= 0

    def __iter__(self) -> Iterator[T]:
        for element in self._data:
            yield element

        for c in self.children:
            for element in c:
                yield element

    def __merge__(self, __other: Collection, override: bool = False):
        self.extend(__other._data, from_map=True)

    def __getitem__(self, item: int):
        if item < len(self._data):
            return self._data[item]

        item = item - len(self._data)

        for c in self.children:
            if item < len(c):
                return c.__getitem__(item)
            # BUGFIX: skip everything the child covers (its own data AND its
            # children); previously only len(c._data) was subtracted, which
            # misaligned the index once a child had children of its own.
            item = item - len(c)

        raise IndexError
|
||||
38
music_kraken/objects/contact.py
Normal file
38
music_kraken/objects/contact.py
Normal file
@@ -0,0 +1,38 @@
|
||||
from typing import Optional, List, Tuple
|
||||
|
||||
from ..utils.enums.contact import ContactMethod
|
||||
from .parents import OuterProxy
|
||||
|
||||
|
||||
class Contact(OuterProxy):
    """
    A single way of contacting somebody: an e-mail address, a phone number
    or a fax number, stored as a contact method plus its raw value.
    """

    COLLECTION_STRING_ATTRIBUTES = tuple()
    SIMPLE_STRING_ATTRIBUTES = {
        "contact_method": None,
        "value": None,
    }

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        # a contact is identified either by its id or by its raw value
        return [
            ('id', self.id),
            ('value', self.value),
        ]

    def __init__(self, contact_method: ContactMethod, value: str, **kwargs) -> None:
        super().__init__(**kwargs)
        self.contact_method: ContactMethod = contact_method
        self.value: str = value

    @classmethod
    def match_url(cls, url: str) -> Optional["Contact"]:
        """Build a Contact from a ``mailto:``/``tel:``/``fax:`` url; None if no scheme matches."""
        url = url.strip()

        scheme_to_method = (
            ("mailto:", ContactMethod.EMAIL),
            ("tel:", ContactMethod.PHONE),
            ("fax:", ContactMethod.FAX),
        )

        for prefix, method in scheme_to_method:
            if url.startswith(prefix):
                return cls(method, url.replace(prefix, "", 1))
|
||||
|
||||
92
music_kraken/objects/country.py
Normal file
92
music_kraken/objects/country.py
Normal file
File diff suppressed because one or more lines are too long
78
music_kraken/objects/formatted_text.py
Normal file
78
music_kraken/objects/formatted_text.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import pandoc
|
||||
|
||||
"""
|
||||
TODO
|
||||
implement in setup.py a skript to install pandocs
|
||||
https://pandoc.org/installing.html
|
||||
|
||||
!!!!!!!!!!!!!!!!!!IMPORTANT!!!!!!!!!!!!!!!!!!
|
||||
"""
|
||||
|
||||
|
||||
class FormattedText:
    """
    Text held as a pandoc document so it can be converted between
    plaintext, markdown and html.

    The ``self.html`` value should be saved to the database.
    """

    # the parsed pandoc document; None means "empty text"
    doc = None

    def __init__(
            self,
            plaintext: str = None,
            markdown: str = None,
            html: str = None
    ) -> None:
        # later setters win if several formats are passed at once
        self.set_plaintext(plaintext)
        self.set_markdown(markdown)
        self.set_html(html)

    def set_plaintext(self, plaintext: str):
        """Parse *plaintext* into the document; no-op when None."""
        if plaintext is None:
            return
        self.doc = pandoc.read(plaintext)

    def set_markdown(self, markdown: str):
        """Parse *markdown* into the document; no-op when None."""
        if markdown is None:
            return
        self.doc = pandoc.read(markdown, format="markdown")

    def set_html(self, html: str):
        """Parse *html* into the document; no-op when None."""
        if html is None:
            return
        self.doc = pandoc.read(html, format="html")

    def get_markdown(self) -> str:
        """Render the document as markdown; "" when empty."""
        if self.doc is None:
            return ""
        return pandoc.write(self.doc, format="markdown").strip()

    def get_html(self) -> str:
        """Render the document as html; "" when empty."""
        if self.doc is None:
            return ""
        return pandoc.write(self.doc, format="html").strip()

    def get_plaintext(self) -> str:
        """Render the document as plain text; "" when empty."""
        if self.doc is None:
            return ""
        return pandoc.write(self.doc, format="plain").strip()

    @property
    def is_empty(self) -> bool:
        return self.doc is None

    def __eq__(self, other) -> bool:
        # BUGFIX: the return annotation was the literal ``False`` instead of bool
        if type(other) != type(self):
            return False
        if self.is_empty and other.is_empty:
            return True

        return self.doc == other.doc

    def __str__(self) -> str:
        return self.plaintext

    plaintext = property(fget=get_plaintext, fset=set_plaintext)
    markdown = property(fget=get_markdown, fset=set_markdown)
    html = property(fget=get_html, fset=set_html)
|
||||
96
music_kraken/objects/lint_default_factories.py
Normal file
96
music_kraken/objects/lint_default_factories.py
Normal file
@@ -0,0 +1,96 @@
|
||||
from typing import List, TypeVar, Type
|
||||
|
||||
from .country import Language
|
||||
from .lyrics import Lyrics
|
||||
from .parents import OuterProxy
|
||||
from .song import Song, Album, Artist, Label
|
||||
from .source import Source
|
||||
from .target import Target
|
||||
|
||||
# type variable covering every class this lint script inspects
T = TypeVar('T', bound=OuterProxy)
# the OuterProxy subclasses whose _default_factories dicts get checked
ALL_CLASSES: List[Type[T]] = [Song, Album, Artist, Label, Source, Target, Lyrics]
|
||||
|
||||
|
||||
def print_lint_res(missing_values: dict):
    """Print *missing_values* formatted as a ``_default_factories`` dict literal."""
    entries = [f'\t"{name}": {factory},' for name, factory in missing_values.items()]

    print("_default_factories = {")
    for entry in entries:
        print(entry)
    print("}")
|
||||
|
||||
# def __init__(self, foo: str, bar) -> None: ...
|
||||
|
||||
def lint_type(cls: T):
    """
    Check one OuterProxy subclass for annotated attributes that are missing
    from its ``_default_factories`` dict, print a suggested dict via
    print_lint_res, and then print a suggested auto-generated ``__init__``.

    Operates purely on ``cls.__annotations__`` (which may hold strings or
    real types) and prints its findings; nothing is returned or modified.
    """
    # NOTE(review): all_values is populated nowhere — appears to be dead
    all_values: dict = {}
    missing_values: dict = {}

    # pass 1: find public annotated attributes without a default factory
    for key, value in cls.__annotations__.items():
        if value is None:
            continue

        # skip constants, private and dunder attributes
        if (not key.islower()) or key.startswith("_") or (key.startswith("__") and key.endswith("__")):
            continue

        if key in cls._default_factories:
            continue

        # guess a factory expression from the annotation
        factory = "lambda: None"
        if isinstance(value, str):
            # string annotation (forward reference)
            if value == "SourceCollection":
                factory = "SourceCollection"
            elif "collection" in value.lower():
                factory = "Collection"
            elif value.istitle():
                # looks like a bare class name: use it directly
                factory = value
        else:
            if value is Language:
                factory = 'Language.by_alpha_2("en")'
            else:
                try:
                    # if the type is zero-arg constructible it is its own factory
                    value()
                    factory = value.__name__
                except TypeError:
                    pass

        missing_values[key] = factory

    if len(missing_values) > 0:
        print(f"{cls.__name__}:")
        print_lint_res(missing_values)
        print()
    else:
        print(f"Everything is fine at {cls.__name__}")

    # pass 2: build the parameter list (p) and super() call args (s)
    # for a generated __init__
    p = []
    s = []
    for key, value in cls.__annotations__.items():
        has_default = key in cls._default_factories

        if not isinstance(value, str):
            value = value.__name__

        # the public __init__ takes plain lists instead of collections
        if key.endswith("_collection"):
            key = key.replace("_collection", "_list")

        if isinstance(value, str):
            if value.startswith("Collection[") and value.endswith("]"):
                value = value.replace("Collection", "List")

        if isinstance(value, str) and has_default:
            value = value + " = None"

        p.append(f'{key}: {value}')
        s.append(f'{key}={key}')
    p.append("**kwargs")
    s.append("**kwargs")

    print("# This is automatically generated")
    print(f"def __init__(self, {', '.join(p)}) -> None:")
    print(f"\tsuper().__init__({', '.join(s)})")
    print()
|
||||
|
||||
|
||||
def lint():
    """Run lint_type over every class in ALL_CLASSES, then print a blank line."""
    for i in ALL_CLASSES:
        lint_type(i)

    print()
|
||||
27
music_kraken/objects/lyrics.py
Normal file
27
music_kraken/objects/lyrics.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from typing import List
|
||||
from collections import defaultdict
|
||||
import pycountry
|
||||
|
||||
from .parents import OuterProxy
|
||||
from .source import Source, SourceCollection
|
||||
from .formatted_text import FormattedText
|
||||
from .country import Language
|
||||
|
||||
|
||||
class Lyrics(OuterProxy):
    """Lyrics of a song: formatted text plus the language they are written in."""

    # the lyrics themselves
    text: FormattedText
    # language of the lyrics; defaults to English via _default_factories
    language: Language

    # where these lyrics were scraped/pulled from
    source_collection: SourceCollection

    _default_factories = {
        "text": FormattedText,
        "language": lambda: Language.by_alpha_2("en"),

        "source_collection": SourceCollection,
    }

    # This is automatically generated
    # NOTE(review): ``source_list`` is forwarded to OuterProxy, which
    # presumably maps it onto ``source_collection`` — confirm in parents.py
    def __init__(self, text: FormattedText = None, language: Language = None, source_list: SourceCollection = None,
                 **kwargs) -> None:
        super().__init__(text=text, language=language, source_list=source_list, **kwargs)
|
||||
400
music_kraken/objects/metadata.py
Normal file
400
music_kraken/objects/metadata.py
Normal file
@@ -0,0 +1,400 @@
|
||||
from enum import Enum
|
||||
from typing import List, Dict, Tuple
|
||||
|
||||
from mutagen import id3
|
||||
import datetime
|
||||
|
||||
|
||||
class Mapping(Enum):
    """
    These frames belong to the id3 standard
    https://web.archive.org/web/20220830091059/https://id3.org/id3v2.4.0-frames
    https://id3lib.sourceforge.net/id3/id3v2com-00.html
    https://mutagen-specs.readthedocs.io/en/latest/id3/id3v2.4.0-frames.html

    NOTE: several members share a frame id (e.g. TRACKNUMBER/TOTALTRACKS on
    "TRCK"); Enum makes the later member an alias of the first, so the
    member order below must not change.
    """
    # Textframes
    TITLE = "TIT2"
    ISRC = "TSRC"
    LENGTH = "TLEN"  # in milliseconds
    # The 'Date' frame is a numeric string in the DDMM format containing the date for the recording. This field is always four characters long.
    DATE = "TDAT"
    # The 'Time' frame is a numeric string in the HHMM format containing the time for the recording. This field is always four characters long.
    TIME = "TIME"
    YEAR = "TYER"
    TRACKNUMBER = "TRCK"
    TOTALTRACKS = "TRCK"  # Stored in the same frame with TRACKNUMBER, separated by '/': e.g. '4/9'.
    TITLESORTORDER = "TSOT"
    ENCODING_SETTINGS = "TSSE"
    SUBTITLE = "TIT3"
    SET_SUBTITLE = "TSST"
    RELEASE_DATE = "TDRL"
    RECORDING_DATES = "TXXX"
    PUBLISHER_URL = "WPUB"
    PUBLISHER = "TPUB"
    RATING = "POPM"
    DISCNUMBER = "TPOS"
    MOVEMENT_COUNT = "MVIN"
    TOTALDISCS = "TPOS"
    ORIGINAL_RELEASE_DATE = "TDOR"
    ORIGINAL_ARTIST = "TOPE"
    ORIGINAL_ALBUM = "TOAL"
    MEDIA_TYPE = "TMED"
    LYRICIST = "TEXT"
    WRITER = "TEXT"
    ARTIST = "TPE1"
    LANGUAGE = "TLAN"  # https://en.wikipedia.org/wiki/ISO_639-2
    ITUNESCOMPILATION = "TCMP"
    REMIXED_BY = "TPE4"
    RADIO_STATION_OWNER = "TRSO"
    RADIO_STATION = "TRSN"
    INITIAL_KEY = "TKEY"
    OWNER = "TOWN"
    ENCODED_BY = "TENC"
    COPYRIGHT = "TCOP"
    GENRE = "TCON"
    GROUPING = "TIT1"
    CONDUCTOR = "TPE3"
    COMPOSERSORTORDER = "TSOC"
    COMPOSER = "TCOM"
    BPM = "TBPM"
    ALBUM_ARTIST = "TPE2"
    BAND = "TPE2"
    ARTISTSORTORDER = "TSOP"
    ALBUM = "TALB"
    ALBUMSORTORDER = "TSOA"
    ALBUMARTISTSORTORDER = "TSO2"
    TAGGING_TIME = "TDTG"

    # URL frames
    SOURCE_WEBPAGE_URL = "WOAS"
    FILE_WEBPAGE_URL = "WOAF"
    INTERNET_RADIO_WEBPAGE_URL = "WORS"
    ARTIST_WEBPAGE_URL = "WOAR"
    COPYRIGHT_URL = "WCOP"
    COMMERCIAL_INFORMATION_URL = "WCOM"
    PAYMEMT_URL = "WPAY"

    MOVEMENT_INDEX = "MVIN"
    MOVEMENT_NAME = "MVNM"

    UNSYNCED_LYRICS = "USLT"
    COMMENT = "COMM"

    @classmethod
    def get_text_instance(cls, key: str, value: str):
        """Build the mutagen text frame for *key* carrying *value* (UTF-8, encoding=3)."""
        return id3.Frames[key](encoding=3, text=value)

    @classmethod
    def get_url_instance(cls, key: str, url: str):
        """Build the mutagen url frame for *key*.

        NOTE(review): plain W*** url frames take no ``encoding`` parameter —
        confirm mutagen tolerates the extra kwarg here.
        """
        return id3.Frames[key](encoding=3, url=url)

    @classmethod
    def get_mutagen_instance(cls, attribute, value):
        """Build the matching mutagen frame for a Mapping member and its value.

        Returns None implicitly for frame ids that are neither text ('T...')
        nor url ('W...') frames.
        """
        key = attribute.value

        if key[0] == 'T':
            # a text field
            return cls.get_text_instance(key, value)
        if key[0] == "W":
            # an url field
            return cls.get_url_instance(key, value)
|
||||
|
||||
|
||||
class ID3Timestamp:
|
||||
def __init__(
|
||||
self,
|
||||
year: int = None,
|
||||
month: int = None,
|
||||
day: int = None,
|
||||
hour: int = None,
|
||||
minute: int = None,
|
||||
second: int = None
|
||||
):
|
||||
self.year = year
|
||||
self.month = month
|
||||
self.day = day
|
||||
self.hour = hour
|
||||
self.minute = minute
|
||||
self.second = second
|
||||
|
||||
self.has_year = year is not None
|
||||
self.has_month = month is not None
|
||||
self.has_day = day is not None
|
||||
self.has_hour = hour is not None
|
||||
self.has_minute = minute is not None
|
||||
self.has_second = second is not None
|
||||
|
||||
if not self.has_year:
|
||||
year = 1
|
||||
if not self.has_month:
|
||||
month = 1
|
||||
if not self.has_day:
|
||||
day = 1
|
||||
if not self.has_hour:
|
||||
hour = 1
|
||||
if not self.has_minute:
|
||||
minute = 1
|
||||
if not self.has_second:
|
||||
second = 1
|
||||
|
||||
self.date_obj = datetime.datetime(
|
||||
year=year,
|
||||
month=month,
|
||||
day=day,
|
||||
hour=hour,
|
||||
minute=minute,
|
||||
second=second
|
||||
)
|
||||
|
||||
def __hash__(self):
|
||||
return self.date_obj.__hash__()
|
||||
|
||||
def __lt__(self, other):
|
||||
return self.date_obj < other.date_obj
|
||||
|
||||
def __le__(self, other):
|
||||
return self.date_obj <= other.date_obj
|
||||
|
||||
def __gt__(self, other):
|
||||
return self.date_obj > other.date_obj
|
||||
|
||||
def __ge__(self, other):
|
||||
return self.date_obj >= other.date_obj
|
||||
|
||||
def __eq__(self, other):
|
||||
if type(other) != type(self):
|
||||
return False
|
||||
return self.date_obj == other.date_obj
|
||||
|
||||
def get_time_format(self) -> str:
|
||||
"""
|
||||
https://mutagen-specs.readthedocs.io/en/latest/id3/id3v2.4.0-structure.html
|
||||
|
||||
The timestamp fields are based on a subset of ISO 8601. When being as precise as possible the format of a
|
||||
time string is
|
||||
- yyyy-MM-ddTHH:mm:ss
|
||||
- (year[%Y], “-”, month[%m], “-”, day[%d], “T”, hour (out of 24)[%H], ”:”, minutes[%M], ”:”, seconds[%S])
|
||||
- %Y-%m-%dT%H:%M:%S
|
||||
but the precision may be reduced by removing as many time indicators as wanted. Hence valid timestamps are
|
||||
- yyyy
|
||||
- yyyy-MM
|
||||
- yyyy-MM-dd
|
||||
- yyyy-MM-ddTHH
|
||||
- yyyy-MM-ddTHH:mm
|
||||
- yyyy-MM-ddTHH:mm:ss
|
||||
All time stamps are UTC. For durations, use the slash character as described in 8601,
|
||||
and for multiple non-contiguous dates, use multiple strings, if allowed by the frame definition.
|
||||
|
||||
:return timestamp: as timestamp in the format of the id3 time as above described
|
||||
"""
|
||||
|
||||
if self.has_year and self.has_month and self.has_day and self.has_hour and self.has_minute and self.has_second:
|
||||
return "%Y-%m-%dT%H:%M:%S"
|
||||
if self.has_year and self.has_month and self.has_day and self.has_hour and self.has_minute:
|
||||
return "%Y-%m-%dT%H:%M"
|
||||
if self.has_year and self.has_month and self.has_day and self.has_hour:
|
||||
return "%Y-%m-%dT%H"
|
||||
if self.has_year and self.has_month and self.has_day:
|
||||
return "%Y-%m-%d"
|
||||
if self.has_year and self.has_month:
|
||||
return "%Y-%m"
|
||||
if self.has_year:
|
||||
return "%Y"
|
||||
return ""
|
||||
|
||||
def get_timestamp(self) -> str:
|
||||
time_format = self.get_time_format()
|
||||
return self.date_obj.strftime(time_format)
|
||||
|
||||
def get_timestamp_w_format(self) -> Tuple[str, str]:
|
||||
time_format = self.get_time_format()
|
||||
return time_format, self.date_obj.strftime(time_format)
|
||||
|
||||
@classmethod
|
||||
def fromtimestamp(cls, utc_timestamp: int):
|
||||
date_obj = datetime.datetime.fromtimestamp(utc_timestamp)
|
||||
|
||||
return cls(
|
||||
year=date_obj.year,
|
||||
month=date_obj.month,
|
||||
day=date_obj.day,
|
||||
hour=date_obj.hour,
|
||||
minute=date_obj.minute,
|
||||
second=date_obj.second
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def strptime(cls, time_stamp: str, format: str):
|
||||
"""
|
||||
day: "%d"
|
||||
month: "%b", "%B", "%m"
|
||||
year: "%y", "%Y"
|
||||
hour: "%H", "%I"
|
||||
minute: "%M"
|
||||
second: "%S"
|
||||
"""
|
||||
date_obj = datetime.datetime.strptime(time_stamp, format)
|
||||
|
||||
day = None
|
||||
if "%d" in format:
|
||||
day = date_obj.day
|
||||
month = None
|
||||
if any([i in format for i in ("%b", "%B", "%m")]):
|
||||
month = date_obj.month
|
||||
year = None
|
||||
if any([i in format for i in ("%y", "%Y")]):
|
||||
year = date_obj.year
|
||||
hour = None
|
||||
if any([i in format for i in ("%H", "%I")]):
|
||||
hour = date_obj.hour
|
||||
minute = None
|
||||
if "%M" in format:
|
||||
minute = date_obj.minute
|
||||
second = None
|
||||
if "%S" in format:
|
||||
second = date_obj.second
|
||||
|
||||
return cls(
|
||||
year=year,
|
||||
month=month,
|
||||
day=day,
|
||||
hour=hour,
|
||||
minute=minute,
|
||||
second=second
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def now(cls):
|
||||
date_obj = datetime.datetime.now()
|
||||
|
||||
return cls(
|
||||
year=date_obj.year,
|
||||
month=date_obj.month,
|
||||
day=date_obj.day,
|
||||
hour=date_obj.hour,
|
||||
minute=date_obj.minute,
|
||||
second=date_obj.second
|
||||
)
|
||||
|
||||
def strftime(self, format: str) -> str:
|
||||
return self.date_obj.strftime(format)
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.timestamp
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return self.timestamp
|
||||
|
||||
    # Read-only conveniences mirroring the getters above:
    # `timestamp` is the rendered, precision-trimmed date string,
    # `timeformat` the strftime pattern matching this object's precision.
    timestamp: str = property(fget=get_timestamp)
    timeformat: str = property(fget=get_time_format)
|
||||
|
||||
class Metadata:
    """
    Accumulates ID3 metadata as a mapping of frame (a ``Mapping`` enum
    member) to a list of raw values, and renders mutagen frame objects
    from it on demand.
    """

    # Null byte used to concatenate multiple values of a text frame,
    # as prescribed by the ID3v2.4 specification.
    NULL_BYTE: str = "\x00"

    # Key: a frame enum from `Mapping`; value: the list of raw values for
    # that frame.  The mutagen object for each frame is generated lazily.
    # (Quoted annotation so the name only has to resolve for type checkers.)
    id3_dict: Dict["Mapping", list]

    def __init__(self, id3_dict: Dict[any, list] = None) -> None:
        self.id3_dict = dict()
        if id3_dict is not None:
            self.add_metadata_dict(id3_dict)

    def __setitem__(self, frame: "Mapping", value_list: list, override_existing: bool = True):
        """
        Store ``value_list`` for ``frame``, silently dropping None and
        empty-string entries.  With ``override_existing=False`` the values
        are appended to any existing list instead of replacing it (the
        extra parameter means this method is also called directly, not
        only through subscript syntax).
        """
        if type(value_list) != list:
            raise ValueError(f"can only set attribute to list, not {type(value_list)}")

        # filter out empty values so they never end up in the tag
        new_val = [i for i in value_list if i not in {None, ''}]

        if len(new_val) == 0:
            return

        if override_existing:
            self.id3_dict[frame] = new_val
            return

        if frame not in self.id3_dict:
            self.id3_dict[frame] = new_val
            return

        self.id3_dict[frame].extend(new_val)

    def __getitem__(self, key):
        """Return the stored value list for ``key``, or None if the frame is unset."""
        if key not in self.id3_dict:
            return None
        return self.id3_dict[key]

    def delete_field(self, key: "Mapping"):
        """Remove and return the value list for ``key``, if present (else None)."""
        if key in self.id3_dict:
            return self.id3_dict.pop(key)

    def add_metadata_dict(self, metadata_dict: dict, override_existing: bool = True):
        """Bulk-insert every (frame, value list) pair of ``metadata_dict``."""
        for field_enum, value in metadata_dict.items():
            self.__setitem__(field_enum, value, override_existing=override_existing)

    def merge(self, other, override_existing: bool = False):
        """
        Adds the values of another Metadata object to this one.

        :param other: a value of the type MetadataAttribute.Metadata
        :param override_existing: replace existing value lists instead of extending them
        """
        self.add_metadata_dict(other.id3_dict, override_existing=override_existing)

    def merge_many(self, many_other):
        """Adds the values of many other Metadata objects to this one."""
        for other in many_other:
            self.merge(other)

    def get_id3_value(self, field):
        """
        Collapse the stored value list for ``field`` into the single value a
        mutagen frame expects.

        Version 2.4 of the ID3 specification prescribes that all text frames
        (IDs starting with "T", except "TXXX") may contain multiple values
        separated by a null character — those are joined with NULL_BYTE;
        every other frame uses the first value only.

        BUGFIX: conversion of ``int``/``ID3Timestamp`` entries previously
        happened *in place* on the stored list, so merely reading a value
        permanently replaced the raw objects inside ``id3_dict`` with
        strings.  Normalization now happens on a copy.
        """
        if field not in self.id3_dict:
            return None

        normalized = []
        for element in self.id3_dict[field]:
            # fast path: already the right type
            if type(element) == str:
                normalized.append(element)
                continue

            if type(element) == int:
                normalized.append(str(element))
                continue

            if type(element) == ID3Timestamp:
                normalized.append(element.timestamp)
                continue

            # unknown types pass through unchanged (same as before)
            normalized.append(element)

        if field.value[0].upper() == "T" and field.value.upper() != "TXXX":
            return self.NULL_BYTE.join(normalized)

        return normalized[0]

    def get_mutagen_object(self, field):
        """Build the mutagen frame instance for ``field`` from its collapsed value."""
        return Mapping.get_mutagen_instance(field, self.get_id3_value(field))

    def __str__(self) -> str:
        rows = []
        for key, value in self.id3_dict.items():
            rows.append(f"{key} - {str(value)}")
        return "\n".join(rows)

    def __iter__(self):
        """
        Returns a generator you can iterate through
        to directly tag a file with an id3 container.
        """
        # set the tagging timestamp to the current time
        self.__setitem__(Mapping.TAGGING_TIME, [ID3Timestamp.now()])

        for field in self.id3_dict:
            yield self.get_mutagen_object(field)
||||
257
music_kraken/objects/new_collection.py
Normal file
257
music_kraken/objects/new_collection.py
Normal file
@@ -0,0 +1,257 @@
|
||||
from __future__ import annotations

from collections import defaultdict
from typing import TypeVar, Generic, Dict, Iterable, Iterator, List, Optional

from .parents import OuterProxy
||||
|
||||
# Element type of a Collection: every stored element is an OuterProxy.
T = TypeVar('T', bound=OuterProxy)
||||
|
||||
|
||||
class Collection(Generic[T]):
    """
    A deduplicating container of OuterProxy objects that can contain other
    collections: the elements of contained collections are exposed through
    ``data``/``__len__``, and membership is tracked both by object ``id``
    and by each object's ``indexing_values``.
    """

    # own (non-inherited) elements
    _data: List[T]

    # attribute name -> set of indexing values currently mapped
    _indexed_values: Dict[str, set]
    # indexing value -> objects that carry it
    _indexed_to_objects: Dict[any, list]

    # flat view including the elements of all contained collections
    shallow_list = property(fget=lambda self: self.data)

    def __init__(
            self,
            data: Optional[Iterable[T]] = None,
            sync_on_append: Dict[str, "Collection"] = None,
            contain_given_in_attribute: Dict[str, "Collection"] = None,
            contain_attribute_in_given: Dict[str, "Collection"] = None,
            append_object_to_attribute: Dict[str, T] = None
    ) -> None:
        self._contains_ids = set()
        self._data = []
        self.upper_collections: List[Collection[T]] = []
        self.contained_collections: List[Collection[T]] = []

        # List of collection attributes that should be modified on append
        # Key: collection attribute (str) of appended element
        # Value: main collection to sync to
        self.sync_on_append: Dict[str, Collection] = sync_on_append or {}
        self.contain_given_in_attribute: Dict[str, Collection] = contain_given_in_attribute or {}
        self.contain_attribute_in_given: Dict[str, Collection] = contain_attribute_in_given or {}
        self.append_object_to_attribute: Dict[str, T] = append_object_to_attribute or {}

        self.contain_self_on_append: List[str] = []

        self._indexed_values = defaultdict(set)
        self._indexed_to_objects = defaultdict(list)

        self.extend(data)

    def _map_element(self, __object: T, from_map: bool = False):
        """Index __object and — unless called from a mapping cascade — wire up the configured relations."""
        self._contains_ids.add(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue

            self._indexed_values[name].add(value)
            self._indexed_to_objects[value].append(__object)

        if not from_map:
            for attribute, new_object in self.contain_given_in_attribute.items():
                __object.__getattribute__(attribute).contain_collection_inside(new_object)

            # BUGFIX: this loop previously iterated `contain_given_in_attribute`
            # a second time, which left `contain_attribute_in_given` completely
            # unused; the mirrored call direction shows this dict was intended.
            for attribute, new_object in self.contain_attribute_in_given.items():
                new_object.contain_collection_inside(__object.__getattribute__(attribute))

            for attribute, new_object in self.append_object_to_attribute.items():
                __object.__getattribute__(attribute).append(new_object, from_map=True)

    def _unmap_element(self, __object: T):
        """Remove __object from the id and indexing-value indexes."""
        self._contains_ids.remove(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value not in self._indexed_values[name]:
                continue

            try:
                self._indexed_to_objects[value].remove(__object)
            except ValueError:
                # the value maps to other objects only; leave it registered
                continue

            if not len(self._indexed_to_objects[value]):
                self._indexed_values[name].remove(value)

    def _contained_in_self(self, __object: T) -> bool:
        """True if this collection itself (ignoring sub-collections) holds __object or an equivalent."""
        if __object.id in self._contains_ids:
            return True

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                return True
        return False

    def _get_root_collections(self) -> List["Collection"]:
        """Return the topmost ancestors reachable by walking ``upper_collections``."""
        if not len(self.upper_collections):
            return [self]

        root_collections = []
        for upper_collection in self.upper_collections:
            root_collections.extend(upper_collection._get_root_collections())
        return root_collections

    @property
    def _is_root(self) -> bool:
        # a collection with no parents is a root
        return len(self.upper_collections) <= 0

    def _contained_in_sub(self, __object: T, break_at_first: bool = True) -> List["Collection"]:
        """Depth-first search for the collection(s) containing __object (self wins over children)."""
        results = []

        if self._contained_in_self(__object):
            return [self]

        for collection in self.contained_collections:
            results.extend(collection._contained_in_sub(__object, break_at_first=break_at_first))
            if break_at_first:
                return results

        return results

    def _get_parents_of_multiple_contained_children(self, __object: T):
        """Collect ancestors whose subtrees contain __object in two or more branches."""
        results = []
        if len(self.contained_collections) < 2 or self._contained_in_self(__object):
            return results

        count = 0

        for collection in self.contained_collections:
            sub_results = collection._get_parents_of_multiple_contained_children(__object)

            if len(sub_results) > 0:
                count += 1
                results.extend(sub_results)

        if count >= 2:
            results.append(self)

        return results

    def _merge_in_self(self, __object: T, from_map: bool = False):
        """
        1. find existing objects
        2. merge into existing object
        3. remap existing object
        """
        if __object.id in self._contains_ids:
            return

        existing_object: Optional[T] = None

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                existing_object = self._indexed_to_objects[value][0]
                if existing_object.id == __object.id:
                    return None

                break

        if existing_object is None:
            return None

        existing_object.merge(__object, replace_all_refs=True)

        # just a check if it really worked
        if existing_object.id != __object.id:
            raise ValueError("This should NEVER happen. Merging doesn't work.")

        self._map_element(existing_object, from_map=from_map)

    def contains(self, __object: T) -> bool:
        """True if __object is present in this collection or any contained one."""
        return len(self._contained_in_sub(__object)) > 0

    def _append(self, __object: T, from_map: bool = False):
        # keep the configured sibling collections in sync with the new element
        for attribute, to_sync_with in self.sync_on_append.items():
            to_sync_with.sync_with_other_collection(__object.__getattribute__(attribute))

        self._map_element(__object, from_map=from_map)
        self._data.append(__object)

    def append(self, __object: Optional[T], already_is_parent: bool = False, from_map: bool = False):
        """
        Add __object, deduplicating by id and indexing values; equivalent
        objects that already exist are merged instead of duplicated.
        """
        if __object is None:
            return
        if __object.id in self._contains_ids:
            return

        exists_in_collection = self._contained_in_sub(__object)
        if len(exists_in_collection) and self is exists_in_collection[0]:
            # assuming that the object already is contained in the correct collections
            if not already_is_parent:
                self._merge_in_self(__object, from_map=from_map)
            return

        if not len(exists_in_collection):
            self._append(__object, from_map=from_map)
        else:
            exists_in_collection[0]._merge_in_self(__object, from_map=from_map)

        if not already_is_parent or not self._is_root:
            for parent_collection in self._get_parents_of_multiple_contained_children(__object):
                parent_collection.append(__object, already_is_parent=True, from_map=from_map)

    def extend(self, __iterable: Optional[Iterable[T]]):
        """Append every element of __iterable (None is a no-op)."""
        if __iterable is None:
            return

        for __object in __iterable:
            self.append(__object)

    def sync_with_other_collection(self, equal_collection: "Collection"):
        """
        If two collections always need to have the same values, this can be used.

        Internally:
        1. import the data from other to self
            - _data
            - contained_collections
        2. replace all refs from the other object, with refs from this object
        """
        if equal_collection is self:
            return

        # don't add the elements from the subelements from the other collection.
        # this will be done in the next step.
        self.extend(equal_collection._data)
        # add all submodules
        for equal_sub_collection in equal_collection.contained_collections:
            self.contain_collection_inside(equal_sub_collection)

        # now the ugly part
        # replace all refs of the other element with this one
        # NOTE(review): `_risky_merge` is not defined in this module — it is
        # presumably provided by the hacking/MetaClass machinery; confirm.
        self._risky_merge(equal_collection)

    def contain_collection_inside(self, sub_collection: "Collection"):
        """
        This collection will ALWAYS contain everything from the passed in collection
        """
        if sub_collection in self.contained_collections:
            return

        self.contained_collections.append(sub_collection)
        sub_collection.upper_collections.append(self)

    @property
    def data(self) -> List[T]:
        """Flattened element list: own elements first, then all contained collections'."""
        return [*self._data,
                *(__object for collection in self.contained_collections for __object in collection.shallow_list)]

    def __len__(self) -> int:
        return len(self._data) + sum(len(collection) for collection in self.contained_collections)

    def __iter__(self) -> Iterator[T]:
        # NOTE(review): only yields this collection's own elements; contained
        # collections are not traversed (unlike `data`/`__len__`) — confirm
        # that asymmetry is intended.
        for element in self._data:
            yield element
||||
256
music_kraken/objects/old_collection.py
Normal file
256
music_kraken/objects/old_collection.py
Normal file
@@ -0,0 +1,256 @@
|
||||
from typing import List, Iterable, Iterator, Optional, TypeVar, Generic, Dict, Type
|
||||
from collections import defaultdict
|
||||
|
||||
from .parents import DatabaseObject
|
||||
from ..utils.support_classes.hacking import MetaClass
|
||||
|
||||
# Element type of the legacy Collection: every element is a DatabaseObject.
T = TypeVar('T', bound=DatabaseObject)
||||
|
||||
|
||||
class Collection(Generic[T]):
    """
    Legacy deduplicating container of DatabaseObjects.

    Can contain other collections; their elements are exposed through
    ``data``/``__len__``, and membership is tracked both by object ``id``
    and by each object's ``indexing_values``.
    """

    # own (non-inherited) elements
    _data: List[T]

    # attribute name -> set of indexing values currently mapped
    _indexed_values: Dict[str, set]
    # indexing value -> objects that carry it
    _indexed_to_objects: Dict[any, list]

    # flat view including the elements of all contained collections
    shallow_list = property(fget=lambda self: self.data)

    def __init__(
            self, data: Optional[Iterable[T]] = None,
            sync_on_append: Dict[str, "Collection"] = None,
            contain_given_in_attribute: Dict[str, "Collection"] = None,
            contain_attribute_in_given: Dict[str, "Collection"] = None,
            append_object_to_attribute: Dict[str, DatabaseObject] = None
    ) -> None:
        self._contains_ids = set()
        self._data = []
        self.upper_collections: List[Collection[T]] = []
        self.contained_collections: List[Collection[T]] = []

        # List of collection attributes that should be modified on append
        # Key: collection attribute (str) of appended element
        # Value: main collection to sync to
        self.sync_on_append: Dict[str, Collection] = sync_on_append or {}
        self.contain_given_in_attribute: Dict[str, Collection] = contain_given_in_attribute or {}
        # NOTE(review): this dict is stored but never read below (see
        # `_map_element`); confirm whether that is intentional.
        self.contain_attribute_in_given: Dict[str, Collection] = contain_attribute_in_given or {}
        self.append_object_to_attribute: Dict[str, DatabaseObject] = append_object_to_attribute or {}

        self.contain_self_on_append: List[str] = []

        self._indexed_values = defaultdict(set)
        self._indexed_to_objects = defaultdict(list)

        self.extend(data)

    def _map_element(self, __object: T, from_map: bool = False):
        # register the object's id and every non-None indexing value
        self._contains_ids.add(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue

            self._indexed_values[name].add(value)
            self._indexed_to_objects[value].append(__object)

        if not from_map:
            for attribute, new_object in self.contain_given_in_attribute.items():
                __object.__getattribute__(attribute).contain_collection_inside(new_object)

            # NOTE(review): this loop iterates `contain_given_in_attribute` a
            # second time; judging by the mirrored call direction it was
            # probably meant to iterate `contain_attribute_in_given` — confirm.
            for attribute, new_object in self.contain_given_in_attribute.items():
                new_object.contain_collection_inside(__object.__getattribute__(attribute))

            for attribute, new_object in self.append_object_to_attribute.items():
                __object.__getattribute__(attribute).append(new_object, from_map=True)

    def _unmap_element(self, __object: T):
        # remove __object from the id and indexing-value indexes
        self._contains_ids.remove(__object.id)

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value not in self._indexed_values[name]:
                continue

            try:
                self._indexed_to_objects[value].remove(__object)
            except ValueError:
                # the value maps to other objects only; leave it registered
                continue

            if not len(self._indexed_to_objects[value]):
                self._indexed_values[name].remove(value)

    def _contained_in_self(self, __object: T) -> bool:
        # True if this collection itself (ignoring sub-collections)
        # holds __object or an object with an equal indexing value
        if __object.id in self._contains_ids:
            return True

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                return True
        return False

    def _get_root_collections(self) -> List["Collection"]:
        # topmost ancestors reachable by walking `upper_collections`
        if not len(self.upper_collections):
            return [self]

        root_collections = []
        for upper_collection in self.upper_collections:
            root_collections.extend(upper_collection._get_root_collections())
        return root_collections

    @property
    def _is_root(self) -> bool:
        # a collection with no parents is a root
        return len(self.upper_collections) <= 0

    def _contained_in_sub(self, __object: T, break_at_first: bool = True) -> List["Collection"]:
        # depth-first search for the collection(s) that contain __object;
        # with break_at_first, returns after inspecting the first child branch
        results = []

        if self._contained_in_self(__object):
            return [self]

        for collection in self.contained_collections:
            results.extend(collection._contained_in_sub(__object, break_at_first=break_at_first))
            if break_at_first:
                return results

        return results

    def _get_parents_of_multiple_contained_children(self, __object: T):
        # collect ancestors whose subtrees report __object in 2+ branches
        results = []
        if len(self.contained_collections) < 2 or self._contained_in_self(__object):
            return results

        count = 0

        for collection in self.contained_collections:
            sub_results = collection._get_parents_of_multiple_contained_children(__object)

            if len(sub_results) > 0:
                count += 1
                results.extend(sub_results)

        if count >= 2:
            results.append(self)

        return results

    def _merge_in_self(self, __object: T, from_map: bool = False):
        """
        1. find existing objects
        2. merge into existing object
        3. remap existing object
        """
        if __object.id in self._contains_ids:
            return

        existing_object: DatabaseObject = None

        for name, value in __object.indexing_values:
            if value is None:
                continue
            if value in self._indexed_values[name]:
                # first registered object with an equal indexing value wins
                existing_object = self._indexed_to_objects[value][0]
                if existing_object.id == __object.id:
                    return None

                break

        if existing_object is None:
            return None

        existing_object.merge(__object, replace_all_refs=True)

        # just a check if it really worked
        if existing_object.id != __object.id:
            raise ValueError("This should NEVER happen. Merging doesn't work.")

        self._map_element(existing_object, from_map=from_map)

    def contains(self, __object: T) -> bool:
        # True if __object lives in this collection or any contained one
        return len(self._contained_in_sub(__object)) > 0

    def _append(self, __object: T, from_map: bool = False):
        # keep the configured sibling collections in sync with the new element
        for attribute, to_sync_with in self.sync_on_append.items():
            pass  # (leftover debugging no-op)
            to_sync_with.sync_with_other_collection(__object.__getattribute__(attribute))

        self._map_element(__object, from_map=from_map)
        self._data.append(__object)

    def append(self, __object: Optional[T], already_is_parent: bool = False, from_map: bool = False):
        # add __object, deduplicating by id and indexing values; equivalent
        # existing objects are merged instead of duplicated
        if __object is None:
            return
        if __object.id in self._contains_ids:
            return

        exists_in_collection = self._contained_in_sub(__object)
        if len(exists_in_collection) and self is exists_in_collection[0]:
            # assuming that the object already is contained in the correct collections
            if not already_is_parent:
                self._merge_in_self(__object, from_map=from_map)
            return

        if not len(exists_in_collection):
            self._append(__object, from_map=from_map)
        else:
            pass  # (leftover debugging no-op)
            exists_in_collection[0]._merge_in_self(__object, from_map=from_map)

        if not already_is_parent or not self._is_root:
            for parent_collection in self._get_parents_of_multiple_contained_children(__object):
                pass  # (leftover debugging no-op)
                parent_collection.append(__object, already_is_parent=True, from_map=from_map)

    def extend(self, __iterable: Optional[Iterable[T]]):
        # append every element of __iterable (None is a no-op)
        if __iterable is None:
            return

        for __object in __iterable:
            self.append(__object)

    def sync_with_other_collection(self, equal_collection: "Collection"):
        """
        If two collections always need to have the same values, this can be used.

        Internally:
        1. import the data from other to self
            - _data
            - contained_collections
        2. replace all refs from the other object, with refs from this object
        """
        if equal_collection is self:
            return

        # don't add the elements from the subelements from the other collection.
        # this will be done in the next step.
        self.extend(equal_collection._data)
        # add all submodules
        for equal_sub_collection in equal_collection.contained_collections:
            self.contain_collection_inside(equal_sub_collection)

        # now the ugly part
        # replace all refs of the other element with this one
        # NOTE(review): `_risky_merge` is not defined in this module — it is
        # presumably provided by the hacking/MetaClass machinery; confirm.
        self._risky_merge(equal_collection)

    def contain_collection_inside(self, sub_collection: "Collection"):
        """
        This collection will ALWAYS contain everything from the passed in collection
        """
        if sub_collection in self.contained_collections:
            return

        self.contained_collections.append(sub_collection)
        sub_collection.upper_collections.append(self)

    @property
    def data(self) -> List[T]:
        # flattened element list: own elements first, then contained collections'
        return [*self._data,
                *(__object for collection in self.contained_collections for __object in collection.shallow_list)]

    def __len__(self) -> int:
        return len(self._data) + sum(len(collection) for collection in self.contained_collections)

    def __iter__(self) -> Iterator[T]:
        # NOTE(review): only yields this collection's own elements; contained
        # collections are not traversed (unlike `data`/`__len__`) — confirm.
        for element in self._data:
            yield element
||||
40
music_kraken/objects/option.py
Normal file
40
music_kraken/objects/option.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from typing import TYPE_CHECKING, List, Iterable
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from .parents import DatabaseObject
|
||||
|
||||
|
||||
class Options:
    """Ordered list of DatabaseObject choices presented to the user."""

    def __init__(self, option_list: List['DatabaseObject'] = None):
        # a missing (or empty) list is treated as no options
        self._data: List['DatabaseObject'] = option_list or list()

    def __str__(self):
        lines = (f"{index:02d}: {option.option_string}" for index, option in enumerate(self._data))
        return "\n".join(lines)

    def __iter__(self):
        yield from self._data

    def append(self, element: 'DatabaseObject'):
        """Add a single option to the end of the list."""
        self._data.append(element)

    def extend(self, iterable: Iterable['DatabaseObject']):
        """Append every element of ``iterable``."""
        for element in iterable:
            self.append(element)

    def get_next_options(self, index: int) -> 'Options':
        """Return the follow-up options of the object at ``index``."""
        if index >= len(self._data):
            raise ValueError("Index out of bounds")

        return self._data[index].options

    def __getitem__(self, item: int) -> 'DatabaseObject':
        if type(item) != int:
            raise TypeError("Key needs to be an Integer")
        if item >= len(self._data):
            raise ValueError("Index out of bounds")

        return self._data[item]

    def __len__(self) -> int:
        return len(self._data)
||||
220
music_kraken/objects/parents.py
Normal file
220
music_kraken/objects/parents.py
Normal file
@@ -0,0 +1,220 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
from collections import defaultdict
|
||||
from functools import lru_cache
|
||||
|
||||
from typing import Optional, Dict, Tuple, List, Type, Generic, Any, TypeVar, Set
|
||||
|
||||
from .metadata import Metadata
|
||||
from ..utils.config import logging_settings
|
||||
from ..utils.shared import HIGHEST_ID
|
||||
from ..utils.support_classes.hacking import MetaClass
|
||||
|
||||
# Logger for this module, configured centrally via the settings object.
LOGGER = logging_settings["object_logger"]

# Type variable for APIs returning instances of the concrete proxy subclass.
P = TypeVar("P", bound="OuterProxy")
||||
|
||||
|
||||
class InnerData:
    """
    Core data holder behind every OuterProxy.

    Attributes are set straight from kwargs and can be merged; when two
    wrappers merge, one simply swaps its InnerData for the other's and the
    orphaned instance is garbage collected (the outer class proxies this one).
    """

    def __init__(self, **kwargs):
        for attribute, value in kwargs.items():
            setattr(self, attribute, value)

    def __merge__(self, __other: InnerData, override: bool = False):
        """
        Fold __other's attributes into self.

        TODO: "is default" is totally ignored

        :param __other: the InnerData whose attributes get absorbed
        :param override: when True, existing non-mergeable values are replaced
        :return:
        """
        for key, value in __other.__dict__.copy().items():
            # self doesn't know this attribute yet -> just take it
            if key not in self.__dict__:
                setattr(self, key, value)
                continue

            # mergeable values (anything implementing __merge__) merge recursively
            current = getattr(self, key)
            if hasattr(type(current), "__merge__"):
                current.__merge__(value, override)
                continue

            # plain values: replace only on request
            if override:
                setattr(self, key, value)
||||
|
||||
|
||||
class OuterProxy:
    """
    Wraps the inner data, and provides apis, to naturally access those values.

    Public attribute reads/writes are transparently forwarded to the wrapped
    InnerData instance, so two merged proxies sharing one InnerData always
    see the same state.
    """

    # per-subclass map of attribute name -> zero-arg factory used to fill
    # missing constructor kwargs (e.g. empty collections)
    _default_factories: dict = {}
    # attribute names that must always resolve on the proxy itself, never on _inner
    _outer_attribute: Set[str] = {"options", "metadata", "indexing_values", "option_string"}

    # names of collection attributes pointing "down"/"up" the object graph;
    # subclasses override these
    DOWNWARDS_COLLECTION_STRING_ATTRIBUTES = tuple()
    UPWARDS_COLLECTION_STRING_ATTRIBUTES = tuple()

    def __init__(self, _id: int = None, dynamic: bool = False, **kwargs):
        """
        Assign an id, apply the subclass default factories, split ``*_list``
        kwargs into their ``*_collection`` counterparts, and wrap everything
        in a fresh InnerData.
        """
        _automatic_id: bool = False

        if _id is None and not dynamic:
            """
            generates a random integer id
            the range is defined in the config
            """
            _id = random.randint(0, HIGHEST_ID)
            _automatic_id = True

        kwargs["automatic_id"] = _automatic_id
        kwargs["id"] = _id
        kwargs["dynamic"] = dynamic

        # fill in missing values with the subclass-declared factories
        for name, factory in type(self)._default_factories.items():
            if kwargs.get(name, None) is None:
                kwargs[name] = factory()

        # "xyz_list" kwargs are initial data for the "xyz_collection"
        # attribute; collect them here and feed them in once the
        # collections exist
        collection_data: Dict[str, list] = {}
        for name, value in kwargs.copy().items():
            if isinstance(value, list) and name.endswith("_list"):
                collection_name = name.replace("_list", "_collection")
                collection_data[collection_name] = value

                del kwargs[name]

        self._inner: InnerData = InnerData(**kwargs)
        self.__init_collections__()

        for name, data_list in collection_data.items():
            collection = self._inner.__getattribute__(name)
            collection.extend(data_list)

            self._inner.__setattr__(name, collection)

    def __init_collections__(self):
        # hook for subclasses to create their Collection attributes
        pass

    def __getattribute__(self, __name: str) -> Any:
        """
        Returns the attribute of _inner if the attribute exists,
        else it returns the attribute of self.

        That the _inner gets checked first is essential for the type hints.
        :param __name:
        :return:
        """
        # private names, declared outer attributes and CONSTANTS always
        # resolve on the proxy itself (avoids recursing into _inner)
        if __name.startswith("_") or __name in self._outer_attribute or __name.isupper():
            return object.__getattribute__(self, __name)

        _inner: InnerData = super().__getattribute__("_inner")
        try:
            return _inner.__getattribute__(__name)
        except AttributeError:
            return super().__getattribute__(__name)

    def __setattr__(self, __name, __value):
        # public attributes live on _inner (once it exists); private ones
        # and everything set before _inner is created live on the proxy
        if not __name.startswith("_") and hasattr(self, "_inner"):
            _inner: InnerData = super().__getattribute__("_inner")
            return _inner.__setattr__(__name, __value)

        return super().__setattr__(__name, __value)

    def _add_other_db_objects(self, object_type: Type[OuterProxy], object_list: List[OuterProxy]):
        # hook: subclasses route related objects into the right collections
        pass

    def add_list_of_other_objects(self, object_list: List[OuterProxy]):
        # group the objects by concrete type, then hand each group to the hook
        d: Dict[Type[OuterProxy], List[OuterProxy]] = defaultdict(list)

        for db_object in object_list:
            d[type(db_object)].append(db_object)

        for key, value in d.items():
            self._add_other_db_objects(key, value)

    def __hash__(self):
        """
        :raise: IsDynamicException
        :return:
        """
        # NOTE(review): despite the docstring, no exception is raised here;
        # dynamic objects hash by the identity of their inner data instead.
        if self.dynamic:
            return id(self._inner)

        return self.id

    def __eq__(self, other: Any):
        # equality is purely hash-based (id, or inner identity for dynamic)
        return self.__hash__() == other.__hash__()

    def merge(self, __other: Optional[OuterProxy], override: bool = False):
        """
        1. merges the data of __other in self
        2. replaces the data of __other with the data of self

        :param __other:
        :param override:
        :return:
        """
        if __other is None:
            _ = "debug"
            return

        self._inner.__merge__(__other._inner, override=override)
        # after this, both proxies share one InnerData instance
        __other._inner = self._inner

    @property
    def metadata(self) -> Metadata:
        """
        This is an interface.
        :return:
        """
        return Metadata()

    @property
    def options(self) -> List[P]:
        # interface: subclasses return the list of user-selectable results
        return [self]

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """
        This is an interface.
        It is supposed to return a map of the name and values for all important attributes.
        This helps in comparing classes for equal data (e.g. being the same song but different attributes)

        TODO
        Rewrite this approach into a approach, that is centered around statistics, and not binaries.
        Instead of: one of this matches, it is the same
        This: If enough attributes are similar enough, they are the same

        Returns:
            List[Tuple[str, object]]: the first element in the tuple is the name of the attribute, the second the value.
        """

        return []

    @property
    @lru_cache()
    def all_collections(self):
        # NOTE(review): lru_cache on a method keys on `self` and keeps every
        # instance alive for the cache's lifetime — consider cached_property.
        r = []

        for key in self._default_factories:
            val = self._inner.__getattribute__(key)
            if hasattr(val, "__is_collection__"):
                r.append(val)

        return r

    def __repr__(self):
        return f"{type(self).__name__}({', '.join(key + ': ' + str(val) for key, val in self.indexing_values)})"
||||
688
music_kraken/objects/song.py
Normal file
688
music_kraken/objects/song.py
Normal file
@@ -0,0 +1,688 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
from collections import defaultdict
|
||||
from typing import List, Optional, Dict, Tuple, Type, Union
|
||||
|
||||
import pycountry
|
||||
|
||||
from ..utils.enums.album import AlbumType, AlbumStatus
|
||||
from .collection import Collection
|
||||
from .formatted_text import FormattedText
|
||||
from .lyrics import Lyrics
|
||||
from .contact import Contact
|
||||
from .metadata import (
|
||||
Mapping as id3Mapping,
|
||||
ID3Timestamp,
|
||||
Metadata
|
||||
)
|
||||
from .option import Options
|
||||
from .parents import OuterProxy, P
|
||||
from .source import Source, SourceCollection
|
||||
from .target import Target
|
||||
from .country import Language, Country
|
||||
from ..utils.string_processing import unify
|
||||
|
||||
from .parents import OuterProxy as Base
|
||||
|
||||
from ..utils.config import main_settings
|
||||
|
||||
"""
|
||||
All Objects dependent
|
||||
"""
|
||||
|
||||
# pycountry exposes no public country class, so derive the concrete type
# from an instance to allow isinstance checks against it.
CountryTyping = type(list(pycountry.countries)[0])
# separator used when joining multiple names in user-facing option strings
OPTION_STRING_DELIMITER = " | "
|
||||
|
||||
|
||||
class Song(Base):
    """A single track, linked to its albums, artists, lyrics, sources and download targets."""

    title: str
    unified_title: str      # normalized title used for duplicate matching
    isrc: str
    length: int             # track length; unit not evident from this file — TODO confirm (ms vs s)
    genre: str
    note: FormattedText
    tracksort: int          # 1-based position on the album; 0 means "not set yet"

    source_collection: SourceCollection
    target_collection: Collection[Target]
    lyrics_collection: Collection[Lyrics]
    main_artist_collection: Collection[Artist]
    feature_artist_collection: Collection[Artist]
    album_collection: Collection[Album]

    # factories OuterProxy uses to lazily fill attributes that were not set
    _default_factories = {
        "note": FormattedText,
        "length": lambda: 0,
        "source_collection": SourceCollection,
        "target_collection": Collection,
        "lyrics_collection": Collection,

        "main_artist_collection": Collection,
        "album_collection": Collection,
        "feature_artist_collection": Collection,

        "title": lambda: "",
        "unified_title": lambda: None,
        "isrc": lambda: None,
        "genre": lambda: None,

        "tracksort": lambda: 0,
    }

    def __init__(self, title: str = "", unified_title: str = None, isrc: str = None, length: int = None,
                 genre: str = None, note: FormattedText = None, source_list: List[Source] = None,
                 target_list: List[Target] = None, lyrics_list: List[Lyrics] = None,
                 main_artist_list: List[Artist] = None, feature_artist_list: List[Artist] = None,
                 album_list: List[Album] = None, tracksort: int = 0, **kwargs) -> None:
        # NOTE(review): forwards locals() wholesale (including `self` and a
        # literal `kwargs` entry) to OuterProxy's reflective constructor,
        # unlike the sibling classes which call super().__init__ with explicit
        # keywords.  Left unchanged because OuterProxy's handling of the extra
        # keys is not visible from this file — confirm before normalizing.
        Base.__init__(**locals())

    # collections that point "up" the hierarchy (song -> album -> artist)
    UPWARDS_COLLECTION_STRING_ATTRIBUTES = ("album_collection", "main_artist_collection", "feature_artist_collection")

    def __init_collections__(self) -> None:
        """Wire the cross-collection sync rules for this song's collections."""
        # albums added to this song must keep their artists inside this
        # song's main-artist collection, and must list this song back
        self.album_collection.contain_given_in_attribute = {
            "artist_collection": self.main_artist_collection,
        }
        self.album_collection.append_object_to_attribute = {
            "song_collection": self,
        }

        self.main_artist_collection.contain_given_in_attribute = {
            "main_album_collection": self.album_collection
        }
        self.feature_artist_collection.append_object_to_attribute = {
            "feature_song_collection": self
        }

    def _add_other_db_objects(self, object_type: Type[OuterProxy], object_list: List[OuterProxy]):
        """Route a list of related objects into the matching collection.

        BUG FIX: the original tested ``isinstance(object_list, Lyrics/Artist/Album)``,
        but ``object_list`` is always a list, so no branch could ever match and
        the related objects were silently dropped.  Dispatch on ``object_type``
        instead, exactly like Album/Artist/Label do.
        """
        if object_type is Song:
            return

        if object_type is Lyrics:
            self.lyrics_collection.extend(object_list)
            return

        if object_type is Artist:
            self.main_artist_collection.extend(object_list)
            return

        if object_type is Album:
            self.album_collection.extend(object_list)
            return

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this song."""
        return [
            ('id', self.id),
            ('title', self.unified_title),
            ('isrc', self.isrc),
            *[('url', source.url) for source in self.source_collection]
        ]

    @property
    def metadata(self) -> Metadata:
        """Merged ID3 metadata of the song, its albums, artists and lyrics."""
        metadata = Metadata({
            id3Mapping.TITLE: [self.title],
            id3Mapping.ISRC: [self.isrc],
            id3Mapping.LENGTH: [self.length],
            id3Mapping.GENRE: [self.genre],
            id3Mapping.TRACKNUMBER: [self.tracksort_str]
        })

        # metadata.merge_many([s.get_song_metadata() for s in self.source_collection]) album sources have no relevant metadata for id3
        metadata.merge_many([a.metadata for a in self.album_collection])
        metadata.merge_many([a.metadata for a in self.main_artist_collection])
        metadata.merge_many([a.metadata for a in self.feature_artist_collection])
        metadata.merge_many([lyrics.metadata for lyrics in self.lyrics_collection])

        return metadata

    def get_artist_credits(self) -> str:
        """Render ``main artists feat. feature artists`` as a display string."""
        main_artists = ", ".join([artist.name for artist in self.main_artist_collection])
        feature_artists = ", ".join([artist.name for artist in self.feature_artist_collection])

        if len(feature_artists) == 0:
            return main_artists
        return f"{main_artists} feat. {feature_artists}"

    def __repr__(self) -> str:
        return f"Song(\"{self.title}\")"

    @property
    def option_string(self) -> str:
        """Single-line description shown when presenting this song as an option."""
        return f"{self.__repr__()} " \
               f"from Album({OPTION_STRING_DELIMITER.join(album.title for album in self.album_collection)}) " \
               f"by Artist({OPTION_STRING_DELIMITER.join(artist.name for artist in self.main_artist_collection)}) " \
               f"feat. Artist({OPTION_STRING_DELIMITER.join(artist.name for artist in self.feature_artist_collection)})"

    @property
    def options(self) -> List[P]:
        """All selectable related objects: artists first, then albums, then the song itself."""
        options = self.main_artist_collection.shallow_list
        options.extend(self.feature_artist_collection)
        options.extend(self.album_collection)
        options.append(self)
        return options

    @property
    def tracksort_str(self) -> str:
        """
        if the album tracklist is empty, it sets it length to 1, this song has to be on the Album
        :returns id3_tracksort: {song_position}/{album.length_of_tracklist}
        """
        if len(self.album_collection) == 0:
            return f"{self.tracksort}"

        return f"{self.tracksort}/{len(self.album_collection[0].song_collection) or 1}"
|
||||
|
||||
|
||||
"""
|
||||
All objects dependent on Album
|
||||
"""
|
||||
|
||||
|
||||
class Album(Base):
    """An album/release, linking its songs, artists and labels."""

    title: str
    unified_title: str      # normalized title used for duplicate matching
    album_status: AlbumStatus
    album_type: AlbumType
    language: Language
    date: ID3Timestamp
    barcode: str
    albumsort: int          # position within the artist's discography
    notes: FormattedText

    source_collection: SourceCollection
    artist_collection: Collection[Artist]
    song_collection: Collection[Song]
    label_collection: Collection[Label]

    # factories OuterProxy uses to lazily fill attributes that were not set
    _default_factories = {
        "title": lambda: None,
        "unified_title": lambda: None,
        "album_status": lambda: None,
        "barcode": lambda: None,
        "albumsort": lambda: None,

        "album_type": lambda: AlbumType.OTHER,
        "language": lambda: Language.by_alpha_2("en"),
        "date": ID3Timestamp,
        "notes": FormattedText,

        "source_collection": SourceCollection,
        "artist_collection": Collection,
        "song_collection": Collection,
        "label_collection": Collection,
    }

    # This is automatically generated
    def __init__(self, title: str = None, unified_title: str = None, album_status: AlbumStatus = None,
                 album_type: AlbumType = None, language: Language = None, date: ID3Timestamp = None,
                 barcode: str = None, albumsort: int = None, notes: FormattedText = None,
                 source_list: List[Source] = None, artist_list: List[Artist] = None, song_list: List[Song] = None,
                 label_list: List[Label] = None, **kwargs) -> None:
        super().__init__(title=title, unified_title=unified_title, album_status=album_status, album_type=album_type,
                         language=language, date=date, barcode=barcode, albumsort=albumsort, notes=notes,
                         source_list=source_list, artist_list=artist_list, song_list=song_list, label_list=label_list,
                         **kwargs)

    DOWNWARDS_COLLECTION_STRING_ATTRIBUTES = ("song_collection",)
    UPWARDS_COLLECTION_STRING_ATTRIBUTES = ("artist_collection", "label_collection")

    def __init_collections__(self):
        # songs added here must keep their main artists inside this album's
        # artist collection
        self.song_collection.contain_attribute_in_given = {
            "main_artist_collection": self.artist_collection
        }

    def _add_other_db_objects(self, object_type: Type[OuterProxy], object_list: List[OuterProxy]):
        """Route a list of related objects into the matching collection."""
        if object_type is Song:
            self.song_collection.extend(object_list)
            return

        if object_type is Artist:
            self.artist_collection.extend(object_list)
            return

        if object_type is Album:
            return

        if object_type is Label:
            self.label_collection.extend(object_list)
            return

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this album."""
        return [
            ('id', self.id),
            ('title', self.unified_title),
            ('barcode', self.barcode),
            *[('url', source.url) for source in self.source_collection]
        ]

    @property
    def metadata(self) -> Metadata:
        """
        ID3 metadata contributed by the album.

        TODO
        - barcode
        :return:
        """
        return Metadata({
            id3Mapping.ALBUM: [self.title],
            id3Mapping.COPYRIGHT: [self.copyright],
            id3Mapping.LANGUAGE: [self.iso_639_2_lang],
            id3Mapping.ALBUM_ARTIST: [a.name for a in self.artist_collection],
            # TDAT wants DDMM, TIME wants HHMM
            id3Mapping.DATE: [self.date.strftime("%d%m")] if self.date.has_year and self.date.has_month else [],
            id3Mapping.TIME: [self.date.strftime("%H%M")] if self.date.has_hour and self.date.has_minute else [],
            id3Mapping.YEAR: [str(self.date.year).zfill(4)] if self.date.has_year else [],
            id3Mapping.RELEASE_DATE: [self.date.timestamp],
            id3Mapping.ORIGINAL_RELEASE_DATE: [self.date.timestamp],
            id3Mapping.ALBUMSORTORDER: [str(self.albumsort)] if self.albumsort is not None else []
        })

    def __repr__(self):
        return f"Album(\"{self.title}\")"

    @property
    def option_string(self) -> str:
        """Single-line description shown when presenting this album as an option."""
        return f"{self.__repr__()} " \
               f"by Artist({OPTION_STRING_DELIMITER.join([artist.name for artist in self.artist_collection])}) " \
               f"under Label({OPTION_STRING_DELIMITER.join([label.name for label in self.label_collection])})"

    @property
    def options(self) -> List[P]:
        """All selectable related objects: artists, the album itself, then its songs."""
        options = [*self.artist_collection, self, *self.song_collection]

        return options

    def update_tracksort(self):
        """
        This updates the tracksort attributes, of the songs in
        `self.song_collection`, and sorts the songs, if possible.

        It is advised to only call this function, once all the tracks are
        added to the songs.

        :return:
        """

        if self.song_collection.empty:
            return

        tracksort_map: Dict[int, Song] = {
            song.tracksort: song for song in self.song_collection if song.tracksort is not None
        }

        # place the songs, with set tracksort attribute according to it
        for tracksort, song in tracksort_map.items():
            index = tracksort - 1

            # I ONLY modify the `Collection._data` attribute directly, to
            # bypass the attribute mapping, because the item is re-inserted
            # in the next step.
            # NOTE: `_data.index(song)` / `_data.remove(song)` did not find
            # the right object here (presumably __eq__ vs identity), so the
            # identity scan below is done by hand.
            for old_index, temp_song in enumerate(self.song_collection._data):
                if song is temp_song:
                    break

            # the list can't be empty
            del self.song_collection._data[old_index]
            self.song_collection._data.insert(index, song)

        # fill in the empty tracksort attributes
        for i, song in enumerate(self.song_collection):
            if song.tracksort is not None:
                continue
            song.tracksort = i + 1

    def compile(self, merge_into: bool = False):
        """
        compiles the recursive structures,
        and does depending on the object some other stuff.

        no need to override if only the recursive structure should be built.
        override self.build_recursive_structures() instead
        """

        self.update_tracksort()
        self._build_recursive_structures(build_version=random.randint(0, 99999), merge=merge_into)

    @property
    def copyright(self) -> str:
        """``"<year> <first label name>"`` or ``""`` when year or label is missing."""
        if self.date is None:
            return ""
        # BUG FIX: the original condition was `if self.date.has_year or ...`,
        # which returned "" exactly when a year WAS present, making the
        # copyright string unreachable; a copyright line needs a year AND at
        # least one label.
        if not self.date.has_year or len(self.label_collection) == 0:
            return ""

        return f"{self.date.year} {self.label_collection[0].name}"

    @property
    def iso_639_2_lang(self) -> Optional[str]:
        """ISO 639-2 (alpha-3) language code, or None when no language is set."""
        if self.language is None:
            return None

        return self.language.alpha_3

    @property
    def is_split(self) -> bool:
        """
        A split Album is an Album from more than one Artists
        usually half the songs are made by one Artist, the other half by the other one.
        In this case split means either that or one artist featured by all songs.
        :return:
        """
        return len(self.artist_collection) > 1

    @property
    def album_type_string(self) -> str:
        """Human-readable value of the album type enum."""
        return self.album_type.value
|
||||
|
||||
|
||||
"""
|
||||
All objects dependent on Artist
|
||||
"""
|
||||
|
||||
|
||||
class Artist(Base):
    """A musical artist, linking their albums, featured songs, labels and contacts."""

    name: str
    unified_name: str           # normalized name used for duplicate matching
    country: Country
    formed_in: ID3Timestamp
    notes: FormattedText
    lyrical_themes: List[str]

    general_genre: str
    unformated_location: str    # free-form location string as scraped

    source_collection: SourceCollection
    contact_collection: Collection[Contact]

    feature_song_collection: Collection[Song]
    main_album_collection: Collection[Album]
    label_collection: Collection[Label]

    # factories OuterProxy uses to lazily fill attributes that were not set
    _default_factories = {
        "name": str,
        "unified_name": lambda: None,
        "country": lambda: None,
        "unformated_location": lambda: None,

        "formed_in": ID3Timestamp,
        "notes": FormattedText,
        "lyrical_themes": list,
        "general_genre": lambda: "",

        "source_collection": SourceCollection,
        "feature_song_collection": Collection,
        "main_album_collection": Collection,
        "contact_collection": Collection,
        "label_collection": Collection,
    }

    # This is automatically generated
    def __init__(self, name: str = "", unified_name: str = None, country: Country = None,
                 formed_in: ID3Timestamp = None, notes: FormattedText = None, lyrical_themes: List[str] = None,
                 general_genre: str = None, unformated_location: str = None, source_list: List[Source] = None,
                 contact_list: List[Contact] = None, feature_song_list: List[Song] = None,
                 main_album_list: List[Album] = None, label_list: List[Label] = None, **kwargs) -> None:

        super().__init__(name=name, unified_name=unified_name, country=country, formed_in=formed_in, notes=notes,
                         lyrical_themes=lyrical_themes, general_genre=general_genre,
                         unformated_location=unformated_location, source_list=source_list, contact_list=contact_list,
                         feature_song_list=feature_song_list, main_album_list=main_album_list, label_list=label_list,
                         **kwargs)

    DOWNWARDS_COLLECTION_STRING_ATTRIBUTES = ("feature_song_collection", "main_album_collection")
    UPWARDS_COLLECTION_STRING_ATTRIBUTES = ("label_collection",)

    def __init_collections__(self):
        """Wire the cross-collection back-references for this artist."""
        self.feature_song_collection.append_object_to_attribute = {
            "feature_artist_collection": self
        }

        self.main_album_collection.append_object_to_attribute = {
            "artist_collection": self
        }

        self.label_collection.append_object_to_attribute = {
            "current_artist_collection": self
        }

    def _add_other_db_objects(self, object_type: Type[OuterProxy], object_list: List[OuterProxy]):
        """Route a list of related objects into the matching collection."""
        if object_type is Song:
            # this doesn't really make sense
            # self.feature_song_collection.extend(object_list)
            return

        if object_type is Artist:
            return

        if object_type is Album:
            self.main_album_collection.extend(object_list)
            return

        if object_type is Label:
            self.label_collection.extend(object_list)
            return

    def update_albumsort(self):
        """
        This updates the albumsort attributes, of the albums in
        `self.main_album_collection`, and sorts the albums, if possible.

        It is advised to only call this function, once all the albums are
        added to the artist.

        :return:
        """
        if len(self.main_album_collection) <= 0:
            return

        # section index per album type; unknown types land in section 2
        type_section: Dict[AlbumType, int] = defaultdict(lambda: 2, {
            AlbumType.OTHER: 0,  # if I don't know it, I add it to the first section
            AlbumType.STUDIO_ALBUM: 0,
            AlbumType.EP: 0,
            AlbumType.SINGLE: 1
        }) if main_settings["sort_album_by_type"] else defaultdict(lambda: 0)

        sections = defaultdict(list)

        # order albums in the previously defined section
        album: Album
        for album in self.main_album_collection:
            sections[type_section[album.album_type]].append(album)

        def sort_section(_section: List[Album], last_albumsort: int) -> int:
            # album is just a value used in loops
            nonlocal album

            if main_settings["sort_by_date"]:
                _section.sort(key=lambda _album: _album.date, reverse=True)

            new_last_albumsort = last_albumsort

            for album_index, album in enumerate(_section):
                if album.albumsort is None:
                    album.albumsort = new_last_albumsort = album_index + 1 + last_albumsort

            _section.sort(key=lambda _album: _album.albumsort)

            return new_last_albumsort

        # sort the sections individually
        _last_albumsort = 1
        for section_index in sorted(sections):
            _last_albumsort = sort_section(sections[section_index], _last_albumsort)

        # merge all sections again
        album_list = []
        for section_index in sorted(sections):
            album_list.extend(sections[section_index])

        # replace the old collection with the new one
        # NOTE(review): this builds a fresh Collection, discarding the
        # append_object_to_attribute hooks set in __init_collections__ —
        # confirm this is intended.
        self.main_album_collection: Collection = Collection(data=album_list, element_type=Album)

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this artist."""
        return [
            ('id', self.id),
            ('name', self.unified_name),
            *[('url', source.url) for source in self.source_collection],
            *[('contact', contact.value) for contact in self.contact_collection]
        ]

    @property
    def metadata(self) -> Metadata:
        """ID3 metadata contributed by the artist, merged with per-source metadata."""
        metadata = Metadata({
            id3Mapping.ARTIST: [self.name]
        })
        metadata.merge_many([s.get_artist_metadata() for s in self.source_collection])

        return metadata

    def __repr__(self):
        return f"Artist(\"{self.name}\")"

    @property
    def option_string(self) -> str:
        """Single-line description shown when presenting this artist as an option."""
        return f"{self.__repr__()} " \
               f"under Label({OPTION_STRING_DELIMITER.join([label.name for label in self.label_collection])})"

    @property
    def options(self) -> List[P]:
        """All selectable related objects: the artist, their albums, their featured songs.

        BUG FIX: a second, earlier definition of this property (containing a
        leftover debug ``print``) was silently shadowed by this one; the dead
        duplicate has been removed.
        """
        options = [self]
        options.extend(self.main_album_collection)
        options.extend(self.feature_song_collection)
        return options

    @property
    def feature_album(self) -> Album:
        """A synthetic compilation album bundling all songs this artist is featured on."""
        # NOTE(review): `is_split` is a read-only property on Album; passing it
        # through **kwargs relies on OuterProxy behavior not visible here — confirm.
        return Album(
            title="features",
            album_status=AlbumStatus.UNRELEASED,
            album_type=AlbumType.COMPILATION_ALBUM,
            is_split=True,
            albumsort=666,
            dynamic=True,
            song_list=self.feature_song_collection.shallow_list
        )

    def get_all_songs(self) -> List[Song]:
        """
        returns a list of all Songs.
        probably not that useful, because it is unsorted
        """
        collection = self.feature_song_collection.copy()
        for album in self.discography:
            collection.extend(album.song_collection)

        return collection

    @property
    def discography(self) -> List[Album]:
        """All main albums plus the synthetic features album."""
        flat_copy_discography = self.main_album_collection.copy()
        flat_copy_discography.append(self.feature_album)

        return flat_copy_discography
|
||||
|
||||
|
||||
"""
|
||||
Label
|
||||
"""
|
||||
|
||||
|
||||
class Label(Base):
    """A record label, linking its album catalogue and currently signed artists."""

    COLLECTION_STRING_ATTRIBUTES = ("album_collection", "current_artist_collection")

    # a label only points "down" (to artists/albums), never up the hierarchy
    DOWNWARDS_COLLECTION_STRING_ATTRIBUTES = COLLECTION_STRING_ATTRIBUTES

    name: str
    unified_name: str  # normalized name used for duplicate matching
    notes: FormattedText

    source_collection: SourceCollection
    contact_collection: Collection[Contact]

    album_collection: Collection[Album]
    current_artist_collection: Collection[Artist]

    # factories OuterProxy uses to lazily fill attributes that were not set
    _default_factories = {
        "notes": FormattedText,
        "album_collection": Collection,
        "current_artist_collection": Collection,
        "source_collection": SourceCollection,
        "contact_collection": Collection,
        "name": lambda: None,
        "unified_name": lambda: None,
    }

    def __init__(self, name: str = None, unified_name: str = None, notes: FormattedText = None,
                 source_list: List[Source] = None, contact_list: List[Contact] = None,
                 album_list: List[Album] = None, current_artist_list: List[Artist] = None, **kwargs) -> None:
        super().__init__(name=name, unified_name=unified_name, notes=notes, source_list=source_list,
                         contact_list=contact_list, album_list=album_list, current_artist_list=current_artist_list,
                         **kwargs)

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this label."""
        return [
            ('id', self.id),
            ('name', self.unified_name),
            *[('url', source.url) for source in self.source_collection]
        ]

    def _add_other_db_objects(self, object_type: Type[OuterProxy], object_list: List[OuterProxy]):
        """Route a list of related objects into the matching collection."""
        if object_type is Song:
            return

        if object_type is Artist:
            self.current_artist_collection.extend(object_list)
            return

        if object_type is Album:
            self.album_collection.extend(object_list)
            return

    @property
    def options(self) -> List[P]:
        """All selectable related objects: the label, its artists, its albums."""
        options = [self]
        options.extend(self.current_artist_collection.shallow_list)
        options.extend(self.album_collection.shallow_list)

        return options
|
||||
132
music_kraken/objects/source.py
Normal file
132
music_kraken/objects/source.py
Normal file
@@ -0,0 +1,132 @@
|
||||
from collections import defaultdict
|
||||
from enum import Enum
|
||||
from typing import List, Dict, Set, Tuple, Optional, Iterable
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from ..utils.enums.source import SourcePages, SourceTypes
|
||||
from ..utils.config import youtube_settings
|
||||
|
||||
from .metadata import Mapping, Metadata
|
||||
from .parents import OuterProxy
|
||||
from .collection import Collection
|
||||
|
||||
|
||||
class Source(OuterProxy):
    """A reference to an external page (URL) an object was found on or can be fetched from."""

    url: str

    page_enum: SourcePages      # which site/page this source belongs to
    referer_page: SourcePages   # which page led here; defaults to page_enum

    audio_url: str              # direct audio stream URL, when known

    # factories OuterProxy uses to lazily fill attributes that were not set
    _default_factories = {
        "audio_url": lambda: None,
    }

    # This is automatically generated
    def __init__(self, page_enum: SourcePages, url: str, referer_page: SourcePages = None, audio_url: str = None,
                 **kwargs) -> None:

        if referer_page is None:
            referer_page = page_enum

        super().__init__(url=url, page_enum=page_enum, referer_page=referer_page, audio_url=audio_url, **kwargs)

    @classmethod
    def match_url(cls, url: str, referer_page: SourcePages) -> Optional["Source"]:
        """
        Guess the source page from the URL and build a Source for it.

        This shouldn't be used unless you are not certain what the source is for;
        the reason is that it is more inefficient.  Returns None when no known
        page matches the URL.
        """
        parsed = urlparse(url)
        url = parsed.geturl()

        if "musify" in parsed.netloc:
            return cls(SourcePages.MUSIFY, url, referer_page=referer_page)

        if parsed.netloc in [_url.netloc for _url in youtube_settings['youtube_url']]:
            return cls(SourcePages.YOUTUBE, url, referer_page=referer_page)

        if url.startswith("https://www.deezer"):
            return cls(SourcePages.DEEZER, url, referer_page=referer_page)

        if url.startswith("https://open.spotify.com"):
            return cls(SourcePages.SPOTIFY, url, referer_page=referer_page)

        if "bandcamp" in url:
            return cls(SourcePages.BANDCAMP, url, referer_page=referer_page)

        if "wikipedia" in parsed.netloc:
            return cls(SourcePages.WIKIPEDIA, url, referer_page=referer_page)

        if url.startswith("https://www.metal-archives.com/"):
            return cls(SourcePages.ENCYCLOPAEDIA_METALLUM, url, referer_page=referer_page)

        # the less important once
        if url.startswith("https://www.facebook"):
            return cls(SourcePages.FACEBOOK, url, referer_page=referer_page)

        if url.startswith("https://www.instagram"):
            return cls(SourcePages.INSTAGRAM, url, referer_page=referer_page)

        if url.startswith("https://twitter"):
            return cls(SourcePages.TWITTER, url, referer_page=referer_page)

        if url.startswith("https://myspace.com"):
            return cls(SourcePages.MYSPACE, url, referer_page=referer_page)

    def get_song_metadata(self) -> Metadata:
        """ID3 metadata this source contributes to a song (web page URLs)."""
        return Metadata({
            Mapping.FILE_WEBPAGE_URL: [self.url],
            Mapping.SOURCE_WEBPAGE_URL: [self.homepage]
        })

    def get_artist_metadata(self) -> Metadata:
        """ID3 metadata this source contributes to an artist (artist page URL)."""
        return Metadata({
            Mapping.ARTIST_WEBPAGE_URL: [self.url]
        })

    @property
    def metadata(self) -> Metadata:
        return self.get_song_metadata()

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this source."""
        return [
            ('id', self.id),
            ('url', self.url),
            ('audio_url', self.audio_url),
        ]

    def __str__(self):
        return self.__repr__()

    def __repr__(self) -> str:
        return f"Src({self.page_enum.value}: {self.url}, {self.audio_url})"

    page_str = property(fget=lambda self: self.page_enum.value)
    # NOTE(review): `type_enum` is never assigned anywhere visible in this
    # file, so accessing type_str will raise AttributeError — confirm intent.
    type_str = property(fget=lambda self: self.type_enum.value)
    homepage = property(fget=lambda self: SourcePages.get_homepage(self.page_enum))
|
||||
|
||||
|
||||
class SourceCollection(Collection):
    """A Collection of Source objects, additionally indexed by source page."""

    def __init__(self, data: Optional[Iterable[Source]] = None, **kwargs):
        # must exist before super().__init__, which may already push `data`
        # through _map_element
        self._page_to_source_list: Dict[SourcePages, List[Source]] = defaultdict(list)

        super().__init__(data=data, **kwargs)

    def _map_element(self, __object: Source, **kwargs):
        """Index the source under its page in addition to the base mapping."""
        super()._map_element(__object, **kwargs)

        self._page_to_source_list[__object.page_enum].append(__object)

    @property
    def source_pages(self) -> Set[SourcePages]:
        """Every page that at least one contained source belongs to."""
        return {source.page_enum for source in self._data}

    def get_sources_from_page(self, source_page: SourcePages) -> List[Source]:
        """Return a copy of all sources belonging to one page (e.g. YouTube, musify)."""
        return list(self._page_to_source_list[source_page])
|
||||
108
music_kraken/objects/target.py
Normal file
108
music_kraken/objects/target.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import List, Tuple, TextIO, Union
|
||||
import logging
|
||||
|
||||
import requests
|
||||
from tqdm import tqdm
|
||||
|
||||
from .parents import OuterProxy
|
||||
from ..utils.config import main_settings, logging_settings
|
||||
from ..utils.string_processing import fit_to_file_system
|
||||
|
||||
|
||||
LOGGER = logging.getLogger("target")
|
||||
|
||||
|
||||
class Target(OuterProxy):
    """
    A file-system location a download is written to.

    create somehow like that
    ```python
    # I know path is pointless, and I will change that (don't worry about backwards compatibility there)
    Target(file="song.mp3", path="~/Music/genre/artist/album")
    ```
    """

    file_path: Path

    _default_factories = {
    }

    # This is automatically generated
    def __init__(self, file_path: Union[Path, str], relative_to_music_dir: bool = False, **kwargs) -> None:
        """
        :param file_path: target location; sanitized through fit_to_file_system
        :param relative_to_music_dir: when True, resolve relative to main_settings["music_directory"]
        """
        if not isinstance(file_path, Path):
            file_path = Path(file_path)

        if relative_to_music_dir:
            file_path = Path(main_settings["music_directory"], file_path)

        super().__init__(file_path=fit_to_file_system(file_path), **kwargs)

        self.is_relative_to_music_dir: bool = relative_to_music_dir

    def __repr__(self) -> str:
        return str(self.file_path)

    @property
    def indexing_values(self) -> List[Tuple[str, object]]:
        """Attribute/value pairs used to detect duplicates of this target."""
        return [('filepath', self.file_path)]

    @property
    def exists(self) -> bool:
        """True when a regular file exists at this target's path."""
        return self.file_path.is_file()

    @property
    def size(self) -> int:
        """
        returns the size the downloaded audio takes up in bytes
        returns 0 if the file doesn't exist
        """
        if not self.exists:
            return 0

        return self.file_path.stat().st_size

    def create_path(self):
        """Make sure the parent directory chain exists before writing."""
        self.file_path.parent.mkdir(parents=True, exist_ok=True)

    def copy_content(self, copy_to: Target):
        """Copy this file's bytes into another target, creating its directories."""
        if not self.exists:
            LOGGER.warning(f"No file exists at: {self.file_path}")
            return

        with open(self.file_path, "rb") as read_from:
            copy_to.create_path()
            with open(copy_to.file_path, "wb") as write_to:
                write_to.write(read_from.read())

    def stream_into(self, r: requests.Response, desc: str = None) -> bool:
        """
        Stream an HTTP response body into this target with a progress bar.

        :param r: response to consume; None is tolerated and treated as failure
        :param desc: label for the progress bar
        :return: True on success, False on missing response or timeout
        """
        if r is None:
            return False

        self.create_path()

        # BUG FIX: the original called int(r.headers.get('content-length'))
        # directly, which raises TypeError when the server omits the header;
        # fall back to an indeterminate progress bar (total=None) instead.
        content_length = r.headers.get('content-length')
        total_size = int(content_length) if content_length is not None else None

        with open(self.file_path, 'wb') as f:
            try:
                """
                https://en.wikipedia.org/wiki/Kilobyte
                > The internationally recommended unit symbol for the kilobyte is kB.
                """
                with tqdm(total=total_size, unit='B', unit_scale=True, unit_divisor=1024, desc=desc) as t:

                    for chunk in r.iter_content(chunk_size=main_settings["chunk_size"]):
                        size = f.write(chunk)
                        t.update(size)
                    return True

            except requests.exceptions.Timeout:
                logging_settings["download_logger"].error("Stream timed out.")
                return False

    def open(self, file_mode: str, **kwargs) -> TextIO:
        # thin wrapper around Path.open; note the handle is binary for "b" modes
        return self.file_path.open(file_mode, **kwargs)

    def delete(self):
        # missing_ok: deleting a never-created target is not an error
        self.file_path.unlink(missing_ok=True)
|
||||
Reference in New Issue
Block a user