Hellow2 2023-05-23 10:49:52 +02:00
parent 08339bab68
commit 2a00735f72
8 changed files with 23 additions and 126 deletions

View File

@@ -3,7 +3,7 @@ from music_kraken.pages import EncyclopaediaMetallum
 def search():
-    results = EncyclopaediaMetallum.search_by_query("#a Ghost Bath")
+    results = EncyclopaediaMetallum._raw_search("#a Ghost Bath")
     print(results)
     print(results[0].source_collection)

View File

@@ -7,8 +7,6 @@ import requests
 from bs4 import BeautifulSoup
 from ..connection import Connection
-from ..utils.support_classes.default_target import DefaultTarget
-from ..utils.support_classes.download_result import DownloadResult
 from ..objects import (
     Song,
     Source,
@@ -24,6 +22,7 @@ from ..utils.enums.source import SourcePages
 from ..utils.enums.album import AlbumType
 from ..audio import write_metadata_to_target, correct_codec
 from ..utils import shared
+from ..utils.support_classes import Query, DownloadResult, DefaultTarget
 class Page:
@@ -42,124 +41,21 @@ class Page:
     SOURCE_TYPE: SourcePages
-    @classmethod
-    def get_request(
-            cls,
-            url: str,
-            stream: bool = False,
-            accepted_response_codes: set = {200},
-            trie: int = 0
-    ) -> Optional[requests.Response]:
-        retry = False
-        try:
-            r = cls.API_SESSION.get(url, timeout=cls.TIMEOUT, stream=stream)
-        except requests.exceptions.Timeout:
-            cls.LOGGER.warning(f"request timed out at \"{url}\": ({trie}-{cls.TRIES})")
-            retry = True
-        except requests.exceptions.ConnectionError:
-            cls.LOGGER.warning(f"couldn't connect to \"{url}\": ({trie}-{cls.TRIES})")
-            retry = True
-        if not retry and r.status_code in accepted_response_codes:
-            return r
-        if not retry:
-            cls.LOGGER.warning(f"{cls.__name__} responded wit {r.status_code} at GET:{url}. ({trie}-{cls.TRIES})")
-            cls.LOGGER.debug(r.content)
-        if trie >= cls.TRIES:
-            cls.LOGGER.warning("to many tries. Aborting.")
-            return None
-        return cls.get_request(url=url, stream=stream, accepted_response_codes=accepted_response_codes, trie=trie + 1)
-    @classmethod
-    def post_request(cls, url: str, json: dict, accepted_response_codes: set = {200}, trie: int = 0) -> Optional[
-        requests.Response]:
-        retry = False
-        try:
-            r = cls.API_SESSION.post(url, json=json, timeout=cls.POST_TIMEOUT)
-        except requests.exceptions.Timeout:
-            cls.LOGGER.warning(f"request timed out at \"{url}\": ({trie}-{cls.TRIES})")
-            retry = True
-        except requests.exceptions.ConnectionError:
-            cls.LOGGER.warning(f"couldn't connect to \"{url}\": ({trie}-{cls.TRIES})")
-            retry = True
-        if not retry and r.status_code in accepted_response_codes:
-            return r
-        if not retry:
-            cls.LOGGER.warning(f"{cls.__name__} responded wit {r.status_code} at POST:{url}. ({trie}-{cls.TRIES})")
-            cls.LOGGER.debug(r.content)
-        if trie >= cls.TRIES:
-            cls.LOGGER.warning("to many tries. Aborting.")
-            return None
-        cls.LOGGER.warning(f"payload: {json}")
-        return cls.post_request(url=url, json=json, accepted_response_codes=accepted_response_codes, trie=trie + 1)
     @classmethod
     def get_soup_from_response(cls, r: requests.Response) -> BeautifulSoup:
         return BeautifulSoup(r.content, "html.parser")
-    class Query:
-        def __init__(self, query: str):
-            self.query = query
-            self.is_raw = False
-            self.artist = None
-            self.album = None
-            self.song = None
-            self.parse_query(query=query)
-        def __str__(self):
-            if self.is_raw:
-                return self.query
-            return f"{self.artist}; {self.album}; {self.song}"
-        def parse_query(self, query: str):
-            if not '#' in query:
-                self.is_raw = True
-                return
-            query = query.strip()
-            parameters = query.split('#')
-            parameters.remove('')
-            for parameter in parameters:
-                splitted = parameter.split(" ")
-                type_ = splitted[0]
-                input_ = " ".join(splitted[1:]).strip()
-                if type_ == "a":
-                    self.artist = input_
-                    continue
-                if type_ == "r":
-                    self.album = input_
-                    continue
-                if type_ == "t":
-                    self.song = input_
-                    continue
-        def get_str(self, string):
-            if string is None:
-                return ""
-            return string
-        artist_str = property(fget=lambda self: self.get_str(self.artist))
-        album_str = property(fget=lambda self: self.get_str(self.album))
-        song_str = property(fget=lambda self: self.get_str(self.song))
+    @classmethod
+    def search(cls, query: Query) -> Options:
+        results = []
+        for default_query in query.default_search:
+            results.extend(cls._raw_search(default_query))
+        return Options(results)
     @classmethod
-    def search_by_object(cls, data_object: DatabaseObject, filter_none: bool = True) -> List[DatabaseObject]:
-        return []
-    @classmethod
-    def search_by_query(cls, query: str) -> Options:
+    def _raw_search(cls, query: str) -> Options:
         """
         # The Query
         You can define a new parameter with "#",
@@ -174,7 +70,7 @@ class Page:
         :return possible_music_objects:
         """
-        return Options()
+        return []
     @classmethod
     def fetch_details(cls, music_object: Union[Song, Album, Artist, Label], stop_at_level: int = 1) -> DatabaseObject:
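Taken together, the abstract.py changes split searching into two levels: a new Page.search() that takes the shared Query support class and fans out over query.default_search, and _raw_search() (the renamed search_by_query) that still handles one raw query string per page. A minimal usage sketch, assuming the new Query class in utils/support_classes/query.py (not shown in this diff) accepts the raw query string and exposes a default_search list of strings:

    # Hedged sketch only; the Query constructor and its default_search attribute
    # are inferred from the call sites in this commit, not from query.py itself.
    from music_kraken.pages import Musify
    from music_kraken.utils.support_classes import Query

    query = Query("#a Ghost Bath")  # assumed constructor signature

    # High-level entry point added here: builds one Options object from every
    # raw query string the Query object proposes.
    options = Musify.search(query)

    # Low-level entry point (the renamed search_by_query): one raw string.
    raw_options = Musify._raw_search("#a Ghost Bath")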

View File

@@ -67,7 +67,7 @@ class Search(Download):
         """
         for page in self.pages:
-            self._current_option[page] = page.search_by_query(query=query)
+            self._current_option[page] = page._raw_search(query=query)
     def choose_page(self, page: Type[Page]):
         """

View File

@@ -1,13 +1,11 @@
 from collections import defaultdict
 from typing import List, Optional, Dict, Type, Union
-import requests
 from bs4 import BeautifulSoup
 import pycountry
 from urllib.parse import urlparse
 from ..connection import Connection
-from ..utils.shared import ENCYCLOPAEDIA_METALLUM_LOGGER, proxies
-from ..utils import string_processing
+from ..utils.shared import ENCYCLOPAEDIA_METALLUM_LOGGER
 from .abstract import Page
 from ..utils.enums.source import SourcePages
 from ..utils.enums.album import AlbumType
@@ -45,7 +43,7 @@ class EncyclopaediaMetallum(Page):
     LOGGER = ENCYCLOPAEDIA_METALLUM_LOGGER
     @classmethod
-    def search_by_query(cls, query: str) -> Options:
+    def _raw_search(cls, query: str) -> Options:
         query_obj = cls.Query(query)
         if query_obj.is_raw:

View File

@@ -25,7 +25,7 @@ from ..objects import (
 )
 from ..utils.shared import MUSIFY_LOGGER
 from ..utils import string_processing, shared
-from .support_classes.download_result import DownloadResult
+from ..utils.support_classes import DownloadResult
 """
 https://musify.club/artist/ghost-bath-280348?_pjax=#bodyContent
@@ -112,7 +112,7 @@ class Musify(Page):
     )
     @classmethod
-    def search_by_query(cls, query: str) -> Options:
+    def _raw_search(cls, query: str) -> Options:
         query_obj = cls.Query(query)
         if query_obj.is_raw:

View File

@@ -46,7 +46,7 @@ class YouTube(Page):
     @classmethod
-    def search_by_query(cls, query: str) -> Options:
+    def _raw_search(cls, query: str) -> Options:
         return Options()
     @classmethod

View File

@@ -0,0 +1,3 @@
+from .default_target import DefaultTarget
+from .download_result import DownloadResult
+from .query import Query
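This new package __init__ re-exports the three support classes, which is what lets abstract.py and musify.py above collapse their per-module imports into a single flat import. Assuming the package resolves to music_kraken.utils.support_classes, the change for callers looks like this:

    # Before this commit: one import per module.
    from music_kraken.utils.support_classes.default_target import DefaultTarget
    from music_kraken.utils.support_classes.download_result import DownloadResult

    # After this commit: everything re-exported through the package __init__.
    from music_kraken.utils.support_classes import DefaultTarget, DownloadResult, Query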

View File

@@ -3,7 +3,7 @@ from music_kraken.pages import Musify
 def search():
-    results = Musify.search_by_query("#a Ghost Bath")
+    results = Musify._raw_search("#a Ghost Bath")
     print(results)