from typing import List, Optional, Type
from urllib.parse import urlparse, urlunparse
from enum import Enum
import json

from bs4 import BeautifulSoup
import pycountry

from .abstract import Page
from ..objects import (
    DatabaseObject,
    Artist,
    Source,
    SourcePages,
    Song,
    Album,
    Label,
    Target,
    Contact,
    ID3Timestamp,
    Lyrics,
    FormattedText,
    Artwork,
)
from ..connection import Connection
from ..utils.support_classes.download_result import DownloadResult
from ..utils.string_processing import clean_song_title
from ..utils.config import main_settings, logging_settings
from ..utils.shared import DEBUG

if DEBUG:
    from ..utils import dump_to_file


def _parse_artist_url(url: str) -> str:
    # Reduce any URL on an artist's domain to that artist's "/music/" overview page.
    parsed = urlparse(url)
    return urlunparse((parsed.scheme, parsed.netloc, "/music/", "", "", ""))


def _get_host(source: Source) -> str:
    # Keep only the scheme and host of the source URL, dropping path, query and fragment.
    parsed = urlparse(source.url)
    return urlunparse((parsed.scheme, parsed.netloc, "", "", "", ""))
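
# Example behaviour of the helpers above (illustrative URLs):
#   _parse_artist_url("https://artist.bandcamp.com/album/some-album")
#       -> "https://artist.bandcamp.com/music/"
#   _get_host(Source(SourcePages.BANDCAMP, "https://artist.bandcamp.com/track/some-track"))
#       -> "https://artist.bandcamp.com"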


class BandcampTypes(Enum):
    ARTIST = "b"
    ALBUM = "a"
    SONG = "t"
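    # These are the same single-letter filters that the *_search methods
    # below pass to general_search() as filter_string.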


class Bandcamp(Page):
    # CHANGE
    SOURCE_TYPE = SourcePages.BANDCAMP
    LOGGER = logging_settings["bandcamp_logger"]

    def __init__(self, *args, **kwargs):
        self.connection: Connection = Connection(
            host="https://bandcamp.com/",
            logger=self.LOGGER,
            module="bandcamp",
        )

        super().__init__(*args, **kwargs)

    def get_source_type(self, source: Source) -> Optional[Type[DatabaseObject]]:
        parsed_url = urlparse(source.url)
        path = parsed_url.path.replace("/", "")

        if path == "" or path.startswith("music"):
            return Artist
        if path.startswith("album"):
            return Album
        if path.startswith("track"):
            return Song

        return super().get_source_type(source)
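
    # Illustrative mapping, assuming the usual Bandcamp URL layout:
    #   https://artist.bandcamp.com/          -> Artist
    #   https://artist.bandcamp.com/music     -> Artist
    #   https://artist.bandcamp.com/album/<x> -> Album
    #   https://artist.bandcamp.com/track/<y> -> Song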

    def _parse_autocomplete_api_result(self, data: dict) -> Optional[DatabaseObject]:
        try:
            object_type = BandcampTypes(data["type"])
        except ValueError:
            return None

        url = data["item_url_root"]
        if "item_url_path" in data:
            url = data["item_url_path"]

        source_list = [Source(self.SOURCE_TYPE, url)]
        name = data["name"]

        if data.get("is_label", False):
            return Label(
                name=name,
                source_list=source_list
            )

        if object_type is BandcampTypes.ARTIST:
            source_list = [Source(self.SOURCE_TYPE, _parse_artist_url(url))]
            return Artist(
                name=name,
                source_list=source_list
            )

        if object_type is BandcampTypes.ALBUM:
            return Album(
                title=name,
                source_list=source_list,
                artist_list=[
                    Artist(
                        name=data["band_name"].strip(),
                        source_list=[
                            Source(self.SOURCE_TYPE, data["item_url_root"])
                        ]
                    )
                ]
            )

        if object_type is BandcampTypes.SONG:
            return Song(
                title=clean_song_title(name, artist_name=data["band_name"]),
                source_list=source_list,
                main_artist_list=[
                    Artist(
                        name=data["band_name"],
                        source_list=[
                            Source(self.SOURCE_TYPE, data["item_url_root"])
                        ]
                    )
                ]
            )
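
    # Sketch of the autocomplete result shape this method expects,
    # reconstructed from the keys accessed above (values illustrative):
    #   {
    #       "type": "t",            # "b" artist/label, "a" album, "t" track
    #       "name": "Song Title",
    #       "band_name": "Artist Name",
    #       "is_label": False,
    #       "item_url_root": "https://artist.bandcamp.com",
    #       "item_url_path": "https://artist.bandcamp.com/track/song-title",
    #   }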

    def general_search(self, search_query: str, filter_string: str = "") -> List[DatabaseObject]:
        results = []

        r = self.connection.post("https://bandcamp.com/api/bcsearch_public_api/1/autocomplete_elastic", json={
            "fan_id": None,
            "full_page": True,
            "search_filter": filter_string,
            "search_text": search_query,
        }, name=f"search_{filter_string}_{search_query}")
        if r is None:
            return results

        if DEBUG:
            dump_to_file("bandcamp_search_response.json", r.text, is_json=True, exit_after_dump=False)

        data = r.json()

        for element in data.get("auto", {}).get("results", []):
            parsed = self._parse_autocomplete_api_result(element)
            if parsed is not None:
                results.append(parsed)

        return results

    def label_search(self, label: Label) -> List[Label]:
        return self.general_search(label.name, filter_string="b")

    def artist_search(self, artist: Artist) -> List[Artist]:
        return self.general_search(artist.name, filter_string="b")

    def album_search(self, album: Album) -> List[Album]:
        return self.general_search(album.title, filter_string="a")

    def song_search(self, song: Song) -> List[Song]:
        return self.general_search(song.title, filter_string="t")

    def fetch_label(self, source: Source, stop_at_level: int = 1) -> Label:
        # Stub: label fetching is not implemented for Bandcamp yet.
        return Label()

    def _parse_artist_details(self, soup: BeautifulSoup) -> Artist:
        name: Optional[str] = None
        source_list: List[Source] = []
        contact_list: List[Contact] = []

        band_name_location: BeautifulSoup = soup.find("p", {"id": "band-name-location"})
        if band_name_location is not None:
            title_span = band_name_location.find("span", {"class": "title"})
            if title_span is not None:
                name = title_span.text.strip()

        link_container: BeautifulSoup = soup.find("ol", {"id": "band-links"})
        if link_container is not None:
            li: BeautifulSoup
            for li in link_container.find_all("a"):
                # Skip anchors without a target instead of crashing on them.
                if li is None or li.get('href') is None:
                    continue

                source_list.append(Source.match_url(_parse_artist_url(li['href']), referer_page=self.SOURCE_TYPE))

        return Artist(
            name=name,
            source_list=source_list
        )

    def _parse_album(self, soup: BeautifulSoup, initial_source: Source) -> Album:
        title = None
        source_list: List[Source] = []

        a = soup.find("a")
        if a is not None and a["href"] is not None:
            source_list.append(Source(self.SOURCE_TYPE, _get_host(initial_source) + a["href"]))

        title_p = soup.find("p", {"class": "title"})
        if title_p is not None:
            title = title_p.text.strip()

        return Album(title=title, source_list=source_list)

    def _parse_artist_data_blob(self, data_blob: dict, artist_url: str) -> List[Album]:
        parsed_artist_url = urlparse(artist_url)
        album_list: List[Album] = []

        for album_json in data_blob.get("buyfulldisco", {}).get("tralbums", []):
            album_list.append(Album(
                title=album_json["title"].strip(),
                source_list=[Source(
                    self.SOURCE_TYPE,
                    urlunparse((parsed_artist_url.scheme, parsed_artist_url.netloc, album_json["page_url"], "", "", ""))
                )]
            ))

        return album_list
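
    # "data-blob" is a JSON attribute embedded in the artist page; the
    # "buyfulldisco" -> "tralbums" path read above is assumed from observed
    # payloads and may be absent, in which case no albums are added.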

    def fetch_artist(self, source: Source, stop_at_level: int = 1) -> Artist:
        artist = Artist()

        r = self.connection.get(_parse_artist_url(source.url), name=f"artist_{urlparse(source.url).scheme}_{urlparse(source.url).netloc}")
        if r is None:
            return artist

        soup = self.get_soup_from_response(r)

        if DEBUG:
            dump_to_file("artist_page.html", r.text, exit_after_dump=False)

        # Guard against pages without a bio container instead of crashing.
        bio_container = soup.find("div", {"id": "bio-container"})
        if bio_container is not None:
            artist = self._parse_artist_details(soup=bio_container)

        html_music_grid = soup.find("ol", {"id": "music-grid"})
        if html_music_grid is not None:
            for subsoup in html_music_grid.find_all("li"):
                artist.main_album_collection.append(self._parse_album(soup=subsoup, initial_source=source))

        for i, data_blob_soup in enumerate(soup.find_all("div", {"id": ["pagedata", "collectors-data"]})):
            data_blob = data_blob_soup["data-blob"]

            if DEBUG:
                dump_to_file(f"bandcamp_artist_data_blob_{i}.json", data_blob, is_json=True, exit_after_dump=False)

            if data_blob is not None:
                artist.main_album_collection.extend(
                    self._parse_artist_data_blob(json.loads(data_blob), source.url)
                )

        artist.source_collection.append(source)
        return artist

    def _parse_track_element(self, track: dict, artwork: Artwork) -> Optional[Song]:
        lyrics_list: List[Lyrics] = []

        _lyrics: Optional[str] = track.get("item", {}).get("recordingOf", {}).get("lyrics", {}).get("text")
        if _lyrics is not None:
            lyrics_list.append(Lyrics(text=FormattedText(plain=_lyrics)))

        return Song(
            title=clean_song_title(track["item"]["name"]),
            source_list=[Source(self.SOURCE_TYPE, track["item"]["mainEntityOfPage"])],
            tracksort=int(track["position"]),
            artwork=artwork,
            # Pass along the lyrics collected above; they were previously discarded.
            lyrics_list=lyrics_list,
        )

    def fetch_album(self, source: Source, stop_at_level: int = 1) -> Album:
        album = Album()

        r = self.connection.get(source.url, name=f"album_{urlparse(source.url).netloc.split('.')[0]}_{urlparse(source.url).path.replace('/', '').replace('album', '')}")
        if r is None:
            return album

        soup = self.get_soup_from_response(r)

        # The album page embeds its metadata as a JSON-LD <script> block.
        data_container = soup.find("script", {"type": "application/ld+json"})

        if DEBUG:
            dump_to_file("album_data.json", data_container.text, is_json=True, exit_after_dump=False)

        data = json.loads(data_container.text)
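        # Keys such as "byArtist", "name", "datePublished" and "albumRelease"
        # follow schema.org's MusicAlbum vocabulary.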

        artist_data = data["byArtist"]

        artist_source_list = []
        if "@id" in artist_data:
            artist_source_list = [Source(self.SOURCE_TYPE, _parse_artist_url(artist_data["@id"]))]
        album = Album(
            title=data["name"].strip(),
            source_list=[Source(self.SOURCE_TYPE, data.get("mainEntityOfPage", data["@id"]))],
            date=ID3Timestamp.strptime(data["datePublished"], "%d %b %Y %H:%M:%S %Z"),
            artist_list=[Artist(
                name=artist_data["name"].strip(),
                source_list=artist_source_list
            )]
        )

        artwork: Artwork = Artwork()

        def _get_artwork_url(_data: dict) -> Optional[str]:
            if "image" in _data:
                return _data["image"]
            for _property in _data.get("additionalProperty", []):
                if _property.get("name") == "art_id":
                    return f"https://f4.bcbits.com/img/a{_property.get('value')}_2.jpg"

        # Prefer the album-level image; fall back to the first release that has one.
        _artwork_url = _get_artwork_url(data)
        if _artwork_url is not None:
            artwork.append(url=_artwork_url, width=350, height=350)
        else:
            for album_release in data.get("albumRelease", []):
                _artwork_url = _get_artwork_url(album_release)
                if _artwork_url is not None:
                    artwork.append(url=_artwork_url, width=350, height=350)
                    break
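
        # The f4.bcbits.com URL above is reconstructed from the release's
        # art_id; the "_2" suffix is assumed to select a roughly 350x350
        # variant, matching the dimensions recorded on the Artwork.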

        for i, track_json in enumerate(data.get("track", {}).get("itemListElement", [])):
            if DEBUG:
                dump_to_file(f"album_track_{i}.json", json.dumps(track_json), is_json=True, exit_after_dump=False)

            try:
                album.song_collection.append(self._parse_track_element(track_json, artwork=artwork))
            except KeyError:
                continue

        album.source_collection.append(source)
        return album

    def _fetch_lyrics(self, soup: BeautifulSoup) -> List[Lyrics]:
        track_lyrics = soup.find("div", {"class": "lyricsText"})
        if track_lyrics:
            return [Lyrics(text=FormattedText(html=track_lyrics.prettify()))]

        return []

    def fetch_song(self, source: Source, stop_at_level: int = 1) -> Song:
        r = self.connection.get(source.url, name=f"song_{urlparse(source.url).netloc.split('.')[0]}_{urlparse(source.url).path.replace('/', '').replace('track', '')}")
        if r is None:
            return Song()

        soup = self.get_soup_from_response(r)

        data_container = soup.find("script", {"type": "application/ld+json"})

        other_data = {}
        other_data_list = soup.select("script[data-tralbum]")
        if len(other_data_list) > 0:
            other_data = json.loads(other_data_list[0]["data-tralbum"])
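
        # "data-tralbum" is a JSON attribute on one of the page's <script>
        # tags; it appears to carry the player data, including the
        # "trackinfo" stream URLs read further down.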

        if DEBUG:
            # dump_to_file is only imported when DEBUG is set, so guard these calls.
            dump_to_file("bandcamp_song_data.json", data_container.text, is_json=True, exit_after_dump=False)
            dump_to_file("bandcamp_song_data_other.json", json.dumps(other_data), is_json=True, exit_after_dump=False)
            dump_to_file("bandcamp_song_page.html", r.text, exit_after_dump=False)

        data = json.loads(data_container.text)
        album_data = data["inAlbum"]
        artist_data = data["byArtist"]

        mp3_url = None
        # Keep the last stream URL listed in the first track's "file" mapping.
        for key, value in other_data.get("trackinfo", [{}])[0].get("file", {"": None}).items():
            mp3_url = value

        song = Song(
            title=clean_song_title(data["name"], artist_name=artist_data["name"]),
            source_list=[source, Source(self.SOURCE_TYPE, data.get("mainEntityOfPage", data["@id"]), audio_url=mp3_url)],
            album_list=[Album(
                title=album_data["name"].strip(),
                date=ID3Timestamp.strptime(data["datePublished"], "%d %b %Y %H:%M:%S %Z"),
                source_list=[Source(self.SOURCE_TYPE, album_data["@id"])]
            )],
            main_artist_list=[Artist(
                name=artist_data["name"].strip(),
                source_list=[Source(self.SOURCE_TYPE, _parse_artist_url(artist_data["@id"]))]
            )],
            lyrics_list=self._fetch_lyrics(soup=soup)
        )

        return song

    def download_song_to_target(self, source: Source, target: Target, desc: Optional[str] = None) -> DownloadResult:
        if source.audio_url is None:
            return DownloadResult(error_message="Couldn't find download link.")
        return self.connection.stream_into(url=source.audio_url, target=target, description=desc)