# music-kraken-core/src/music_kraken/__init__.py

import gc
import musicbrainzngs
import logging
import re

from . import objects, pages
from .utils.shared import MUSIC_DIR, NOT_A_GENRE, MODIFY_GC, get_random_message

if MODIFY_GC:
"""
At the start I modify the garbage collector to run a bit fewer times.
This should increase speed:
https://mkennedy.codes/posts/python-gc-settings-change-this-and-make-your-app-go-20pc-faster/
"""
# Clean up what might be garbage so far.
gc.collect(2)
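
    # For reference: gc.get_threshold() returns (threshold0, threshold1, threshold2).
    # Generation 0 is collected once allocations minus deallocations exceed threshold0
    # (default 700); each older generation is collected after a fixed number of
    # collections of the generation below it, so doubling gen1/gen2 roughly halves how
    # often the older generations run.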
    allocs, gen1, gen2 = gc.get_threshold()
    allocs = 50_000  # Start the GC sequence every 50K not 700 allocations.
    gen1 = gen1 * 2
    gen2 = gen2 * 2
    gc.set_threshold(allocs, gen1, gen2)

logging.getLogger("musicbrainzngs").setLevel(logging.WARNING)
musicbrainzngs.set_useragent("metadata receiver", "0.1", "https://github.com/HeIIow2/music-downloader")

URL_REGEX = r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
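# The pattern matches only the scheme and host of an http(s) URL (it stops at the first
# "/" after the host), e.g. the "https://example.com" part of "https://example.com/artist/1".
# It is only used as a quick "does this look like a URL?" check in next_search() below.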

DOWNLOAD_COMMANDS = {
    "ok",
    "download",
    "\\d",
    "hs"
}
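# Note: membership is tested with a plain set lookup in next_search(), so the "\\d" entry
# only matches if the user literally types \d; numeric input is handled separately via
# parsed.isdigit().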

EXIT_COMMANDS = {
    "exit",
    "quit"
}
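# Both command sets are compared against the stripped, lower-cased input, so e.g. "Exit"
# and "QUIT" work as well.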


def cli():
    def next_search(_search: pages.Search, query: str) -> bool:
        """
        Handle one line of user input against the current search state.

        :param _search: the Search object holding the current results and navigation state
        :param query: the raw text the user entered at the prompt
        :return: True if the CLI should exit after this step, False otherwise
        """
        query: str = query.strip()
        parsed: str = query.lower()

        if parsed in EXIT_COMMANDS:
            return True
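
        # Navigation: "." just re-prints the current results, ".." goes back one step,
        # and a bare number selects that result index.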

        if parsed == ".":
            return False

        if parsed == "..":
            _search.goto_previous()
            return False

        if parsed.isdigit():
            _search.choose_index(int(parsed))
            return False
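
        # A download command downloads whatever is currently chosen; the CLI only keeps
        # running afterwards if the result was a mild failure.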

        if parsed in DOWNLOAD_COMMANDS:
            r = _search.download_chosen()

            print()
            print(r)
            print()

            return not r.is_mild_failure

        url = re.match(URL_REGEX, query)
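        # Note: url.string is the full input string (not just the matched prefix), so
        # search_url() receives the whole query; the regex only decides whether the input
        # looks like a URL at all.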
        if url is not None:
            if not _search.search_url(url.string):
                print("The given url couldn't be found.")
            return False

        page = _search.get_page_from_query(parsed)
        if page is not None:
            _search.choose_page(page)
            return False

        # nothing else matched, so treat the input as a plain search query
        _search.search(query)
        return False

    search = pages.Search()

    while True:
        if next_search(search, input(">> ")):
            break
        print(search)

    print()
    print(get_random_message())
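

# Minimal manual invocation (a sketch; the installed package may wire cli() up through
# its own console-script entry point instead):
#
#   from music_kraken import cli
#   cli()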