mirror of https://github.com/yutto-dev/yutto
🐛 fix: store semaphore in `FetcherContext` to avoid create it outside event loop (#404)
parent 0f127a790c
commit 2ee1bb4420

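The fix targets a classic asyncio pitfall: a synchronization primitive created before the event loop is running can end up bound to a different loop than the one `asyncio.run()` later starts (interpreters before Python 3.10 capture a loop when the primitive is constructed), which surfaces as `RuntimeError: ... attached to a different loop`. A minimal sketch of the failure mode and the safe pattern — the names below are illustrative, not from this commit:

```python
import asyncio

# Anti-pattern: module-level semaphore, constructed while no loop is running.
# On Python < 3.10, asyncio primitives capture an event loop eagerly, so this
# semaphore may end up bound to a different loop than the one asyncio.run()
# starts below, raising "attached to a different loop" when first awaited.
SEM = asyncio.Semaphore(8)


async def risky() -> None:
    async with SEM:  # may fail on older interpreters
        await asyncio.sleep(0)


async def safe() -> None:
    # Safe pattern: create the semaphore inside the running loop, then pass it
    # (or a context object holding it) to whatever needs it.
    sem = asyncio.Semaphore(8)
    async with sem:
        await asyncio.sleep(0)


asyncio.run(safe())
```
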
@@ -32,7 +32,7 @@ from yutto.processor.path_resolver import create_unique_path_resolver
 from yutto.utils.asynclib import sleep_with_status_bar_refresh
 from yutto.utils.console.logger import Badge, Logger
 from yutto.utils.danmaku import DanmakuOptions
-from yutto.utils.fetcher import Fetcher, create_client
+from yutto.utils.fetcher import Fetcher, FetcherContext, create_client
 from yutto.utils.funcutils import as_sync
 from yutto.utils.time import TIME_FULL_FMT
 from yutto.validator import (
@@ -51,22 +51,24 @@ if TYPE_CHECKING:
 def main():
     parser = cli()
     args = parser.parse_args()
-    initial_validation(args)
+    ctx = FetcherContext()
+    initial_validation(ctx, args)
     args_list = flatten_args(args, parser)
     try:
-        run(args_list)
+        run(ctx, args_list)
     except (SystemExit, KeyboardInterrupt, asyncio.exceptions.CancelledError):
         Logger.info("已终止下载,再次运行即可继续下载~")
         sys.exit(ErrorCode.PAUSED_DOWNLOAD.value)


 @as_sync
-async def run(args_list: list[argparse.Namespace]):
+async def run(ctx: FetcherContext, args_list: list[argparse.Namespace]):
+    ctx.set_fetch_semaphore(fetch_workers=8)
     unique_path = create_unique_path_resolver()
     async with create_client(
-        cookies=Fetcher.cookies,
-        trust_env=Fetcher.trust_env,
-        proxy=Fetcher.proxy,
+        cookies=ctx.cookies,
+        trust_env=ctx.trust_env,
+        proxy=ctx.proxy,
     ) as client:
         if len(args_list) > 1:
             Logger.info(f"列表里共检测到 {len(args_list)} 项")
@@ -107,12 +109,12 @@ async def run(args_list: list[argparse.Namespace]):
                 break

             # 在开始前校验,减少对第一个视频的请求
-            if not await validate_user_info({"is_login": args.login_strict, "vip_status": args.vip_strict}):
+            if not await validate_user_info(ctx, {"is_login": args.login_strict, "vip_status": args.vip_strict}):
                 Logger.error("启用了严格校验大会员或登录模式,请检查 SESSDATA 或大会员状态!")
                 sys.exit(ErrorCode.NOT_LOGIN_ERROR.value)
             # 重定向到可识别的 url
             try:
-                url = await Fetcher.get_redirected_url(client, url)
+                url = await Fetcher.get_redirected_url(ctx, client, url)
             except httpx.InvalidURL:
                 Logger.error(f"无效的 url({url})~请检查一下链接是否正确~")
                 sys.exit(ErrorCode.WRONG_URL_ERROR.value)
@@ -128,7 +130,7 @@ async def run(args_list: list[argparse.Namespace]):
             # 提取信息,构造解析任务~
             for extractor in extractors:
                 if extractor.match(url):
-                    download_list = await extractor(client, args)
+                    download_list = await extractor(ctx, client, args)
                     break
             else:
                 if args.batch:
@@ -146,7 +148,7 @@ async def run(args_list: list[argparse.Namespace]):
                 continue

             # 中途校验,因为批量下载时可能会失效
-            if not await validate_user_info({"is_login": args.login_strict, "vip_status": args.vip_strict}):
+            if not await validate_user_info(ctx, {"is_login": args.login_strict, "vip_status": args.vip_strict}):
                 Logger.error("启用了严格校验大会员或登录模式,请检查 SESSDATA 或大会员状态!")
                 sys.exit(ErrorCode.NOT_LOGIN_ERROR.value)

@@ -167,6 +169,7 @@ async def run(args_list: list[argparse.Namespace]):
                 )

                 current_download_state = await start_downloader(
+                    ctx,
                     client,
                     episode_data,
                     {

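The diff above threads the new context through `main()`/`run()`, but `yutto/utils/fetcher.py` itself is not part of this excerpt. Judging only from the call sites (`ctx.cookies`, `ctx.trust_env`, `ctx.proxy`, `ctx.set_fetch_semaphore(fetch_workers=8)`), the class plausibly has roughly this shape; the field defaults and exact signature are assumptions:

```python
import asyncio


class FetcherContext:
    """Per-run fetch state formerly kept as class attributes on Fetcher (sketch)."""

    def __init__(self) -> None:
        self.cookies = None          # assumed default; populated during validation
        self.trust_env: bool = True  # assumed default
        self.proxy: str | None = None
        self.fetch_semaphore: asyncio.Semaphore | None = None

    def set_fetch_semaphore(self, fetch_workers: int) -> None:
        # Called from run(), i.e. while the event loop is already running,
        # so the semaphore is created on (and bound to) the correct loop.
        self.fetch_semaphore = asyncio.Semaphore(fetch_workers)
```
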
@@ -18,7 +18,7 @@ from yutto._typing import (
 from yutto.bilibili_typing.codec import audio_codec_map, video_codec_map
 from yutto.exceptions import NoAccessPermissionError, UnSupportedTypeError
 from yutto.utils.console.logger import Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.funcutils import data_has_chained_keys
 from yutto.utils.metadata import MetaData
 from yutto.utils.time import get_time_stamp_by_now
@@ -43,23 +43,23 @@ class BangumiList(TypedDict):
     pages: list[BangumiListItem]


-async def get_season_id_by_media_id(client: AsyncClient, media_id: MediaId) -> SeasonId:
+async def get_season_id_by_media_id(ctx: FetcherContext, client: AsyncClient, media_id: MediaId) -> SeasonId:
     media_api = f"https://api.bilibili.com/pgc/review/user?media_id={media_id}"
-    res_json = await Fetcher.fetch_json(client, media_api)
+    res_json = await Fetcher.fetch_json(ctx, client, media_api)
     assert res_json is not None
     return SeasonId(str(res_json["result"]["media"]["season_id"]))


-async def get_season_id_by_episode_id(client: AsyncClient, episode_id: EpisodeId) -> SeasonId:
+async def get_season_id_by_episode_id(ctx: FetcherContext, client: AsyncClient, episode_id: EpisodeId) -> SeasonId:
     episode_api = f"https://api.bilibili.com/pgc/view/web/season?ep_id={episode_id}"
-    res_json = await Fetcher.fetch_json(client, episode_api)
+    res_json = await Fetcher.fetch_json(ctx, client, episode_api)
     assert res_json is not None
     return SeasonId(str(res_json["result"]["season_id"]))


-async def get_bangumi_list(client: AsyncClient, season_id: SeasonId) -> BangumiList:
+async def get_bangumi_list(ctx: FetcherContext, client: AsyncClient, season_id: SeasonId) -> BangumiList:
     list_api = "http://api.bilibili.com/pgc/view/web/season?season_id={season_id}"
-    resp_json = await Fetcher.fetch_json(client, list_api.format(season_id=season_id))
+    resp_json = await Fetcher.fetch_json(ctx, client, list_api.format(season_id=season_id))
     if resp_json is None:
         raise NoAccessPermissionError(f"无法解析该番剧列表(season_id: {season_id})")
     if resp_json.get("result") is None:
@@ -90,11 +90,11 @@ async def get_bangumi_list(client: AsyncClient, season_id: SeasonId) -> BangumiL


 async def get_bangumi_playurl(
-    client: AsyncClient, avid: AvId, cid: CId
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
 ) -> tuple[list[VideoUrlMeta], list[AudioUrlMeta]]:
     play_api = "https://api.bilibili.com/pgc/player/web/v2/playurl?avid={aid}&bvid={bvid}&cid={cid}&qn=127&fnver=0&fnval=4048&fourk=1&support_multi_audio=true&from_client=BROWSER"

-    resp_json = await Fetcher.fetch_json(client, play_api.format(**avid.to_dict(), cid=cid))
+    resp_json = await Fetcher.fetch_json(ctx, client, play_api.format(**avid.to_dict(), cid=cid))
     if resp_json is None:
         raise NoAccessPermissionError(f"无法获取该视频链接({format_ids(avid, cid)})")
     if resp_json.get("result") is None or resp_json["result"].get("video_info") is None:
@@ -133,10 +133,12 @@ async def get_bangumi_playurl(
     )


-async def get_bangumi_subtitles(client: AsyncClient, avid: AvId, cid: CId) -> list[MultiLangSubtitle]:
+async def get_bangumi_subtitles(
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
+) -> list[MultiLangSubtitle]:
     subtitile_api = "https://api.bilibili.com/x/player/v2?cid={cid}&aid={aid}&bvid={bvid}"
     subtitile_url = subtitile_api.format(**avid.to_dict(), cid=cid)
-    subtitles_json_info = await Fetcher.fetch_json(client, subtitile_url)
+    subtitles_json_info = await Fetcher.fetch_json(ctx, client, subtitile_url)
     if subtitles_json_info is None:
         return []
     if not data_has_chained_keys(subtitles_json_info, ["data", "subtitle", "subtitles"]):
@@ -145,7 +147,7 @@ async def get_bangumi_subtitles(client: AsyncClient, avid: AvId, cid: CId) -> li
     subtitles_info = subtitles_json_info["data"]["subtitle"]
     results: list[MultiLangSubtitle] = []
     for sub_info in subtitles_info["subtitles"]:
-        subtitle_text = await Fetcher.fetch_json(client, "https:" + sub_info["subtitle_url"])
+        subtitle_text = await Fetcher.fetch_json(ctx, client, "https:" + sub_info["subtitle_url"])
         if subtitle_text is None:
             continue
         results.append(

@@ -16,7 +16,7 @@ from yutto._typing import (
 from yutto.bilibili_typing.codec import audio_codec_map, video_codec_map
 from yutto.exceptions import NoAccessPermissionError, UnSupportedTypeError
 from yutto.utils.console.logger import Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.funcutils import data_has_chained_keys
 from yutto.utils.metadata import MetaData
 from yutto.utils.time import get_time_stamp_by_now
@@ -39,16 +39,16 @@ class CheeseList(TypedDict):
     pages: list[CheeseListItem]


-async def get_season_id_by_episode_id(client: AsyncClient, episode_id: EpisodeId) -> SeasonId:
+async def get_season_id_by_episode_id(ctx: FetcherContext, client: AsyncClient, episode_id: EpisodeId) -> SeasonId:
     home_url = f"https://api.bilibili.com/pugv/view/web/season?ep_id={episode_id}"
-    res_json = await Fetcher.fetch_json(client, home_url)
+    res_json = await Fetcher.fetch_json(ctx, client, home_url)
     assert res_json is not None
     return SeasonId(str(res_json["data"]["season_id"]))


-async def get_cheese_list(client: AsyncClient, season_id: SeasonId) -> CheeseList:
+async def get_cheese_list(ctx: FetcherContext, client: AsyncClient, season_id: SeasonId) -> CheeseList:
     list_api = "https://api.bilibili.com/pugv/view/web/season?season_id={season_id}"
-    resp_json = await Fetcher.fetch_json(client, list_api.format(season_id=season_id))
+    resp_json = await Fetcher.fetch_json(ctx, client, list_api.format(season_id=season_id))
     if resp_json is None:
         raise NoAccessPermissionError(f"无法解析该课程列表(season_id: {season_id})")
     if resp_json.get("data") is None:
@@ -72,13 +72,13 @@ async def get_cheese_list(client: AsyncClient, season_id: SeasonId) -> CheeseLis


 async def get_cheese_playurl(
-    client: AsyncClient, avid: AvId, episode_id: EpisodeId, cid: CId
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, episode_id: EpisodeId, cid: CId
 ) -> tuple[list[VideoUrlMeta], list[AudioUrlMeta]]:
     play_api = (
         "https://api.bilibili.com/pugv/player/web/playurl?avid={aid}&cid={"
         "cid}&qn=80&fnver=0&fnval=16&fourk=1&ep_id={episode_id}&from_client=BROWSER&drm_tech_type=2"
     )
-    resp_json = await Fetcher.fetch_json(client, play_api.format(**avid.to_dict(), cid=cid, episode_id=episode_id))
+    resp_json = await Fetcher.fetch_json(ctx, client, play_api.format(**avid.to_dict(), cid=cid, episode_id=episode_id))
     if resp_json is None:
         raise NoAccessPermissionError(f"无法获取该视频链接({format_ids(avid, cid)})")
     if resp_json.get("data") is None:
@@ -115,10 +115,12 @@ async def get_cheese_playurl(
     )


-async def get_cheese_subtitles(client: AsyncClient, avid: AvId, cid: CId) -> list[MultiLangSubtitle]:
+async def get_cheese_subtitles(
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
+) -> list[MultiLangSubtitle]:
     subtitile_api = "https://api.bilibili.com/x/player/v2?cid={cid}&aid={aid}&bvid={bvid}"
     subtitile_url = subtitile_api.format(**avid.to_dict(), cid=cid)
-    subtitles_json_info = await Fetcher.fetch_json(client, subtitile_url)
+    subtitles_json_info = await Fetcher.fetch_json(ctx, client, subtitile_url)
     if subtitles_json_info is None:
         return []
     if not data_has_chained_keys(subtitles_json_info, ["data", "subtitle", "subtitles"]):
@@ -127,7 +129,7 @@ async def get_cheese_subtitles(client: AsyncClient, avid: AvId, cid: CId) -> lis
     subtitles_info = subtitles_json_info["data"]["subtitle"]
     results: list[MultiLangSubtitle] = []
     for sub_info in subtitles_info["subtitles"]:
-        subtitle_text = await Fetcher.fetch_json(client, "https:" + sub_info["subtitle_url"])
+        subtitle_text = await Fetcher.fetch_json(ctx, client, "https:" + sub_info["subtitle_url"])
         if subtitle_text is None:
             continue
         results.append(

@@ -5,7 +5,7 @@ import math
 from typing import TYPE_CHECKING, TypedDict

 from yutto._typing import AvId, BvId, MId, SeriesId
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext

 if TYPE_CHECKING:
     from httpx import AsyncClient
@@ -22,10 +22,12 @@ class CollectionDetails(TypedDict):
     pages: list[CollectionDetailsItem]


-async def get_collection_details(client: AsyncClient, series_id: SeriesId, mid: MId) -> CollectionDetails:
+async def get_collection_details(
+    ctx: FetcherContext, client: AsyncClient, series_id: SeriesId, mid: MId
+) -> CollectionDetails:
     title, avids = await asyncio.gather(
-        _get_collection_title(client, series_id),
-        _get_collection_avids(client, series_id, mid),
+        _get_collection_title(ctx, client, series_id),
+        _get_collection_avids(ctx, client, series_id, mid),
     )
     return CollectionDetails(
         title=title,
@@ -40,7 +42,7 @@ async def get_collection_details(client: AsyncClient, series_id: SeriesId, mid:
     )


-async def _get_collection_avids(client: AsyncClient, series_id: SeriesId, mid: MId) -> list[AvId]:
+async def _get_collection_avids(ctx: FetcherContext, client: AsyncClient, series_id: SeriesId, mid: MId) -> list[AvId]:
     api = "https://api.bilibili.com/x/polymer/space/seasons_archives_list?mid={mid}&season_id={series_id}&sort_reverse=false&page_num={pn}&page_size={ps}"
     ps = 30
     pn = 1
@@ -49,7 +51,7 @@ async def _get_collection_avids(client: AsyncClient, series_id: SeriesId, mid: M

     while pn <= total:
         space_videos_url = api.format(series_id=series_id, ps=ps, pn=pn, mid=mid)
-        json_data = await Fetcher.fetch_json(client, space_videos_url)
+        json_data = await Fetcher.fetch_json(ctx, client, space_videos_url)
         assert json_data is not None
         total = math.ceil(json_data["data"]["page"]["total"] / ps)
         pn += 1
@@ -57,8 +59,8 @@ async def _get_collection_avids(client: AsyncClient, series_id: SeriesId, mid: M
     return all_avid


-async def _get_collection_title(client: AsyncClient, series_id: SeriesId) -> str:
+async def _get_collection_title(ctx: FetcherContext, client: AsyncClient, series_id: SeriesId) -> str:
     api = "https://api.bilibili.com/x/v1/medialist/info?type=8&biz_id={series_id}"
-    json_data = await Fetcher.fetch_json(client, api.format(series_id=series_id))
+    json_data = await Fetcher.fetch_json(ctx, client, api.format(series_id=series_id))
     assert json_data is not None
     return json_data["data"]["title"]

@@ -6,7 +6,7 @@ from typing import TYPE_CHECKING
 from biliass import get_danmaku_meta_size

 from yutto.api.user_info import get_user_info
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext

 if TYPE_CHECKING:
     import httpx
@@ -15,41 +15,44 @@ if TYPE_CHECKING:
     from yutto.utils.danmaku import DanmakuData, DanmakuSaveType


-async def get_xml_danmaku(client: httpx.AsyncClient, cid: CId) -> str:
+async def get_xml_danmaku(ctx: FetcherContext, client: httpx.AsyncClient, cid: CId) -> str:
     danmaku_api = "http://comment.bilibili.com/{cid}.xml"
-    results = await Fetcher.fetch_text(client, danmaku_api.format(cid=cid), encoding="utf-8")
+    results = await Fetcher.fetch_text(ctx, client, danmaku_api.format(cid=cid), encoding="utf-8")
     assert results is not None
     return results


-async def get_protobuf_danmaku_segment(client: httpx.AsyncClient, cid: CId, segment_id: int = 1) -> bytes:
+async def get_protobuf_danmaku_segment(
+    ctx: FetcherContext, client: httpx.AsyncClient, cid: CId, segment_id: int = 1
+) -> bytes:
     danmaku_api = "http://api.bilibili.com/x/v2/dm/web/seg.so?type=1&oid={cid}&segment_index={segment_id}"
-    results = await Fetcher.fetch_bin(client, danmaku_api.format(cid=cid, segment_id=segment_id))
+    results = await Fetcher.fetch_bin(ctx, client, danmaku_api.format(cid=cid, segment_id=segment_id))
     assert results is not None
     return results


-async def get_protobuf_danmaku(client: httpx.AsyncClient, avid: AvId, cid: CId) -> list[bytes]:
+async def get_protobuf_danmaku(ctx: FetcherContext, client: httpx.AsyncClient, avid: AvId, cid: CId) -> list[bytes]:
     danmaku_meta_api = "https://api.bilibili.com/x/v2/dm/web/view?type=1&oid={cid}&pid={aid}"
     aid = avid.as_aid()
-    meta_results = await Fetcher.fetch_bin(client, danmaku_meta_api.format(cid=cid, aid=aid.value))
+    meta_results = await Fetcher.fetch_bin(ctx, client, danmaku_meta_api.format(cid=cid, aid=aid.value))
     assert meta_results is not None
     size = get_danmaku_meta_size(meta_results)

     results = await asyncio.gather(
-        *[get_protobuf_danmaku_segment(client, cid, segment_id) for segment_id in range(1, size + 1)]
+        *[get_protobuf_danmaku_segment(ctx, client, cid, segment_id) for segment_id in range(1, size + 1)]
     )
     return results


 async def get_danmaku(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     cid: CId,
     avid: AvId,
     save_type: DanmakuSaveType,
 ) -> DanmakuData:
     # 在已经登录的情况下,使用 protobuf,因为未登录时 protobuf 弹幕会少非常多
-    source_type = "xml" if save_type == "xml" or not (await get_user_info(client))["is_login"] else "protobuf"
+    source_type = "xml" if save_type == "xml" or not (await get_user_info(ctx, client))["is_login"] else "protobuf"
     danmaku_data: DanmakuData = {
         "source_type": source_type,
         "save_type": save_type,
@@ -57,7 +60,7 @@ async def get_danmaku(
     }

     if source_type == "xml":
-        danmaku_data["data"].append(await get_xml_danmaku(client, cid))
+        danmaku_data["data"].append(await get_xml_danmaku(ctx, client, cid))
     else:
-        danmaku_data["data"].extend(await get_protobuf_danmaku(client, avid, cid))
+        danmaku_data["data"].extend(await get_protobuf_danmaku(ctx, client, avid, cid))
     return danmaku_data

@@ -7,14 +7,14 @@ from yutto._typing import AvId, BvId, FavouriteMetaData, FId, MId, SeriesId
 from yutto.api.user_info import encode_wbi, get_wbi_img
 from yutto.exceptions import NotLoginError
 from yutto.utils.console.logger import Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext

 if TYPE_CHECKING:
     from httpx import AsyncClient


 # 个人空间·全部
-async def get_user_space_all_videos_avids(client: AsyncClient, mid: MId) -> list[AvId]:
+async def get_user_space_all_videos_avids(ctx: FetcherContext, client: AsyncClient, mid: MId) -> list[AvId]:
     space_videos_api = "https://api.bilibili.com/x/space/wbi/arc/search"
     # ps 随机设置有时会出现错误,因此暂时固定在 30
     # ps: int = random.randint(3, 6) * 10
@@ -22,7 +22,7 @@ async def get_user_space_all_videos_avids(client: AsyncClient, mid: MId) -> list
     pn = 1
     total = 1
     all_avid: list[AvId] = []
-    wbi_img = await get_wbi_img(client)
+    wbi_img = await get_wbi_img(ctx, client)
     while pn <= total:
         params = {
             "mid": mid,
@@ -32,7 +32,7 @@ async def get_user_space_all_videos_avids(client: AsyncClient, mid: MId) -> list
             "order": "pubdate",
         }
         params = encode_wbi(params, wbi_img)
-        json_data = await Fetcher.fetch_json(client, space_videos_api, params=params)
+        json_data = await Fetcher.fetch_json(ctx, client, space_videos_api, params=params)
         assert json_data is not None
         total = math.ceil(json_data["data"]["page"]["count"] / ps)
         pn += 1
@@ -41,13 +41,13 @@ async def get_user_space_all_videos_avids(client: AsyncClient, mid: MId) -> list


 # 个人空间·用户名
-async def get_user_name(client: AsyncClient, mid: MId) -> str:
-    wbi_img = await get_wbi_img(client)
+async def get_user_name(ctx: FetcherContext, client: AsyncClient, mid: MId) -> str:
+    wbi_img = await get_wbi_img(ctx, client)
     params = {"mid": mid}
     params = encode_wbi(params, wbi_img)
     space_info_api = "https://api.bilibili.com/x/space/wbi/acc/info"
-    await Fetcher.touch_url(client, "https://www.bilibili.com")
-    user_info = await Fetcher.fetch_json(client, space_info_api, params=params)
+    await Fetcher.touch_url(ctx, client, "https://www.bilibili.com")
+    user_info = await Fetcher.fetch_json(ctx, client, space_info_api, params=params)
     assert user_info is not None
     if user_info["code"] == -404:
         Logger.warning(f"用户 {mid} 不存在,疑似注销或被封禁")
@@ -58,26 +58,26 @@ async def get_user_name(client: AsyncClient, mid: MId) -> str:


 # 个人空间·收藏夹·信息
-async def get_favourite_info(client: AsyncClient, fid: FId) -> FavouriteMetaData:
+async def get_favourite_info(ctx: FetcherContext, client: AsyncClient, fid: FId) -> FavouriteMetaData:
     api = "https://api.bilibili.com/x/v3/fav/folder/info?media_id={fid}"
-    json_data = await Fetcher.fetch_json(client, api.format(fid=fid))
+    json_data = await Fetcher.fetch_json(ctx, client, api.format(fid=fid))
     assert json_data is not None
     data = json_data["data"]
     return FavouriteMetaData(title=data["title"], fid=FId(str(data["id"])))


 # 个人空间·收藏夹·avid
-async def get_favourite_avids(client: AsyncClient, fid: FId) -> list[AvId]:
+async def get_favourite_avids(ctx: FetcherContext, client: AsyncClient, fid: FId) -> list[AvId]:
     api = "https://api.bilibili.com/x/v3/fav/resource/ids?media_id={fid}"
-    json_data = await Fetcher.fetch_json(client, api.format(fid=fid))
+    json_data = await Fetcher.fetch_json(ctx, client, api.format(fid=fid))
     assert json_data is not None
     return [BvId(video_info["bvid"]) for video_info in json_data["data"]]


 # 个人空间·收藏夹·全部
-async def get_all_favourites(client: AsyncClient, mid: MId) -> list[FavouriteMetaData]:
+async def get_all_favourites(ctx: FetcherContext, client: AsyncClient, mid: MId) -> list[FavouriteMetaData]:
     api = "https://api.bilibili.com/x/v3/fav/folder/created/list-all?up_mid={mid}"
-    json_data = await Fetcher.fetch_json(client, api.format(mid=mid))
+    json_data = await Fetcher.fetch_json(ctx, client, api.format(mid=mid))
     assert json_data is not None
     if not json_data["data"]:
         return []
@@ -85,7 +85,7 @@ async def get_all_favourites(client: AsyncClient, mid: MId) -> list[FavouriteMet


 # 个人空间·视频列表·avid
-async def get_medialist_avids(client: AsyncClient, series_id: SeriesId, mid: MId) -> list[AvId]:
+async def get_medialist_avids(ctx: FetcherContext, client: AsyncClient, series_id: SeriesId, mid: MId) -> list[AvId]:
     api = "https://api.bilibili.com/x/series/archives?mid={mid}&series_id={series_id}&only_normal=true&pn={pn}&ps={ps}"
     ps = 30
     pn = 1
@@ -94,7 +94,7 @@ async def get_medialist_avids(client: AsyncClient, series_id: SeriesId, mid: MId

     while pn <= total:
         url = api.format(series_id=series_id, mid=mid, ps=ps, pn=pn)
-        json_data = await Fetcher.fetch_json(client, url)
+        json_data = await Fetcher.fetch_json(ctx, client, url)
         assert json_data is not None
         total = math.ceil(json_data["data"]["page"]["total"] / ps)
         pn += 1
@@ -103,17 +103,17 @@ async def get_medialist_avids(client: AsyncClient, series_id: SeriesId, mid: MId


 # 个人空间·视频列表·标题
-async def get_medialist_title(client: AsyncClient, series_id: SeriesId) -> str:
+async def get_medialist_title(ctx: FetcherContext, client: AsyncClient, series_id: SeriesId) -> str:
     api = "https://api.bilibili.com/x/v1/medialist/info?type=5&biz_id={series_id}"
-    json_data = await Fetcher.fetch_json(client, api.format(series_id=series_id))
+    json_data = await Fetcher.fetch_json(ctx, client, api.format(series_id=series_id))
     assert json_data is not None
     return json_data["data"]["title"]


 # 个人空间·稍后再看
-async def get_watch_later_avids(client: AsyncClient) -> list[AvId]:
+async def get_watch_later_avids(ctx: FetcherContext, client: AsyncClient) -> list[AvId]:
     api = "https://api.bilibili.com/x/v2/history/toview/web"
-    json_data = await Fetcher.fetch_json(client, api)
+    json_data = await Fetcher.fetch_json(ctx, client, api)
     assert json_data is not None
     if json_data["code"] in [-101, -400]:
         raise NotLoginError("账号未登录,无法获取稍后再看列表哦~ Ծ‸Ծ")

@@ -21,7 +21,7 @@ from yutto.exceptions import (
     UnSupportedTypeError,
 )
 from yutto.utils.console.logger import Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.funcutils.data_access import data_has_chained_keys
 from yutto.utils.metadata import Actor, ChapterInfoData, MetaData
 from yutto.utils.time import get_time_stamp_by_now
@@ -67,10 +67,10 @@ class UgcVideoList(TypedDict):
     pages: list[UgcVideoListItem]


-async def get_ugc_video_tag(client: AsyncClient, avid: AvId) -> list[str]:
+async def get_ugc_video_tag(ctx: FetcherContext, client: AsyncClient, avid: AvId) -> list[str]:
     tags: list[str] = []
     tag_api = "http://api.bilibili.com/x/tag/archive/tags?aid={aid}&bvid={bvid}"
-    res_json = await Fetcher.fetch_json(client, tag_api.format(**avid.to_dict()))
+    res_json = await Fetcher.fetch_json(ctx, client, tag_api.format(**avid.to_dict()))
     if res_json is None or res_json["code"] != 0:
         raise NotFoundError(f"无法获取视频 {avid} 标签")
     for tag in res_json["data"]:
@@ -78,10 +78,10 @@ async def get_ugc_video_tag(client: AsyncClient, avid: AvId) -> list[str]:
     return tags


-async def get_ugc_video_info(client: AsyncClient, avid: AvId) -> _UgcVideoInfo:
+async def get_ugc_video_info(ctx: FetcherContext, client: AsyncClient, avid: AvId) -> _UgcVideoInfo:
     regex_ep = re.compile(r"https?://www\.bilibili\.com/bangumi/play/ep(?P<episode_id>\d+)")
     info_api = "http://api.bilibili.com/x/web-interface/view?aid={aid}&bvid={bvid}"
-    res_json = await Fetcher.fetch_json(client, info_api.format(**avid.to_dict()))
+    res_json = await Fetcher.fetch_json(ctx, client, info_api.format(**avid.to_dict()))
     if res_json is None:
         raise NotFoundError(f"无法获取该视频 {avid} 信息")
     res_json_data = res_json.get("data")
@@ -93,14 +93,14 @@ async def get_ugc_video_info(client: AsyncClient, avid: AvId) -> _UgcVideoInfo:
     if res_json_data.get("forward"):
         forward_avid = AId(str(res_json_data["forward"]))
         Logger.info(f"视频 {avid} 撞车了哦!正在跳转到原视频 {forward_avid}~")
-        return await get_ugc_video_info(client, forward_avid)
+        return await get_ugc_video_info(ctx, client, forward_avid)
     episode_id = EpisodeId("")
     if res_json_data.get("redirect_url") and (ep_match := regex_ep.match(res_json_data["redirect_url"])):
         episode_id = EpisodeId(ep_match.group("episode_id"))

     actors = _parse_actor_info(res_json_data)
     genres = _parse_genre_info(res_json_data)
-    tags: list[str] = await get_ugc_video_tag(client, avid)
+    tags: list[str] = await get_ugc_video_tag(ctx, client, avid)
     return {
         "avid": BvId(res_json_data["bvid"]),
         "aid": AId(str(res_json_data["aid"])),
@@ -125,8 +125,8 @@ async def get_ugc_video_info(client: AsyncClient, avid: AvId) -> _UgcVideoInfo:
     }


-async def get_ugc_video_list(client: AsyncClient, avid: AvId) -> UgcVideoList:
-    video_info = await get_ugc_video_info(client, avid)
+async def get_ugc_video_list(ctx: FetcherContext, client: AsyncClient, avid: AvId) -> UgcVideoList:
+    video_info = await get_ugc_video_info(ctx, client, avid)
     if avid not in [video_info["aid"], video_info["bvid"]]:
         avid = video_info["avid"]
     video_title = video_info["title"]
@@ -137,7 +137,7 @@ async def get_ugc_video_list(client: AsyncClient, avid: AvId) -> UgcVideoList:
         "pages": [],
     }
     list_api = "https://api.bilibili.com/x/player/pagelist?aid={aid}&bvid={bvid}&jsonp=jsonp"
-    res_json = await Fetcher.fetch_json(client, list_api.format(**avid.to_dict()))
+    res_json = await Fetcher.fetch_json(ctx, client, list_api.format(**avid.to_dict()))
     if res_json is None or res_json.get("data") is None:
         Logger.warning(f"啊叻?视频 {avid} 不见了诶")
         return result
@@ -164,12 +164,12 @@ async def get_ugc_video_list(client: AsyncClient, avid: AvId) -> UgcVideoList:


 async def get_ugc_video_playurl(
-    client: AsyncClient, avid: AvId, cid: CId
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
 ) -> tuple[list[VideoUrlMeta], list[AudioUrlMeta]]:
     # 4048 = 16(useDash) | 64(useHDR) | 128(use4K) | 256(useDolby) | 512(useXXX) | 1024(use8K) | 2048(useAV1)
     play_api = "https://api.bilibili.com/x/player/playurl?avid={aid}&bvid={bvid}&cid={cid}&qn=127&type=&otype=json&fnver=0&fnval=4048&fourk=1"

-    resp_json = await Fetcher.fetch_json(client, play_api.format(**avid.to_dict(), cid=cid))
+    resp_json = await Fetcher.fetch_json(ctx, client, play_api.format(**avid.to_dict(), cid=cid))
     if resp_json is None:
         raise NoAccessPermissionError(f"无法获取该视频链接({format_ids(avid, cid)})")
     if resp_json.get("data") is None:
@@ -237,16 +237,18 @@ async def get_ugc_video_playurl(
     return (videos, audios)


-async def get_ugc_video_subtitles(client: AsyncClient, avid: AvId, cid: CId) -> list[MultiLangSubtitle]:
+async def get_ugc_video_subtitles(
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
+) -> list[MultiLangSubtitle]:
     subtitile_api = "https://api.bilibili.com/x/player/wbi/v2?aid={aid}&bvid={bvid}&cid={cid}"
     subtitile_url = subtitile_api.format(**avid.to_dict(), cid=cid)
-    res_json = await Fetcher.fetch_json(client, subtitile_url)
+    res_json = await Fetcher.fetch_json(ctx, client, subtitile_url)
     assert res_json is not None, "无法获取该视频的字幕信息"
     if not data_has_chained_keys(res_json, ["data", "subtitle", "subtitles"]):
         return []
     results: list[MultiLangSubtitle] = []
     for sub_info in res_json["data"]["subtitle"]["subtitles"]:
-        subtitle_text = await Fetcher.fetch_json(client, "https:" + sub_info["subtitle_url"])
+        subtitle_text = await Fetcher.fetch_json(ctx, client, "https:" + sub_info["subtitle_url"])
         if subtitle_text is None:
             continue
         results.append(
@@ -258,10 +260,12 @@ async def get_ugc_video_subtitles(client: AsyncClient, avid: AvId, cid: CId) ->
     return results


-async def get_ugc_video_chapters(client: AsyncClient, avid: AvId, cid: CId) -> list[ChapterInfoData]:
+async def get_ugc_video_chapters(
+    ctx: FetcherContext, client: AsyncClient, avid: AvId, cid: CId
+) -> list[ChapterInfoData]:
     chapter_api = "https://api.bilibili.com/x/player/v2?avid={aid}&bvid={bvid}&cid={cid}"
     chapter_url = chapter_api.format(**avid.to_dict(), cid=cid)
-    chapter_json_info = await Fetcher.fetch_json(client, chapter_url)
+    chapter_json_info = await Fetcher.fetch_json(ctx, client, chapter_url)
     if chapter_json_info is None:
         return []
     if not data_has_chained_keys(chapter_json_info, ["data", "view_points"]):

@@ -11,7 +11,7 @@ from typing import TYPE_CHECKING, Any, TypedDict

 from yutto._typing import UserInfo
 from yutto.utils.asynclib import async_cache
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext

 if TYPE_CHECKING:
     from httpx import AsyncClient
@@ -28,9 +28,9 @@ dm_cover_img_str_cache: str = base64.b64encode("".join(random.choices(string.pri


 @async_cache(lambda _: "user_info")
-async def get_user_info(client: AsyncClient) -> UserInfo:
+async def get_user_info(ctx: FetcherContext, client: AsyncClient) -> UserInfo:
     info_api = "https://api.bilibili.com/x/web-interface/nav"
-    res_json = await Fetcher.fetch_json(client, info_api)
+    res_json = await Fetcher.fetch_json(ctx, client, info_api)
     assert res_json is not None
     res_json_data = res_json.get("data")
     return UserInfo(
@@ -39,12 +39,12 @@ async def get_user_info(client: AsyncClient) -> UserInfo:
     )


-async def get_wbi_img(client: AsyncClient) -> WbiImg:
+async def get_wbi_img(ctx: FetcherContext, client: AsyncClient) -> WbiImg:
     global wbi_img_cache
     if wbi_img_cache is not None:
         return wbi_img_cache
     url = "https://api.bilibili.com/x/web-interface/nav"
-    res_json = await Fetcher.fetch_json(client, url)
+    res_json = await Fetcher.fetch_json(ctx, client, url)
     assert res_json is not None
     wbi_img: WbiImg = {
         "img_key": _get_key_from_url(res_json["data"]["wbi_img"]["img_url"]),

@@ -10,6 +10,7 @@ if TYPE_CHECKING:

 from yutto._typing import EpisodeData
 from yutto.utils.asynclib import CoroutineWrapper
+from yutto.utils.fetcher import FetcherContext

 T = TypeVar("T")

@@ -26,32 +27,32 @@ class Extractor(metaclass=ABCMeta):

     @abstractmethod
     async def __call__(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         raise NotImplementedError


 class SingleExtractor(Extractor):
     async def __call__(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        return [await self.extract(client, args)]
+        return [await self.extract(ctx, client, args)]

     @abstractmethod
     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> CoroutineWrapper[EpisodeData | None] | None:
         raise NotImplementedError


 class BatchExtractor(Extractor):
     async def __call__(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        return await self.extract(client, args)
+        return await self.extract(ctx, client, args)

     @abstractmethod
     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         raise NotImplementedError

@@ -23,6 +23,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class BangumiExtractor(SingleExtractor):
     """番剧单话"""
@@ -49,10 +51,10 @@ class BangumiExtractor(SingleExtractor):
             return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> CoroutineWrapper[EpisodeData | None] | None:
-        season_id = await get_season_id_by_episode_id(client, self.episode_id)
-        bangumi_list = await get_bangumi_list(client, season_id)
+        season_id = await get_season_id_by_episode_id(ctx, client, self.episode_id)
+        bangumi_list = await get_bangumi_list(ctx, client, season_id)
         Logger.custom(bangumi_list["title"], Badge("番剧", fore="black", back="cyan"))
         try:
             for bangumi_item in bangumi_list["pages"]:
@@ -65,6 +67,7 @@ class BangumiExtractor(SingleExtractor):

         return CoroutineWrapper(
             extract_bangumi_data(
+                ctx,
                 client,
                 bangumi_list_item,
                 args,

@@ -20,6 +20,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class BangumiBatchExtractor(BatchExtractor):
     """番剧全集"""
@@ -60,22 +62,22 @@ class BangumiBatchExtractor(BatchExtractor):
         else:
             return False

-    async def _parse_ids(self, client: httpx.AsyncClient):
+    async def _parse_ids(self, ctx: FetcherContext, client: httpx.AsyncClient):
         if "episode_id" in self._match_result.groupdict().keys():
             episode_id = EpisodeId(self._match_result.group("episode_id"))
-            self.season_id = await get_season_id_by_episode_id(client, episode_id)
+            self.season_id = await get_season_id_by_episode_id(ctx, client, episode_id)
         elif "season_id" in self._match_result.groupdict().keys():
             self.season_id = SeasonId(self._match_result.group("season_id"))
         else:
             media_id = MediaId(self._match_result.group("media_id"))
-            self.season_id = await get_season_id_by_media_id(client, media_id)
+            self.season_id = await get_season_id_by_media_id(ctx, client, media_id)

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        await self._parse_ids(client)
+        await self._parse_ids(ctx, client)

-        bangumi_list = await get_bangumi_list(client, self.season_id)
+        bangumi_list = await get_bangumi_list(ctx, client, self.season_id)
         Logger.custom(bangumi_list["title"], Badge("番剧", fore="black", back="cyan"))
         # 如果没有 with_section 则不需要专区内容
         bangumi_list["pages"] = list(
@@ -87,6 +89,7 @@ class BangumiBatchExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_bangumi_data(
+                    ctx,
                     client,
                     bangumi_item,
                     args,

@@ -23,6 +23,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class CheeseExtractor(SingleExtractor):
     """单课时"""
@@ -49,10 +51,10 @@ class CheeseExtractor(SingleExtractor):
             return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> CoroutineWrapper[EpisodeData | None] | None:
-        season_id = await get_season_id_by_episode_id(client, self.episode_id)
-        cheese_list = await get_cheese_list(client, season_id)
+        season_id = await get_season_id_by_episode_id(ctx, client, self.episode_id)
+        cheese_list = await get_cheese_list(ctx, client, season_id)
         Logger.custom(cheese_list["title"], Badge("课程", fore="black", back="cyan"))
         try:
             for cheese_item in cheese_list["pages"]:
@@ -65,6 +67,7 @@ class CheeseExtractor(SingleExtractor):

         return CoroutineWrapper(
             extract_cheese_data(
+                ctx,
                 client,
                 self.episode_id,
                 cheese_list_item,

@@ -16,6 +16,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class CheeseBatchExtractor(BatchExtractor):
     """课程全集"""
@@ -48,19 +50,19 @@ class CheeseBatchExtractor(BatchExtractor):
         else:
             return False

-    async def _parse_ids(self, client: httpx.AsyncClient):
+    async def _parse_ids(self, ctx: FetcherContext, client: httpx.AsyncClient):
         if "episode_id" in self._match_result.groupdict().keys():
             episode_id = EpisodeId(self._match_result.group("episode_id"))
-            self.season_id = await get_season_id_by_episode_id(client, episode_id)
+            self.season_id = await get_season_id_by_episode_id(ctx, client, episode_id)
         else:
             self.season_id = SeasonId(self._match_result.group("season_id"))

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        await self._parse_ids(client)
+        await self._parse_ids(ctx, client)

-        cheese_list = await get_cheese_list(client, self.season_id)
+        cheese_list = await get_cheese_list(ctx, client, self.season_id)
         Logger.custom(cheese_list["title"], Badge("课程", fore="black", back="cyan"))
         # 选集过滤
         episodes = parse_episodes_selection(args.episodes, len(cheese_list["pages"]))
@@ -68,6 +70,7 @@ class CheeseBatchExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_cheese_data(
+                    ctx,
                     client,
                     cheese_item["episode_id"],
                     cheese_item,

@@ -14,7 +14,7 @@ from yutto.extractor.common import extract_ugc_video_data
 from yutto.processor.selector import parse_episodes_selection
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:
@@ -52,11 +52,11 @@ class CollectionExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         username, collection_details = await asyncio.gather(
-            get_user_name(client, self.mid),
-            get_collection_details(client, self.series_id, self.mid),
+            get_user_name(ctx, client, self.mid),
+            get_collection_details(ctx, client, self.series_id, self.mid),
         )
         collection_title = collection_details["title"]
         Logger.custom(collection_title, Badge("视频合集", fore="black", back="cyan"))
@@ -70,11 +70,11 @@ class CollectionExtractor(BatchExtractor):
         for item in collection_details["pages"]:
             try:
                 avid = item["avid"]
-                ugc_video_list = await get_ugc_video_list(client, avid)
+                ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                 if not Filter.verify_timer(ugc_video_list["pubdate"]):
                     Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                     continue
-                await Fetcher.touch_url(client, avid.to_url())
+                await Fetcher.touch_url(ctx, client, avid.to_url())
                 if len(ugc_video_list["pages"]) != 1:
                     Logger.error(f"视频合集 {collection_title} 中的视频 {item['avid']} 包含多个视频!")
                 for ugc_video_item in ugc_video_list["pages"]:
@@ -92,6 +92,7 @@ class CollectionExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -29,7 +29,7 @@ from yutto.processor.path_resolver import (
 )
 from yutto.utils.console.logger import Logger
 from yutto.utils.danmaku import EmptyDanmakuData
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.metadata import attach_chapter_info

 if TYPE_CHECKING:
@@ -40,6 +40,7 @@ if TYPE_CHECKING:


 async def extract_bangumi_data(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     bangumi_info: BangumiListItem,
     args: argparse.Namespace,
@@ -54,14 +55,16 @@ async def extract_bangumi_data(
         if bangumi_info["is_preview"]:
             Logger.warning(f"视频({format_ids(avid, cid)})是预览视频(疑似未登录或非大会员用户)")
         videos, audios = (
-            await get_bangumi_playurl(client, avid, cid) if args.require_video or args.require_audio else ([], [])
+            await get_bangumi_playurl(ctx, client, avid, cid) if args.require_video or args.require_audio else ([], [])
         )
-        subtitles = await get_bangumi_subtitles(client, avid, cid) if args.require_subtitle else []
+        subtitles = await get_bangumi_subtitles(ctx, client, avid, cid) if args.require_subtitle else []
         danmaku = (
-            await get_danmaku(client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
+            await get_danmaku(ctx, client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
         )
         metadata = bangumi_info["metadata"] if args.require_metadata else None
-        cover_data = await Fetcher.fetch_bin(client, bangumi_info["metadata"]["thumb"]) if args.require_cover else None
+        cover_data = (
+            await Fetcher.fetch_bin(ctx, client, bangumi_info["metadata"]["thumb"]) if args.require_cover else None
+        )
         subpath_variables_base: PathTemplateVariableDict = {
             "id": id,
             "name": name,
@@ -94,6 +97,7 @@ async def extract_bangumi_data(


 async def extract_cheese_data(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     episode_id: EpisodeId,
     cheese_info: CheeseListItem,
@@ -107,16 +111,18 @@ async def extract_cheese_data(
         name = cheese_info["name"]
         id = cheese_info["id"]
         videos, audios = (
-            await get_cheese_playurl(client, avid, episode_id, cid)
+            await get_cheese_playurl(ctx, client, avid, episode_id, cid)
             if args.require_video or args.require_audio
             else ([], [])
         )
-        subtitles = await get_cheese_subtitles(client, avid, cid) if args.require_subtitle else []
+        subtitles = await get_cheese_subtitles(ctx, client, avid, cid) if args.require_subtitle else []
         danmaku = (
-            await get_danmaku(client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
+            await get_danmaku(ctx, client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
         )
         metadata = cheese_info["metadata"] if args.require_metadata else None
-        cover_data = await Fetcher.fetch_bin(client, cheese_info["metadata"]["thumb"]) if args.require_cover else None
+        cover_data = (
+            await Fetcher.fetch_bin(ctx, client, cheese_info["metadata"]["thumb"]) if args.require_cover else None
+        )
         subpath_variables_base: PathTemplateVariableDict = {
             "id": id,
             "name": name,
@@ -149,6 +155,7 @@ async def extract_cheese_data(


 async def extract_ugc_video_data(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     avid: AvId,
     ugc_video_info: UgcVideoListItem,
@@ -161,18 +168,20 @@ async def extract_ugc_video_data(
         name = ugc_video_info["name"]
         id = ugc_video_info["id"]
         videos, audios = (
-            await get_ugc_video_playurl(client, avid, cid) if args.require_video or args.require_audio else ([], [])
+            await get_ugc_video_playurl(ctx, client, avid, cid)
+            if args.require_video or args.require_audio
+            else ([], [])
         )
-        subtitles = await get_ugc_video_subtitles(client, avid, cid) if args.require_subtitle else []
-        chapter_info_data = await get_ugc_video_chapters(client, avid, cid) if args.require_chapter_info else []
+        subtitles = await get_ugc_video_subtitles(ctx, client, avid, cid) if args.require_subtitle else []
+        chapter_info_data = await get_ugc_video_chapters(ctx, client, avid, cid) if args.require_chapter_info else []
         danmaku = (
-            await get_danmaku(client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
+            await get_danmaku(ctx, client, cid, avid, args.danmaku_format) if args.require_danmaku else EmptyDanmakuData
         )
         metadata = ugc_video_info["metadata"] if args.require_metadata else None
         if metadata and chapter_info_data:
             attach_chapter_info(metadata, chapter_info_data)
         cover_data = (
-            await Fetcher.fetch_bin(client, ugc_video_info["metadata"]["thumb"]) if args.require_cover else None
+            await Fetcher.fetch_bin(ctx, client, ugc_video_info["metadata"]["thumb"]) if args.require_cover else None
         )
         owner_uid: str = (
             ugc_video_info["metadata"]["actor"][0]["profile"].split("/")[-1]

@@ -12,7 +12,7 @@ from yutto.extractor._abc import BatchExtractor
 from yutto.extractor.common import extract_ugc_video_data
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:
@@ -38,25 +38,25 @@ class FavouritesExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         username, favourite_info = await asyncio.gather(
-            get_user_name(client, self.mid),
-            get_favourite_info(client, self.fid),
+            get_user_name(ctx, client, self.mid),
+            get_favourite_info(ctx, client, self.fid),
         )
         Logger.custom(favourite_info["title"], Badge("收藏夹", fore="black", back="cyan"))

         ugc_video_info_list: list[tuple[UgcVideoListItem, str, int]] = []

-        for avid in await get_favourite_avids(client, self.fid):
+        for avid in await get_favourite_avids(ctx, client, self.fid):
             try:
-                ugc_video_list = await get_ugc_video_list(client, avid)
+                ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                 # 在使用 SESSDATA 时,如果不去事先 touch 一下视频链接的话,是无法获取 episode_data 的
                 # 至于为什么前面那俩(投稿视频页和番剧页)不需要额外 touch,因为在 get_redirected_url 阶段连接过了呀
                 if not Filter.verify_timer(ugc_video_list["pubdate"]):
                     Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                     continue
-                await Fetcher.touch_url(client, avid.to_url())
+                await Fetcher.touch_url(ctx, client, avid.to_url())
                 for ugc_video_item in ugc_video_list["pages"]:
                     ugc_video_info_list.append(
                         (
@@ -72,6 +72,7 @@ class FavouritesExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -12,7 +12,7 @@ from yutto.extractor._abc import BatchExtractor
 from yutto.extractor.common import extract_ugc_video_data
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:
@@ -43,21 +43,21 @@ class SeriesExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         username, series_title = await asyncio.gather(
-            get_user_name(client, self.mid), get_medialist_title(client, self.series_id)
+            get_user_name(ctx, client, self.mid), get_medialist_title(ctx, client, self.series_id)
         )
         Logger.custom(series_title, Badge("视频列表", fore="black", back="cyan"))

         ugc_video_info_list: list[tuple[UgcVideoListItem, str, int]] = []
-        for avid in await get_medialist_avids(client, self.series_id, self.mid):
+        for avid in await get_medialist_avids(ctx, client, self.series_id, self.mid):
             try:
-                ugc_video_list = await get_ugc_video_list(client, avid)
+                ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                 if not Filter.verify_timer(ugc_video_list["pubdate"]):
                     Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                     continue
-                await Fetcher.touch_url(client, avid.to_url())
+                await Fetcher.touch_url(ctx, client, avid.to_url())
                 for ugc_video_item in ugc_video_list["pages"]:
                     ugc_video_info_list.append(
                         (
@@ -73,6 +73,7 @@ class SeriesExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -21,6 +21,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class UgcVideoExtractor(SingleExtractor):
     """投稿视频单视频"""
@@ -71,14 +73,15 @@ class UgcVideoExtractor(SingleExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> CoroutineWrapper[EpisodeData | None] | None:
         try:
-            ugc_video_list = await get_ugc_video_list(client, self.avid)
+            ugc_video_list = await get_ugc_video_list(ctx, client, self.avid)
             self.avid = ugc_video_list["avid"]  # 当视频撞车时,使用新的 avid 替代原有 avid,见 #96
             Logger.custom(ugc_video_list["title"], Badge("投稿视频", fore="black", back="cyan"))
             return CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     self.avid,
                     ugc_video_list["pages"][self.page - 1],

@@ -17,6 +17,8 @@ if TYPE_CHECKING:

     import httpx

+    from yutto.utils.fetcher import FetcherContext
+

 class UgcVideoBatchExtractor(BatchExtractor):
     """投稿视频批下载"""
@@ -63,10 +65,10 @@ class UgcVideoBatchExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         try:
-            ugc_video_list = await get_ugc_video_list(client, self.avid)
+            ugc_video_list = await get_ugc_video_list(ctx, client, self.avid)
             Logger.custom(ugc_video_list["title"], Badge("投稿视频", fore="black", back="cyan"))
         except NotFoundError as e:
             # 由于获取 info 时候也会因为视频不存在而报错,因此这里需要捕捉下
@@ -80,6 +82,7 @@ class UgcVideoBatchExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -11,7 +11,7 @@ from yutto.extractor._abc import BatchExtractor
 from yutto.extractor.common import extract_ugc_video_data
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:
@@ -35,23 +35,23 @@ class UserAllFavouritesExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        username = await get_user_name(client, self.mid)
+        username = await get_user_name(ctx, client, self.mid)
         Logger.custom(username, Badge("用户收藏夹", fore="black", back="cyan"))

         ugc_video_info_list: list[tuple[UgcVideoListItem, str, int, str]] = []

-        for fav in await get_all_favourites(client, self.mid):
+        for fav in await get_all_favourites(ctx, client, self.mid):
             series_title = fav["title"]
             fid = fav["fid"]
-            for avid in await get_favourite_avids(client, fid):
+            for avid in await get_favourite_avids(ctx, client, fid):
                 try:
-                    ugc_video_list = await get_ugc_video_list(client, avid)
+                    ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                     if not Filter.verify_timer(ugc_video_list["pubdate"]):
                         Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                         continue
-                    await Fetcher.touch_url(client, avid.to_url())
+                    await Fetcher.touch_url(ctx, client, avid.to_url())
                     for ugc_video_item in ugc_video_list["pages"]:
                         ugc_video_info_list.append(
                             (
@@ -68,6 +68,7 @@ class UserAllFavouritesExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -11,7 +11,7 @@ from yutto.extractor._abc import BatchExtractor
 from yutto.extractor.common import extract_ugc_video_data
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:

@@ -35,19 +35,19 @@ class UserAllUgcVideosExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
-        username = await get_user_name(client, self.mid)
+        username = await get_user_name(ctx, client, self.mid)
         Logger.custom(username, Badge("UP 主投稿视频", fore="black", back="cyan"))

         ugc_video_info_list: list[tuple[UgcVideoListItem, str, int]] = []
-        for avid in await get_user_space_all_videos_avids(client, self.mid):
+        for avid in await get_user_space_all_videos_avids(ctx, client, self.mid):
             try:
-                ugc_video_list = await get_ugc_video_list(client, avid)
+                ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                 if not Filter.verify_timer(ugc_video_list["pubdate"]):
                     Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                     continue
-                await Fetcher.touch_url(client, avid.to_url())
+                await Fetcher.touch_url(ctx, client, avid.to_url())
                 for ugc_video_item in ugc_video_list["pages"]:
                     ugc_video_info_list.append(
                         (

@@ -63,6 +63,7 @@ class UserAllUgcVideosExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -10,7 +10,7 @@ from yutto.extractor._abc import BatchExtractor
 from yutto.extractor.common import extract_ugc_video_data
 from yutto.utils.asynclib import CoroutineWrapper
 from yutto.utils.console.logger import Badge, Logger
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.filter import Filter

 if TYPE_CHECKING:

@@ -33,25 +33,25 @@ class UserWatchLaterExtractor(BatchExtractor):
         return False

     async def extract(
-        self, client: httpx.AsyncClient, args: argparse.Namespace
+        self, ctx: FetcherContext, client: httpx.AsyncClient, args: argparse.Namespace
     ) -> list[CoroutineWrapper[EpisodeData | None] | None]:
         Logger.custom("当前用户", Badge("稍后再看", fore="black", back="cyan"))

         ugc_video_info_list: list[tuple[UgcVideoListItem, str, int, str]] = []

         try:
-            avid_list = await get_watch_later_avids(client)
+            avid_list = await get_watch_later_avids(ctx, client)
         except NotLoginError as e:
             Logger.error(e.message)
             return []

         for avid in avid_list:
             try:
-                ugc_video_list = await get_ugc_video_list(client, avid)
+                ugc_video_list = await get_ugc_video_list(ctx, client, avid)
                 if not Filter.verify_timer(ugc_video_list["pubdate"]):
                     Logger.debug(f"因为发布时间为 {ugc_video_list['pubdate']},跳过 {ugc_video_list['title']}")
                     continue
-                await Fetcher.touch_url(client, avid.to_url())
+                await Fetcher.touch_url(ctx, client, avid.to_url())
                 for ugc_video_item in ugc_video_list["pages"]:
                     ugc_video_info_list.append(
                         (

@@ -68,6 +68,7 @@ class UserWatchLaterExtractor(BatchExtractor):
         return [
             CoroutineWrapper(
                 extract_ugc_video_data(
+                    ctx,
                     client,
                     ugc_video_item["avid"],
                     ugc_video_item,

@@ -13,7 +13,7 @@ from yutto.utils.asynclib import CoroutineWrapper, first_successful_with_check
 from yutto.utils.console.colorful import colored_string
 from yutto.utils.console.logger import Badge, Logger
 from yutto.utils.danmaku import write_danmaku
-from yutto.utils.fetcher import Fetcher
+from yutto.utils.fetcher import Fetcher, FetcherContext
 from yutto.utils.ffmpeg import FFmpeg, FFmpegCommandBuilder
 from yutto.utils.file_buffer import AsyncFileBuffer
 from yutto.utils.funcutils import filter_none_value, xmerge

@@ -106,6 +106,7 @@ def create_mirrors_filter(banned_mirrors_pattern: str | None) -> Callable[[list[


 async def download_video_and_audio(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     video: VideoUrlMeta | None,
     video_path: Path,

@@ -119,15 +120,16 @@ async def download_video_and_audio(
     sizes: list[int | None] = [None, None]
     coroutines_list: list[list[CoroutineWrapper[None]]] = []
     mirrors_filter = create_mirrors_filter(options["banned_mirrors_pattern"])
-    Fetcher.set_semaphore(options["num_workers"])
+    ctx.set_download_semaphore(options["num_workers"])
     if video is not None:
         vbuf = await AsyncFileBuffer(video_path, overwrite=options["overwrite"])
         vsize = await first_successful_with_check(
-            [Fetcher.get_size(client, url) for url in [video["url"], *mirrors_filter(video["mirrors"])]]
+            [Fetcher.get_size(ctx, client, url) for url in [video["url"], *mirrors_filter(video["mirrors"])]]
         )
         video_coroutines = [
             CoroutineWrapper(
                 Fetcher.download_file_with_offset(
+                    ctx,
                     client,
                     video["url"],
                     mirrors_filter(video["mirrors"]),

@@ -144,11 +146,12 @@ async def download_video_and_audio(
     if audio is not None:
         abuf = await AsyncFileBuffer(audio_path, overwrite=options["overwrite"])
         asize = await first_successful_with_check(
-            [Fetcher.get_size(client, url) for url in [audio["url"], *mirrors_filter(audio["mirrors"])]]
+            [Fetcher.get_size(ctx, client, url) for url in [audio["url"], *mirrors_filter(audio["mirrors"])]]
         )
         audio_coroutines = [
             CoroutineWrapper(
                 Fetcher.download_file_with_offset(
+                    ctx,
                     client,
                     audio["url"],
                     mirrors_filter(audio["mirrors"]),

@@ -260,6 +263,7 @@ class DownloadState(Enum):


 async def start_downloader(
+    ctx: FetcherContext,
     client: httpx.AsyncClient,
     episode_data: EpisodeData,
     options: DownloaderOptions,

@@ -375,7 +379,7 @@ async def start_downloader(
         write_chapter_info(filename, chapter_info_data, chapter_info_path)

     # 下载视频 / 音频
-    await download_video_and_audio(client, video, video_path, audio, audio_path, options)
+    await download_video_and_audio(ctx, client, video, video_path, audio, audio_path, options)

     # 合并视频 / 音频
     merge_video_and_audio(

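The downloader side of the change: `download_video_and_audio` and `start_downloader` now receive the context explicitly, and the download semaphore is sized on the context right before blocks are scheduled, inside the already-running loop, instead of mutating a class attribute on `Fetcher`. A rough sketch of that flow, with a placeholder URL:

# Sketch: size the download semaphore on the context, then probe a file size.
import asyncio

from yutto.utils.fetcher import Fetcher, FetcherContext, create_client


async def demo() -> None:
    ctx = FetcherContext()
    ctx.set_download_semaphore(8)  # replaces the old Fetcher.set_semaphore(8)
    async with create_client() as client:
        size = await Fetcher.get_size(ctx, client, "https://example.com/video.m4s")  # placeholder URL
        print(size)


asyncio.run(demo())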
@@ -2,6 +2,7 @@ from __future__ import annotations

 import asyncio
 import random
+from contextlib import asynccontextmanager
 from typing import TYPE_CHECKING, Any, Callable, TypeVar
 from urllib.parse import quote, unquote

@@ -65,48 +66,81 @@ DEFAULT_HEADERS: dict[str, str] = {
 DEFAULT_COOKIES = httpx.Cookies()


-class Fetcher:
-    proxy: str | None = DEFAULT_PROXY
-    trust_env: bool = DEFAULT_TRUST_ENV
-    headers: dict[str, str] = DEFAULT_HEADERS
-    cookies: httpx.Cookies = DEFAULT_COOKIES
-    # 初始使用较小的信号量用于抓取信息,下载时会重新设置一个较大的值
-    semaphore: asyncio.Semaphore = asyncio.Semaphore(8)
+class FetcherContext:
+    proxy: str | None
+    trust_env: bool
+    headers: dict[str, str]
+    cookies: httpx.Cookies
+    fetch_semaphore: asyncio.Semaphore | None
+    download_semaphore: asyncio.Semaphore | None

-    @classmethod
-    def set_proxy(cls, proxy: str):
-        if proxy == "auto":
-            Fetcher.proxy = None
-            Fetcher.trust_env = True
-        elif proxy == "no":
-            Fetcher.proxy = None
-            Fetcher.trust_env = False
-        else:
-            Fetcher.proxy = proxy
-            Fetcher.trust_env = False
+    def __init__(
+        self,
+        *,
+        proxy: str | None = DEFAULT_PROXY,
+        trust_env: bool = DEFAULT_TRUST_ENV,
+        headers: dict[str, str] = DEFAULT_HEADERS,
+        cookies: httpx.Cookies = DEFAULT_COOKIES,
+    ):
+        self.proxy = proxy
+        self.trust_env = trust_env
+        self.headers = headers
+        self.cookies = cookies
+        self.fetch_semaphore = None
+        self.download_semaphore = None

-    @classmethod
-    def set_sessdata(cls, sessdata: str):
-        Fetcher.cookies = httpx.Cookies()
+    def set_fetch_semaphore(self, fetch_workers: int):
+        self.fetch_semaphore = asyncio.Semaphore(fetch_workers)
+
+    def set_download_semaphore(self, download_workers: int):
+        self.download_semaphore = asyncio.Semaphore(download_workers)
+
+    def set_sessdata(self, sessdata: str):
+        self.cookies = httpx.Cookies()
         # 先解码后编码是防止获取到的 SESSDATA 是已经解码后的(包含「,」)
         # 而番剧无法使用解码后的 SESSDATA
-        Fetcher.cookies.set("SESSDATA", quote(unquote(sessdata)))
+        self.cookies.set("SESSDATA", quote(unquote(sessdata)))

-    @classmethod
-    def set_semaphore(cls, num_workers: int):
-        Fetcher.semaphore = asyncio.Semaphore(num_workers)
+    def set_proxy(self, proxy: str):
+        if proxy == "auto":
+            self.proxy = None
+            self.trust_env = True
+        elif proxy == "no":
+            self.proxy = None
+            self.trust_env = False
+        else:
+            self.proxy = proxy
+            self.trust_env = False

-    @classmethod
+    @asynccontextmanager
+    async def fetch_guard(self):
+        if self.fetch_semaphore is None:
+            yield
+            return
+        async with self.fetch_semaphore:
+            yield
+
+    @asynccontextmanager
+    async def download_guard(self):
+        if self.download_semaphore is None:
+            yield
+            return
+        async with self.download_semaphore:
+            yield
+
+
+class Fetcher:
+    @staticmethod
     @MaxRetry(2)
     async def fetch_text(
-        cls,
+        ctx: FetcherContext,
         client: AsyncClient,
         url: str,
         *,
         params: Mapping[str, str] | None = None,
         encoding: str | None = None,  # TODO(SigureMo): Support this
     ) -> str | None:
-        async with cls.semaphore:
+        async with ctx.fetch_guard():
            Logger.debug(f"Fetch text: {url}")
            Logger.status.next_tick()
            resp = await client.get(url, params=params)
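The two guards above are the heart of the fix: when no semaphore has been configured they yield immediately, otherwise they acquire it, and since the semaphores are only ever created by the `set_*_semaphore` setters, they are born inside the running loop. A standalone sketch of the same no-op-or-acquire pattern, independent of yutto's code:

# Standalone illustration of the guard pattern used by FetcherContext:
# if no semaphore is set, the guard is a no-op; otherwise it acquires it.
import asyncio
from contextlib import asynccontextmanager


class Guarded:
    def __init__(self) -> None:
        self.semaphore: asyncio.Semaphore | None = None

    @asynccontextmanager
    async def guard(self):
        if self.semaphore is None:
            yield  # unlimited concurrency until a semaphore is configured
            return
        async with self.semaphore:
            yield


async def worker(g: Guarded, i: int) -> None:
    async with g.guard():
        await asyncio.sleep(0.01)
        print(f"task {i} done")


async def main() -> None:
    g = Guarded()
    g.semaphore = asyncio.Semaphore(2)  # created inside the running loop
    await asyncio.gather(*(worker(g, i) for i in range(5)))


asyncio.run(main())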
@@ -114,16 +148,16 @@ class Fetcher:
             return None
         return resp.text

-    @classmethod
+    @staticmethod
     @MaxRetry(2)
     async def fetch_bin(
-        cls,
+        ctx: FetcherContext,
         client: AsyncClient,
         url: str,
         *,
         params: Mapping[str, str] | None = None,
     ) -> bytes | None:
-        async with cls.semaphore:
+        async with ctx.fetch_guard():
             Logger.debug(f"Fetch bin: {url}")
             Logger.status.next_tick()
             resp = await client.get(url, params=params)

@@ -131,16 +165,16 @@ class Fetcher:
             return None
         return resp.read()

-    @classmethod
+    @staticmethod
     @MaxRetry(2)
     async def fetch_json(
-        cls,
+        ctx: FetcherContext,
         client: AsyncClient,
         url: str,
         *,
         params: Mapping[str, str] | None = None,
     ) -> Any | None:
-        async with cls.semaphore:
+        async with ctx.fetch_guard():
             Logger.debug(f"Fetch json: {url}")
             Logger.status.next_tick()
             resp = await client.get(url, params=params)

@@ -148,13 +182,13 @@ class Fetcher:
             return None
         return resp.json()

-    @classmethod
+    @staticmethod
     @MaxRetry(2)
-    async def get_redirected_url(cls, client: AsyncClient, url: str) -> str:
+    async def get_redirected_url(ctx: FetcherContext, client: AsyncClient, url: str) -> str:
         # 关于为什么要前往重定向 url,是因为 B 站的 url 类型实在是太多了,比如有 b23.tv 的短链接
         # 为 SEO 的搜索引擎链接、甚至有的 av、BV 链接实际上是番剧页面,一一列举实在太麻烦,而且最后一种
         # 情况需要在 av、BV 解析一部分信息后才能知道是否是番剧页面,处理起来非常麻烦(bilili 就是这么做的)
-        async with cls.semaphore:
+        async with ctx.fetch_guard():
             resp = await client.get(url)
             redirected_url = str(resp.url)
             if redirected_url == url:

@@ -164,10 +198,10 @@ class Fetcher:
             Logger.status.next_tick()
             return redirected_url

-    @classmethod
+    @staticmethod
     @MaxRetry(2)
-    async def get_size(cls, client: AsyncClient, url: str) -> int | None:
-        async with cls.semaphore:
+    async def get_size(ctx: FetcherContext, client: AsyncClient, url: str) -> int | None:
+        async with ctx.fetch_guard():
             headers = client.headers.copy()
             headers["Range"] = "bytes=0-1"
             resp = await client.get(

@@ -181,18 +215,18 @@ class Fetcher:
         else:
             return None

-    @classmethod
+    @staticmethod
     @MaxRetry(2)
     # 对于相同 session,同样的页面没必要重复 touch
     @async_cache(lambda args: f"client_id={id(args.arguments['client'])}, url={args.arguments['url']}")
-    async def touch_url(cls, client: AsyncClient, url: str):
-        async with cls.semaphore:
+    async def touch_url(ctx: FetcherContext, client: AsyncClient, url: str):
+        async with ctx.fetch_guard():
             Logger.debug(f"Touch url: {url}")
             await client.get(url)

-    @classmethod
+    @staticmethod
     async def download_file_with_offset(
-        cls,
+        ctx: FetcherContext,
         client: AsyncClient,
         url: str,
         mirrors: list[str],

@@ -200,7 +234,7 @@ class Fetcher:
         offset: int,
         size: int | None,
     ) -> None:
-        async with cls.semaphore:
+        async with ctx.download_guard():
             Logger.debug(f"Start download (offset {offset}, number of mirrors {len(mirrors)}) {url}")
             done = False
             headers = client.headers.copy()

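Why this matters (background, not shown in the diff itself): the old class-level `semaphore = asyncio.Semaphore(8)` was constructed at import time, with no event loop running. On Python versions before 3.10, asyncio primitives capture a loop at construction, so using that semaphore under the loop later created by `asyncio.run()` could fail with errors like "got Future attached to a different event loop". Creating the semaphores lazily, inside the running loop, sidesteps this. A minimal sketch of the hazard on an affected interpreter:

# Minimal sketch of the failure mode (observable on Python < 3.10):
# a module-level Semaphore binds to whatever loop exists at import time,
# not the loop that asyncio.run() creates later.
import asyncio

SEM = asyncio.Semaphore(1)  # created with no running loop -- the hazard


async def use_semaphore() -> None:
    async with SEM:  # on old interpreters this may raise RuntimeError
        await asyncio.sleep(0)


def main() -> None:
    # Each asyncio.run() spins up a fresh loop; SEM may still reference
    # the loop (or lack of one) from import time.
    asyncio.run(use_semaphore())
    asyncio.run(use_semaphore())


if __name__ == "__main__":
    main()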
@@ -15,7 +15,7 @@ from yutto.processor.selector import validate_episodes_selection
 from yutto.utils.asynclib import initial_async_policy
 from yutto.utils.console.colorful import set_no_color
 from yutto.utils.console.logger import Badge, Logger, set_logger_debug
-from yutto.utils.fetcher import Fetcher, create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.ffmpeg import FFmpeg
 from yutto.utils.filter import Filter

@@ -25,7 +25,7 @@ if TYPE_CHECKING:
     from yutto._typing import UserInfo


-def initial_validation(args: argparse.Namespace):
+def initial_validation(ctx: FetcherContext, args: argparse.Namespace):
     """初始化检查,仅执行一次"""

     if not args.no_progress:

@@ -48,14 +48,14 @@ def initial_validation(args: argparse.Namespace):
     if args.proxy not in ["no", "auto"] and not re.match(r"https?://", args.proxy):
         Logger.error(f"proxy 参数值({args.proxy})错误啦!")
         sys.exit(ErrorCode.WRONG_ARGUMENT_ERROR.value)
-    Fetcher.set_proxy(args.proxy)
+    ctx.set_proxy(args.proxy)

     # 大会员身份校验
     if not args.sessdata:
         Logger.info("未提供 SESSDATA,无法下载高清视频、字幕等资源哦~")
     else:
-        Fetcher.set_sessdata(args.sessdata)
-        if asyncio.run(validate_user_info({"vip_status": True, "is_login": True})):
+        ctx.set_sessdata(args.sessdata)
+        if asyncio.run(validate_user_info(ctx, {"vip_status": True, "is_login": True})):
             Logger.custom("成功以大会员身份登录~", badge=Badge("大会员", fore="white", back="magenta", style=["bold"]))
         else:
             Logger.warning("以非大会员身份登录,注意无法下载会员专享剧集喔~")

@@ -155,17 +155,17 @@ def validate_batch_arguments(args: argparse.Namespace):
     sys.exit(ErrorCode.WRONG_ARGUMENT_ERROR.value)


-async def validate_user_info(check_option: UserInfo) -> bool:
+async def validate_user_info(ctx: FetcherContext, check_option: UserInfo) -> bool:
     """UserInfo 结构和用户输入是匹配的,如果要校验则置 True 即可,估计不会有要校验为 False 的情况吧~~"""
     async with create_client(
-        cookies=Fetcher.cookies,
-        trust_env=Fetcher.trust_env,
-        proxy=Fetcher.proxy,
+        cookies=ctx.cookies,
+        trust_env=ctx.trust_env,
+        proxy=ctx.proxy,
     ) as client:
         if check_option["is_login"] or check_option["vip_status"]:
             # 需要校验
             # 这么写 if 是为了少一个 get_user_info 请求
-            user_info = await get_user_info(client)
+            user_info = await get_user_info(ctx, client)
             if check_option["is_login"] and not user_info["is_login"]:
                 return False
             if check_option["vip_status"] and not user_info["vip_status"]:

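`validate_user_info` now builds its short-lived client from the passed-in context rather than from `Fetcher` class attributes. A hedged sketch of the call shape used at the call sites in this diff (the SESSDATA value is a dummy placeholder):

# Sketch: validating login / vip status against a fresh FetcherContext.
import asyncio

from yutto.utils.fetcher import FetcherContext
from yutto.validator import validate_user_info


def demo() -> None:
    ctx = FetcherContext()
    ctx.set_sessdata("your-sessdata-here")  # dummy value, not a real cookie
    ok = asyncio.run(validate_user_info(ctx, {"is_login": True, "vip_status": False}))
    print("login ok:", ok)


demo()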
@@ -10,7 +10,7 @@ from yutto.api.bangumi import (
     get_season_id_by_episode_id,
     get_season_id_by_media_id,
 )
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -19,8 +19,9 @@ from yutto.utils.funcutils import as_sync
 async def test_get_season_id_by_media_id():
     media_id = MediaId("28223066")
     season_id_excepted = SeasonId("28770")
+    ctx = FetcherContext()
     async with create_client() as client:
-        season_id = await get_season_id_by_media_id(client, media_id)
+        season_id = await get_season_id_by_media_id(ctx, client, media_id)
         assert season_id == season_id_excepted


@@ -29,8 +30,9 @@ async def test_get_season_id_by_media_id():
 @pytest.mark.parametrize("episode_id", [EpisodeId("314477"), EpisodeId("300998")])
 async def test_get_season_id_by_episode_id(episode_id: EpisodeId):
     season_id_excepted = SeasonId("28770")
+    ctx = FetcherContext()
     async with create_client() as client:
-        season_id = await get_season_id_by_episode_id(client, episode_id)
+        season_id = await get_season_id_by_episode_id(ctx, client, episode_id)
         assert season_id == season_id_excepted


@@ -38,8 +40,9 @@ async def test_get_season_id_by_episode_id(episode_id: EpisodeId):
 @as_sync
 async def test_get_bangumi_title():
     season_id = SeasonId("28770")
+    ctx = FetcherContext()
     async with create_client() as client:
-        title = (await get_bangumi_list(client, season_id))["title"]
+        title = (await get_bangumi_list(ctx, client, season_id))["title"]
         assert title == "我的三体之章北海传"


@@ -47,8 +50,9 @@ async def test_get_bangumi_title():
 @as_sync
 async def test_get_bangumi_list():
     season_id = SeasonId("28770")
+    ctx = FetcherContext()
     async with create_client() as client:
-        bangumi_list = (await get_bangumi_list(client, season_id))["pages"]
+        bangumi_list = (await get_bangumi_list(ctx, client, season_id))["pages"]
         assert bangumi_list[0]["id"] == 1
         assert bangumi_list[0]["name"] == "第1话"
         assert bangumi_list[0]["cid"] == CId("144541892")

@@ -68,8 +72,9 @@ async def test_get_bangumi_list():
 async def test_get_bangumi_playurl():
     avid = BvId("BV1q7411v7Vd")
     cid = CId("144541892")
+    ctx = FetcherContext()
     async with create_client() as client:
-        playlist = await get_bangumi_playurl(client, avid, cid)
+        playlist = await get_bangumi_playurl(ctx, client, avid, cid)
         assert len(playlist[0]) > 0
         assert len(playlist[1]) > 0

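All the API tests in this commit follow the same two-line update: construct a default `FetcherContext` and pass it as the first argument to the API helper. A generic template for a new test of this shape (`some_api_call` is a stand-in stub, not a real yutto helper):

# Template for the updated test shape; some_api_call stands in for any
# yutto.api helper that now takes (ctx, client, ...) as leading arguments.
import pytest

from yutto.utils.fetcher import FetcherContext, create_client
from yutto.utils.funcutils import as_sync


async def some_api_call(ctx: FetcherContext, client) -> dict:  # stub for illustration only
    return {"ok": True}


@pytest.mark.api
@as_sync
async def test_some_api_call():
    ctx = FetcherContext()  # default context: no cookies, no semaphores set
    async with create_client() as client:
        result = await some_api_call(ctx, client)
        assert result["ok"]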
@@ -8,7 +8,7 @@ from yutto.api.cheese import (
     get_cheese_playurl,
     get_season_id_by_episode_id,
 )
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -17,8 +17,9 @@ from yutto.utils.funcutils import as_sync
 @pytest.mark.parametrize("episode_id", [EpisodeId("6945"), EpisodeId("6902")])
 async def test_get_season_id_by_episode_id(episode_id: EpisodeId):
     season_id_excepted = SeasonId("298")
+    ctx = FetcherContext()
     async with create_client() as client:
-        season_id = await get_season_id_by_episode_id(client, episode_id)
+        season_id = await get_season_id_by_episode_id(ctx, client, episode_id)
         assert season_id == season_id_excepted


@@ -26,8 +27,9 @@ async def test_get_season_id_by_episode_id(episode_id: EpisodeId):
 @as_sync
 async def test_get_cheese_title():
     season_id = SeasonId("298")
+    ctx = FetcherContext()
     async with create_client() as client:
-        cheese_list = await get_cheese_list(client, season_id)
+        cheese_list = await get_cheese_list(ctx, client, season_id)
         title = cheese_list["title"]
         assert title == "林超:给年轻人的跨学科通识课"

@@ -36,8 +38,9 @@ async def test_get_cheese_title():
 @as_sync
 async def test_get_cheese_list():
     season_id = SeasonId("298")
+    ctx = FetcherContext()
     async with create_client() as client:
-        cheese_list = (await get_cheese_list(client, season_id))["pages"]
+        cheese_list = (await get_cheese_list(ctx, client, season_id))["pages"]
         assert cheese_list[0]["id"] == 1
         assert cheese_list[0]["name"] == "【先导片】给年轻人的跨学科通识课"
         assert cheese_list[0]["cid"] == CId("344779477")

@@ -54,9 +57,10 @@ async def test_get_cheese_playurl():
     avid = AId("545852212")
     episode_id = EpisodeId("6902")
     cid = CId("344779477")
+    ctx = FetcherContext()
     async with create_client() as client:
         playlist: tuple[list[VideoUrlMeta], list[AudioUrlMeta]] = await get_cheese_playurl(
-            client, avid, episode_id, cid
+            ctx, client, avid, episode_id, cid
         )
         assert len(playlist[0]) > 0
         assert len(playlist[1]) > 0

@@ -4,7 +4,7 @@ import pytest

 from yutto._typing import BvId, MId, SeriesId
 from yutto.api.collection import get_collection_details
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -14,8 +14,9 @@ async def test_get_collection_details():
     # 测试页面:https://space.bilibili.com/6762654/channel/collectiondetail?sid=39879&ctype=0
     series_id = SeriesId("39879")
     mid = MId("6762654")
+    ctx = FetcherContext()
     async with create_client() as client:
-        collection_details = await get_collection_details(client, series_id=series_id, mid=mid)
+        collection_details = await get_collection_details(ctx, client, series_id=series_id, mid=mid)
         title = collection_details["title"]
         avids = [page["avid"] for page in collection_details["pages"]]
         assert title == "傻开心整活"

@@ -4,7 +4,7 @@ import pytest

 from yutto._typing import AvId, CId
 from yutto.api.danmaku import get_danmaku, get_protobuf_danmaku_segment, get_xml_danmaku
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -12,8 +12,9 @@ from yutto.utils.funcutils import as_sync
 @as_sync
 async def test_xml_danmaku():
     cid = CId("144541892")
+    ctx = FetcherContext()
     async with create_client() as client:
-        danmaku = await get_xml_danmaku(client, cid=cid)
+        danmaku = await get_xml_danmaku(ctx, client, cid=cid)
         assert len(danmaku) > 0


@@ -21,8 +22,9 @@ async def test_xml_danmaku():
 @as_sync
 async def test_protobuf_danmaku():
     cid = CId("144541892")
+    ctx = FetcherContext()
     async with create_client() as client:
-        danmaku = await get_protobuf_danmaku_segment(client, cid=cid, segment_id=1)
+        danmaku = await get_protobuf_danmaku_segment(ctx, client, cid=cid, segment_id=1)
         assert len(danmaku) > 0


@@ -31,8 +33,9 @@ async def test_protobuf_danmaku():
 async def test_danmaku():
     cid = CId("144541892")
     avid = AvId("BV1q7411v7Vd")
+    ctx = FetcherContext()
     async with create_client() as client:
-        danmaku = await get_danmaku(client, cid=cid, avid=avid, save_type="ass")
+        danmaku = await get_danmaku(ctx, client, cid=cid, avid=avid, save_type="ass")
         assert len(danmaku["data"]) > 0
         assert danmaku["source_type"] == "xml"
         assert danmaku["save_type"] == "ass"

@@ -12,7 +12,7 @@ from yutto.api.space import (
     get_user_name,
     get_user_space_all_videos_avids,
 )
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -21,8 +21,9 @@ from yutto.utils.funcutils import as_sync
 @as_sync
 async def test_get_user_space_all_videos_avids():
     mid = MId("100969474")
+    ctx = FetcherContext()
     async with create_client() as client:
-        all_avid = await get_user_space_all_videos_avids(client, mid=mid)
+        all_avid = await get_user_space_all_videos_avids(ctx, client, mid=mid)
         assert len(all_avid) > 0
         assert AId("371660125") in all_avid or BvId("BV1vZ4y1M7mQ") in all_avid

@@ -32,8 +33,9 @@ async def test_get_user_space_all_videos_avids():
 @as_sync
 async def test_get_user_name():
     mid = MId("100969474")
+    ctx = FetcherContext()
     async with create_client() as client:
-        username = await get_user_name(client, mid=mid)
+        username = await get_user_name(ctx, client, mid=mid)
         assert username == "时雨千陌"


@@ -41,8 +43,9 @@ async def test_get_user_name():
 @as_sync
 async def test_get_favourite_info():
     fid = FId("1306978874")
+    ctx = FetcherContext()
     async with create_client() as client:
-        fav_info = await get_favourite_info(client, fid=fid)
+        fav_info = await get_favourite_info(ctx, client, fid=fid)
         assert fav_info["fid"] == fid
         assert fav_info["title"] == "Test"

@@ -51,8 +54,9 @@ async def test_get_favourite_info():
 @as_sync
 async def test_get_favourite_avids():
     fid = FId("1306978874")
+    ctx = FetcherContext()
     async with create_client() as client:
-        avids = await get_favourite_avids(client, fid=fid)
+        avids = await get_favourite_avids(ctx, client, fid=fid)
         assert AId("456782499") in avids or BvId("BV1o541187Wh") in avids


@@ -60,8 +64,9 @@ async def test_get_favourite_avids():
 @as_sync
 async def test_all_favourites():
     mid = MId("100969474")
+    ctx = FetcherContext()
     async with create_client() as client:
-        fav_list = await get_all_favourites(client, mid=mid)
+        fav_list = await get_all_favourites(ctx, client, mid=mid)
         assert {"fid": FId("1306978874"), "title": "Test"} in fav_list


@@ -70,8 +75,9 @@ async def test_all_favourites():
 async def test_get_medialist_avids():
     series_id = SeriesId("1947439")
     mid = MId("100969474")
+    ctx = FetcherContext()
     async with create_client() as client:
-        avids = await get_medialist_avids(client, series_id=series_id, mid=mid)
+        avids = await get_medialist_avids(ctx, client, series_id=series_id, mid=mid)
         assert avids == [BvId("BV1Y441167U2"), BvId("BV1vZ4y1M7mQ")]


@@ -79,6 +85,7 @@ async def test_get_medialist_avids():
 @as_sync
 async def test_get_medialist_title():
     series_id = SeriesId("1947439")
+    ctx = FetcherContext()
     async with create_client() as client:
-        title = await get_medialist_title(client, series_id=series_id)
+        title = await get_medialist_title(ctx, client, series_id=series_id)
         assert title == "一个小视频列表~"

@@ -9,7 +9,7 @@ from yutto.api.ugc_video import (
     get_ugc_video_playurl,
     get_ugc_video_subtitles,
 )
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


@@ -21,8 +21,9 @@ async def test_get_ugc_video_info():
     aid = AId("84271171")
     avid = bvid
     episode_id = EpisodeId("300998")
+    ctx = FetcherContext()
     async with create_client() as client:
-        video_info = await get_ugc_video_info(client, avid=avid)
+        video_info = await get_ugc_video_info(ctx, client, avid=avid)
         assert video_info["avid"] == aid or video_info["avid"] == bvid
         assert video_info["aid"] == aid
         assert video_info["bvid"] == bvid

@@ -36,8 +37,9 @@ async def test_get_ugc_video_info():
 @as_sync
 async def test_get_ugc_video_title():
     avid = BvId("BV1vZ4y1M7mQ")
+    ctx = FetcherContext()
     async with create_client() as client:
-        title = (await get_ugc_video_list(client, avid))["title"]
+        title = (await get_ugc_video_list(ctx, client, avid))["title"]
         assert title == "用 bilili 下载 B 站视频"


@@ -45,8 +47,9 @@ async def test_get_ugc_video_title():
 @as_sync
 async def test_get_ugc_video_list():
     avid = BvId("BV1vZ4y1M7mQ")
+    ctx = FetcherContext()
     async with create_client() as client:
-        ugc_video_list = (await get_ugc_video_list(client, avid))["pages"]
+        ugc_video_list = (await get_ugc_video_list(ctx, client, avid))["pages"]
         assert ugc_video_list[0]["id"] == 1
         assert ugc_video_list[0]["name"] == "bilili 特性以及使用方法简单介绍"
         assert ugc_video_list[0]["cid"] == CId("222190584")

@@ -68,8 +71,9 @@ async def test_get_ugc_video_list():
 async def test_get_ugc_video_playurl():
     avid = BvId("BV1vZ4y1M7mQ")
     cid = CId("222190584")
+    ctx = FetcherContext()
     async with create_client() as client:
-        playlist = await get_ugc_video_playurl(client, avid, cid)
+        playlist = await get_ugc_video_playurl(ctx, client, avid, cid)
         assert len(playlist[0]) > 0
         assert len(playlist[1]) > 0

@@ -82,7 +86,8 @@ async def test_get_ugc_video_playurl():
 async def test_get_ugc_video_subtitles():
     avid = BvId("BV1Ra411A7kN")
     cid = CId("253246252")
+    ctx = FetcherContext()
     async with create_client() as client:
-        subtitles = await get_ugc_video_subtitles(client, avid=avid, cid=cid)
+        subtitles = await get_ugc_video_subtitles(ctx, client, avid=avid, cid=cid)
         assert len(subtitles) > 0
         assert len(subtitles[0]["lines"]) > 0

@@ -3,14 +3,15 @@ from __future__ import annotations
 import pytest

 from yutto.api.user_info import get_user_info
-from yutto.utils.fetcher import create_client
+from yutto.utils.fetcher import FetcherContext, create_client
 from yutto.utils.funcutils import as_sync


 @pytest.mark.api
 @as_sync
 async def test_get_user_info():
+    ctx = FetcherContext()
     async with create_client() as client:
-        user_info = await get_user_info(client)
+        user_info = await get_user_info(ctx, client)
         assert not user_info["vip_status"]
         assert not user_info["is_login"]

@@ -1 +1 @@
-Subproject commit 114cfabc284bc7607b3cdf8354a70369e3746dc7
+Subproject commit 9c360ad5d1683f5dc9b0e8ec2ec02939637e47a6

@@ -7,7 +7,7 @@ import pytest

 from yutto.processor.downloader import slice_blocks
 from yutto.utils.asynclib import CoroutineWrapper
-from yutto.utils.fetcher import Fetcher, create_client
+from yutto.utils.fetcher import Fetcher, FetcherContext, create_client
 from yutto.utils.file_buffer import AsyncFileBuffer
 from yutto.utils.funcutils import as_sync

@@ -22,14 +22,15 @@ async def test_150_kB_downloader():
     # 因为 file-examples-com 挂掉了(GitHub 账号都消失了,因此暂时使用一个别处的 mirror)
     url = "https://github.com/nhegde610/samples-files/raw/main/file_example_MP4_480_1_5MG.mp4"
     file_path = TEST_DIR / "test_150_kB.pdf"
+    ctx = FetcherContext()
     async with await AsyncFileBuffer(file_path, overwrite=False) as buffer:
         async with create_client(
             timeout=httpx.Timeout(7, connect=3),
         ) as client:
-            Fetcher.set_semaphore(4)
-            size = await Fetcher.get_size(client, url)
+            ctx.set_download_semaphore(4)
+            size = await Fetcher.get_size(ctx, client, url)
             coroutines = [
-                CoroutineWrapper(Fetcher.download_file_with_offset(client, url, [], buffer, offset, block_size))
+                CoroutineWrapper(Fetcher.download_file_with_offset(ctx, client, url, [], buffer, offset, block_size))
                 for offset, block_size in slice_blocks(buffer.written_size, size, 1 * 1024 * 1024)
             ]

@@ -46,13 +47,14 @@ async def test_150_kB_no_slice_downloader():
     # url = "https://file-examples-com.github.io/uploads/2017/04/file_example_MP4_480_1_5MG.mp4"
     url = "https://github.com/nhegde610/samples-files/raw/main/file_example_MP4_480_1_5MG.mp4"
     file_path = TEST_DIR / "test_150_kB_no_slice.pdf"
+    ctx = FetcherContext()
     async with await AsyncFileBuffer(file_path, overwrite=False) as buffer:
         async with create_client(
             timeout=httpx.Timeout(7, connect=3),
         ) as client:
-            Fetcher.set_semaphore(4)
-            size = await Fetcher.get_size(client, url)
-            coroutines = [CoroutineWrapper(Fetcher.download_file_with_offset(client, url, [], buffer, 0, size))]
+            ctx.set_download_semaphore(4)
+            size = await Fetcher.get_size(ctx, client, url)
+            coroutines = [CoroutineWrapper(Fetcher.download_file_with_offset(ctx, client, url, [], buffer, 0, size))]

             print("开始下载……")
             await asyncio.gather(*coroutines)
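Taken together, these tests exercise the whole download path in miniature: set the download semaphore on the context, probe the remote size, then fan out one `download_file_with_offset` per slice. A condensed sketch of that flow under the same assumptions as the tests above (placeholder URL and path, no error handling):

# Condensed sketch of the sliced-download flow from the tests above.
import asyncio
from pathlib import Path

import httpx

from yutto.processor.downloader import slice_blocks
from yutto.utils.fetcher import Fetcher, FetcherContext, create_client
from yutto.utils.file_buffer import AsyncFileBuffer


async def sliced_download(url: str, path: Path) -> None:
    ctx = FetcherContext()
    ctx.set_download_semaphore(4)  # at most 4 blocks in flight at once
    async with await AsyncFileBuffer(path, overwrite=False) as buffer:
        async with create_client(timeout=httpx.Timeout(7, connect=3)) as client:
            size = await Fetcher.get_size(ctx, client, url)
            await asyncio.gather(
                *(
                    Fetcher.download_file_with_offset(ctx, client, url, [], buffer, offset, block_size)
                    for offset, block_size in slice_blocks(buffer.written_size, size, 1 * 1024 * 1024)
                )
            )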