5 Commits

Author SHA1 Message Date
Andy
79e8184474 ci: enable manual triggering of release workflow 2026-03-17 09:10:50 -06:00
Andy
178eed9236 ci: add GitHub Actions release workflow for major/minor versions 2026-03-17 09:08:20 -06:00
Andy
63d2ba60c4 chore(changelog): tag v4.0.0 release 2026-03-17 08:57:34 -06:00
Andy
f46aa9d8c8 chore(changelog): update changelog for upcoming release and reorganize sections 2026-03-17 08:55:14 -06:00
Andy
b1447eb14b fix(dl): filter CC subtitle languages with --s-lang and extract all manifest CCs
Fixes issues introduced in 15acaea where CC extraction only used the first manifest entry and ignored --s-lang filtering entirely. Now all CC languages from the HLS manifest are iterated and filtered against --s-lang using the same match logic as regular subtitle selection.
2026-03-16 14:09:05 -06:00
5 changed files with 195 additions and 423 deletions

91
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,91 @@
name: Release

on:
  # Manual trigger: always publishes a release for the current version.
  workflow_dispatch:
  push:
    branches: [main]
    paths:
      - "pyproject.toml"

permissions:
  contents: write

jobs:
  check-version:
    runs-on: ubuntu-latest
    outputs:
      should_release: ${{ steps.version_check.outputs.should_release }}
      new_version: ${{ steps.version_check.outputs.new_version }}
    steps:
      - uses: actions/checkout@v4
        with:
          # Full history so all existing release tags are visible to `git tag`.
          fetch-depth: 0
      - name: Check for major/minor version bump
        id: version_check
        run: |
          # Anchor on 'version = ' so keys like version_files/version_scheme
          # in pyproject.toml cannot match.
          NEW_VERSION=$(grep -m1 '^version = ' pyproject.toml | sed 's/version = "\(.*\)"/\1/')
          echo "Detected version in pyproject.toml: $NEW_VERSION"
          # Manual runs bypass the bump check entirely; otherwise
          # workflow_dispatch would be a no-op for patch-only versions.
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            echo "Manually triggered, forcing release of $NEW_VERSION"
            echo "should_release=true" >> "$GITHUB_OUTPUT"
            echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          LATEST_TAG=$(git tag --list | grep -E '^[0-9]+\.[0-9]+\.[0-9]+$' | sort -V | tail -1)
          echo "Latest git tag: $LATEST_TAG"
          if [ -z "$LATEST_TAG" ]; then
            echo "No previous tag found, treating as new release"
            echo "should_release=true" >> "$GITHUB_OUTPUT"
            echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT"
            exit 0
          fi
          OLD_MAJOR=$(echo "$LATEST_TAG" | cut -d. -f1)
          OLD_MINOR=$(echo "$LATEST_TAG" | cut -d. -f2)
          NEW_MAJOR=$(echo "$NEW_VERSION" | cut -d. -f1)
          NEW_MINOR=$(echo "$NEW_VERSION" | cut -d. -f2)
          # Release on a major bump, or on a minor bump within the SAME major.
          # Comparing minors across different majors would wrongly release
          # e.g. 3.0.0 -> 2.9.0 (NEW_MINOR 9 > OLD_MINOR 0).
          if [ "$NEW_MAJOR" -gt "$OLD_MAJOR" ] || { [ "$NEW_MAJOR" -eq "$OLD_MAJOR" ] && [ "$NEW_MINOR" -gt "$OLD_MINOR" ]; }; then
            echo "Major or minor version bump detected: $LATEST_TAG -> $NEW_VERSION"
            echo "should_release=true" >> "$GITHUB_OUTPUT"
          else
            echo "Patch-only change ($LATEST_TAG -> $NEW_VERSION), skipping release"
            echo "should_release=false" >> "$GITHUB_OUTPUT"
          fi
          echo "new_version=$NEW_VERSION" >> "$GITHUB_OUTPUT"

  release:
    needs: check-version
    if: needs.check-version.outputs.should_release == 'true'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Install uv
        uses: astral-sh/setup-uv@v4
      - name: Set up Python
        run: uv python install 3.12
      - name: Install dependencies and build
        run: |
          uv sync
          uv build
      - name: Extract changelog for release
        id: changelog
        run: |
          # Quoted so an unexpected/empty expansion cannot break the shell line.
          VERSION="${{ needs.check-version.outputs.new_version }}"
          # Extract the section for this version from CHANGELOG.md:
          # print lines after '## [VERSION]' until the next '## [' heading.
          awk "/^## \[$VERSION\]/{found=1; next} /^## \[/{if(found) exit} found{print}" CHANGELOG.md > release_notes.md
          echo "Extracted release notes:"
          cat release_notes.md
      - name: Create GitHub Release
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          VERSION="${{ needs.check-version.outputs.new_version }}"
          gh release create "$VERSION" \
            --title "$VERSION" \
            --notes-file release_notes.md \
            "dist/unshackle-${VERSION}-py3-none-any.whl" \
            "dist/unshackle-${VERSION}.tar.gz"

View File

@@ -6,7 +6,7 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
This changelog is automatically generated using [git-cliff](https://git-cliff.org).
## [Unreleased]
## [4.0.0] - 2026-03-17
### Features
@@ -19,6 +19,11 @@ This changelog is automatically generated using [git-cliff](https://git-cliff.or
- *tracks*: Add edition tags to output filenames
- *templates*: [**breaking**] Add customizable output filename templates
- *templates*: Add configurable language tagging rule engine
- Update unshackle version to 4.0.0
- *dl*: Add --animeapi and --enrich options for anime metadata and tagging
- *dl*: Add skip messages for --no-audio and --no-chapters flags
- *dl*: Extract closed captions from HLS manifests and improve CC extraction
- *dl*: Add --worst flag and SHIELD OkHttp fingerprint preset
### Bug Fixes
@@ -33,6 +38,13 @@ This changelog is automatically generated using [git-cliff](https://git-cliff.or
- *n_m3u8dl_re*: Disable segment count validation for duration-based DASH
- Correct formatting and add missing newlines in selector and EXAMPLE service
- *dependencies*: Update pyplayready version to 0.8.3 and adjust dependencies
- *drm*: Update PlayReady KID extraction for pyplayready 0.8.3 compatibility
- *api*: Resolve Sentinel serialization, missing params, and add search endpoint
- *dash*: Pass period_filter to n_m3u8dl_re via filtered MPD file
- *title*: Add HDR Vivid Format HDR Tag
- *ism*: Prevent duplicate track IDs for audio tracks with same lang/codec/bitrate
- *aria2c*: Correct progress bar tracking for HLS downloads
- *dl*: Filter CC subtitle languages with --s-lang and extract all manifest CCs
### Documentation
@@ -45,10 +57,6 @@ This changelog is automatically generated using [git-cliff](https://git-cliff.or
- *example*: Migrate EXAMPLE service to track_request pattern
- *providers*: Extract metadata providers into modular system
### Maintenance
- *changelog*: Update changelog for upcoming release and reorganize sections
## [3.0.0] - 2026-02-15
### Features

View File

@@ -62,9 +62,9 @@ from unshackle.core.tracks import Audio, Subtitle, Tracks, Video
from unshackle.core.tracks.attachment import Attachment
from unshackle.core.tracks.hybrid import Hybrid
from unshackle.core.utilities import (find_font_with_fallbacks, get_debug_logger, get_system_fonts, init_debug_logger,
is_close_match, suggest_font_packages, time_elapsed_since)
is_close_match, is_exact_match, suggest_font_packages, time_elapsed_since)
from unshackle.core.utils import tags
from unshackle.core.utils.click_types import (AUDIO_CODEC_LIST, LANGUAGE_RANGE, OFFSET, QUALITY_LIST, SEASON_RANGE,
from unshackle.core.utils.click_types import (AUDIO_CODEC_LIST, LANGUAGE_RANGE, QUALITY_LIST, SEASON_RANGE,
ContextData, MultipleChoice, MultipleVideoCodecChoice,
SubtitleCodecChoice)
from unshackle.core.utils.collections import merge_dict
@@ -519,58 +519,6 @@ class dl:
default=False,
help="Continue with best available quality if requested resolutions are not available.",
)
@click.option(
"--cross-video",
nargs=2,
type=(str, str),
default=None,
help="Cross-mux: use video from another service. Format: SERVICE URL.",
)
@click.option(
"--cross-audio",
nargs=2,
type=(str, str),
default=None,
help="Cross-mux: use audio from another service. Format: SERVICE URL.",
)
@click.option(
"--cross-subtitles",
nargs=2,
type=(str, str),
default=None,
help="Cross-mux: use subtitles from another service. Format: SERVICE URL.",
)
@click.option(
"--cross-chapters",
nargs=2,
type=(str, str),
default=None,
help="Cross-mux: use chapters from another service. Format: SERVICE URL.",
)
@click.option(
"--cross-audio-offset",
type=OFFSET,
default=None,
help="Timing offset for cross-sourced audio, e.g. '10s', '500ms', '-5.5s'.",
)
@click.option(
"--cross-subtitle-offset",
type=OFFSET,
default=None,
help="Timing offset for cross-sourced subtitles, e.g. '10s', '500ms', '-5.5s'.",
)
@click.option(
"--cross-profile",
type=str,
default=None,
help="Profile to use for cross-service credentials. Defaults to --profile.",
)
@click.option(
"--cross-wanted",
type=str,
default=None,
help="Override episode mapping for cross-services, e.g. 'S01E02'.",
)
@click.pass_context
def cli(ctx: click.Context, **kwargs: Any) -> dl:
return dl(ctx, **kwargs)
@@ -590,14 +538,6 @@ class dl:
animeapi_id: Optional[str] = None,
enrich: bool = False,
output_dir: Optional[Path] = None,
cross_video: Optional[tuple[str, str]] = None,
cross_audio: Optional[tuple[str, str]] = None,
cross_subtitles: Optional[tuple[str, str]] = None,
cross_chapters: Optional[tuple[str, str]] = None,
cross_audio_offset: Optional[int] = None,
cross_subtitle_offset: Optional[int] = None,
cross_profile: Optional[str] = None,
cross_wanted: Optional[str] = None,
*_: Any,
**__: Any,
):
@@ -646,16 +586,6 @@ class dl:
self.animeapi_title: Optional[str] = None
self.output_dir = output_dir
# Cross-mux settings
self.cross_video = cross_video
self.cross_audio = cross_audio
self.cross_subtitles = cross_subtitles
self.cross_chapters = cross_chapters
self.cross_audio_offset = cross_audio_offset
self.cross_subtitle_offset = cross_subtitle_offset
self.cross_profile = cross_profile or profile
self.cross_wanted = cross_wanted
if animeapi_id:
from unshackle.core.utils.animeapi import resolve_animeapi
@@ -1024,218 +954,6 @@ class dl:
# able to keep `self` as the first positional
self.cli._result_callback = self.result
def _instantiate_cross_service(self, tag: str, url: str) -> tuple[Service, str]:
"""
Instantiate a cross-service for cross-mux by tag and URL.
Returns (service_instance, title_url) after authentication.
"""
tag = Services.get_tag(tag)
service_cls = Services.load(tag)
# Build service config for the cross-service
service_config_path = Services.get_path(tag) / config.filenames.config
if service_config_path.exists():
cross_service_config = yaml.safe_load(service_config_path.read_text(encoding="utf8"))
else:
cross_service_config = {}
# Load CDM for the cross-service
cross_cdm = self.get_cdm(tag, self.cross_profile)
# Build a synthetic Click context for the cross-service
cross_ctx_obj = ContextData(
config=cross_service_config,
cdm=cross_cdm,
proxy_providers=self.proxy_providers,
profile=self.cross_profile,
)
# Extract the title argument from the URL using TITLE_RE
title_id = url
if hasattr(service_cls, "TITLE_RE"):
m = re.match(service_cls.TITLE_RE, url)
if m:
# Try named group 'title_id' first, then 'id', then group(1)
title_id = m.group("title_id") if "title_id" in m.groupdict() else (
m.group("id") if "id" in m.groupdict() else m.group(1)
)
# Build kwargs from the service cli command's params with defaults
cli_cmd = service_cls.cli
kwargs = {"title": title_id}
for param in cli_cmd.params:
if param.name and param.name != "title" and param.name not in kwargs:
kwargs[param.name] = param.default
# Create a parent context that mimics what dl.__init__ sets up.
# Services access ctx.parent.params for various dl-level options,
# so we provide a complete set of defaults to avoid KeyError.
parent_ctx = click.Context(self.cli, info_name="dl")
parent_ctx.params = {
"no_proxy": True,
"proxy": None,
"proxy_query": None,
"proxy_provider": None,
"vcodec": [],
"acodec": [],
"range_": [Video.Range.SDR],
"best_available": False,
"profile": self.cross_profile,
"quality": [],
"wanted": None,
"video_only": False,
"audio_only": False,
"subs_only": False,
"chapters_only": False,
"list_": False,
"skip_dl": False,
"no_cache": False,
"reset_cache": False,
}
ctx = click.Context(cli_cmd, parent=parent_ctx, info_name=tag)
ctx.obj = cross_ctx_obj
# Instantiate the service
cross_service = service_cls(ctx, **kwargs)
# Authenticate
cookies = self.get_cookie_jar(tag, self.cross_profile)
credential = self.get_credentials(tag, self.cross_profile)
cross_service.authenticate(cookies, credential)
return cross_service
def _match_cross_title(
self, primary_title: Title_T, cross_titles: Any
) -> Optional[Title_T]:
"""Match a primary title to its counterpart in cross-service titles."""
if isinstance(primary_title, Movie):
# For movies, the cross URL should resolve to the same movie
if hasattr(cross_titles, "__iter__"):
for t in cross_titles:
if isinstance(t, Movie):
return t
return None
if isinstance(primary_title, Episode):
if self.cross_wanted:
# Manual override: parse S01E02 format
m = re.match(r"S(\d+)E(\d+)", self.cross_wanted, re.IGNORECASE)
if m:
wanted_season = int(m.group(1))
wanted_episode = int(m.group(2))
for t in cross_titles:
if isinstance(t, Episode) and t.season == wanted_season and t.number == wanted_episode:
return t
self.log.warning(
f"Cross-wanted S{wanted_season:02d}E{wanted_episode:02d} not found in cross-service"
)
return None
# Auto-match by season + episode number
for t in cross_titles:
if isinstance(t, Episode) and t.season == primary_title.season and t.number == primary_title.number:
return t
self.log.warning(
f"No cross-service match for S{primary_title.season:02d}E{primary_title.number:02d}"
)
return None
return None
def _process_cross_services(self, title: Title_T) -> dict[str, tuple[Service, Title_T, Tracks]]:
"""
Process all cross-service specs and return fetched tracks per track type.
Returns dict like {"video": (service, matched_title, tracks), ...}
"""
cross_specs: list[tuple[str, Optional[tuple[str, str]]]] = [
("video", self.cross_video),
("audio", self.cross_audio),
("subtitles", self.cross_subtitles),
("chapters", self.cross_chapters),
]
# Cache instantiated services by (tag, url) to avoid duplicate auth
service_cache: dict[tuple[str, str], Service] = {}
result: dict[str, tuple[Service, Title_T, Tracks]] = {}
for track_type, spec in cross_specs:
if not spec:
continue
tag, url = spec
cache_key = (Services.get_tag(tag), url)
if cache_key not in service_cache:
self.log.info(f"Cross-mux: loading {track_type} from {tag}")
cross_service = self._instantiate_cross_service(tag, url)
service_cache[cache_key] = cross_service
else:
cross_service = service_cache[cache_key]
# Get titles from cross-service
cross_titles = cross_service.get_titles()
# Match the primary title to a cross-service title
cross_title = self._match_cross_title(title, cross_titles)
if not cross_title:
self.log.warning(f"Cross-mux: could not match title for {track_type} from {tag}, skipping")
continue
# Get tracks from cross-service
cross_tracks = cross_service.get_tracks(cross_title)
cross_chapters = cross_service.get_chapters(cross_title)
cross_tracks.chapters = cross_chapters
result[track_type] = (cross_service, cross_title, cross_tracks)
return result
def _apply_cross_tracks(
self,
title: Title_T,
cross_results: dict[str, tuple[Service, Title_T, Tracks]],
) -> None:
"""Replace primary tracks with cross-service tracks and mark with metadata."""
if "video" in cross_results:
cross_service, cross_title, cross_tracks = cross_results["video"]
title.tracks.videos = cross_tracks.videos
for track in title.tracks.videos:
track.data["_cross_service"] = cross_service
track.data["_cross_title"] = cross_title
track.data["cross_source"] = cross_service.__class__.__name__
if "audio" in cross_results:
cross_service, cross_title, cross_tracks = cross_results["audio"]
title.tracks.audio = cross_tracks.audio
for track in title.tracks.audio:
track.data["_cross_service"] = cross_service
track.data["_cross_title"] = cross_title
track.data["cross_source"] = cross_service.__class__.__name__
if self.cross_audio_offset:
track.data["cross_offset_ms"] = self.cross_audio_offset
if "subtitles" in cross_results:
cross_service, cross_title, cross_tracks = cross_results["subtitles"]
title.tracks.subtitles = cross_tracks.subtitles
for track in title.tracks.subtitles:
track.data["_cross_service"] = cross_service
track.data["_cross_title"] = cross_title
track.data["cross_source"] = cross_service.__class__.__name__
if self.cross_subtitle_offset:
track.data["cross_offset_ms"] = self.cross_subtitle_offset
if "chapters" in cross_results:
_, _, cross_tracks = cross_results["chapters"]
title.tracks.chapters = cross_tracks.chapters
@property
def has_cross_mux(self) -> bool:
return any([self.cross_video, self.cross_audio, self.cross_subtitles, self.cross_chapters])
def result(
self,
service: Service,
@@ -1698,25 +1416,6 @@ class dl:
level="INFO", operation="get_tracks", service=self.service, context=tracks_info
)
# Cross-mux: replace tracks from cross-services if configured
if self.has_cross_mux:
with console.status("Cross-mux: fetching tracks from cross-services...", spinner="dots"):
try:
cross_results = self._process_cross_services(title)
if cross_results:
self._apply_cross_tracks(title, cross_results)
cross_sources = ", ".join(
f"{k}={v[0].__class__.__name__}" for k, v in cross_results.items()
)
self.log.info(f"Cross-mux: applied tracks from {cross_sources}")
except Exception as e:
self.log.error(f"Cross-mux failed: {e}")
if self.debug_logger:
self.debug_logger.log_error(
"cross_mux", e, service=self.service, context={"title": str(title)}
)
raise
# strip SDH subs to non-SDH if no equivalent same-lang non-SDH is available
# uses a loose check, e.g, wont strip en-US SDH sub if a non-SDH en-GB is available
# Check if automatic SDH stripping is enabled in config
@@ -2013,8 +1712,6 @@ class dl:
f"Required languages found ({', '.join(require_subs)}), downloading all available subtitles"
)
elif s_lang and "all" not in s_lang:
from unshackle.core.utilities import is_exact_match
match_func = is_exact_match if exact_lang else is_close_match
missing_langs = [
@@ -2251,35 +1948,21 @@ class dl:
(
pool.submit(
track.download,
session=(
track.data["_cross_service"].session
if track.data.get("_cross_service")
else service.session
),
session=service.session,
prepare_drm=partial(
partial(self.prepare_drm, table=download_table),
track=track,
title=track.data.get("_cross_title", title),
title=title,
certificate=partial(
(
track.data["_cross_service"].get_widevine_service_certificate
if track.data.get("_cross_service")
else service.get_widevine_service_certificate
),
title=track.data.get("_cross_title", title),
service.get_widevine_service_certificate,
title=title,
track=track,
),
licence=partial(
(
track.data["_cross_service"].get_playready_license
if track.data.get("_cross_service") and is_playready_cdm(self.cdm)
else track.data["_cross_service"].get_widevine_license
if track.data.get("_cross_service")
else service.get_playready_license
if is_playready_cdm(self.cdm)
else service.get_widevine_license
),
title=track.data.get("_cross_title", title),
service.get_playready_license
if is_playready_cdm(self.cdm)
else service.get_widevine_license,
title=title,
track=track,
),
cdm_only=cdm_only,
@@ -2420,6 +2103,7 @@ class dl:
and not video_only
and not no_video
):
match_func = is_exact_match if exact_lang else is_close_match
for video_track_n, video_track in enumerate(title.tracks.videos):
has_manifest_cc = bool(getattr(video_track, "closed_captions", None))
has_eia_cc = (
@@ -2433,31 +2117,48 @@ class dl:
if not has_manifest_cc and not has_eia_cc:
continue
# Build list of CC entries to extract
if has_manifest_cc:
cc_entries = video_track.closed_captions
# Filter CC languages against --s-lang if specified
if s_lang and "all" not in s_lang:
cc_entries = [
entry for entry in cc_entries
if entry.get("language")
and match_func(Language.get(entry["language"]), s_lang)
]
if not cc_entries:
continue
else:
# EIA fallback: single entry with unknown language
cc_entries = [{}]
with console.status(f"Checking Video track {video_track_n + 1} for Closed Captions..."):
try:
cc_lang = (
Language.get(video_track.closed_captions[0]["language"])
if has_manifest_cc and video_track.closed_captions[0].get("language")
else title.language or video_track.language
)
track_id = f"ccextractor-{video_track.id}"
cc = video_track.ccextractor(
track_id=track_id,
out_path=config.directories.temp
/ config.filenames.subtitle.format(id=track_id, language=cc_lang),
language=cc_lang,
original=False,
)
if cc:
cc.cc = True
title.tracks.add(cc)
self.log.info(
f"Extracted a Closed Caption from Video track {video_track_n + 1}"
for cc_idx, cc_entry in enumerate(cc_entries):
cc_lang = (
Language.get(cc_entry["language"])
if cc_entry.get("language")
else title.language or video_track.language
)
else:
self.log.info(
f"No Closed Captions were found in Video track {video_track_n + 1}"
track_id = f"ccextractor-{video_track.id}-{cc_idx}"
cc = video_track.ccextractor(
track_id=track_id,
out_path=config.directories.temp
/ config.filenames.subtitle.format(id=track_id, language=cc_lang),
language=cc_lang,
original=False,
)
if cc:
cc.cc = True
title.tracks.add(cc)
self.log.info(
f"Extracted a Closed Caption ({cc_lang}) from Video track {video_track_n + 1}"
)
else:
self.log.info(
f"No Closed Captions were found in Video track {video_track_n + 1}"
)
except EnvironmentError:
self.log.error(
"Cannot extract Closed Captions as the ccextractor executable was not found..."

View File

@@ -477,25 +477,25 @@ class Tracks:
if not at.path or not at.path.exists():
raise ValueError("Audio Track must be downloaded before muxing...")
events.emit(events.Types.TRACK_MULTIPLEX, track=at)
audio_args = [
"--track-name",
f"0:{at.get_track_name() or ''}",
"--language",
f"0:{at.language}",
"--default-track",
f"0:{at.is_original_lang}",
"--visual-impaired-flag",
f"0:{at.descriptive}",
"--original-flag",
f"0:{at.is_original_lang}",
"--compression",
"0:none", # disable extra compression
]
if at.data.get("cross_offset_ms"):
audio_args.extend(["--sync", f"0:{at.data['cross_offset_ms']}"])
cl.extend(audio_args + ["(", str(at.path), ")"])
cl.extend(
[
"--track-name",
f"0:{at.get_track_name() or ''}",
"--language",
f"0:{at.language}",
"--default-track",
f"0:{at.is_original_lang}",
"--visual-impaired-flag",
f"0:{at.descriptive}",
"--original-flag",
f"0:{at.is_original_lang}",
"--compression",
"0:none", # disable extra compression
"(",
str(at.path),
")",
]
)
if not skip_subtitles:
for st in self.subtitles:
@@ -503,29 +503,29 @@ class Tracks:
raise ValueError("Text Track must be downloaded before muxing...")
events.emit(events.Types.TRACK_MULTIPLEX, track=st)
default = bool(self.audio and is_close_match(st.language, [self.audio[0].language]) and st.forced)
sub_args = [
"--track-name",
f"0:{st.get_track_name() or ''}",
"--language",
f"0:{st.language}",
"--sub-charset",
"0:UTF-8",
"--forced-track",
f"0:{st.forced}",
"--default-track",
f"0:{default}",
"--hearing-impaired-flag",
f"0:{st.sdh}",
"--original-flag",
f"0:{st.is_original_lang}",
"--compression",
"0:none", # disable extra compression (probably zlib)
]
if st.data.get("cross_offset_ms"):
sub_args.extend(["--sync", f"0:{st.data['cross_offset_ms']}"])
cl.extend(sub_args + ["(", str(st.path), ")"])
cl.extend(
[
"--track-name",
f"0:{st.get_track_name() or ''}",
"--language",
f"0:{st.language}",
"--sub-charset",
"0:UTF-8",
"--forced-track",
f"0:{st.forced}",
"--default-track",
f"0:{default}",
"--hearing-impaired-flag",
f"0:{st.sdh}",
"--original-flag",
f"0:{st.is_original_lang}",
"--compression",
"0:none", # disable extra compression (probably zlib)
"(",
str(st.path),
")",
]
)
if self.chapters:
chapters_path = config.directories.temp / config.filenames.chapters.format(

View File

@@ -360,37 +360,9 @@ class MultipleChoice(click.Choice):
return super(self).shell_complete(ctx, param, incomplete)
class OffsetType(click.ParamType):
"""
Parses human-friendly time offset strings into milliseconds.
Accepts: '10s', '500ms', '-5.5s', '200' (bare number = ms).
"""
name = "offset"
_PATTERN = re.compile(r"^(-?\d+(?:\.\d+)?)\s*(s|ms)?$")
def convert(
self, value: Any, param: Optional[click.Parameter] = None, ctx: Optional[click.Context] = None
) -> int:
if isinstance(value, int):
return value
value = str(value).strip()
m = self._PATTERN.match(value)
if not m:
self.fail(f"'{value}' is not a valid offset. Use e.g. '10s', '500ms', '-5.5s'.", param, ctx)
number = float(m.group(1))
unit = m.group(2) or "ms"
if unit == "s":
return int(number * 1000)
return int(number)
SEASON_RANGE = SeasonRange()
LANGUAGE_RANGE = LanguageRange()
QUALITY_LIST = QualityList()
AUDIO_CODEC_LIST = AudioCodecList(Audio.Codec)
OFFSET = OffsetType()
# VIDEO_CODEC_CHOICE will be created dynamically when imported