8 Commits

10 changed files with 191 additions and 66 deletions

View File

@@ -5,6 +5,38 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [1.4.2] - 2025-08-14
### Added
- **Session Management for API Requests**: Enhanced API reliability with retry logic
- Implemented session management for tags functionality with automatic retry mechanisms
- Improved API request stability and error handling
- **Series Year Configuration**: New `series_year` option for title naming control
- Added configurable `series_year` option to control year inclusion in series titles
- Enhanced YAML configuration with series year handling options
- **Audio Language Override**: New audio language selection option
- Added `audio_language` option to override default language selection for audio tracks
- Provides more granular control over audio track selection
- **Vault Key Reception Control**: Enhanced vault security options
- Added `no_push` option to Vault and its subclasses to control key reception
- Improved key management security and flexibility
### Changed
- **HLS Segment Processing**: Enhanced segment retrieval and merging capabilities
- Enhanced segment retrieval to allow all file types for better compatibility
- Improved segment merging with recursive file search and fallback to binary concatenation
- Fixed an issue where VTT files from HLS were not found correctly due to format changes
- Added cleanup of empty segment directories after processing
- **Documentation**: Updated README.md with latest information
### Fixed
- **Audio Track Selection**: Improved per-language logic for audio tracks
- Adjusted `per_language` logic to ensure correct audio track selection
- Fixed issue where all tracks for the selected language were being downloaded instead of just the intended ones
## [1.4.1] - 2025-08-08
### Added

View File

@@ -2,6 +2,10 @@
<img width="16" height="16" alt="no_encryption" src="https://github.com/user-attachments/assets/6ff88473-0dd2-4bbc-b1ea-c683d5d7a134" /> unshackle
<br/>
<sup><em>Movie, TV, and Music Archival Software</em></sup>
<br/>
<a href="https://discord.gg/mHYyPaCbFK">
<img src="https://img.shields.io/discord/1395571732001325127?label=&logo=discord&logoColor=ffffff&color=7289DA&labelColor=7289DA" alt="Discord">
</a>
</p>
## What is unshackle?

View File

@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
[project]
name = "unshackle"
version = "1.4.1"
version = "1.4.2"
description = "Modular Movie, TV, and Music Archival Software."
authors = [{ name = "unshackle team" }]
requires-python = ">=3.10,<3.13"

View File

@@ -792,7 +792,7 @@ class dl:
selected_audio.append(highest_quality)
title.tracks.audio = selected_audio
elif "all" not in processed_lang:
per_language = 0 if len(processed_lang) > 1 else 1
per_language = 1
title.tracks.audio = title.tracks.by_language(
title.tracks.audio, processed_lang, per_language=per_language
)

View File

@@ -1 +1 @@
__version__ = "1.4.1"
__version__ = "1.4.2"

View File

@@ -91,6 +91,7 @@ class Config:
self.update_checks: bool = kwargs.get("update_checks", True)
self.update_check_interval: int = kwargs.get("update_check_interval", 24)
self.scene_naming: bool = kwargs.get("scene_naming", True)
self.series_year: bool = kwargs.get("series_year", True)
self.title_cache_time: int = kwargs.get("title_cache_time", 1800) # 30 minutes default
self.title_cache_max_retention: int = kwargs.get("title_cache_max_retention", 86400) # 24 hours default

View File

@@ -4,6 +4,7 @@ import base64
import html
import json
import logging
import os
import shutil
import subprocess
import sys
@@ -584,11 +585,24 @@ class HLS:
if DOWNLOAD_LICENCE_ONLY.is_set():
return
if segment_save_dir.exists():
segment_save_dir.rmdir()
def find_segments_recursively(directory: Path) -> list[Path]:
"""Find all segment files recursively in any directory structure created by downloaders."""
segments = []
# First check direct files in the directory
if directory.exists():
segments.extend([x for x in directory.iterdir() if x.is_file()])
# If no direct files, recursively search subdirectories
if not segments:
for subdir in directory.iterdir():
if subdir.is_dir():
segments.extend(find_segments_recursively(subdir))
return sorted(segments)
# finally merge all the discontinuity save files together to the final path
segments_to_merge = [x for x in sorted(save_dir.iterdir()) if x.is_file()]
segments_to_merge = find_segments_recursively(save_dir)
if len(segments_to_merge) == 1:
shutil.move(segments_to_merge[0], save_path)
else:
@@ -601,9 +615,16 @@ class HLS:
discontinuity_data = discontinuity_file.read_bytes()
f.write(discontinuity_data)
f.flush()
os.fsync(f.fileno())
discontinuity_file.unlink()
save_dir.rmdir()
# Clean up empty segment directory
if save_dir.exists() and save_dir.name.endswith("_segments"):
try:
save_dir.rmdir()
except OSError:
# Directory might not be empty, try removing recursively
shutil.rmtree(save_dir, ignore_errors=True)
progress(downloaded="Downloaded")
@@ -613,40 +634,75 @@ class HLS:
@staticmethod
def merge_segments(segments: list[Path], save_path: Path) -> int:
"""
Concatenate Segments by first demuxing with FFmpeg.
Concatenate Segments using FFmpeg concat with binary fallback.
Returns the file size of the merged file.
"""
if not binaries.FFMPEG:
raise EnvironmentError("FFmpeg executable was not found but is required to merge HLS segments.")
demuxer_file = segments[0].parent / "ffmpeg_concat_demuxer.txt"
demuxer_file.write_text("\n".join([f"file '{segment}'" for segment in segments]))
subprocess.check_call(
[
binaries.FFMPEG,
"-hide_banner",
"-loglevel",
"panic",
"-f",
"concat",
"-safe",
"0",
"-i",
demuxer_file,
"-map",
"0",
"-c",
"copy",
save_path,
]
)
demuxer_file.unlink()
# Track segment directories for cleanup
segment_dirs = set()
for segment in segments:
segment.unlink()
# Track all parent directories that contain segments
current_dir = segment.parent
while current_dir.name and "_segments" in str(current_dir):
segment_dirs.add(current_dir)
current_dir = current_dir.parent
def cleanup_segments_and_dirs():
"""Clean up segments and directories after successful merge."""
for segment in segments:
segment.unlink(missing_ok=True)
for segment_dir in segment_dirs:
if segment_dir.exists():
try:
shutil.rmtree(segment_dir)
except OSError:
pass # Directory cleanup failed, but merge succeeded
# Try FFmpeg concat first (preferred method)
if binaries.FFMPEG:
try:
demuxer_file = save_path.parent / f"ffmpeg_concat_demuxer_{save_path.stem}.txt"
demuxer_file.write_text("\n".join([f"file '{segment.absolute()}'" for segment in segments]))
subprocess.check_call(
[
binaries.FFMPEG,
"-hide_banner",
"-loglevel",
"error",
"-f",
"concat",
"-safe",
"0",
"-i",
demuxer_file,
"-map",
"0",
"-c",
"copy",
save_path,
],
timeout=300, # 5 minute timeout
)
demuxer_file.unlink(missing_ok=True)
cleanup_segments_and_dirs()
return save_path.stat().st_size
except (subprocess.CalledProcessError, subprocess.TimeoutExpired, OSError) as e:
# FFmpeg failed, clean up demuxer file and fall back to binary concat
logging.getLogger("HLS").debug(f"FFmpeg concat failed ({e}), falling back to binary concatenation")
demuxer_file.unlink(missing_ok=True)
# Remove partial output file if it exists
save_path.unlink(missing_ok=True)
# Fallback: Binary concatenation
logging.getLogger("HLS").debug(f"Using binary concatenation for {len(segments)} segments")
with open(save_path, "wb") as output_file:
for segment in segments:
with open(segment, "rb") as segment_file:
output_file.write(segment_file.read())
cleanup_segments_and_dirs()
return save_path.stat().st_size
@staticmethod

View File

@@ -81,7 +81,7 @@ class Episode(Title):
def __str__(self) -> str:
return "{title}{year} S{season:02}E{number:02} {name}".format(
title=self.title,
year=f" {self.year}" if self.year else "",
year=f" {self.year}" if self.year and config.series_year else "",
season=self.season,
number=self.number,
name=self.name or "",
@@ -95,13 +95,13 @@ class Episode(Title):
# Title [Year] SXXEXX Name (or Title [Year] SXX if folder)
if folder:
name = f"{self.title}"
if self.year:
if self.year and config.series_year:
name += f" {self.year}"
name += f" S{self.season:02}"
else:
name = "{title}{year} S{season:02}E{number:02} {name}".format(
title=self.title.replace("$", "S"), # e.g., Arli$$
year=f" {self.year}" if self.year else "",
year=f" {self.year}" if self.year and config.series_year else "",
season=self.season,
number=self.number,
name=self.name or "",
@@ -197,7 +197,7 @@ class Series(SortedKeyList, ABC):
def __str__(self) -> str:
if not self:
return super().__str__()
return self[0].title + (f" ({self[0].year})" if self[0].year else "")
return self[0].title + (f" ({self[0].year})" if self[0].year and config.series_year else "")
def tree(self, verbose: bool = False) -> Tree:
seasons = Counter(x.season for x in self)

View File

@@ -10,6 +10,7 @@ from pathlib import Path
from typing import Optional, Tuple
import requests
from requests.adapters import HTTPAdapter, Retry
from unshackle.core import binaries
from unshackle.core.config import config
@@ -25,6 +26,22 @@ HEADERS = {"User-Agent": "unshackle-tags/1.0"}
log = logging.getLogger("TAGS")
def _get_session() -> requests.Session:
"""Create a requests session with retry logic for network failures."""
session = requests.Session()
session.headers.update(HEADERS)
retry = Retry(
total=3, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504], allowed_methods=["GET", "POST"]
)
adapter = HTTPAdapter(max_retries=retry)
session.mount("https://", adapter)
session.mount("http://", adapter)
return session
def _api_key() -> Optional[str]:
return config.tmdb_api_key or os.getenv("TMDB_API_KEY")
@@ -59,7 +76,8 @@ def search_simkl(title: str, year: Optional[int], kind: str) -> Tuple[Optional[d
filename += " 2160p.mkv"
try:
resp = requests.post("https://api.simkl.com/search/file", json={"file": filename}, headers=HEADERS, timeout=30)
session = _get_session()
resp = session.post("https://api.simkl.com/search/file", json={"file": filename}, timeout=30)
resp.raise_for_status()
data = resp.json()
log.debug("Simkl API response received")
@@ -139,17 +157,21 @@ def search_tmdb(title: str, year: Optional[int], kind: str) -> Tuple[Optional[in
if year is not None:
params["year" if kind == "movie" else "first_air_date_year"] = year
r = requests.get(
f"https://api.themoviedb.org/3/search/{kind}",
params=params,
headers=HEADERS,
timeout=30,
)
r.raise_for_status()
js = r.json()
results = js.get("results") or []
log.debug("TMDB returned %d results", len(results))
if not results:
try:
session = _get_session()
r = session.get(
f"https://api.themoviedb.org/3/search/{kind}",
params=params,
timeout=30,
)
r.raise_for_status()
js = r.json()
results = js.get("results") or []
log.debug("TMDB returned %d results", len(results))
if not results:
return None, None
except requests.RequestException as exc:
log.warning("Failed to search TMDB for %s: %s", title, exc)
return None, None
best_ratio = 0.0
@@ -196,10 +218,10 @@ def get_title(tmdb_id: int, kind: str) -> Optional[str]:
return None
try:
r = requests.get(
session = _get_session()
r = session.get(
f"https://api.themoviedb.org/3/{kind}/{tmdb_id}",
params={"api_key": api_key},
headers=HEADERS,
timeout=30,
)
r.raise_for_status()
@@ -219,10 +241,10 @@ def get_year(tmdb_id: int, kind: str) -> Optional[int]:
return None
try:
r = requests.get(
session = _get_session()
r = session.get(
f"https://api.themoviedb.org/3/{kind}/{tmdb_id}",
params={"api_key": api_key},
headers=HEADERS,
timeout=30,
)
r.raise_for_status()
@@ -243,16 +265,21 @@ def external_ids(tmdb_id: int, kind: str) -> dict:
return {}
url = f"https://api.themoviedb.org/3/{kind}/{tmdb_id}/external_ids"
log.debug("Fetching external IDs for %s %s", kind, tmdb_id)
r = requests.get(
url,
params={"api_key": api_key},
headers=HEADERS,
timeout=30,
)
r.raise_for_status()
js = r.json()
log.debug("External IDs response: %s", js)
return js
try:
session = _get_session()
r = session.get(
url,
params={"api_key": api_key},
timeout=30,
)
r.raise_for_status()
js = r.json()
log.debug("External IDs response: %s", js)
return js
except requests.RequestException as exc:
log.warning("Failed to fetch external IDs for %s %s: %s", kind, tmdb_id, exc)
return {}
def _apply_tags(path: Path, tags: dict[str, str]) -> None:

View File

@@ -15,6 +15,11 @@ set_terminal_bg: false
# false for style - Prime Suspect S07E01 The Final Act - Part One
scene_naming: true
# Whether to include the year in series names for episodes and folders (default: true)
# true for style - Show Name (2023) S01E01 Episode Name
# false for style - Show Name S01E01 Episode Name
series_year: true
# Check for updates from GitHub repository on startup (default: true)
update_checks: true