diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ad06a181..ba41bf31 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -122,6 +122,7 @@ jobs:
--hidden-import=six --hidden-import=pathvalidate \
--hidden-import=xml.etree.ElementTree \
--hidden-import=pywidevine \
+ --hidden-import=pyplayready \
--hidden-import=Cryptodome.Cipher --hidden-import=Cryptodome.Cipher.AES \
--hidden-import=Cryptodome.Util --hidden-import=Cryptodome.Util.Padding \
--hidden-import=Cryptodome.Random \
diff --git a/.gitignore b/.gitignore
index 6a39012a..aed44be7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -56,4 +56,6 @@ working_proxies.json
start.sh
.DS_Store
GUI/db.sqlite3
-console.log
\ No newline at end of file
+console.log
+/.github/script/domains.json
+/.cache
\ No newline at end of file
diff --git a/GUI/searchapp/api/altadefinizione.py b/GUI/searchapp/api/altadefinizione.py
index 5b1f7323..7266d03d 100644
--- a/GUI/searchapp/api/altadefinizione.py
+++ b/GUI/searchapp/api/altadefinizione.py
@@ -24,7 +24,7 @@ def __init__(self):
def _load_config(self):
"""Load site configuration."""
- self.base_url = (config_manager.get_site("altadefinizione", "full_url") or "").rstrip("/")
+ self.base_url = (config_manager.domain.get("altadefinizione", "full_url") or "").rstrip("/")
def _get_search_fn(self):
"""Lazy load the search function."""
diff --git a/GUI/searchapp/api/animeunity.py b/GUI/searchapp/api/animeunity.py
index d61dda2e..08c555e2 100644
--- a/GUI/searchapp/api/animeunity.py
+++ b/GUI/searchapp/api/animeunity.py
@@ -25,7 +25,7 @@ def __init__(self):
def _load_config(self):
"""Load site configuration."""
- self.base_url = (config_manager.get_site("animeunity", "full_url") or "").rstrip("/")
+        self.base_url = (config_manager.domain.get("animeunity", "full_url") or "").rstrip("/")
def _get_search_fn(self):
"""Lazy load the search function."""
diff --git a/GUI/searchapp/api/streamingcommunity.py b/GUI/searchapp/api/streamingcommunity.py
index 1dff109f..b6cd5bb4 100644
--- a/GUI/searchapp/api/streamingcommunity.py
+++ b/GUI/searchapp/api/streamingcommunity.py
@@ -24,7 +24,7 @@ def __init__(self):
def _load_config(self):
"""Load site configuration."""
- self.base_url = config_manager.get_site("streamingcommunity", "full_url").rstrip("/") + "/it"
+ self.base_url = config_manager.domain.get("streamingcommunity", "full_url").rstrip("/") + "/it"
def _get_search_fn(self):
"""Lazy load the search function."""
diff --git a/README.md b/README.md
index 6f8b2061..a69d8039 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
-

+

[](https://pypi.org/project/streamingcommunity/)
[](https://github.com/Arrowar/StreamingCommunity/commits)
@@ -269,39 +269,6 @@ You can change some behaviors by tweaking the configuration file. The configurat
- 480p (640x480)
- 360p (640x360)
-
-#### Output format Options
-Final video will be saved with the selected extension. For each format, specific subtitles parameters need to be set in the `M3U8_CONVERSION` section.
-
-> Note: if you want **ASS subtitles**, use `extension: "mkv"` and set `param_subtitles` to `["-c:s","ass"]`
-> (or `["-c:s","copy"]` if the input subtitles are already ASS and you just want to mux them).
-
-MP4 example:
-```json
-{
- "M3U8_CONVERSION": {
- "param_subtitles": [
- "-c:s",
- "mov_text"
- ],
- "extension": "mp4"
- }
-}
-```
-
-MKV example (WebVTT):
-```json
-{
- "M3U8_CONVERSION": {
- "param_subtitles": [
- "-c:s",
- "webvtt"
- ],
- "extension": "mkv"
- }
-}
-```
-
#### Link options
- `get_only_link`: Return M3U8 playlist/index URL instead of downloading
@@ -555,14 +522,6 @@ make LOCAL_DIR=/path/to/download run-container
The `run-container` command mounts also the `config.json` file, so any change to the configuration file is reflected immediately without having to rebuild the image.
-
-# Tutorials
-
-- [Windows](https://www.youtube.com/watch?v=mZGqK4wdN-k)
-- [Linux](https://www.youtube.com/watch?v=0qUNXPE_mTg)
-- [Pypy](https://www.youtube.com/watch?v=C6m9ZKOK0p4)
-
-
# Useful Project
## 🎯 [Unit3Dup](https://github.com/31December99/Unit3Dup)
@@ -571,10 +530,6 @@ Bot in Python per la generazione e l'upload automatico di torrent su tracker bas
## 🇮🇹 [MammaMia](https://github.com/UrloMythus/MammaMia)
Addon per Stremio che consente lo streaming HTTPS di film, serie, anime e TV in diretta in lingua italiana.
-## 🧩 [streamingcommunity-unofficialapi](https://github.com/Blu-Tiger/streamingcommunity-unofficialapi)
-API non ufficiale per accedere ai contenuti del sito italiano StreamingCommunity.
-
-
# Disclaimer
> **Note:** This software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the software or the use or other dealings in the software.
@@ -585,4 +540,4 @@ API non ufficiale per accedere ai contenuti del sito italiano StreamingCommunity
**Made with ❤️ for streaming lovers**
*If you find this project useful, consider starring it! ⭐*
-
\ No newline at end of file
+
diff --git a/StreamingCommunity/Api/Player/vixcloud.py b/StreamingCommunity/Api/Player/vixcloud.py
index da85a598..fd16f004 100644
--- a/StreamingCommunity/Api/Player/vixcloud.py
+++ b/StreamingCommunity/Api/Player/vixcloud.py
@@ -193,7 +193,7 @@ def parse(cls, js_string):
class VideoSource:
- def __init__(self, url: str, is_series: bool, media_id: int = None):
+ def __init__(self, url: str, is_series: bool, media_id: int = None, tmdb_data: Dict[str, Any] = None):
"""
Initialize video source for streaming site.
@@ -201,6 +201,7 @@ def __init__(self, url: str, is_series: bool, media_id: int = None):
- url (str): The URL of the streaming site.
- is_series (bool): Flag for series or movie content
- media_id (int, optional): Unique identifier for media item
+ - tmdb_data (dict, optional): TMDB data with 'id', 's' (season), 'e' (episode)
"""
self.headers = {'user-agent': get_userAgent()}
self.url = url
@@ -208,6 +209,16 @@ def __init__(self, url: str, is_series: bool, media_id: int = None):
self.media_id = media_id
self.iframe_src = None
self.window_parameter = None
+
+ # Store TMDB data if provided
+ if tmdb_data is not None:
+ self.tmdb_id = tmdb_data.get('id')
+ self.season_number = tmdb_data.get('s')
+ self.episode_number = tmdb_data.get('e')
+ else:
+ self.tmdb_id = None
+ self.season_number = None
+ self.episode_number = None
def get_iframe(self, episode_id: int) -> None:
"""
@@ -259,13 +270,18 @@ def parse_script(self, script_text: str) -> None:
def get_content(self) -> None:
"""
Fetch and process video content from iframe source.
-
- Workflow:
- - Validate iframe source
- - Retrieve content
- - Parse embedded script
"""
try:
+ # If TMDB ID is provided, use direct vixsrc.to URL
+ if self.tmdb_id is not None:
+                console.print("[cyan]Using API V.2")
+ if self.is_series:
+ if self.season_number is not None and self.episode_number is not None:
+ self.iframe_src = f"https://vixsrc.to/tv/{self.tmdb_id}/{self.season_number}/{self.episode_number}/?lang=it"
+ else:
+ self.iframe_src = f"https://vixsrc.to/movie/{self.tmdb_id}/?lang=it"
+
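+            # Illustrative URLs built above (assumed ids): tmdb_id=1396 with
+            # s=1, e=1 -> https://vixsrc.to/tv/1396/1/1/?lang=it, while
+            # tmdb_id=550 with is_series=False -> https://vixsrc.to/movie/550/?lang=it
+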
+ # Fetch content from iframe source
if self.iframe_src is not None:
response = create_client(headers=self.headers).get(self.iframe_src)
response.raise_for_status()
@@ -357,4 +373,4 @@ def get_embed(self, episode_id: int):
except Exception as e:
logging.error(f"Error fetching embed URL: {e}")
- return None
+ return None
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/altadefinizione/film.py b/StreamingCommunity/Api/Service/altadefinizione/film.py
index efcf2367..f1ecf3c0 100644
--- a/StreamingCommunity/Api/Service/altadefinizione/film.py
+++ b/StreamingCommunity/Api/Service/altadefinizione/film.py
@@ -22,7 +22,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> str:
diff --git a/StreamingCommunity/Api/Service/altadefinizione/series.py b/StreamingCommunity/Api/Service/altadefinizione/series.py
index d7971a02..02e2a6de 100644
--- a/StreamingCommunity/Api/Service/altadefinizione/series.py
+++ b/StreamingCommunity/Api/Service/altadefinizione/series.py
@@ -31,7 +31,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/animeunity/film.py b/StreamingCommunity/Api/Service/animeunity/film.py
index f89d94aa..d0578c59 100644
--- a/StreamingCommunity/Api/Service/animeunity/film.py
+++ b/StreamingCommunity/Api/Service/animeunity/film.py
@@ -15,7 +15,6 @@
from StreamingCommunity.Api.Player.vixcloud import VideoSourceAnime
-
# Variable
console = Console()
diff --git a/StreamingCommunity/Api/Service/animeworld/site.py b/StreamingCommunity/Api/Service/animeworld/site.py
index 36660d3a..3df236f1 100644
--- a/StreamingCommunity/Api/Service/animeworld/site.py
+++ b/StreamingCommunity/Api/Service/animeworld/site.py
@@ -14,7 +14,6 @@
from StreamingCommunity.Util.table import TVShowManager
-
# Variable
console = Console()
media_search_manager = MediaManager()
diff --git a/StreamingCommunity/Api/Service/crunchyroll/film.py b/StreamingCommunity/Api/Service/crunchyroll/film.py
index d4ab3e8d..7533e874 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/film.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/film.py
@@ -14,18 +14,18 @@
from StreamingCommunity.Lib.DASH.downloader import DASH_Downloader
-# Logi
+# Logic
from .util.get_license import get_playback_session, CrunchyrollClient
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> str:
"""
- Downloads a film using the provided film ID, title name, and domain.
+ Downloads a film.
Parameters:
- select_title (MediaItem): The selected media item.
@@ -38,28 +38,21 @@ def download_film(select_title: MediaItem) -> str:
# Initialize Crunchyroll client
client = CrunchyrollClient()
- if not client.start():
- console.print("[red]Failed to authenticate with Crunchyroll.")
- return None, True
- # Define filename and path for the downloaded video
+ # Define filename and path
mp4_name = f"{os_manager.get_sanitize_file(select_title.name, select_title.date)}.{extension_output}"
mp4_path = os.path.join(site_constants.MOVIE_FOLDER, mp4_name.replace(f".{extension_output}", ""))
- # Get playback session
+ # Extract media ID
url_id = select_title.get('url').split('/')[-1]
- playback_result = get_playback_session(client, url_id)
-
- # Check if access was denied (403)
- if playback_result is None:
- console.print("[red]✗ Access denied: This content requires a premium subscription")
- return None, False
- mpd_url, mpd_headers, mpd_list_sub, token, _ = playback_result
+ # Get playback session
+ mpd_url, mpd_headers, mpd_list_sub, token, audio_locale = get_playback_session(client, url_id, None)
- # Parse playback token from mpd_url
+ # Parse playback token from URL
parsed_url = urlparse(mpd_url)
query_params = parse_qs(parsed_url.query)
+ playback_guid = query_params.get('playbackGuid', [token])[0] if query_params.get('playbackGuid') else token
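+    # e.g. parse_qs("playbackGuid=abc&x=1") -> {'playbackGuid': ['abc'], 'x': ['1']},
+    # so playback_guid is 'abc'; when the param is absent, token is used instead.
+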
# Download the film
dash_process = DASH_Downloader(
@@ -74,7 +67,7 @@ def download_film(select_title: MediaItem) -> str:
license_headers = mpd_headers.copy()
license_headers.update({
"x-cr-content-id": url_id,
- "x-cr-video-token": query_params['playbackGuid'][0],
+ "x-cr-video-token": playback_guid,
})
if dash_process.download_and_decrypt(custom_headers=license_headers):
@@ -88,11 +81,5 @@ def download_film(select_title: MediaItem) -> str:
os.remove(status['path'])
except Exception:
pass
-
- # Delete stream after download to avoid TOO_MANY_ACTIVE_STREAMS
- playback_token = token or query_params.get('playbackGuid', [None])[0]
- if playback_token:
- client.delete_active_stream(url_id, playback_token)
- console.print("[dim]Playback session closed")
-
+
return status['path'], status['stopped']
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/crunchyroll/series.py b/StreamingCommunity/Api/Service/crunchyroll/series.py
index 92eec317..6c01d66b 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/series.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/series.py
@@ -1,6 +1,7 @@
# 16.03.25
import os
+import time
from urllib.parse import urlparse, parse_qs
from typing import Tuple
@@ -32,7 +33,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str, bool]:
@@ -59,18 +60,17 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.get('name'))}.{extension_output}"
mp4_path = os_manager.get_sanitize_path(os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}"))
- # Get playback session
+ # Get media ID and main_guid for complete subtitles
url_id = obj_episode.get('url').split('/')[-1]
- playback_result = get_playback_session(client, url_id)
+ main_guid = obj_episode.get('main_guid')
- # Check if access was denied (403)
- if playback_result is None:
- console.print("[red]✗ Access denied: This episode requires a premium subscription")
- return None, False
+ # Get playback session
+ mpd_url, mpd_headers, mpd_list_sub, token, audio_locale = get_playback_session(client, url_id, main_guid)
- mpd_url, mpd_headers, mpd_list_sub, token, _ = playback_result
+ # Parse playback token from URL
parsed_url = urlparse(mpd_url)
query_params = parse_qs(parsed_url.query)
+ playback_guid = query_params.get('playbackGuid', [token])[0] if query_params.get('playbackGuid') else token
# Download the episode
dash_process = DASH_Downloader(
@@ -85,7 +85,7 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
license_headers = mpd_headers.copy()
license_headers.update({
"x-cr-content-id": url_id,
- "x-cr-video-token": query_params['playbackGuid'][0],
+ "x-cr-video-token": playback_guid,
})
if dash_process.download_and_decrypt(custom_headers=license_headers):
@@ -100,14 +100,11 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
except Exception:
pass
- # Delete episode stream to avoid TOO_MANY_ACTIVE_STREAMS
- playback_token = token or query_params.get('playbackGuid', [None])[0]
- if playback_token:
- client.delete_active_stream(url_id, playback_token)
- console.print("[dim]Playback session closed")
-
+ # Small delay between episodes to avoid rate limiting
+ time.sleep(1)
return status['path'], status['stopped']
+
def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
"""
Handle downloading episodes for a specific season.
@@ -116,7 +113,7 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
- index_season_selected (int): Season number
- scrape_serie (GetSerieInfo): Scraper object with series information
- download_all (bool): Whether to download all episodes
- - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
+ - episode_selection (str, optional): Pre-defined episode selection
"""
# Get episodes for the selected season
episodes = scrape_serie.getEpisodeSeasons(index_season_selected)
@@ -154,19 +151,20 @@ def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, dow
if stopped:
break
+
def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
"""
Handle downloading a complete series.
Parameters:
- select_season (MediaItem): Series metadata from search
- - season_selection (str, optional): Pre-defined season selection that bypasses manual input
- - episode_selection (str, optional): Pre-defined episode selection that bypasses manual input
+ - season_selection (str, optional): Pre-defined season selection
+ - episode_selection (str, optional): Pre-defined episode selection
"""
scrape_serie = GetSerieInfo(select_season.url.split("/")[-1])
seasons_count = scrape_serie.getNumberSeason()
- # If season_selection is provided, use it instead of asking for input
+ # If season_selection is provided, use it
if season_selection is None:
index_season_selected = display_seasons_list(scrape_serie.seasons_manager)
else:
diff --git a/StreamingCommunity/Api/Service/crunchyroll/site.py b/StreamingCommunity/Api/Service/crunchyroll/site.py
index 2d545dc5..eb11a6b6 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/site.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/site.py
@@ -33,9 +33,7 @@ def title_search(query: str) -> int:
media_search_manager.clear()
table_show_manager.clear()
- config = config_manager.get_dict("SITE_LOGIN", "crunchyroll")
- if not config.get('device_id') or not config.get('etp_rt'):
- console.print("[red] device_id or etp_rt is missing or empty in config.json.")
+    if not config_manager.login.get('crunchyroll', 'device_id') or not config_manager.login.get('crunchyroll', 'etp_rt'):
raise Exception("device_id or etp_rt is missing or empty in config.json.")
client = CrunchyrollClient()
@@ -57,7 +55,7 @@ def title_search(query: str) -> int:
console.print(f"[cyan]Search url: [yellow]{api_url}")
try:
- response = client._request_with_retry('GET', api_url, params=params)
+ response = client.request('GET', api_url, params=params)
response.raise_for_status()
except Exception as e:
diff --git a/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py b/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
index 8b681385..236e88d6 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/util/ScrapeSerie.py
@@ -1,250 +1,253 @@
# 16.03.25
+import re
import logging
-from typing import List, Dict
+from typing import Dict, List, Optional, Tuple
# Internal utilities
from StreamingCommunity.Api.Template.object import SeasonManager
-from StreamingCommunity.Util.config_json import config_manager
-from .get_license import CrunchyrollClient
-# Variable
-NORMALIZE_SEASON_NUMBERS = False # Set to True to remap seasons to 1..N range
-DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
+# Logic
+from .client import CrunchyrollClient
-def get_series_seasons(series_id, client: CrunchyrollClient, params):
- """Fetches seasons for a series."""
- url = f'https://www.crunchyroll.com/content/v2/cms/series/{series_id}/seasons'
- return client._request_with_retry('GET', url, params=params)
+# Constants
+_EP_NUM_RE = re.compile(r"^\d+(\.\d+)?$")
-def get_season_episodes(season_id, client: CrunchyrollClient, params):
- """Fetches episodes for a season."""
- url = f'https://www.crunchyroll.com/content/v2/cms/seasons/{season_id}/episodes'
- return client._request_with_retry('GET', url, params=params)
+def _fetch_api_seasons(series_id: str, client: CrunchyrollClient, params: Dict):
+ """Fetch seasons from API."""
+ url = f'{client.api_base_url}/content/v2/cms/series/{series_id}/seasons'
+ return client.request('GET', url, params=params)
+
+
+def _fetch_api_episodes(season_id: str, client: CrunchyrollClient, params: Dict):
+ """Fetch episodes from API."""
+ url = f'{client.api_base_url}/content/v2/cms/seasons/{season_id}/episodes'
+ return client.request('GET', url, params=params)
+
+
+def _extract_episode_number(episode_data: Dict) -> str:
+ """Extract episode number from episode data."""
+ meta = episode_data.get("episode_metadata") or {}
+ candidates = [
+ episode_data.get("episode"),
+ meta.get("episode"),
+ meta.get("episode_number"),
+ episode_data.get("episode_number"),
+ ]
+
+ for val in candidates:
+ if val is None:
+ continue
+ val_str = val.strip() if isinstance(val, str) else str(val)
+ if val_str:
+ return val_str
+ return ""
+
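+# e.g. {"episode_metadata": {"episode": "12"}} -> "12"; absent or blank -> ""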
+
+def _is_special_episode(episode_number: str) -> bool:
+ """Check if episode is a special."""
+ if not episode_number:
+ return True
+ return not _EP_NUM_RE.match(episode_number)
+
+
+def _assign_display_numbers(episodes: List[Dict]) -> List[Dict]:
+ """Assign display numbers to episodes (normal and specials)."""
+ ep_counter = 1
+ sp_counter = 1
+
+ for episode in episodes:
+ if episode.get("is_special"):
+            raw_label = episode.get("raw_episode") or episode.get("number")
+ episode["display_number"] = f"SP{sp_counter}_{raw_label}" if raw_label else f"SP{sp_counter}"
+ sp_counter += 1
+ else:
+ episode["display_number"] = str(ep_counter)
+ ep_counter += 1
+
+ return episodes
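+
+# Illustrative walk-through (assumed raw numbers): ["1", "2", "OVA"] yields
+# display numbers ["1", "2", "SP1_OVA"]; "OVA" fails _EP_NUM_RE, so it is
+# flagged special and keeps its raw label as the suffix.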
class GetSerieInfo:
- def __init__(self, series_id):
- """
- Args:
- - series_id (str): The Crunchyroll series ID.
- """
+ def __init__(self, series_id: str, *, locale: str = "it-IT", preferred_audio_language: str = "it-IT"):
+ """Initialize series scraper with minimal API calls."""
self.series_id = series_id
self.seasons_manager = SeasonManager()
+ self.client = CrunchyrollClient(locale=locale)
- # Initialize Crunchyroll client
- self.client = CrunchyrollClient()
- if not self.client.start():
- raise Exception("Failed to authenticate with Crunchyroll")
-
- self.headers = self.client._get_headers()
self.params = {
'force_locale': '',
- 'preferred_audio_language': 'it-IT',
- 'locale': 'it-IT',
+ 'preferred_audio_language': preferred_audio_language,
+ 'locale': locale,
}
         self.series_name = None
self._episodes_cache = {}
- self.normalize_seasons = NORMALIZE_SEASON_NUMBERS
+ self._metadata_cache = {}
def collect_season(self) -> None:
- """
- Retrieve all seasons.
- If normalize_season_numbers=True: assigns 1..N and keeps cr_number.
- """
- response = get_series_seasons(self.series_id, self.client, self.params)
-
+ """Collect all seasons for the series - SINGLE API CALL."""
+ response = _fetch_api_seasons(self.series_id, self.client, self.params)
+
if response.status_code != 200:
- logging.error(f"Failed to fetch seasons for series {self.series_id}")
+ logging.error(f"Failed to fetch seasons: {response.status_code}")
return
-
+
data = response.json()
seasons = data.get("data", [])
-
- # Set series name from first season if available
+
+ # Extract basic series name from first season
if seasons:
- self.series_name = seasons[0].get("series_title") or seasons[0].get("title")
-
- # Extract raw data
- rows = []
- for s in seasons:
- raw_num = s.get("season_number", 0)
- rows.append({
- "id": s.get('id'),
- "title": s.get("title", f"Season {raw_num}"),
+            self.series_name = seasons[0].get("series_title") or seasons[0].get("title")
+
+ # Process seasons
+ season_rows = []
+ for season in seasons:
+ raw_num = season.get("season_number", 0)
+ season_rows.append({
+ "id": season.get('id'),
+ "title": season.get("title", f"Season {raw_num}"),
"raw_number": int(raw_num or 0),
- "slug": s.get("slug", ""),
+ "slug": season.get("slug", ""),
+ })
+
+ # Sort by number then title
+ season_rows.sort(key=lambda r: (r["raw_number"], r["title"] or ""))
+
+ # Add to manager
+ for idx, row in enumerate(season_rows):
+ self.seasons_manager.add_season({
+ 'number': row["raw_number"],
+ 'name': row["title"],
+ 'id': row["id"],
+ 'slug': row["slug"],
})
- # Sort by raw number then title for stability
- rows.sort(key=lambda r: (r["raw_number"], r["title"] or ""))
-
- if self.normalize_seasons:
- # Normalize: assign 1..N, keep original as cr_number
- for i, r in enumerate(rows, start=1):
- self.seasons_manager.add_season({
- 'number': i,
- 'cr_number': r["raw_number"],
- 'name': r["title"],
- 'id': r["id"],
- 'slug': r["slug"],
- })
-
- else:
- # No normalization: use CR's number directly
- for r in rows:
- self.seasons_manager.add_season({
- 'number': r["raw_number"],
- 'name': r["title"],
- 'id': r["id"],
- 'slug': r["slug"],
- })
-
- def _fetch_episodes_for_season(self, season_number: int) -> List[Dict]:
- """Fetch and cache episodes for a specific season number."""
- season = self.seasons_manager.get_season_by_number(season_number)
- if not season:
- logging.error(f"Season {season_number} not found")
- return []
+ def _fetch_episodes_for_season(self, season_index: int) -> List[Dict]:
+ """Fetch and cache episodes for a season - SINGLE API CALL per season."""
+ season = self.seasons_manager.seasons[season_index-1]
+        response = _fetch_api_episodes(season.id, self.client, self.params)
+
+        if response.status_code != 200:
+            logging.error(f"Failed to fetch episodes: {response.status_code}")
+            return []
+
+        # Get response json
+        data = response.json()
+ episodes_data = data.get("data", [])
+
+ # Build episode list
+ episodes = []
+ for ep_data in episodes_data:
+ ep_number = _extract_episode_number(ep_data)
+ is_special = _is_special_episode(ep_number)
+ ep_id = ep_data.get("id")
- ep_response = get_season_episodes(season.id, self.client, self.params)
-
- if ep_response.status_code != 200:
- logging.error(f"Failed to fetch episodes for season {season.id}")
- return []
-
- ep_data = ep_response.json()
- episodes = ep_data.get("data", [])
- episode_list = []
-
- for ep in episodes:
- ep_num = ep.get("episode_number")
- ep_title = ep.get("title", f"Episodio {ep_num}")
- ep_id = ep.get("id")
- ep_url = f"https://www.crunchyroll.com/watch/{ep_id}"
+ # Cache metadata for later use
+ if ep_id:
+ self._metadata_cache[ep_id] = ep_data
- episode_list.append({
- 'number': ep_num,
- 'name': ep_title,
- 'url': ep_url,
- 'duration': int(ep.get('duration_ms', 0) / 60000),
+ episodes.append({
+ 'id': ep_id,
+ 'number': ep_number,
+ 'is_special': is_special,
+ 'name': ep_data.get("title", f"Episodio {ep_data.get('episode_number')}"),
+ 'url': f"{self.client.web_base_url}/watch/{ep_id}",
+ 'duration': int(ep_data.get('duration_ms', 0) / 60000),
})
-
- self._episodes_cache[season_number] = episode_list
- return episode_list
-
- def _get_preferred_audio_locale(self) -> str:
- lang_mapping = {
- 'ita': 'it-IT',
- 'eng': 'en-US',
- 'jpn': 'ja-JP',
- 'ger': 'de-DE',
- 'fre': 'fr-FR',
- 'spa': 'es-419',
- 'por': 'pt-BR'
- }
- preferred_lang = DOWNLOAD_SPECIFIC_AUDIO[0] if DOWNLOAD_SPECIFIC_AUDIO else 'ita'
- preferred_locale = lang_mapping.get(preferred_lang.lower(), 'it-IT')
- return preferred_locale
-
- def _get_episode_id_for_preferred_language(self, base_episode_id: str) -> str:
- """Get the correct episode ID for the preferred audio language."""
- preferred_locale = self._get_preferred_audio_locale()
- url = f'https://www.crunchyroll.com/content/v2/cms/objects/{base_episode_id}'
- params = {
- 'ratings': 'true',
- 'locale': 'it-IT',
- }
+ # Sort: normal episodes first, then specials
+ normal = [e for e in episodes if not e.get("is_special")]
+ specials = [e for e in episodes if e.get("is_special")]
+ episodes = normal + specials
- try:
- response = self.client._request_with_retry('GET', url, params=params)
-
- if response.status_code != 200:
- logging.warning(f"Failed to fetch episode details for {base_episode_id}")
- return base_episode_id
-
- data = response.json()
- item = (data.get("data") or [{}])[0] or {}
- meta = item.get('episode_metadata', {}) or {}
-
- versions = meta.get("versions") or []
-
- # Print all available audio locales
- available_locales = []
- for version in versions:
- if isinstance(version, dict):
- locale = version.get("audio_locale")
- if locale:
- available_locales.append(locale)
- print(f"Available audio locales: {available_locales}")
-
- # Find matching version by audio_locale
- for i, version in enumerate(versions):
- if isinstance(version, dict):
- audio_locale = version.get("audio_locale")
- guid = version.get("guid")
-
- if audio_locale == preferred_locale:
- print(f"Found matching locale! Selected: {audio_locale} -> {guid}")
- return version.get("guid", base_episode_id)
+ # Assign display numbers
+ episodes = _assign_display_numbers(episodes)
+
+ # Cache and return
+ self._episodes_cache[season_index] = episodes
+ return episodes
+
+ def _get_episode_audio_locales(self, episode_id: str) -> Tuple[List[str], Dict[str, str], Optional[str]]:
+ """
+ Get available audio locales WITHOUT calling playback API.
+ Uses cached metadata from episode list API call.
+
+ Returns:
+ Tuple[List[str], Dict[str, str], Optional[str]]: (audio_locales, urls_by_locale, main_guid)
+ """
+ cached_data = self._metadata_cache.get(episode_id)
+
+ if cached_data:
+ meta = cached_data.get('episode_metadata', {}) or {}
+ versions = meta.get("versions") or cached_data.get("versions") or []
- # Fallback: try to find any available version if preferred not found
- if versions and isinstance(versions[0], dict):
- fallback_guid = versions[0].get("guid")
- fallback_locale = versions[0].get("audio_locale")
- if fallback_guid:
- print(f"Preferred locale {preferred_locale} not found, using fallback: {fallback_locale} -> {fallback_guid}")
- return fallback_guid
-
- except Exception as e:
- logging.error(f"Error getting episode ID for preferred language: {e}")
-
- print(f"[DEBUG] No suitable version found, returning original episode ID: {base_episode_id}")
- return base_episode_id
+ if versions:
+ main_guid = None
+
+ # First pass: find main track (for complete subtitles)
+ for v in versions:
+ roles = v.get("roles", [])
+ if "main" in roles:
+ main_guid = v.get("guid")
+ break
+
+ # Second pass: find preferred audio locale
+ audio_locales = []
+ urls_by_locale = {}
+ seen_locales = set()
+
+ for v in versions:
+ locale = v.get("audio_locale")
+ guid = v.get("guid")
+ if locale and guid and locale not in seen_locales:
+ seen_locales.add(locale)
+ audio_locales.append(locale)
+ urls_by_locale[locale] = f"{self.client.web_base_url}/watch/{guid}"
+
+ if audio_locales:
+ return audio_locales, urls_by_locale, main_guid
+
+ return [], {episode_id: f"{self.client.web_base_url}/watch/{episode_id}"}, None
+
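+    # Illustrative versions payload (assumed shape from the cached metadata):
+    #   [{"guid": "GX1", "audio_locale": "ja-JP", "roles": ["main"]},
+    #    {"guid": "GX2", "audio_locale": "it-IT", "roles": []}]
+    # -> (["ja-JP", "it-IT"], {locale: watch URL, ...}, main_guid "GX1")
+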
# ------------- FOR GUI -------------
def getNumberSeason(self) -> int:
- """
- Get the total number of seasons available for the series.
- """
+ """Get total number of seasons."""
if not self.seasons_manager.seasons:
self.collect_season()
return len(self.seasons_manager.seasons)
-
- def getEpisodeSeasons(self, season_number: int) -> list:
- """
- Get all episodes for a specific season (fetches only when needed).
- """
+
+ def getEpisodeSeasons(self, season_index: int) -> List[Dict]:
+ """Get all episodes for a season."""
if not self.seasons_manager.seasons:
self.collect_season()
- if season_number not in self._episodes_cache:
- episodes = self._fetch_episodes_for_season(season_number)
- else:
- episodes = self._episodes_cache[season_number]
- return episodes
-
- def selectEpisode(self, season_number: int, episode_index: int) -> dict:
- """
- Get information for a specific episode in a specific season.
- """
- episodes = self.getEpisodeSeasons(season_number)
- if not episodes or episode_index < 0 or episode_index >= len(episodes):
- logging.error(f"Episode index {episode_index} is out of range for season {season_number}")
- return None
+
+ if season_index not in self._episodes_cache:
+ self._fetch_episodes_for_season(season_index)
+
+ return self._episodes_cache.get(season_index, [])
+ def selectEpisode(self, season_index: int, episode_index: int) -> Optional[Dict]:
+ """Get specific episode with audio information."""
+        episodes = self.getEpisodeSeasons(season_index)
+        if not episodes or episode_index < 0 or episode_index >= len(episodes):
+            logging.error(f"Episode index {episode_index} is out of range for season {season_index}")
+            return None
         episode = episodes[episode_index]
- base_episode_id = episode.get("url", "").split("/")[-1] if "url" in episode else None
-
- if not base_episode_id:
+ episode_id = episode.get("url", "").split("/")[-1] if episode.get("url") else None
+
+ if not episode_id:
return episode
-
- preferred_episode_id = self._get_episode_id_for_preferred_language(base_episode_id)
- episode["url"] = f"https://www.crunchyroll.com/watch/{preferred_episode_id}"
- #print(f"Updated episode URL: {episode['url']}")
-
+
+ # Update URL to preferred language if available
+ audio_locales, urls_by_locale, main_guid = self._get_episode_audio_locales(episode_id)
+
+ # Store main_guid for complete subtitles access
+ if main_guid:
+ episode["main_guid"] = main_guid
+ episode["main_url"] = f"{self.client.web_base_url}/watch/{main_guid}"
+
+ # Continue with normal audio preference logic
+ if urls_by_locale:
+ preferred_lang = self.params.get("preferred_audio_language", "it-IT")
+ new_url = urls_by_locale.get(preferred_lang) or urls_by_locale.get("en-US") or list(urls_by_locale.values())[0]
+ if new_url:
+ episode["url"] = new_url
+
return episode
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/crunchyroll/util/client.py b/StreamingCommunity/Api/Service/crunchyroll/util/client.py
new file mode 100644
index 00000000..31b89747
--- /dev/null
+++ b/StreamingCommunity/Api/Service/crunchyroll/util/client.py
@@ -0,0 +1,352 @@
+# 29.12.25
+
+import time
+import os
+import logging
+import json
+import base64
+from typing import Dict, Optional
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+from StreamingCommunity.Util.http_client import create_client_curl, get_userAgent
+
+
+# Constants
+PUBLIC_TOKEN = "bm9haWhkZXZtXzZpeWcwYThsMHE6"
+BASE_URL = "https://www.crunchyroll.com"
+API_BETA_BASE_URL = "https://beta-api.crunchyroll.com"
+PLAY_SERVICE_URL = "https://cr-play-service.prd.crunchyrollsvc.com"
+
+
+class CrunchyrollClient:
+ def __init__(self, locale: str = "it-IT", **kwargs) -> None:
+ self.device_id = config_manager.login.get('crunchyroll', 'device_id')
+ self.etp_rt = config_manager.login.get('crunchyroll', 'etp_rt')
+ self.locale = locale
+
+ self.web_base_url = BASE_URL
+ self.api_base_url = self._resolve_api_base_url()
+ self.play_service_url = PLAY_SERVICE_URL
+ self.token_cache_path = self._resolve_token_cache_path()
+ self.token_cache_enabled = True
+ self.user_agent = None
+
+ self.access_token: Optional[str] = None
+ self.refresh_token: Optional[str] = None
+ self.account_id: Optional[str] = None
+ self.expires_at: float = 0.0
+
+ # Load cached tokens
+ cache_data = self._load_token_cache()
+ if not self.user_agent:
+ cached_ua = cache_data.get("user_agent") if isinstance(cache_data, dict) else None
+ self.user_agent = cached_ua if isinstance(cached_ua, str) and cached_ua.strip() else get_userAgent()
+
+ self.session = create_client_curl(headers=self._get_headers(), cookies=self._get_cookies())
+
+ @staticmethod
+ def _resolve_api_base_url() -> str:
+ """Determine the correct API base URL - defaults to beta API."""
+ return API_BETA_BASE_URL
+
+ @staticmethod
+ def _resolve_token_cache_path() -> str:
+ """Resolve absolute path for token cache file - always enabled."""
+ base_dir = os.getcwd()
+ path = os.path.join(base_dir, ".cache", "crunchyroll_token.json")
+ return path
+
+ @staticmethod
+ def _jwt_exp(token: Optional[str]) -> Optional[int]:
+ """Extract expiration timestamp from JWT token payload."""
+ if not isinstance(token, str) or token.count(".") < 2:
+ return None
+
+ try:
+ payload_b64 = token.split(".", 2)[1]
+ padding = "=" * (-len(payload_b64) % 4)
+ payload = base64.urlsafe_b64decode(payload_b64 + padding).decode("utf-8", errors="replace")
+ obj = json.loads(payload)
+ exp = obj.get("exp")
+
+ if isinstance(exp, int):
+ return exp
+ if isinstance(exp, str) and exp.isdigit():
+ return int(exp)
+
+ except Exception:
+ pass
+ return None
+
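+    # Example (illustrative token): the payload {"exp":1700000000} encodes to
+    # base64url "eyJleHAiOjE3MDAwMDAwMDB9", so
+    # _jwt_exp("h.eyJleHAiOjE3MDAwMDAwMDB9.s") returns 1700000000.
+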
+ def _set_expires_at(self, *, expires_in: Optional[int] = None) -> None:
+ """Set token expiration time from JWT or expires_in value."""
+ exp = self._jwt_exp(self.access_token)
+ if isinstance(exp, int) and exp > 0:
+ self.expires_at = float(exp - 60)
+ return
+
+ if expires_in is None:
+ self.expires_at = 0.0
+ return
+
+ self.expires_at = time.time() + max(0, int(expires_in) - 60)
+
+ def _load_token_cache(self) -> Dict:
+ """Load cached authentication tokens from file if available."""
+ if not self.token_cache_path:
+ return {}
+
+ try:
+ if not os.path.exists(self.token_cache_path):
+ return {}
+
+ with open(self.token_cache_path, "r", encoding="utf-8") as f:
+ data = json.load(f)
+
+ if not isinstance(data, dict):
+ return {}
+
+ cached_device_id = data.get("device_id")
+ if self.device_id and isinstance(cached_device_id, str) and cached_device_id != self.device_id:
+ return {}
+
+ access = data.get("access_token")
+ refresh = data.get("refresh_token")
+ if isinstance(access, str) and access:
+ self.access_token = access
+ if isinstance(refresh, str) and refresh:
+ self.refresh_token = refresh
+
+ account_id = data.get("account_id")
+ if isinstance(account_id, str) and account_id:
+ self.account_id = account_id
+
+ try:
+ self.expires_at = float(data.get("expires_at") or 0.0)
+ except Exception:
+ self.expires_at = 0.0
+
+ return data
+ except Exception as e:
+ logging.debug(f"Token cache load failed: {e}")
+ return {}
+
+ def _save_token_cache(self) -> None:
+ """Save current authentication tokens to cache file."""
+ if not self.token_cache_path:
+ return
+
+ try:
+ cache_dir = os.path.dirname(self.token_cache_path)
+ if cache_dir:
+ os.makedirs(cache_dir, exist_ok=True)
+
+ payload = {
+ "device_id": self.device_id,
+ "account_id": self.account_id,
+ "access_token": self.access_token,
+ "refresh_token": self.refresh_token,
+ "expires_at": self.expires_at,
+ "user_agent": self.user_agent,
+ "api_base_url": self.api_base_url,
+ "saved_at": time.time(),
+ }
+
+ with open(self.token_cache_path, "w", encoding="utf-8") as f:
+ json.dump(payload, f, indent=2)
+
+ except Exception as e:
+ logging.debug(f"Token cache save failed: {e}")
+
+ def _get_headers(self) -> Dict:
+ """Generate HTTP headers for API requests including authorization."""
+ headers = {
+ 'user-agent': self.user_agent or get_userAgent(),
+ 'accept': 'application/json, text/plain, */*',
+ 'origin': self.web_base_url,
+ 'referer': f'{self.web_base_url}/',
+ 'accept-language': f'{self.locale.replace("_", "-")},en-US;q=0.8,en;q=0.7',
+ }
+ if self.access_token:
+ headers['authorization'] = f'Bearer {self.access_token}'
+
+ return headers
+
+ def _get_cookies(self) -> Dict:
+ """Generate cookies for API requests including device_id and etp_rt."""
+ cookies = {'device_id': self.device_id}
+ if self.etp_rt:
+ cookies['etp_rt'] = self.etp_rt
+ return cookies
+
+ def start(self) -> bool:
+ """Authenticate using etp_rt cookie - single attempt."""
+ headers = self._get_headers()
+ headers['authorization'] = f'Basic {PUBLIC_TOKEN}'
+ headers['content-type'] = 'application/x-www-form-urlencoded'
+
+ data = {
+ 'device_id': self.device_id,
+ 'device_type': 'Chrome on Windows',
+ 'grant_type': 'etp_rt_cookie',
+ }
+
+ response = self.session.post(
+ f'{self.api_base_url}/auth/v1/token',
+ cookies=self._get_cookies(),
+ headers=headers,
+ data=data
+ )
+
+ if response.status_code != 200:
+ logging.error(f"Authentication failed: {response.status_code}")
+ return False
+
+ result = response.json()
+
+ self.access_token = result.get('access_token')
+ self.refresh_token = result.get('refresh_token')
+ self.account_id = result.get('account_id')
+
+ expires_in = int(result.get('expires_in', 3600) or 3600)
+ self._set_expires_at(expires_in=expires_in)
+ self._save_token_cache()
+
+ return True
+
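+    # Assumed config.json fragment backing config_manager.login:
+    #   "SITE_LOGIN": {"crunchyroll": {"device_id": "<uuid>", "etp_rt": "<cookie>"}}
+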
+ def _refresh(self) -> None:
+ """Refresh access token - single attempt."""
+ if not self.refresh_token:
+ raise RuntimeError("refresh_token missing")
+
+ headers = self._get_headers()
+ headers['authorization'] = f'Basic {PUBLIC_TOKEN}'
+ headers['content-type'] = 'application/x-www-form-urlencoded'
+
+ data = {
+ 'grant_type': 'refresh_token',
+ 'refresh_token': self.refresh_token,
+ 'device_type': 'Chrome on Windows',
+ }
+ if self.device_id:
+ data['device_id'] = self.device_id
+
+ response = self.session.post(
+ f'{self.api_base_url}/auth/v1/token',
+ cookies=self._get_cookies(),
+ headers=headers,
+ data=data
+ )
+
+ if response.status_code != 200:
+ raise RuntimeError(f"Token refresh failed: {response.status_code}")
+
+ result = response.json()
+ self.access_token = result.get('access_token')
+ self.refresh_token = result.get('refresh_token') or self.refresh_token
+
+ expires_in = int(result.get('expires_in', 3600) or 3600)
+        self._set_expires_at(expires_in=expires_in)
+ self._save_token_cache()
+
+ def _ensure_token(self) -> None:
+ """Ensure valid access token - no retries."""
+ if not self.access_token:
+ if not self.start():
+ raise RuntimeError("Authentication failed")
+ return
+
+ # Refresh if expiring soon
+ if time.time() >= (self.expires_at - 30):
+ try:
+ self._refresh()
+ except Exception:
+ if not self.start():
+ raise RuntimeError("Re-authentication failed")
+
+ def request(self, method: str, url: str, **kwargs):
+        """Single request, with at most one retry on 401."""
+ self._ensure_token()
+
+ headers = kwargs.pop('headers', {}) or {}
+ merged_headers = {**self._get_headers(), **headers}
+ kwargs['headers'] = merged_headers
+ kwargs.setdefault('cookies', self._get_cookies())
+ kwargs.setdefault('timeout', config_manager.config.get_int('REQUESTS', 'timeout', default=30))
+
+ response = self.session.request(method, url, **kwargs)
+
+ # Only handle 401 once
+ if response.status_code == 401:
+ try:
+ self._refresh()
+ except Exception:
+ self.start()
+ kwargs['headers'] = {**self._get_headers(), **headers}
+ response = self.session.request(method, url, **kwargs)
+
+ return response
+
+ def refresh(self) -> None:
+ """Public refresh method."""
+ self._refresh()
+
+ def get_streams(self, media_id: str) -> Dict:
+ """Get playback data - single attempt only."""
+ pb_url = f'{self.play_service_url}/v3/{media_id}/web/chrome/play'
+
+ response = self.request('GET', pb_url, params={'locale': self.locale})
+
+ if response.status_code == 403:
+ raise Exception("Playback Rejected: Subscription required")
+
+ if response.status_code == 404:
+ raise Exception(f"Playback endpoint not found: {pb_url}")
+
+ if response.status_code == 420:
+ try:
+ payload = response.json()
+ error_code = payload.get("error")
+ active_streams = payload.get("activeStreams", [])
+
+ if error_code in ("TOO_MANY_ACTIVE_STREAMS", "TOO_MANY_CONCURRENT_STREAMS") and active_streams:
+ logging.warning(f"TOO_MANY_ACTIVE_STREAMS: cleaning up {len(active_streams)} streams")
+ for s in active_streams:
+ if isinstance(s, dict):
+ content_id = s.get("contentId")
+ token = s.get("token")
+ if content_id and token:
+ self.deauth_video(content_id, token)
+ except Exception:
+ pass
+
+ raise Exception("TOO_MANY_ACTIVE_STREAMS. Wait and try again.")
+
+ if response.status_code != 200:
+ raise Exception(f"Playback failed: {response.status_code}")
+
+ data = response.json()
+
+ if data.get('error') == 'Playback is Rejected':
+ raise Exception("Playback Rejected: Premium required")
+
+ return data
+
+ def deauth_video(self, media_id: str, token: str) -> bool:
+ """Mark playback token as inactive to free stream slot."""
+ if not media_id or not token:
+ return False
+
+ try:
+ response = self.session.patch(
+                f'{self.play_service_url}/v1/token/{media_id}/{token}/inactive',
+ cookies=self._get_cookies(),
+ headers=self._get_headers(),
+ )
+ return response.status_code in (200, 204)
+
+ except Exception as e:
+ logging.debug(f"Failed to deauth stream token: {e}")
+ return False
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py b/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
index f651601f..2e3bb940 100644
--- a/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
+++ b/StreamingCommunity/Api/Service/crunchyroll/util/get_license.py
@@ -1,306 +1,111 @@
# 28.07.25
-import time
import logging
from typing import Tuple, List, Dict, Optional
# Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
-from StreamingCommunity.Util.http_client import create_client_curl, get_userAgent
+from .client import CrunchyrollClient
-# Variable
-PUBLIC_TOKEN = "bm9haWhkZXZtXzZpeWcwYThsMHE6"
-BASE_URL = "https://www.crunchyroll.com"
-DEFAULT_QPS = 3.0 # Queries per second to avoid rate limiting
-DEFAULT_MAX_RETRIES = 3 # Maximum retry attempts for failed requests
-DEFAULT_BASE_BACKOFF_MS = 300 # Base backoff time in milliseconds
-DEFAULT_SLOWDOWN_AFTER = 50 # Number of requests before introducing slowdown
-
-
-class PlaybackError(Exception):
- pass
-
-
-class RateLimiter:
- def __init__(self, qps: float):
- self.qps = max(0.1, float(qps))
- self._last = 0.0
-
- def wait(self):
- if self.qps <= 0:
- return
- now = time.time()
- min_dt = 1.0 / self.qps
- dt = now - self._last
- if dt < min_dt:
- time.sleep(min_dt - dt)
- self._last = time.time()
-
-
-class CrunchyrollClient:
- def __init__(self) -> None:
- config = config_manager.get_dict("SITE_LOGIN", "crunchyroll")
- self.device_id = str(config.get('device_id')).strip()
- self.etp_rt = str(config.get('etp_rt')).strip()
- self.locale = "it-IT"
-
- self.access_token: Optional[str] = None
- self.refresh_token: Optional[str] = None
- self.account_id: Optional[str] = None
- self.expires_at: float = 0.0
-
- # Rate limiting configuration
- self.rate_limiter = RateLimiter(qps=DEFAULT_QPS)
- self._req_count = 0
-
- # Retry configuration
- self.max_retries = DEFAULT_MAX_RETRIES
- self.base_backoff_ms = DEFAULT_BASE_BACKOFF_MS
- self.slowdown_after = DEFAULT_SLOWDOWN_AFTER
-
- def _get_headers(self) -> Dict:
- headers = {
- 'user-agent': get_userAgent(),
- 'accept': 'application/json, text/plain, */*',
- 'origin': BASE_URL,
- 'referer': f'{BASE_URL}/',
- }
- if self.access_token:
- headers['authorization'] = f'Bearer {self.access_token}'
- return headers
-
- def _get_cookies(self) -> Dict:
- cookies = {'device_id': self.device_id}
- if self.etp_rt:
- cookies['etp_rt'] = self.etp_rt
- return cookies
-
- def start(self) -> bool:
- """Authorize the client with etp_rt_cookie grant."""
- headers = self._get_headers()
- headers['authorization'] = f'Basic {PUBLIC_TOKEN}'
- headers['content-type'] = 'application/x-www-form-urlencoded'
-
- data = {
- 'device_id': self.device_id,
- 'device_type': 'Chrome on Windows',
- 'grant_type': 'etp_rt_cookie',
- }
-
- self.rate_limiter.wait()
- response = create_client_curl(headers=headers).post(
- f'{BASE_URL}/auth/v1/token',
- cookies=self._get_cookies(),
- data=data
- )
- self._req_count += 1
-
- if response.status_code == 400:
- logging.error("Error 400: Invalid 'etp_rt' in config.json")
- return False
-
- response.raise_for_status()
- result = response.json()
-
- self.access_token = result.get('access_token')
- self.refresh_token = result.get('refresh_token')
- self.account_id = result.get('account_id')
-
- # Set expiration with 60s margin to refresh proactively
- expires_in = int(result.get('expires_in', 3600) or 3600)
- self.expires_at = time.time() + max(0, expires_in - 60)
-
- return True
-
- def _refresh(self) -> None:
- """Refresh access token using refresh_token."""
- if not self.refresh_token:
- raise RuntimeError("refresh_token missing")
-
- headers = self._get_headers()
- headers['authorization'] = f'Basic {PUBLIC_TOKEN}'
- headers['content-type'] = 'application/x-www-form-urlencoded'
-
- data = {
- 'grant_type': 'refresh_token',
- 'refresh_token': self.refresh_token,
- 'device_type': 'Chrome on Windows',
- }
- if self.device_id:
- data['device_id'] = self.device_id
-
- self.rate_limiter.wait()
- response = create_client_curl(headers=headers).post(
- f'{BASE_URL}/auth/v1/token',
- cookies=self._get_cookies(),
- data=data
- )
- self._req_count += 1
- response.raise_for_status()
-
- result = response.json()
- self.access_token = result.get('access_token')
- self.refresh_token = result.get('refresh_token') or self.refresh_token
-
- # Set expiration with 60s margin to refresh proactively
- expires_in = int(result.get('expires_in', 3600) or 3600)
- self.expires_at = time.time() + max(0, expires_in - 60)
-
- def _ensure_token(self) -> None:
- """Ensure access_token is valid and not expired."""
- if not self.access_token:
- if not self.start():
- raise RuntimeError("Authentication failed")
- return
-
- # Proactive refresh if token is expiring soon
- if time.time() >= (self.expires_at - 30):
- try:
- self._refresh()
-
- except Exception as e:
- logging.warning(f"Refresh failed, re-authenticating: {e}")
- if not self.start():
- raise RuntimeError("Re-authentication failed")
-
- def _request_with_retry(self, method: str, url: str, **kwargs):
- """
- Make HTTP request with automatic retry on transient errors.
- """
- self._ensure_token()
-
- headers = kwargs.pop('headers', {}) or {}
- merged_headers = {**self._get_headers(), **headers}
- kwargs['headers'] = merged_headers
- kwargs.setdefault('cookies', self._get_cookies())
-
- attempt = 0
- while True:
- self.rate_limiter.wait()
-
- # Introduce slowdown after many requests
- if self._req_count >= self.slowdown_after:
- time.sleep((self.base_backoff_ms + 200) / 1000.0)
-
- response = create_client_curl(headers=kwargs['headers']).request(method, url, **kwargs)
- self._req_count += 1
-
- # Retry on 401 (token expired)
- if response.status_code == 401 and attempt < self.max_retries:
- attempt += 1
- logging.warning(f"401 error, refreshing token (attempt {attempt}/{self.max_retries})")
-
- try:
- self._refresh()
- except Exception:
- self.start()
-
- kwargs['headers'] = {**self._get_headers(), **headers}
- time.sleep((self.base_backoff_ms * attempt) / 1000.0)
- continue
-
- # Retry on transient server errors
- if response.status_code in (502, 503, 504) and attempt < self.max_retries:
- attempt += 1
- backoff = (self.base_backoff_ms * attempt + 100) / 1000.0
- logging.warning(f"{response.status_code} error, backing off {backoff}s (attempt {attempt}/{self.max_retries})")
- time.sleep(backoff)
- continue
-
- return response
-
- def get_streams(self, media_id: str) -> Optional[Dict]:
- """
- Get available streams for media_id.
- """
- response = self._request_with_retry(
- 'GET', f'{BASE_URL}/playback/v3/{media_id}/web/chrome/play',
- params={'locale': self.locale}
- )
-
- if response.status_code == 403:
- logging.warning(f"Access denied for media {media_id}: Subscription required")
- return None
-
- if response.status_code == 420:
- raise PlaybackError("TOO_MANY_ACTIVE_STREAMS. Wait a few minutes and try again.")
-
- response.raise_for_status()
- data = response.json()
-
- if data.get('error') == 'Playback is Rejected':
- logging.warning(f"Playback rejected for media {media_id}: Premium required")
- return None
-
- return data
-
- def delete_active_stream(self, media_id: str, token: str) -> bool:
- """Delete an active stream session (cleanup to avoid TOO_MANY_ACTIVE_STREAMS)."""
- if not token:
- return False
-
- try:
- self.rate_limiter.wait()
- response = create_client_curl(headers=self._get_headers()).delete(
- f'{BASE_URL}/playback/v1/token/{media_id}/{token}',
- cookies=self._get_cookies()
- )
- self._req_count += 1
- return response.status_code in [200, 204]
-
- except Exception as e:
- logging.warning(f"Failed to delete stream: {e}")
- return False
-
-
-def _find_token_anywhere(obj) -> Optional[str]:
+def _find_token_recursive(obj) -> Optional[str]:
"""Recursively search for 'token' field in playback response."""
if isinstance(obj, dict):
for k, v in obj.items():
- if k.lower() == "token" and isinstance(v, str) and len(v) > 10:
+ if str(k).lower() == "token" and isinstance(v, str) and len(v) > 10:
return v
- t = _find_token_anywhere(v)
- if t:
- return t
-
+ token = _find_token_recursive(v)
+ if token:
+ return token
elif isinstance(obj, list):
for el in obj:
- t = _find_token_anywhere(el)
- if t:
- return t
-
+ token = _find_token_recursive(el)
+ if token:
+ return token
return None
-def get_playback_session(client: CrunchyrollClient, url_id: str) -> Optional[Tuple[str, Dict, List[Dict], Optional[str], Optional[str]]]:
+def _extract_subtitles(data: Dict) -> List[Dict]:
+ """Extract all subtitles from playback data."""
+ subtitles = []
+
+ # Process regular subtitles
+ subs_obj = data.get('subtitles') or {}
+ for lang, info in subs_obj.items():
+ if not info or not info.get('url'):
+ continue
+
+ subtitles.append({
+ 'language': lang,
+ 'url': info['url'],
+ 'format': info.get('format'),
+ 'type': info.get('type'),
+ 'closed_caption': bool(info.get('closed_caption')),
+ 'label': info.get('display') or info.get('title') or info.get('language')
+ })
+
+ # Process captions/closed captions
+ captions_obj = data.get('captions') or data.get('closed_captions') or {}
+ for lang, info in captions_obj.items():
+ if not info or not info.get('url'):
+ continue
+
+ subtitles.append({
+ 'language': lang,
+ 'url': info['url'],
+ 'format': info.get('format'),
+ 'type': info.get('type') or 'captions',
+ 'closed_caption': True if info.get('closed_caption') is None else bool(info.get('closed_caption')),
+ 'label': info.get('display') or info.get('title') or info.get('language')
+ })
+
+ return subtitles
+
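+# Illustrative playback fragment (assumed shape):
+#   {"subtitles": {"it-IT": {"url": "https://...", "format": "vtt"}}}
+# -> [{'language': 'it-IT', 'url': 'https://...', 'format': 'vtt',
+#      'type': None, 'closed_caption': False, 'label': None}]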
+
+def get_playback_session(client: CrunchyrollClient, url_id: str, main_guid: Optional[str] = None) -> Tuple[str, Dict, List[Dict], Optional[str], Optional[str]]:
"""
- Return the playback session details.
-
+ Get playback session with SINGLE API call.
+ If main_guid is provided, fetch subtitles from main track for complete subs.
+
Returns:
- Tuple with (mpd_url, headers, subtitles, token, audio_locale) or None if access denied
+ - mpd_url: str
+ - headers: Dict
+ - subtitles: List[Dict]
+ - token: Optional[str]
+ - audio_locale: Optional[str]
"""
- data = client.get_streams(url_id)
- try:
- url = data.get('url')
- audio_locale_current = data.get('audio_locale') or data.get('audio', {}).get('locale')
+ playback_data = client.get_streams(url_id)
- # Collect subtitles with metadata
- subtitles = []
- subtitles_data = data.get('subtitles', {})
- for lang_code, sub_info in subtitles_data.items():
- if sub_info.get('url'):
- subtitles.append({
- 'language': sub_info.get('language'),
- 'format': sub_info.get('format'),
- 'url': sub_info.get('url'),
- })
+ # Extract relevant data
+ mpd_url = playback_data.get('url')
+ audio_locale = playback_data.get('audio_locale') or playback_data.get('audio', {}).get('locale')
+ token = playback_data.get("token") or _find_token_recursive(playback_data)
+
+ # Get subtitles: prefer main_guid for complete subtitles if available
+ if main_guid and main_guid != url_id:
+ try:
+ # Fetch subtitles from main track
+ main_playback_data = client.get_streams(main_guid)
+ subtitles = _extract_subtitles(main_playback_data)
+
+ # Deauth main track token
+ main_token = main_playback_data.get("token") or _find_token_recursive(main_playback_data)
+ if main_token:
+ client.deauth_video(main_guid, main_token)
- token = _find_token_anywhere(data)
- headers = client._get_headers()
- return url, headers, subtitles, token, audio_locale_current
+ except Exception as e:
+ logging.debug(f"Failed to fetch subtitles from main track: {e}")
+ subtitles = _extract_subtitles(playback_data)
- except Exception as e:
- logging.error(f"Failed to parse playback session: {e}, Premium subscription may be required.")
- return None
\ No newline at end of file
+ else:
+ subtitles = _extract_subtitles(playback_data)
+
+ # Immediately deauth to free stream slot (non-blocking)
+ if token:
+ try:
+ client.deauth_video(url_id, token)
+ except Exception as e:
+ logging.debug(f"Deauth during playback failed: {e}")
+
+ headers = client._get_headers()
+ return mpd_url, headers, subtitles, token, audio_locale
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discovery/series.py b/StreamingCommunity/Api/Service/discovery/series.py
index 14c8e1fb..aa5cc588 100644
--- a/StreamingCommunity/Api/Service/discovery/series.py
+++ b/StreamingCommunity/Api/Service/discovery/series.py
@@ -31,7 +31,7 @@
# Variables
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str, bool]:
diff --git a/StreamingCommunity/Api/Service/discoveryeu/__init__.py b/StreamingCommunity/Api/Service/discoveryeu/__init__.py
new file mode 100644
index 00000000..5560eff6
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discoveryeu/__init__.py
@@ -0,0 +1,102 @@
+# 22.12.25
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Api.Template import site_constants, MediaItem, get_select_title
+
+
+# Logic
+from .site import title_search, table_show_manager, media_search_manager
+from .series import download_series
+
+
+# Variables
+indice = 15
+_useFor = "Film_&_Serie"
+_region = "EU"
+_deprecate = False
+_stream_type = "DASH"
+_maxResolution = "1080p"
+_drm = True
+
+
+msg = Prompt()
+console = Console()
+
+
+def process_search_result(select_title, selections=None):
+ """
+ Handles the search result and initiates download for film or series
+
+ Parameters:
+ select_title (MediaItem): The selected media item
+ selections (dict, optional): Dictionary containing selection inputs
+ {'season': season_selection, 'episode': episode_selection}
+
+ Returns:
+ bool: True if processing was successful, False otherwise
+ """
+ if not select_title:
+ console.print("[yellow]No title selected or selection cancelled.")
+ return False
+
+ if select_title.type == 'tv':
+ season_selection = None
+ episode_selection = None
+
+ if selections:
+ season_selection = selections.get('season')
+ episode_selection = selections.get('episode')
+
+ download_series(select_title, season_selection, episode_selection)
+ media_search_manager.clear()
+ table_show_manager.clear()
+ return True
+
+
+def search(string_to_search: str = None, get_onlyDatabase: bool = False, direct_item: dict = None, selections: dict = None):
+ """
+ Main function for searching and downloading content
+
+ Parameters:
+ string_to_search (str, optional): Search query string
+ get_onlyDatabase (bool, optional): If True, return only the database object
+ direct_item (dict, optional): Direct item to process (bypass search)
+ selections (dict, optional): Dictionary containing selection inputs
+ {'season': season_selection, 'episode': episode_selection}
+ """
+ if direct_item:
+ select_title = MediaItem(**direct_item)
+ result = process_search_result(select_title, selections)
+ return result
+
+ # Get search query from user
+ actual_search_query = None
+ if string_to_search is not None:
+ actual_search_query = string_to_search.strip()
+ else:
+ actual_search_query = msg.ask(f"\n[purple]Insert a word to search in [green]{site_constants.SITE_NAME}").strip()
+
+ # Handle empty input
+ if not actual_search_query:
+ return False
+
+ # Search on database
+ len_database = title_search(actual_search_query)
+
+ # If only database is needed, return the manager
+ if get_onlyDatabase:
+ return media_search_manager
+
+ if len_database > 0:
+ select_title = get_select_title(table_show_manager, media_search_manager, len_database)
+ result = process_search_result(select_title, selections)
+ return result
+
+ else:
+ console.print(f"\n[red]Nothing matching was found for[white]: [purple]{actual_search_query}")
+ return False
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/discoveryeu/series.py b/StreamingCommunity/Api/Service/discoveryeu/series.py
new file mode 100644
index 00000000..443e60fb
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discoveryeu/series.py
@@ -0,0 +1,191 @@
+# 22.12.25
+
+import os
+from typing import Tuple
+
+
+# External library
+from rich.console import Console
+from rich.prompt import Prompt
+
+
+# Internal utilities
+from StreamingCommunity.Util import os_manager, config_manager, start_message
+from StreamingCommunity.Api.Template import site_constants, MediaItem
+from StreamingCommunity.Api.Template.episode_manager import (
+ manage_selection,
+ map_episode_title,
+ validate_selection,
+ validate_episode_selection,
+ display_episodes_list,
+ display_seasons_list
+)
+from StreamingCommunity.Lib.DASH.downloader import DASH_Downloader
+from StreamingCommunity.Lib.HLS import HLS_Downloader
+
+
+
+# Logic
+from .util.ScrapeSerie import GetSerieInfo
+from .util.get_license import get_playback_info, generate_license_headers, DiscoveryEUAPI
+
+
+# Variables
+msg = Prompt()
+console = Console()
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
+
+
+def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str, bool]:
+ """
+ Download a specific episode
+
+ Parameters:
+ index_season_selected (int): Season number
+ index_episode_selected (int): Episode index
+ scrape_serie (GetSerieInfo): Series scraper instance
+
+ Returns:
+ Tuple[str, bool]: (output_path, stopped_status)
+ """
+ start_message()
+
+ # Get episode information
+ obj_episode = scrape_serie.selectEpisode(index_season_selected, index_episode_selected - 1)
+
+    # Resolve the real season number: seasons without free episodes are skipped,
+    # so the 1-based index can differ from the actual season number.
+    index_season_selected = scrape_serie.getRealNumberSeason(index_season_selected)
+
+ console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{scrape_serie.series_name} \\ [magenta]{obj_episode.name} ([cyan]S{index_season_selected}E{index_episode_selected}) \n")
+
+ # Define output path
+ mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
+ mp4_path = os_manager.get_sanitize_path(
+ os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
+ )
+
+ # Get playback information using video_id
+ playback_info = get_playback_info(obj_episode.video_id)
+
+    stream_type = str(playback_info['type']).strip().lower()
+    if (stream_type == 'dash' and playback_info['license_url'] is None) or stream_type not in ('dash', 'hls'):
+        console.print(f"[red]Unsupported streaming type. Playback info: {playback_info}")
+        return None, False
+
+    # Dispatch on the normalized stream type
+    status = None
+    if stream_type == 'dash':
+ license_headers = generate_license_headers(playback_info['license_token'])
+
+ # Download the episode
+ dash_process = DASH_Downloader(
+ license_url=playback_info['license_url'],
+ mpd_url=playback_info['mpd_url'],
+ output_path=os.path.join(mp4_path, mp4_name),
+ )
+
+ dash_process.parse_manifest(custom_headers=license_headers)
+
+ if dash_process.download_and_decrypt(custom_headers=license_headers):
+ dash_process.finalize_output()
+
+ # Get final status
+ status = dash_process.get_status()
+
+    elif stream_type == 'hls':
+
+ api = DiscoveryEUAPI()
+ headers = api.get_request_headers()
+
+ # Download the episode
+ status = HLS_Downloader(
+            m3u8_url=playback_info['mpd_url'],  # despite the key name, this is the HLS playlist URL
+ headers=headers,
+ output_path=os.path.join(mp4_path, mp4_name),
+ ).start()
+
+    if status is None:
+        return None, False
+
+    if status['error'] is not None and status['path']:
+ try:
+ os.remove(status['path'])
+ except Exception:
+ pass
+
+ return status['path'], status['stopped']
+
+
+def download_episode(index_season_selected: int, scrape_serie: GetSerieInfo, download_all: bool = False, episode_selection: str = None) -> None:
+ """
+ Handle downloading episodes for a specific season
+
+ Parameters:
+ index_season_selected (int): Season number
+ scrape_serie (GetSerieInfo): Series scraper instance
+ download_all (bool): Whether to download all episodes
+ episode_selection (str, optional): Pre-defined episode selection
+ """
+ # Get episodes for the selected season
+ episodes = scrape_serie.getEpisodeSeasons(index_season_selected)
+ episodes_count = len(episodes)
+
+ if episodes_count == 0:
+ console.print(f"[red]No episodes found for season {index_season_selected}")
+ return
+
+ if download_all:
+ for i_episode in range(1, episodes_count + 1):
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+ if stopped:
+ break
+ else:
+ if episode_selection is not None:
+ last_command = episode_selection
+ console.print(f"\n[cyan]Using provided episode selection: [yellow]{episode_selection}")
+ else:
+ last_command = display_episodes_list(episodes)
+
+ # Prompt user for episode selection
+ list_episode_select = manage_selection(last_command, episodes_count)
+ list_episode_select = validate_episode_selection(list_episode_select, episodes_count)
+
+ # Download selected episodes
+ for i_episode in list_episode_select:
+ path, stopped = download_video(index_season_selected, i_episode, scrape_serie)
+ if stopped:
+ break
+
+
+def download_series(select_season: MediaItem, season_selection: str = None, episode_selection: str = None) -> None:
+ """
+ Handle downloading a complete series
+
+ Parameters:
+ select_season (MediaItem): Series metadata from search
+ season_selection (str, optional): Pre-defined season selection
+ episode_selection (str, optional): Pre-defined episode selection
+ """
+ id_parts = select_season.id.split('|')
+
+ # Initialize series scraper
+ scrape_serie = GetSerieInfo(id_parts[1], id_parts[0])
+ seasons_count = scrape_serie.getNumberSeason()
+
+ if seasons_count == 0:
+ console.print("[red]No seasons found for this series")
+ return
+
+ # Handle season selection
+ if season_selection is None:
+ index_season_selected = display_seasons_list(scrape_serie.seasons_manager)
+ else:
+ index_season_selected = season_selection
+ console.print(f"\n[cyan]Using provided season selection: [yellow]{season_selection}")
+
+ # Validate the selection
+ list_season_select = manage_selection(index_season_selected, seasons_count)
+ list_season_select = validate_selection(list_season_select, seasons_count)
+
+ # Loop through selected seasons and download episodes
+ for i_season in list_season_select:
+ if len(list_season_select) > 1 or index_season_selected == "*":
+ download_episode(i_season, scrape_serie, download_all=True)
+ else:
+ download_episode(i_season, scrape_serie, download_all=False, episode_selection=episode_selection)
diff --git a/StreamingCommunity/Api/Service/discoveryeu/site.py b/StreamingCommunity/Api/Service/discoveryeu/site.py
new file mode 100644
index 00000000..3a068ff8
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discoveryeu/site.py
@@ -0,0 +1,106 @@
+# 22.12.25
+
+# External libraries
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client
+from StreamingCommunity.Api.Template import site_constants, MediaManager
+from StreamingCommunity.Util.table import TVShowManager
+
+
+# Logic
+from .util.get_license import get_api
+
+
+# Variables
+console = Console()
+media_search_manager = MediaManager()
+table_show_manager = TVShowManager()
+
+
+def determine_type(attributes: dict) -> str:
+ """
+ Determines the item type
+
+ Parameters:
+ attributes (dict): Dictionary with item info
+
+ Returns:
+ str: Type of the item
+ """
+ episode_count = attributes.get('episodeCount', 0)
+ video_count = attributes.get('videoCount', 0)
+ if episode_count == 0 and video_count > 1000:
+ return 'channel'
+ elif episode_count > 0:
+ return 'show'
+ elif video_count == 1 and episode_count == 0:
+ return 'movie'
+ else:
+ return "unknown"
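+
+# e.g. (illustrative values):
+#   determine_type({'episodeCount': 0, 'videoCount': 1})    -> 'movie'
+#   determine_type({'episodeCount': 12, 'videoCount': 12})  -> 'show'
+#   determine_type({'episodeCount': 0, 'videoCount': 5000}) -> 'channel'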
+
+
+def title_search(query: str) -> int:
+ """
+ Search for titles on Discovery+
+
+ Parameters:
+ query (str): Search query
+
+ Returns:
+ int: Number of results found
+ """
+ media_search_manager.clear()
+ table_show_manager.clear()
+
+ api = get_api()
+ search_url = 'https://eu1-prod-direct.discoveryplus.com/cms/routes/search/result'
+ console.print(f"[cyan]Search url: [yellow]{search_url}")
+
+ params = {
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed',
+ 'contentFilter[query]': query
+ }
+
+ try:
+ response = create_client(headers=api.get_request_headers()).get(
+ search_url,
+ params=params,
+ cookies=api.get_cookies()
+ )
+ response.raise_for_status()
+ except Exception as e:
+ console.print(f"[red]Site: {site_constants.SITE_NAME}, request search error: {e}")
+ return 0
+
+ # Parse response
+ data = response.json()
+ for element in data.get('included', []):
+ element_type = element.get('type')
+ attributes = element.get('attributes', {})
+ type_element = determine_type(attributes)
+
+ if element_type != type_element:
+ continue
+
+ # Handle both shows and movies
+ if element_type in ['show', 'movie']:
+ if 'name' in attributes:
+ if element_type == 'show':
+ date = attributes.get('newestEpisodeDate', '').split("T")[0]
+ else:
+ date = attributes.get('airDate', '').split("T")[0]
+
+ combined_id = f"{element.get('id')}|{attributes.get('alternateId')}"
+ media_search_manager.add_media({
+ 'id': combined_id,
+ 'name': attributes.get('name', 'No Title'),
+ 'type': 'tv' if element_type == 'show' else 'movie',
+ 'image': None,
+ 'date': date
+ })
+
+ return media_search_manager.get_length()
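+
+# Illustrative usage (query is arbitrary):
+#   found = title_search("gold rush")
+#   # matching items are now available in media_search_manager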
diff --git a/StreamingCommunity/Api/Service/discoveryeu/util/ScrapeSerie.py b/StreamingCommunity/Api/Service/discoveryeu/util/ScrapeSerie.py
new file mode 100644
index 00000000..d47962a0
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discoveryeu/util/ScrapeSerie.py
@@ -0,0 +1,166 @@
+# 22.12.25
+
+import logging
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client
+from StreamingCommunity.Api.Template.object import SeasonManager
+from .get_license import get_api
+
+
+class GetSerieInfo:
+ def __init__(self, show_alternate_id, show_id):
+ """
+ Initialize series scraper for Discovery+
+
+ Args:
+ show_alternate_id (str): The alternate ID of the show (e.g., 'homestead-rescue-discovery')
+ show_id (str): The numeric ID of the show
+ """
+ self.api = get_api()
+ self.show_alternate_id = show_alternate_id
+ self.show_id = show_id
+ self.series_name = ""
+ self.seasons_manager = SeasonManager()
+ self.n_seasons = 0
+ self.collection_id = None
+ self._get_show_info()
+
+
+ def _get_show_info(self):
+ """Get show information including number of seasons and collection ID"""
+ try:
+ response = create_client(headers=self.api.get_request_headers()).get(
+ f'https://eu1-prod-direct.discoveryplus.com/cms/routes/show/{self.show_alternate_id}',
+ params={
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed'
+ },
+ cookies=self.api.get_cookies()
+ )
+ response.raise_for_status()
+ data = response.json()
+
+ # Get series name from first show element
+ for element in data.get('included', []):
+ if element.get('type') == 'show':
+ self.series_name = element.get('attributes', {}).get('name', '')
+ break
+
+
+ # Get collection ID
+ for element in data.get('included', []):
+ if element.get('type') == 'collection':
+ self.collection_id = element.get('id')
+
+ # Get number of seasons
+                    if 'filters' in element.get('attributes', {}).get('component', {}):
+                        filters = element.get('attributes', {}).get('component', {}).get('filters', [])
+                        if filters and filters[0].get('options'):
+                            self.n_seasons = int(filters[0].get('options', [])[-1].get('value', 0))
+ return True
+
+ except Exception as e:
+ logging.error(f"Failed to get show info: {e}")
+ return False
+
+ def _get_season_episodes(self, season_number):
+ """
+ Get episodes for a specific season
+
+ Args:
+ season_number (int): Season number
+ """
+ try:
+ response = create_client(headers=self.api.get_request_headers()).get(
+ f'https://eu1-prod-direct.discoveryplus.com/cms/collections/{self.collection_id}',
+ params={
+ 'include': 'default',
+ 'decorators': 'viewingHistory,isFavorite,playbackAllowed',
+ 'pf[seasonNumber]': season_number,
+ 'pf[show.id]': self.show_id
+ },
+ cookies=self.api.get_cookies()
+ )
+ response.raise_for_status()
+
+ data = response.json()
+ episodes = []
+
+ for element in data.get('included', []):
+ if element.get('type') == 'video':
+ attributes = element.get('attributes', {})
+ if 'episodeNumber' in attributes:
+ episodes.append({
+ 'id': attributes.get('alternateId', ''),
+ 'video_id': element.get('id', ''),
+ 'name': attributes.get('name', ''),
+ 'episode_number': attributes.get('episodeNumber', 0),
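+                            # videoDuration is in milliseconds; // 60000 converts to minutes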
+ 'duration': attributes.get('videoDuration', 0) // 60000
+ })
+
+ # Sort by episode number
+ episodes.sort(key=lambda x: x['episode_number'])
+ return episodes
+
+ except Exception as e:
+ logging.error(f"Failed to get episodes for season {season_number}: {e}")
+ return []
+
+ def collect_season(self):
+ """Collect all seasons and episodes"""
+ try:
+ for season_num in range(1, self.n_seasons + 1):
+ episodes = self._get_season_episodes(season_num)
+
+ if episodes:
+ season_obj = self.seasons_manager.add_season({
+ 'number': season_num,
+ 'name': f"Season {season_num}",
+ 'id': f"season_{season_num}"
+ })
+
+ if season_obj:
+ for episode in episodes:
+ season_obj.episodes.add(episode)
+
+ except Exception as e:
+ logging.error(f"Error in collect_season: {e}")
+
+
+ # ------------- FOR GUI -------------
+ def getNumberSeason(self) -> int:
+ """Get total number of seasons"""
+ if not self.seasons_manager.seasons:
+ self.collect_season()
+ return len(self.seasons_manager.seasons)
+
+    def getRealNumberSeason(self, index_season: int) -> int:
+        """Map a 1-based season index to the real season number (they differ when seasons are skipped)."""
+        for i, season in enumerate(self.seasons_manager.seasons, start=1):
+            if i == index_season:
+                return int(str(season.id).split("season_")[1])
+        return None
+
+ def getEpisodeSeasons(self, season_number: int) -> list:
+ """Get all episodes for a specific season"""
+ if not self.seasons_manager.seasons:
+ self.collect_season()
+
+ season_index = season_number - 1
+ season = self.seasons_manager.seasons[season_index]
+ return season.episodes.episodes
+
+ def selectEpisode(self, season_number: int, episode_index: int) -> dict:
+ """Get information for a specific episode"""
+ episodes = self.getEpisodeSeasons(season_number)
+ if not episodes or episode_index < 0 or episode_index >= len(episodes):
+ logging.error(f"Episode index {episode_index} out of range for season {season_number}")
+ return None
+
+ return episodes[episode_index]
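+
+
+# Illustrative usage (IDs are hypothetical):
+#   info = GetSerieInfo("homestead-rescue-discovery", "12345")
+#   n_seasons = info.getNumberSeason()
+#   episode = info.selectEpisode(1, 0)  # first episode of the first listed season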
diff --git a/StreamingCommunity/Api/Service/discoveryeu/util/get_license.py b/StreamingCommunity/Api/Service/discoveryeu/util/get_license.py
new file mode 100644
index 00000000..f94299e8
--- /dev/null
+++ b/StreamingCommunity/Api/Service/discoveryeu/util/get_license.py
@@ -0,0 +1,177 @@
+# 22.12.25
+
+import uuid
+import random
+
+
+# External library
+from ua_generator import generate
+
+
+# Internal utilities
+from StreamingCommunity.Util.http_client import create_client_curl
+
+
+# Variable
+_discovery_api = None
+
+
+class DiscoveryEUAPI:
+ def __init__(self):
+ self.device_id = str(uuid.uuid4())
+ self.device_info = self._generate_device_info()
+ self.user_agent = self.device_info['user_agent']
+ self.bearer_token = None
+ self._initialize()
+
+ def _generate_device_info(self):
+ ua = generate(device='desktop', browser=random.choice(['chrome', 'firefox', 'edge', 'safari']))
+
+ browser_name_map = {
+ 'chrome': 'chrome',
+ 'firefox': 'firefox',
+ 'edge': 'edge',
+ 'safari': 'safari'
+ }
+
+ browser_name = browser_name_map.get(ua.browser.lower(), 'chrome')
+ browser_version = ua.ch.browser_full_version if hasattr(ua.ch, 'browser_full_version') else '125.0.0.0'
+ os_version = ua.ch.platform_version if hasattr(ua.ch, 'platform_version') else 'NT 10.0'
+
+ device_info = {
+ 'user_agent': ua.text,
+ 'device': {
+ 'browser': {
+ 'name': browser_name,
+ 'version': browser_version,
+ },
+ 'id': '',
+ 'language': random.choice(['en', 'en-US', 'en-GB']),
+ 'make': '',
+ 'model': '',
+ 'name': browser_name,
+ 'os': ua.ch.platform if hasattr(ua.ch, 'platform') else 'Windows',
+ 'osVersion': os_version,
+ 'player': {
+ 'name': 'Discovery Player Web',
+ 'version': '3.1.0',
+ },
+ 'type': 'desktop',
+ }
+ }
+
+ return device_info
+
+ def _initialize(self):
+ headers = {
+ 'user-agent': self.user_agent,
+ 'x-device-info': f'dsc/4.4.1 (desktop/desktop; Windows/NT 10.0; {self.device_id})',
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1'
+ }
+ params = {
+ 'deviceId': self.device_id,
+ 'realm': 'dplay',
+ 'shortlived': 'true'
+ }
+
+ try:
+ response = create_client_curl(headers=headers).get('https://eu1-prod-direct.discoveryplus.com/token', params=params)
+ response.raise_for_status()
+ self.bearer_token = response.json()['data']['attributes']['token']
+
+ except Exception as e:
+ raise RuntimeError(f"Failed to get bearer token: {e}")
+
+ def get_request_headers(self):
+ return {
+ 'accept': '*/*',
+ 'user-agent': self.user_agent,
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1',
+ 'x-disco-params': 'realm=dplay,siteLookupKey=dsc,bid=dsc,hn=go.discovery.com,hth=us,features=ar',
+ }
+
+ def get_cookies(self):
+ return {'st': self.bearer_token}
+
+
+def get_api():
+ """Get or create Discovery API instance"""
+ global _discovery_api
+ if _discovery_api is None:
+ _discovery_api = DiscoveryEUAPI()
+ return _discovery_api
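+
+# Illustrative usage: one bearer token is fetched per process and reused.
+#   api = get_api()
+#   headers = api.get_request_headers()
+#   cookies = api.get_cookies()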
+
+
+def get_playback_info(video_id):
+ """
+ Get playback information for a video including MPD URL and license token
+
+ Args:
+ video_id (str): The video ID
+ """
+ api = get_api()
+
+ cookies = api.get_cookies()
+ headers = {
+ 'user-agent': api.user_agent,
+ 'x-disco-client': 'WEB:UNKNOWN:dsc:4.4.1',
+ }
+
+ json_data = {
+ 'videoId': video_id,
+ 'wisteriaProperties': {
+ 'advertiser': {},
+ 'appBundle': '',
+ 'device': api.device_info['device'],
+ 'gdpr': 0,
+ 'platform': 'desktop',
+ 'product': 'dsc',
+ 'siteId': 'dsc'
+ },
+ 'deviceInfo': {
+ 'adBlocker': False,
+ 'deviceId': '',
+ 'drmTypes': {
+ 'widevine': True,
+ 'playready': False,
+ 'fairplay': False,
+ 'clearkey': True,
+ },
+ 'drmSupported': True
+ },
+ }
+
+ response = create_client_curl().post('https://eu1-prod-direct.discoveryplus.com/playback/v3/videoPlaybackInfo', cookies=cookies, headers=headers, json=json_data)
+ if response.status_code == 403:
+ json_response = response.json()
+ errors = json_response.get('errors', [])
+ if errors and errors[0].get('code') == 'access.denied.missingpackage':
+ raise RuntimeError("Content requires a subscription/account to view")
+ else:
+        raise RuntimeError("Content is geo-restricted or otherwise unavailable (HTTP 403)")
+
+ response.raise_for_status()
+ json_response = response.json()
+
+ streaming_data = json_response['data']['attributes']['streaming']
+ widevine_scheme = streaming_data[0]['protection']['schemes'].get('widevine')
+ return {
+ 'mpd_url': streaming_data[0]['url'],
+ 'license_url': widevine_scheme['licenseUrl'] if widevine_scheme else None,
+ 'license_token': streaming_data[0]['protection']['drmToken'] if widevine_scheme else None,
+ 'type': streaming_data[0]['type']
+ }
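+
+# Illustrative usage (video id is hypothetical):
+#   info = get_playback_info("12345")
+#   if info['type'] == 'dash' and info['license_url']:
+#       license_headers = generate_license_headers(info['license_token'])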
+
+
+
+def generate_license_headers(license_token):
+ """
+ Generate headers for license requests
+
+ Args:
+ license_token (str): The DRM token from playback info
+ """
+ return {
+ 'preauthorization': license_token,
+ 'user-agent': get_api().user_agent,
+ }
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Service/dmax/series.py b/StreamingCommunity/Api/Service/dmax/series.py
index 5372d9e7..58fda6c6 100644
--- a/StreamingCommunity/Api/Service/dmax/series.py
+++ b/StreamingCommunity/Api/Service/dmax/series.py
@@ -31,7 +31,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/guardaserie/series.py b/StreamingCommunity/Api/Service/guardaserie/series.py
index 460d5a97..936b979d 100644
--- a/StreamingCommunity/Api/Service/guardaserie/series.py
+++ b/StreamingCommunity/Api/Service/guardaserie/series.py
@@ -32,7 +32,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/hd4me/film.py b/StreamingCommunity/Api/Service/hd4me/film.py
index dc7acd9a..d9b1c6da 100644
--- a/StreamingCommunity/Api/Service/hd4me/film.py
+++ b/StreamingCommunity/Api/Service/hd4me/film.py
@@ -18,7 +18,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> str:
diff --git a/StreamingCommunity/Api/Service/ipersphera/film.py b/StreamingCommunity/Api/Service/ipersphera/film.py
index d7af415e..33853531 100644
--- a/StreamingCommunity/Api/Service/ipersphera/film.py
+++ b/StreamingCommunity/Api/Service/ipersphera/film.py
@@ -16,7 +16,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> str:
diff --git a/StreamingCommunity/Api/Service/mediasetinfinity/film.py b/StreamingCommunity/Api/Service/mediasetinfinity/film.py
index d79472cd..6f9e0e0d 100644
--- a/StreamingCommunity/Api/Service/mediasetinfinity/film.py
+++ b/StreamingCommunity/Api/Service/mediasetinfinity/film.py
@@ -23,7 +23,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> Tuple[str, bool]:
diff --git a/StreamingCommunity/Api/Service/mediasetinfinity/series.py b/StreamingCommunity/Api/Service/mediasetinfinity/series.py
index 9ff5f859..db464a6e 100644
--- a/StreamingCommunity/Api/Service/mediasetinfinity/series.py
+++ b/StreamingCommunity/Api/Service/mediasetinfinity/series.py
@@ -33,7 +33,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/mediasetinfinity/util/get_license.py b/StreamingCommunity/Api/Service/mediasetinfinity/util/get_license.py
index da385012..17f6b30b 100644
--- a/StreamingCommunity/Api/Service/mediasetinfinity/util/get_license.py
+++ b/StreamingCommunity/Api/Service/mediasetinfinity/util/get_license.py
@@ -16,7 +16,6 @@
# Variable
console = Console()
-network_data = []
class_mediaset_api = None
@@ -184,11 +183,17 @@ def parse_smil_for_media_info(smil_xml):
lang = textstream.attrib.get('lang', 'unknown')
sub_type = textstream.attrib.get('type', 'unknown')
+            # Map the subtitle MIME type to a short format name
+            if sub_type == 'text/vtt':
+                sub_format = 'vtt'
+            elif sub_type == 'text/srt':
+                sub_format = 'srt'
+            else:
+                sub_format = sub_type
+
if sub_url:
subtitle_info = {
'url': sub_url,
'language': lang,
- 'type': sub_type
+ 'format': sub_format
}
subtitles_raw.append(subtitle_info)
@@ -202,12 +207,12 @@ def parse_smil_for_media_info(smil_xml):
subtitles = []
for lang, subs in subtitles_by_lang.items():
- vtt_subs = [s for s in subs if s['type'] == 'text/vtt']
+ vtt_subs = [s for s in subs if s['format'] == 'vtt']
if vtt_subs:
subtitles.append(vtt_subs[0]) # Take first VTT
else:
- srt_subs = [s for s in subs if s['type'] == 'text/srt']
+ srt_subs = [s for s in subs if s['format'] == 'srt']
if srt_subs:
subtitles.append(srt_subs[0]) # Take first SRT
diff --git a/StreamingCommunity/Api/Service/nove/series.py b/StreamingCommunity/Api/Service/nove/series.py
index 5372d9e7..58fda6c6 100644
--- a/StreamingCommunity/Api/Service/nove/series.py
+++ b/StreamingCommunity/Api/Service/nove/series.py
@@ -31,7 +31,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/raiplay/film.py b/StreamingCommunity/Api/Service/raiplay/film.py
index e1392356..f552b1b4 100644
--- a/StreamingCommunity/Api/Service/raiplay/film.py
+++ b/StreamingCommunity/Api/Service/raiplay/film.py
@@ -24,7 +24,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_film(select_title: MediaItem) -> Tuple[str, bool]:
diff --git a/StreamingCommunity/Api/Service/raiplay/series.py b/StreamingCommunity/Api/Service/raiplay/series.py
index 06fee7eb..d0122617 100644
--- a/StreamingCommunity/Api/Service/raiplay/series.py
+++ b/StreamingCommunity/Api/Service/raiplay/series.py
@@ -36,7 +36,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/realtime/series.py b/StreamingCommunity/Api/Service/realtime/series.py
index 160d2b55..09493193 100644
--- a/StreamingCommunity/Api/Service/realtime/series.py
+++ b/StreamingCommunity/Api/Service/realtime/series.py
@@ -31,7 +31,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo) -> Tuple[str,bool]:
diff --git a/StreamingCommunity/Api/Service/streamingcommunity/film.py b/StreamingCommunity/Api/Service/streamingcommunity/film.py
index dd801dc8..12ba2abf 100644
--- a/StreamingCommunity/Api/Service/streamingcommunity/film.py
+++ b/StreamingCommunity/Api/Service/streamingcommunity/film.py
@@ -10,6 +10,7 @@
# Internal utilities
from StreamingCommunity.Util import os_manager, config_manager, start_message
from StreamingCommunity.Api.Template import site_constants, MediaItem
+from StreamingCommunity.Lib.TMDB.tmdb import tmdb
from StreamingCommunity.Lib.HLS import HLS_Downloader
@@ -19,7 +20,8 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
+use_other_api = config_manager.login.get("TMDB", "api_key") != ""
def download_film(select_title: MediaItem) -> str:
@@ -27,8 +29,7 @@ def download_film(select_title: MediaItem) -> str:
Downloads a film using the provided film ID, title name, and domain.
Parameters:
- - domain (str): The domain of the site
- - version (str): Version of site.
+ - select_title (MediaItem): Media item with title information
Return:
- str: output path
@@ -36,11 +37,22 @@ def download_film(select_title: MediaItem) -> str:
start_message()
console.print(f"\n[yellow]Download: [red]{site_constants.SITE_NAME} → [cyan]{select_title.name} \n")
+ # Prepare TMDB data
+ tmdb_data = None
+ if use_other_api:
+ year = int(select_title.date[:4])
+ result = tmdb.get_type_and_id_by_slug_year(select_title.slug, year)
+
+ if result and result.get('id') and result.get('type') == 'movie':
+ tmdb_data = {'id': result.get('id')}
+
# Init class
- video_source = VideoSource(f"{site_constants.FULL_URL}/it", False, select_title.id)
+ video_source = VideoSource(f"{site_constants.FULL_URL}/it", False, select_title.id, tmdb_data=tmdb_data)
- # Retrieve scws and if available master playlist
- video_source.get_iframe(select_title.id)
+ # Retrieve iframe only if not using TMDB API
+ if tmdb_data is None:
+ video_source.get_iframe(select_title.id)
+
video_source.get_content()
master_playlist = video_source.get_playlist()
diff --git a/StreamingCommunity/Api/Service/streamingcommunity/series.py b/StreamingCommunity/Api/Service/streamingcommunity/series.py
index 0e9ac2a0..b5e5bca1 100644
--- a/StreamingCommunity/Api/Service/streamingcommunity/series.py
+++ b/StreamingCommunity/Api/Service/streamingcommunity/series.py
@@ -12,6 +12,7 @@
# Internal utilities
from StreamingCommunity.Util import config_manager, start_message
from StreamingCommunity.Api.Template import site_constants, MediaItem
+from StreamingCommunity.Lib.TMDB.tmdb import tmdb
from StreamingCommunity.Api.Template.episode_manager import (
manage_selection,
map_episode_title,
@@ -31,7 +32,8 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
+use_other_api = config_manager.login.get("TMDB", "api_key") != ""
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo, video_source: VideoSource) -> Tuple[str,bool]:
@@ -58,8 +60,24 @@ def download_video(index_season_selected: int, index_episode_selected: int, scra
mp4_name = f"{map_episode_title(scrape_serie.series_name, index_season_selected, index_episode_selected, obj_episode.name)}.{extension_output}"
mp4_path = os.path.join(site_constants.SERIES_FOLDER, scrape_serie.series_name, f"S{index_season_selected}")
- # Retrieve scws and if available master playlist
- video_source.get_iframe(obj_episode.id)
+ if use_other_api:
+ series_slug = scrape_serie.series_name.lower().replace(' ', '-').replace("'", '')
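+        # e.g. "Breaking Bad" -> "breaking-bad" (slug used for the TMDB lookup)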
+ result = tmdb.get_type_and_id_by_slug_year(str(series_slug), int(scrape_serie.years))
+
+ if result and result.get('id') and result.get('type') == 'tv':
+ tmdb_id = result.get('id')
+ video_source.tmdb_id = tmdb_id
+ video_source.season_number = index_season_selected
+ video_source.episode_number = index_episode_selected
+
+ else:
+ console.print("[yellow]TMDB ID not found or not a TV show, falling back to original method")
+ video_source.get_iframe(obj_episode.id)
+
+ else:
+ # Retrieve iframe using original method
+ video_source.get_iframe(obj_episode.id)
+
video_source.get_content()
master_playlist = video_source.get_playlist()
@@ -140,7 +158,7 @@ def download_series(select_season: MediaItem, season_selection: str = None, epis
# Init class
video_source = VideoSource(f"{site_constants.FULL_URL}/it", True, select_season.id)
- scrape_serie = GetSerieInfo(f"{site_constants.FULL_URL}/it", select_season.id, select_season.slug)
+ scrape_serie = GetSerieInfo(f"{site_constants.FULL_URL}/it", select_season.id, select_season.slug, select_season.date.split("-")[0])
# Collect information about season
scrape_serie.getNumberSeason()
diff --git a/StreamingCommunity/Api/Service/streamingcommunity/util/ScrapeSerie.py b/StreamingCommunity/Api/Service/streamingcommunity/util/ScrapeSerie.py
index 6a7882ee..ab85a918 100644
--- a/StreamingCommunity/Api/Service/streamingcommunity/util/ScrapeSerie.py
+++ b/StreamingCommunity/Api/Service/streamingcommunity/util/ScrapeSerie.py
@@ -14,7 +14,7 @@
class GetSerieInfo:
- def __init__(self, url, media_id: int = None, series_name: str = None):
+ def __init__(self, url, media_id: int = None, series_name: str = None, years: int = None):
"""
Initialize the GetSerieInfo class for scraping TV series information.
@@ -27,6 +27,7 @@ def __init__(self, url, media_id: int = None, series_name: str = None):
self.headers = get_headers()
self.url = url
self.media_id = media_id
+ self.years = years
self.seasons_manager = SeasonManager()
if series_name is not None:
diff --git a/StreamingCommunity/Api/Service/tubitv/film.py b/StreamingCommunity/Api/Service/tubitv/film.py
index 9048030e..dcf0ca7e 100644
--- a/StreamingCommunity/Api/Service/tubitv/film.py
+++ b/StreamingCommunity/Api/Service/tubitv/film.py
@@ -21,7 +21,7 @@
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def extract_content_id(url: str) -> str:
diff --git a/StreamingCommunity/Api/Service/tubitv/series.py b/StreamingCommunity/Api/Service/tubitv/series.py
index 1eb11a10..780e82d4 100644
--- a/StreamingCommunity/Api/Service/tubitv/series.py
+++ b/StreamingCommunity/Api/Service/tubitv/series.py
@@ -31,7 +31,7 @@
# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
def download_video(index_season_selected: int, index_episode_selected: int, scrape_serie: GetSerieInfo, bearer_token: str) -> Tuple[str, bool]:
diff --git a/StreamingCommunity/Api/Service/tubitv/site.py b/StreamingCommunity/Api/Service/tubitv/site.py
index b46c83b6..3830c4dd 100644
--- a/StreamingCommunity/Api/Service/tubitv/site.py
+++ b/StreamingCommunity/Api/Service/tubitv/site.py
@@ -23,7 +23,6 @@
table_show_manager = TVShowManager()
-
def title_to_slug(title):
"""Convert a title to a URL-friendly slug"""
slug = title.lower()
diff --git a/StreamingCommunity/Api/Service/tubitv/util/get_license.py b/StreamingCommunity/Api/Service/tubitv/util/get_license.py
index 7ab089aa..a440aaa6 100644
--- a/StreamingCommunity/Api/Service/tubitv/util/get_license.py
+++ b/StreamingCommunity/Api/Service/tubitv/util/get_license.py
@@ -10,7 +10,8 @@
# Variable
-config = config_manager.get_dict("SITE_LOGIN", "tubi")
+tubi_email = config_manager.login.get('tubi', 'email')
+tubi_password = config_manager.login.get('tubi', 'password')
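+
+# Expected login config shape (illustrative):
+#   {"tubi": {"email": "user@example.com", "password": "..."}}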
def generate_device_id():
@@ -25,7 +26,7 @@ def get_bearer_token():
Returns:
str: Bearer token
"""
- if not config.get('email') or not config.get('password'):
+ if not tubi_email or not tubi_password:
raise Exception("Email or Password not set in configuration.")
json_data = {
@@ -33,8 +34,8 @@ def get_bearer_token():
'platform': 'web',
'device_id': generate_device_id(),
'credentials': {
- 'email': str(config.get('email')).strip(),
- 'password': str(config.get('password')).strip()
+ 'email': str(tubi_email).strip(),
+ 'password': str(tubi_password).strip()
},
}
diff --git a/StreamingCommunity/Api/Template/config_loader.py b/StreamingCommunity/Api/Template/config_loader.py
index f70ebb2f..61df2d58 100644
--- a/StreamingCommunity/Api/Template/config_loader.py
+++ b/StreamingCommunity/Api/Template/config_loader.py
@@ -30,31 +30,31 @@ def SITE_NAME(self):
@property
def ROOT_PATH(self):
- return config_manager.get('OUT_FOLDER', 'root_path')
+ return config_manager.config.get('OUT_FOLDER', 'root_path')
@property
def FULL_URL(self):
- return config_manager.get_site(self.SITE_NAME, 'full_url').rstrip('/')
+ return config_manager.domain.get(self.SITE_NAME, 'full_url').rstrip('/')
@property
def SERIES_FOLDER(self):
base_path = self.ROOT_PATH
- if config_manager.get_bool("OUT_FOLDER", "add_siteName"):
+ if config_manager.config.get_bool("OUT_FOLDER", "add_siteName"):
base_path = os.path.join(base_path, self.SITE_NAME)
- return os.path.join(base_path, config_manager.get('OUT_FOLDER', 'serie_folder_name'))
+ return os.path.join(base_path, config_manager.config.get('OUT_FOLDER', 'serie_folder_name'))
@property
def MOVIE_FOLDER(self):
base_path = self.ROOT_PATH
- if config_manager.get_bool("OUT_FOLDER", "add_siteName"):
+ if config_manager.config.get_bool("OUT_FOLDER", "add_siteName"):
base_path = os.path.join(base_path, self.SITE_NAME)
- return os.path.join(base_path, config_manager.get('OUT_FOLDER', 'movie_folder_name'))
+ return os.path.join(base_path, config_manager.config.get('OUT_FOLDER', 'movie_folder_name'))
@property
def ANIME_FOLDER(self):
base_path = self.ROOT_PATH
- if config_manager.get_bool("OUT_FOLDER", "add_siteName"):
+ if config_manager.config.get_bool("OUT_FOLDER", "add_siteName"):
base_path = os.path.join(base_path, self.SITE_NAME)
- return os.path.join(base_path, config_manager.get('OUT_FOLDER', 'anime_folder_name'))
+ return os.path.join(base_path, config_manager.config.get('OUT_FOLDER', 'anime_folder_name'))
site_constants = SiteConstant()
\ No newline at end of file
diff --git a/StreamingCommunity/Api/Template/episode_manager.py b/StreamingCommunity/Api/Template/episode_manager.py
index e20f4296..e1b5ca82 100644
--- a/StreamingCommunity/Api/Template/episode_manager.py
+++ b/StreamingCommunity/Api/Template/episode_manager.py
@@ -19,7 +19,7 @@
# Variable
msg = Prompt()
console = Console()
-MAP_EPISODE = config_manager.get('OUT_FOLDER', 'map_episode_name')
+MAP_EPISODE = config_manager.config.get('OUT_FOLDER', 'map_episode_name')
def dynamic_format_number(number_str: str) -> str:
@@ -290,41 +290,61 @@ def display_episodes_list(episodes_manager) -> str:
# Set up table for displaying episodes
table_show_manager = TVShowManager()
- # Check if any episode has a non-empty category
+ # Check if any episode has non-empty fields
has_category = False
+ has_number = False
+ has_duration = False
+
for media in episodes_manager:
category = media.get('category') if isinstance(media, dict) else getattr(media, 'category', None)
+ number = media.get('number') if isinstance(media, dict) else getattr(media, 'number', None)
+ duration = media.get('duration') if isinstance(media, dict) else getattr(media, 'duration', None)
+
if category is not None and str(category).strip() != '':
has_category = True
- break
+ if number is not None and str(number).strip() != '':
+ has_number = True
+ if duration is not None and str(duration).strip() != '':
+ has_duration = True
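+
+    # e.g. an episode {'name': 'Pilot', 'number': 1} enables the Number column
+    # while Category and Duration stay hidden (unless another episode sets them).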
# Add columns to the table
column_info = {
"Index": {'color': 'red'},
- "Name": {'color': 'magenta'},
}
+ if has_number:
+ column_info["Number"] = {'color': 'cyan'}
+
+ column_info["Name"] = {'color': 'magenta'}
+
if has_category:
column_info["Category"] = {'color': 'green'}
- column_info["Duration"] = {'color': 'blue'}
+ if has_duration:
+ column_info["Duration"] = {'color': 'blue'}
table_show_manager.add_column(column_info)
# Populate the table with episodes information
for i, media in enumerate(episodes_manager):
name = media.get('name') if isinstance(media, dict) else getattr(media, 'name', None)
+ number = media.get('number') if isinstance(media, dict) else getattr(media, 'number', None)
duration = media.get('duration') if isinstance(media, dict) else getattr(media, 'duration', None)
category = media.get('category') if isinstance(media, dict) else getattr(media, 'category', None)
episode_info = {
'Index': str(i + 1),
'Name': name,
- 'Duration': duration,
}
+ if has_number:
+ episode_info['Number'] = number
+
if has_category:
episode_info['Category'] = category
+
+ if has_duration:
+ episode_info['Duration'] = duration
table_show_manager.add_tv_show(episode_info)
diff --git a/StreamingCommunity/Api/Template/object.py b/StreamingCommunity/Api/Template/object.py
index 71fcb77d..f9d61c5d 100644
--- a/StreamingCommunity/Api/Template/object.py
+++ b/StreamingCommunity/Api/Template/object.py
@@ -69,7 +69,7 @@ def __init__(self, data: Dict[str, Any]):
self.episodes: EpisodeManager = EpisodeManager()
def __str__(self):
- return f"Season(id={self.id}, number={self.number}, name='{self.name}', episodes={self.episodes.length()})"
+        return f"Season(id={self.id}, number={self.number}, name='{self.name}', episodes={len(self.episodes)})"
class SeasonManager:
def __init__(self):
diff --git a/StreamingCommunity/Lib/DASH/decrypt.py b/StreamingCommunity/Lib/DASH/decrypt.py
index 23a4301d..2633e426 100644
--- a/StreamingCommunity/Lib/DASH/decrypt.py
+++ b/StreamingCommunity/Lib/DASH/decrypt.py
@@ -3,7 +3,6 @@
import os
import time
import subprocess
-import logging
import threading
@@ -13,19 +12,19 @@
# Internal utilities
+from StreamingCommunity.Util.os import get_mp4decrypt_path
from StreamingCommunity.Util import config_manager, Colors
# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
-CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-SHOW_DECRYPT_PROGRESS = True
+extension_output = config_manager.config.get("M3U8_CONVERSION", "extension")
+CLEANUP_TMP = config_manager.config.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-def decrypt_with_mp4decrypt(type, encrypted_path, kid, key, output_path=None):
+def decrypt_with_mp4decrypt(type, encrypted_path, kid, key, output_path=None, encryption_method=None):
"""
- Decrypt an mp4/m4s file using mp4decrypt.
+    Decrypt an mp4/m4s file using mp4decrypt, selecting the key format from the given encryption method.
Args:
type (str): Type of file ('video' or 'audio').
@@ -33,24 +32,13 @@ def decrypt_with_mp4decrypt(type, encrypted_path, kid, key, output_path=None):
kid (str): Hexadecimal KID.
key (str): Hexadecimal key.
output_path (str): Output decrypted file path (optional).
- cleanup (bool): If True, remove temporary files after decryption.
+ encryption_method (str): Encryption method ('ctr', 'cbc', 'cenc', 'cbcs', etc.)
Returns:
str: Path to decrypted file, or None if error.
"""
- from StreamingCommunity.Util.os import get_mp4decrypt_path
-
- # Check if input file exists
if not os.path.isfile(encrypted_path):
- console.print(f"[red] Encrypted file not found: {encrypted_path}")
- return None
-
- # Check if kid and key are valid hex
- try:
- bytes.fromhex(kid)
- bytes.fromhex(key)
- except Exception:
- console.print("[red] Invalid KID or KEY (not hex).")
+ console.print(f"[bold red] Encrypted file not found: {encrypted_path}")
return None
if not output_path:
@@ -59,67 +47,80 @@ def decrypt_with_mp4decrypt(type, encrypted_path, kid, key, output_path=None):
# Get file size for progress tracking
file_size = os.path.getsize(encrypted_path)
- key_format = f"{kid.lower()}:{key.lower()}"
- cmd = [get_mp4decrypt_path(), "--key", key_format, encrypted_path, output_path]
- logging.info(f"Running mp4decrypt command: {' '.join(cmd)}")
-
- progress_bar = None
- monitor_thread = None
+ # Determine decryption command based on encryption method
+ method_display = "UNKNOWN"
+ cmd = None
- if SHOW_DECRYPT_PROGRESS:
- bar_format = (
- f"{Colors.YELLOW}DECRYPT{Colors.CYAN} {type}{Colors.WHITE}: "
- f"{Colors.MAGENTA}{{bar:40}} "
- f"{Colors.LIGHT_GREEN}{{n_fmt}}{Colors.WHITE}/{Colors.CYAN}{{total_fmt}} "
- f"{Colors.DARK_GRAY}[{Colors.YELLOW}{{elapsed}}{Colors.WHITE} < {Colors.CYAN}{{remaining}}{Colors.DARK_GRAY}] "
- f"{Colors.WHITE}{{postfix}}"
- )
-
- progress_bar = tqdm(
- total=100,
- bar_format=bar_format,
- unit="",
- ncols=150
- )
+ if encryption_method in ['ctr', 'cenc', 'cens']:
+ method_display = "AES CTR"
+ key_format = f"1:{key.lower()}"
+ cmd = [get_mp4decrypt_path(), "--key", key_format, encrypted_path, output_path]
- def monitor_output_file():
- """Monitor output file growth and update progress bar."""
- last_size = 0
- while True:
- if os.path.exists(output_path):
- current_size = os.path.getsize(output_path)
- if current_size > 0:
- progress_percent = min(int((current_size / file_size) * 100), 100)
- progress_bar.n = progress_percent
- progress_bar.refresh()
-
- if current_size == last_size and current_size > 0:
- break
-
- last_size = current_size
-
- time.sleep(0.1)
+ elif encryption_method in ['cbc', 'cbcs', 'cbc1']:
+ method_display = "AES CBC"
+ key_format = f"{kid.lower()}:{key.lower()}"
+ cmd = [get_mp4decrypt_path(), "--key", key_format, encrypted_path, output_path]
- # Start monitoring thread
- monitor_thread = threading.Thread(target=monitor_output_file, daemon=True)
- monitor_thread.start()
+ else:
+ console.print(f"[yellow]Warning: Unknown encryption method '{encryption_method}', trying KID:KEY format")
+ key_format = f"{kid.lower()}:{key.lower()}"
+ cmd = [get_mp4decrypt_path(), "--key", key_format, encrypted_path, output_path]
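+
+    # Resulting command shape (illustrative):
+    #   cenc/ctr:  mp4decrypt --key 1:<KEY> <in> <out>
+    #   cbcs/cbc:  mp4decrypt --key <KID>:<KEY> <in> <out>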
+
+ console.print(f"[cyan]Decryption method: [yellow]{method_display}")
+
+ # Create progress bar with custom format
+ bar_format = (
+ f"{Colors.YELLOW}DECRYPT{Colors.CYAN} {type}{Colors.WHITE}: "
+ f"{Colors.MAGENTA}{{bar:40}} "
+ f"{Colors.LIGHT_GREEN}{{n_fmt}}{Colors.WHITE}/{Colors.CYAN}{{total_fmt}} "
+ f"{Colors.DARK_GRAY}[{Colors.YELLOW}{{elapsed}}{Colors.WHITE} < {Colors.CYAN}{{remaining}}{Colors.DARK_GRAY}] "
+ f"{Colors.WHITE}{{postfix}}"
+ )
+
+ progress_bar = tqdm(
+ total=100,
+ bar_format=bar_format,
+ unit="",
+ ncols=150
+ )
+
+ def monitor_output_file():
+ """Monitor output file growth and update progress bar."""
+ last_size = 0
+ while True:
+ if os.path.exists(output_path):
+ current_size = os.path.getsize(output_path)
+ if current_size > 0:
+ progress_percent = min(int((current_size / file_size) * 100), 100)
+ progress_bar.n = progress_percent
+ progress_bar.refresh()
+
+ if current_size == last_size and current_size > 0:
+ break
+
+ last_size = current_size
+
+ time.sleep(0.1)
+
+ # Start monitoring thread
+ monitor_thread = threading.Thread(target=monitor_output_file, daemon=True)
+ monitor_thread.start()
try:
result = subprocess.run(cmd, capture_output=True, text=True, timeout=300)
except Exception as e:
- if progress_bar:
- progress_bar.close()
- console.print(f"[red] mp4decrypt execution failed: {e}")
+ progress_bar.close()
+ console.print(f"[bold red] mp4decrypt execution failed: {e}[/bold red]")
return None
- if progress_bar:
- progress_bar.n = 100
- progress_bar.refresh()
- progress_bar.close()
+ # Ensure progress bar reaches 100%
+ progress_bar.n = 100
+ progress_bar.refresh()
+ progress_bar.close()
if result.returncode == 0 and os.path.exists(output_path):
- # Cleanup temporary files
+ # Cleanup temporary files if requested
if CLEANUP_TMP:
if os.path.exists(encrypted_path):
os.remove(encrypted_path)
@@ -130,13 +131,8 @@ def monitor_output_file():
if temp_dec != output_path and os.path.exists(temp_dec):
os.remove(temp_dec)
- # Check if output file is not empty
- if os.path.getsize(output_path) == 0:
- console.print(f"[red] Decrypted file is empty: {output_path}")
- return None
-
return output_path
else:
- console.print(f"[red] mp4decrypt failed: {result.stderr}")
+ console.print(f"[bold red] mp4decrypt failed: {result.stderr}")
return None
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/DASH/downloader.py b/StreamingCommunity/Lib/DASH/downloader.py
index bc4789a8..14f15918 100644
--- a/StreamingCommunity/Lib/DASH/downloader.py
+++ b/StreamingCommunity/Lib/DASH/downloader.py
@@ -1,6 +1,7 @@
# 25.07.25
import os
+import sys
import shutil
import logging
from typing import Optional, Dict
@@ -12,15 +13,15 @@
# Internal utilities
from StreamingCommunity.Util import config_manager, os_manager, internet_manager
-from StreamingCommunity.Util.os import get_wvd_path
+from StreamingCommunity.Util.os import get_wvd_path, get_prd_path
from StreamingCommunity.Util.http_client import create_client, get_userAgent
# Logic class
-from .parser import MPD_Parser
+from ..MPD import MPD_Parser, DRMSystem
from .segments import MPD_Segments
from .decrypt import decrypt_with_mp4decrypt
-from .cdm_helpher import get_widevine_keys, map_keys_to_representations
+from .extractor import get_widevine_keys, get_playready_keys, map_keys_to_representations
# FFmpeg functions
@@ -29,19 +30,15 @@
# Config
-DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
-MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
-CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-EXTENSION_OUTPUT = config_manager.get("M3U8_CONVERSION", "extension")
-
-
-# Variable
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.config.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
+MERGE_SUBTITLE = config_manager.config.get_bool('M3U8_DOWNLOAD', 'merge_subs')
+CLEANUP_TMP = config_manager.config.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
+EXTENSION_OUTPUT = config_manager.config.get("M3U8_CONVERSION", "extension")
class DASH_Downloader:
- def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path: str = None):
+ def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path: str = None, drm_preference: str = 'widevine'):
"""
Initialize the DASH Downloader with necessary parameters.
@@ -52,9 +49,15 @@ def __init__(self, license_url, mpd_url, mpd_sub_list: list = None, output_path:
- output_path (str): Path to save the final output file.
"""
self.cdm_device = get_wvd_path()
+ self.prd_device = get_prd_path()
self.license_url = str(license_url).strip() if license_url else None
self.mpd_url = str(mpd_url).strip()
self.mpd_sub_list = mpd_sub_list
+
+ if drm_preference.lower() in [DRMSystem.WIDEVINE, DRMSystem.PLAYREADY]:
+ self.PREFERRED_DRM = drm_preference.lower()
+ else:
+            sys.exit(f"Invalid DRM preference: {drm_preference}. Use 'widevine' or 'playready'.")
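+
+        # Illustrative: drm_preference='playready' selects the PlayReady device
+        # (get_prd_path) instead of the Widevine CDM when requesting keys.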
# Sanitize the output path to remove invalid characters
sanitized_output_path = os_manager.get_sanitize_path(output_path)
@@ -88,12 +91,10 @@ def _setup_temp_dirs(self):
self.tmp_dir = os.path.join(self.out_path, "tmp")
self.encrypted_dir = os.path.join(self.tmp_dir, "encrypted")
self.decrypted_dir = os.path.join(self.tmp_dir, "decrypted")
- self.optimize_dir = os.path.join(self.tmp_dir, "optimize")
self.subs_dir = os.path.join(self.tmp_dir, "subs")
os.makedirs(self.encrypted_dir, exist_ok=True)
os.makedirs(self.decrypted_dir, exist_ok=True)
- os.makedirs(self.optimize_dir, exist_ok=True)
os.makedirs(self.subs_dir, exist_ok=True)
def parse_manifest(self, custom_headers):
@@ -170,19 +171,40 @@ def download_subtitles(self) -> bool:
for sub in self.selected_subs:
try:
language = sub.get('language')
- fmt = sub.get('format')
-
- # Download subtitle
+ fmt = sub.get('format', 'vtt')
+
console.log(f"[cyan]Downloading subtitle[white]: [red]{language} ({fmt})")
- response = client.get(sub.get('url'))
- response.raise_for_status()
- # Save subtitle file and make request
+ # Get segment URLs (can be single or multiple)
+ segment_urls = sub.get('segment_urls')
+ single_url = sub.get('url')
+
+ # Build list of URLs to download
+ urls_to_download = []
+ if segment_urls:
+ urls_to_download = segment_urls
+ elif single_url:
+ urls_to_download = [single_url]
+ else:
+ console.print(f"[yellow]Warning: No URL found for subtitle {language}")
+ continue
+
+ # Download all segments
+ all_content = []
+ for seg_url in urls_to_download:
+ response = client.get(seg_url)
+ response.raise_for_status()
+ all_content.append(response.content)
+
+ # Concatenate all segments
+ final_content = b''.join(all_content)
+
+ # Save to file
sub_filename = f"{language}.{fmt}"
sub_path = os.path.join(self.subs_dir, sub_filename)
with open(sub_path, 'wb') as f:
- f.write(response.content)
+ f.write(final_content)
except Exception as e:
console.print(f"[red]Error downloading subtitle {language}: {e}")
@@ -206,21 +228,28 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.error = None
self.stopped = False
- video_segments_count = 0
- # Fetch keys immediately after obtaining PSSH
- if not self.parser.pssh:
- self.download_segments(clear=True)
- return True
+ # Check if any representation is protected
+ has_protected_content = any(rep.get('protected', False) for rep in self.parser.representations)
+
+ # If no protection found, download without decryption
+ if not has_protected_content:
+ console.log("[yellow]Warning: Content is not protected, downloading without decryption.")
+ return self.download_segments(clear=True)
+
+ # Determine which DRM to use
+ drm_type = self._determine_drm_type()
+
+ if not drm_type:
+ console.print("[red]Content is protected but no DRM system found")
+ return False
- keys = get_widevine_keys(
- pssh=self.parser.pssh,
- license_url=self.license_url,
- cdm_device_path=self.cdm_device,
- headers=custom_headers,
- query_params=query_params,
- key=key
- )
+ # Fetch keys based on DRM type
+ keys = self._fetch_drm_keys(drm_type, custom_headers, query_params, key)
+
+ if not keys:
+ console.print(f"[red]Failed to obtain keys for {drm_type}")
+ return False
# Map keys to representations based on default_KID
key_mapping = map_keys_to_representations(keys, self.parser.representations)
@@ -237,25 +266,20 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
}
else:
console.print("[red]Could not map any keys to representations.")
+ console.print(f"[red]Available keys: {[k['kid'] for k in keys]}")
+ console.print(f"[red]Representation KIDs: {[r.get('default_kid') for r in self.parser.representations if r.get('default_kid')]}")
return False
# Download subtitles
self.download_subtitles()
+ # Get encryption method from parser
+ encryption_method = self.parser.encryption_method
+
# Download and decrypt video
video_rep = self.get_representation_by_type("video")
if video_rep:
- video_key_info = key_mapping.get("video")
- if not video_key_info and single_key:
- console.print("[yellow]Warning: no mapped key found for video; using the single available key.")
- video_key_info = {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None}
- if not video_key_info:
- self.error = "No key found for video representation"
- return False
-
- console.log(f"[cyan]Using video key: [red]{video_key_info['kid']} [cyan]for representation [yellow]{video_key_info.get('representation_id')}")
-
- video_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=video_rep, pssh=self.parser.pssh, custom_headers=custom_headers)
+ video_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=video_rep, pssh=self._get_pssh_for_drm(drm_type), custom_headers=custom_headers)
encrypted_path = video_downloader.get_concat_path(self.encrypted_dir)
# If m4s file doesn't exist, start downloading
@@ -265,10 +289,7 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
try:
result = video_downloader.download_streams(description="Video")
-
- # Store the video segment count for limiting audio
- video_segments_count = video_downloader.get_segments_count()
-
+
# Check for interruption or failure
if result.get("stopped"):
self.stopped = True
@@ -287,13 +308,31 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.current_downloader = None
self.current_download_type = None
- # Decrypt video using the mapped key
+ # Decrypt video ONLY if it's protected
decrypted_path = os.path.join(self.decrypted_dir, f"video.{EXTENSION_OUTPUT}")
- result_path = decrypt_with_mp4decrypt("Video", encrypted_path, video_key_info['kid'], video_key_info['key'], output_path=decrypted_path)
+
+ if video_rep.get('protected', False):
+ video_key_info = key_mapping.get("video")
+ if not video_key_info and single_key:
+ console.print("[yellow]Warning: no mapped key found for video; using the single available key.")
+ video_key_info = {"kid": single_key["kid"], "key": single_key["key"], "representation_id": None, "default_kid": None}
+
+ if not video_key_info:
+ self.error = "No key found for video representation"
+ return False
- if not result_path:
- self.error = f"Video decryption failed with key {video_key_info['kid']}"
- return False
+ console.log(f"[cyan]Using video key: [red]{video_key_info['kid']}[white]: [red]{video_key_info['key']} [cyan]for representation [yellow]{video_key_info.get('representation_id', 'N/A')}")
+
+ # Use encryption method from video representation or parser
+ video_encryption = video_rep.get('encryption_method') or encryption_method
+ result_path = decrypt_with_mp4decrypt("Video", encrypted_path, video_key_info['kid'], video_key_info['key'], output_path=decrypted_path, encryption_method=video_encryption)
+
+ if not result_path:
+ self.error = f"Video decryption failed with key {video_key_info['kid']}"
+ return False
+ else:
+ console.log("[cyan]Video is not protected, copying without decryption")
+ shutil.copy2(encrypted_path, decrypted_path)
else:
self.error = "No video found"
@@ -310,10 +349,9 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.error = "No key found for audio representation"
return False
- console.log(f"[cyan]Using audio key: [red]{audio_key_info['kid']} [cyan]for representation [yellow]{audio_key_info.get('representation_id')}")
-
+ console.log(f"[cyan]Using audio key: [red]{audio_key_info['kid']}[white]: [red]{audio_key_info['key']} [cyan]for representation [yellow]{audio_key_info.get('representation_id', 'N/A')}")
audio_language = audio_rep.get('language', 'Unknown')
- audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self.parser.pssh, limit_segments=video_segments_count if video_segments_count > 0 else None, custom_headers=custom_headers)
+ audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self._get_pssh_for_drm(drm_type), custom_headers=custom_headers)
encrypted_path = audio_downloader.get_concat_path(self.encrypted_dir)
# If m4s file doesn't exist, start downloading
@@ -344,9 +382,12 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
self.current_downloader = None
self.current_download_type = None
- # Decrypt audio using the mapped key
+ # Decrypt audio using the mapped key and encryption method
decrypted_path = os.path.join(self.decrypted_dir, f"audio.{EXTENSION_OUTPUT}")
- result_path = decrypt_with_mp4decrypt(f"Audio {audio_language}", encrypted_path, audio_key_info['kid'], audio_key_info['key'], output_path=decrypted_path)
+
+ # Use encryption method from audio representation or parser
+ audio_encryption = audio_rep.get('encryption_method') or encryption_method
+ result_path = decrypt_with_mp4decrypt(f"Audio {audio_language}", encrypted_path, audio_key_info['kid'], audio_key_info['key'], output_path=decrypted_path, encryption_method=audio_encryption)
if not result_path:
self.error = f"Audio decryption failed with key {audio_key_info['kid']}"
@@ -357,7 +398,50 @@ def download_and_decrypt(self, custom_headers=None, query_params=None, key=None)
return False
return True
-
+
+ def _determine_drm_type(self) -> Optional[str]:
+ """
+ Determine which DRM type to use based on available PSSH and preference.
+ Returns: 'widevine', 'playready', or None
+ """
+ # Check if DRM types are available from parsed representations
+ available_drm_types = self.parser.available_drm_types or []
+
+ if not available_drm_types:
+ return None
+
+ # Check if preferred DRM is available
+ if self.PREFERRED_DRM in available_drm_types:
+ console.log(f"[cyan]Using {self.PREFERRED_DRM.upper()} DRM")
+ return self.PREFERRED_DRM
+
+ # Fallback to first available DRM type
+ fallback_drm = available_drm_types[0]
+ console.log(f"[yellow]Preferred DRM {self.PREFERRED_DRM.upper()} not available, using {fallback_drm.upper()}")
+ return fallback_drm
+
+ def _get_pssh_for_drm(self, drm_type: str) -> Optional[str]:
+ """Get PSSH for specific DRM type"""
+ if drm_type == DRMSystem.WIDEVINE:
+ return self.parser.pssh_widevine
+ elif drm_type == DRMSystem.PLAYREADY:
+ return self.parser.pssh_playready
+ return None
+
+ def _fetch_drm_keys(self, drm_type: str, custom_headers: dict, query_params: dict, key: str) -> Optional[list]:
+ """Fetch decryption keys for specific DRM type"""
+ pssh = self._get_pssh_for_drm(drm_type)
+
+ if not pssh:
+ console.print(f"[red]No PSSH found for {drm_type}")
+ return None
+
+ if drm_type == DRMSystem.WIDEVINE:
+ return get_widevine_keys(pssh=pssh, license_url=self.license_url, cdm_device_path=self.cdm_device, headers=custom_headers, query_params=query_params, key=key)
+ elif drm_type == DRMSystem.PLAYREADY:
+ return get_playready_keys(pssh=pssh, license_url=self.license_url, cdm_device_path=self.prd_device, headers=custom_headers, query_params=query_params, key=key)
+ return None
+
def download_segments(self, clear=False):
"""
Download video/audio segments without decryption (for clear content).
@@ -369,8 +453,6 @@ def download_segments(self, clear=False):
console.print("[yellow]Warning: download_segments called with clear=False")
return False
- video_segments_count = 0
-
# Download subtitles
self.download_subtitles()
@@ -392,9 +474,6 @@ def download_segments(self, clear=False):
try:
result = video_downloader.download_streams(description="Video")
- # Store the video segment count for limiting audio
- video_segments_count = video_downloader.get_segments_count()
-
# Check for interruption or failure
if result.get("stopped"):
self.stopped = True
@@ -428,7 +507,7 @@ def download_segments(self, clear=False):
audio_rep = self.get_representation_by_type("audio")
if audio_rep:
audio_language = audio_rep.get('language', 'Unknown')
- audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self.parser.pssh, limit_segments=video_segments_count if video_segments_count > 0 else None)
+ audio_downloader = MPD_Segments(tmp_folder=self.encrypted_dir, representation=audio_rep, pssh=self.parser.pssh)
encrypted_path = audio_downloader.get_concat_path(self.encrypted_dir)
# If m4s file doesn't exist, start downloading
@@ -527,7 +606,7 @@ def finalize_output(self):
if existing_sub_tracks:
# Create temporary file for subtitle merge
- temp_output = output_file.replace(f'.{extension_output}', f'_temp.{extension_output}')
+ temp_output = output_file.replace(f'.{EXTENSION_OUTPUT}', f'_temp.{EXTENSION_OUTPUT}')
try:
final_file = join_subtitle(
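For reference, the new selection logic in `_determine_drm_type` reduces to a preferred-system-with-fallback pattern. A minimal standalone sketch (the `DRMSystem` string constants and `PREFERRED_DRM` are assumptions inferred from how the diff compares them; everything else is illustrative):

```python
from typing import Optional

# Assumed string constants, mirroring the DRMSystem comparisons in the diff.
class DRMSystem:
    WIDEVINE = "widevine"
    PLAYREADY = "playready"

PREFERRED_DRM = DRMSystem.WIDEVINE

def determine_drm_type(available_drm_types: list) -> Optional[str]:
    if not available_drm_types:
        return None                    # clear content: no DRM advertised by the manifest
    if PREFERRED_DRM in available_drm_types:
        return PREFERRED_DRM           # preferred system is available
    return available_drm_types[0]      # otherwise take whatever the manifest offers

print(determine_drm_type(["widevine", "playready"]))  # -> "widevine"
print(determine_drm_type(["playready"]))              # -> "playready" (fallback)
```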
diff --git a/StreamingCommunity/Lib/DASH/extractor/__init__.py b/StreamingCommunity/Lib/DASH/extractor/__init__.py
new file mode 100644
index 00000000..99eb5e73
--- /dev/null
+++ b/StreamingCommunity/Lib/DASH/extractor/__init__.py
@@ -0,0 +1,13 @@
+# 29.12.25
+
+from .ex_widevine import get_widevine_keys
+from .ex_playready import get_playready_keys
+from .ex_clearkey import ClearKey
+from .util import map_keys_to_representations
+
+__all__ = [
+ 'get_widevine_keys',
+ 'get_playready_keys',
+ 'ClearKey',
+ 'map_keys_to_representations'
+]
\ No newline at end of file
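The new package re-exports every public helper, so downstream code (the downloader above, for instance) gets a single import surface:

```python
# Public names declared in __all__ above.
from StreamingCommunity.Lib.DASH.extractor import (
    get_widevine_keys,
    get_playready_keys,
    ClearKey,
    map_keys_to_representations,
)
```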
diff --git a/StreamingCommunity/Lib/HLS/decrypt.py b/StreamingCommunity/Lib/DASH/extractor/ex_clearkey.py
similarity index 88%
rename from StreamingCommunity/Lib/HLS/decrypt.py
rename to StreamingCommunity/Lib/DASH/extractor/ex_clearkey.py
index 9f898ae4..f44ba74f 100644
--- a/StreamingCommunity/Lib/HLS/decrypt.py
+++ b/StreamingCommunity/Lib/DASH/extractor/ex_clearkey.py
@@ -26,7 +26,7 @@
-class M3U8_Decryption:
+class ClearKey:
def __init__(self, key: bytes, iv: bytes, method: str, pssh: bytes = None) -> None:
"""
- Initialize the M3U8_Decryption object.
+ Initialize the ClearKey object.
@@ -53,11 +53,11 @@ def __init__(self, key: bytes, iv: bytes, method: str, pssh: bytes = None) -> No
message = None
if self.method is not None:
- message = f"Method: [green]{self.method}"
+ message = f"[green]Method: [red]{self.method}"
if self.key is not None:
- message += f" | Key: [green]{self.key.hex()}"
+ message += f" [white]| [green]Key: [red]{self.key.hex()}"
if self.iv is not None:
- message += f" | IV: [green]{self.iv.hex()}"
+ message += f" [white]| [green]IV: [red]{self.iv.hex()}"
console.log(f"[cyan]Decryption {message}")
def decrypt(self, ciphertext: bytes) -> bytes:
@@ -76,6 +76,6 @@ def decrypt(self, ciphertext: bytes) -> bytes:
elif self.method == "AES-128-CTR":
decrypted_content = self.cipher.decrypt(ciphertext)
else:
- raise ValueError("Invalid or unsupported method")
+ raise ValueError(f"Invalid or unsupported method: {self.method}")
return decrypted_content
\ No newline at end of file
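A hedged usage sketch of the renamed class; only the `"AES-128-CTR"` branch is visible in this hunk, so that method string is used here, and the key/IV/ciphertext values are placeholders:

```python
from StreamingCommunity.Lib.DASH.extractor import ClearKey

# Placeholder 16-byte key and IV (hypothetical values).
key = bytes.fromhex("00112233445566778899aabbccddeeff")
iv = bytes.fromhex("000102030405060708090a0b0c0d0e0f")

ck = ClearKey(key=key, iv=iv, method="AES-128-CTR")        # logs method/key/IV on init
plaintext = ck.decrypt(b"...encrypted segment bytes...")   # ValueError on unknown methods
```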
diff --git a/StreamingCommunity/Lib/DASH/extractor/ex_playready.py b/StreamingCommunity/Lib/DASH/extractor/ex_playready.py
new file mode 100644
index 00000000..4f360910
--- /dev/null
+++ b/StreamingCommunity/Lib/DASH/extractor/ex_playready.py
@@ -0,0 +1,136 @@
+# 29.12.25
+
+from urllib.parse import urlencode
+
+
+# External libraries
+from curl_cffi import requests
+from rich.console import Console
+from pyplayready.cdm import Cdm
+from pyplayready.device import Device
+from pyplayready.system.pssh import PSSH
+
+
+# Variable
+console = Console()
+
+
+def get_playready_keys(pssh: str, license_url: str, cdm_device_path: str, headers: dict = None, query_params: dict = None, key: str = None):
+ """
+ Extract PlayReady CONTENT keys (KID/KEY) from a license using pyplayready.
+
+ Args:
+ - pssh (str): PSSH base64 or PlayReady PRO header.
+ - license_url (str): PlayReady license URL.
+ - cdm_device_path (str): Path to CDM file (device.prd).
+ - headers (dict): Optional HTTP headers for the license request.
+ - query_params (dict): Optional query parameters to append to the URL.
+ - key (str): Optional raw license data to bypass HTTP request.
+
+ Returns:
+ list: List of dicts {'kid': ..., 'key': ...} (only CONTENT keys) or None if error.
+ """
+ if cdm_device_path is None:
+ console.print("[red]Device prd path is None.")
+ return None
+
+ device = Device.load(cdm_device_path)
+ cdm = Cdm.from_device(device)
+ session_id = cdm.open()
+ console.log(f"[cyan]Session ID: [green]{session_id}")
+
+ try:
+ console.log(f"[cyan]PSSH (PR): [green]{pssh[:30]}..." if len(pssh) > 30 else f"[cyan]PSSH (PR): [green]{pssh}")
+
+ try:
+ pssh_obj = PSSH(pssh)
+ except Exception as e:
+ console.print(f"[red]Invalid PlayReady PSSH/PRO header: {e}")
+ return None
+
+ if not pssh_obj.wrm_headers:
+ console.print("[red]No WRM headers found in PSSH")
+ return None
+
+ challenge = cdm.get_license_challenge(session_id, pssh_obj.wrm_headers[0])
+
+ # With request license
+ if key is None:
+
+ # Build request URL with query params
+ request_url = license_url
+ if query_params:
+ request_url = f"{license_url}?{urlencode(query_params)}"
+
+ # Prepare headers
+ req_headers = headers.copy() if headers else {}
+ request_kwargs = {}
+ request_kwargs['data'] = challenge
+
+ # Keep original Content-Type or default to text/xml for PlayReady
+ if 'Content-Type' not in req_headers:
+ req_headers['Content-Type'] = 'text/xml; charset=utf-8'
+
+ if license_url is None:
+ console.print("[red]License URL is None.")
+ return None
+
+ response = requests.post(request_url, headers=req_headers, impersonate="chrome124", **request_kwargs)
+
+ if response.status_code != 200:
+ console.print(f"[red]License error: {response.status_code}, {response.text}")
+ return None
+
+ # Parse license
+ try:
+ cdm.parse_license(session_id, response.text)
+ except Exception as e:
+ console.print(f"[red]Error parsing license: {e}")
+ return None
+
+ # Extract CONTENT keys
+ content_keys = []
+ for key_obj in cdm.get_keys(session_id):
+ kid = key_obj.key_id.hex
+ key_val = key_obj.key.hex()
+
+ content_keys.append({
+ 'kid': kid.replace('-', '').strip(),
+ 'key': key_val.replace('-', '').strip()
+ })
+
+ # Return keys
+ console.log(f"[cyan]Extracted [red]{len(content_keys)} CONTENT [cyan]keys from license.")
+ return content_keys
+
+ else:
+ content_keys = []
+ raw_kid = key.split(":")[0]
+ raw_key = key.split(":")[1]
+
+ content_keys.append({
+ 'kid': raw_kid.replace('-', '').strip(),
+ 'key': raw_key.replace('-', '').strip()
+ })
+
+ console.log(f"[cyan]KID: [green]{content_keys[0]['kid']} [white]| [cyan]KEY: [green]{content_keys[0]['key']}")
+ return content_keys
+
+ finally:
+ cdm.close(session_id)
+
+
+def get_info_prd(cdm_device_path):
+ """
+ Extract device information from a PlayReady CDM device file (.prd).
+
+ Args:
+ cdm_device_path (str): Path to CDM file (device.prd).
+ """
+ device = Device.load(cdm_device_path)
+
+ console.print(
+ f"[cyan]Load PRD: "
+ f"[red]SL{device.security_level} [cyan]| "
+ f"[yellow]{device.get_name()} "
+ )
\ No newline at end of file
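Call sites would use the new helper roughly like this; the URL, device path, and KID:KEY pair are placeholders, and the bypass form matches the `key.split(":")` handling above:

```python
from StreamingCommunity.Lib.DASH.extractor import get_playready_keys

pssh = "AAAA..."  # base64 PSSH or PlayReady PRO header from the MPD (placeholder)

# Normal flow: POST a license challenge built from the PSSH.
keys = get_playready_keys(
    pssh=pssh,
    license_url="https://example.com/playready/rightsmanager.asmx",
    cdm_device_path="device.prd",
)

# Bypass flow: skip the HTTP request when a KID:KEY pair is already known.
keys = get_playready_keys(
    pssh=pssh,
    license_url=None,  # unused on this path
    cdm_device_path="device.prd",
    key="0123456789abcdef0123456789abcdef:fedcba9876543210fedcba9876543210",
)
# Either way the result is [{'kid': ..., 'key': ...}, ...] or None on error.
```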
diff --git a/StreamingCommunity/Lib/DASH/cdm_helpher.py b/StreamingCommunity/Lib/DASH/extractor/ex_widevine.py
similarity index 71%
rename from StreamingCommunity/Lib/DASH/cdm_helpher.py
rename to StreamingCommunity/Lib/DASH/extractor/ex_widevine.py
index 91bd51a7..6e362832 100644
--- a/StreamingCommunity/Lib/DASH/cdm_helpher.py
+++ b/StreamingCommunity/Lib/DASH/extractor/ex_widevine.py
@@ -1,4 +1,4 @@
-# 25.07.25
+# 29.12.25
import base64
from urllib.parse import urlencode
@@ -16,7 +16,6 @@
console = Console()
-
def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers: dict = None, query_params: dict =None, key: str=None):
"""
Extract Widevine CONTENT keys (KID/KEY) from a license using pywidevine.
@@ -32,16 +31,17 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
Returns:
list: List of dicts {'kid': ..., 'key': ...} (only CONTENT keys) or None if error.
"""
- if not cdm_device_path:
- console.print("[red]Invalid CDM device path.")
+ if cdm_device_path is None:
+ console.print("[red]Device cdm path is None.")
return None
device = Device.load(cdm_device_path)
cdm = Cdm.from_device(device)
session_id = cdm.open()
+ console.log(f"[cyan]Session ID: [green]{session_id}")
try:
- console.log(f"[cyan]PSSH: [green]{pssh}")
+ console.log(f"[cyan]PSSH (WV): [green]{pssh[:30]}..." if len(pssh) > 30 else f"[cyan]PSSH (WV): [green]{pssh}")
challenge = cdm.get_license_challenge(session_id, PSSH(pssh))
# With request license
@@ -61,6 +61,10 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
if 'Content-Type' not in req_headers:
req_headers['Content-Type'] = 'application/octet-stream'
+ if license_url is None:
+ console.print("[red]License URL is None.")
+ return None
+
response = requests.post(request_url, headers=req_headers, impersonate="chrome124", **request_kwargs)
if response.status_code != 200:
@@ -99,8 +103,8 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
content_keys = []
for key in cdm.get_keys(session_id):
if key.type == "CONTENT":
- kid = key.kid.hex() if isinstance(key.kid, bytes) else str(key.kid)
- key_val = key.key.hex() if isinstance(key.key, bytes) else str(key.key)
+ kid = key.kid.hex
+ key_val = key.key.hex()
content_keys.append({
'kid': kid.replace('-', '').strip(),
@@ -108,17 +112,14 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
})
# Return keys
- console.log(f"[cyan]Extracted {len(content_keys)} CONTENT keys from license.")
+ console.log(f"[cyan]Extracted [red]{len(content_keys)} CONTENT [cyan]keys from license.")
return content_keys
else:
content_keys = []
- raw_kid = key.split(":")[0]
- raw_key = key.split(":")[1]
-
content_keys.append({
- 'kid': raw_kid.replace('-', '').strip(),
- 'key': raw_key.replace('-', '').strip()
+ 'kid': key.split(":")[0].replace('-', '').strip(),
+ 'key': key.split(":")[1].replace('-', '').strip()
})
console.log(f"[cyan]KID: [green]{content_keys[0]['kid']} [white]| [cyan]KEY: [green]{content_keys[0]['key']}")
@@ -128,40 +129,6 @@ def get_widevine_keys(pssh: str, license_url: str, cdm_device_path: str, headers
cdm.close(session_id)
-def map_keys_to_representations(keys: list, representations: list) -> dict:
- """
- Map decryption keys to representations based on their default_KID.
-
- Args:
- keys (list): List of key dictionaries with 'kid' and 'key' fields
- representations (list): List of representation dictionaries with 'default_kid' field
-
- Returns:
- dict: Mapping of representation type to key info
- """
- key_mapping = {}
-
- for rep in representations:
- rep_type = rep.get('type', 'unknown')
- default_kid = rep.get('default_kid')
-
- if default_kid is None:
- console.log(f"[yellow]Representation [yellow]{rep.get('id')} [yellow]has no default_kid, maybe problem with parser.")
- continue
-
- for key_info in keys:
- if key_info['kid'].lower() == default_kid.lower():
- key_mapping[rep_type] = {
- 'kid': key_info['kid'],
- 'key': key_info['key'],
- 'representation_id': rep.get('id'),
- 'default_kid': default_kid
- }
- break
-
- return key_mapping
-
-
def get_info_wvd(cdm_device_path):
"""
Extract device information from a Widevine CDM device file (.wvd).
@@ -173,15 +140,9 @@ def get_info_wvd(cdm_device_path):
# Extract client info
info = {ci.name: ci.value for ci in device.client_id.client_info}
- caps = device.client_id.client_capabilities
-
- company = info.get("company_name", "N/A")
model = info.get("model_name", "N/A")
-
device_name = info.get("device_name", "").lower()
build_info = info.get("build_info", "").lower()
-
- # Extract device type
is_emulator = any(x in device_name for x in [
"generic", "sdk", "emulator", "x86"
]) or "test-keys" in build_info or "userdebug" in build_info
@@ -196,6 +157,5 @@ def get_info_wvd(cdm_device_path):
console.print(
f"[cyan]Load WVD: "
f"[red]L{device.security_level} [cyan]| [red]{dev_type} [cyan]| "
- f"[red]{company} {model} [cyan]| API [red]{caps.oem_crypto_api_version} [cyan]| "
f"[cyan]SysID: [red]{device.system_id}"
)
\ No newline at end of file
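One detail worth noting in the rewritten key extraction: in pywidevine a key's `kid` is a `uuid.UUID`, whose `hex` is an attribute, while `key` is raw `bytes`, whose `hex()` is a method, which is why the diff switches to `key.kid.hex` and `key.key.hex()`. A small illustration with made-up values:

```python
import uuid

kid = uuid.UUID("0123456789abcdef0123456789abcdef")           # hypothetical KID
raw_key = bytes.fromhex("fedcba9876543210fedcba9876543210")   # hypothetical key

print(kid.hex)        # attribute on uuid.UUID -> "0123456789abcdef0123456789abcdef"
print(raw_key.hex())  # method on bytes        -> "fedcba9876543210fedcba9876543210"
```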
diff --git a/StreamingCommunity/Lib/DASH/extractor/util.py b/StreamingCommunity/Lib/DASH/extractor/util.py
new file mode 100644
index 00000000..282e28bf
--- /dev/null
+++ b/StreamingCommunity/Lib/DASH/extractor/util.py
@@ -0,0 +1,46 @@
+# 29.12.25
+
+from rich.console import Console
+
+
+# Variable
+console = Console()
+
+
+def map_keys_to_representations(keys: list, representations: list) -> dict:
+ """
+ Map decryption keys to representations based on their default_KID.
+
+ Args:
+ keys (list): List of key dictionaries with 'kid' and 'key' fields
+ representations (list): List of representation dictionaries with 'default_kid' field
+
+ Returns:
+ dict: Mapping of representation type to key info
+ """
+ key_mapping = {}
+
+ for rep in representations:
+ rep_type = rep.get('type', 'unknown')
+ default_kid = rep.get('default_kid')
+
+ if not default_kid:
+ continue
+
+ # Normalize KID (remove dashes, lowercase)
+ normalized_rep_kid = default_kid.replace('-', '').lower()
+
+ for key_info in keys:
+ normalized_key_kid = key_info['kid'].replace('-', '').lower()
+
+ if normalized_key_kid == normalized_rep_kid:
+ key_mapping[rep_type] = {
+ 'kid': key_info['kid'],
+ 'key': key_info['key'],
+ 'representation_id': rep.get('id'),
+ 'default_kid': default_kid
+ }
+ #console.print(f"[cyan]Mapped [red]{rep_type} [cyan]key: [red]{key_info['kid']} [cyan]→ representation [red]{rep.get('id')}")
+ break
+
+ return key_mapping
\ No newline at end of file
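The normalization matters in practice: manifests usually carry dashed, mixed-case UUIDs in `default_KID`, while CDMs return bare lowercase hex. A quick example with made-up values:

```python
keys = [{"kid": "0123456789ABCDEF0123456789ABCDEF",
         "key": "fedcba9876543210fedcba9876543210"}]
reps = [{"id": "video=4800000", "type": "video",
         "default_kid": "01234567-89ab-cdef-0123-456789abcdef"}]

mapping = map_keys_to_representations(keys, reps)
# Dashes are stripped and case is folded before comparison, so the two
# spellings above still match:
# mapping["video"]["representation_id"] == "video=4800000"
```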
diff --git a/StreamingCommunity/Lib/DASH/parser.py b/StreamingCommunity/Lib/DASH/parser.py
deleted file mode 100644
index bcddaec2..00000000
--- a/StreamingCommunity/Lib/DASH/parser.py
+++ /dev/null
@@ -1,1032 +0,0 @@
-# 25.07.25
-
-import json
-import logging
-from urllib.parse import urljoin, urlparse
-from typing import List, Dict, Optional, Tuple, Any
-from pathlib import Path
-from datetime import datetime
-from isodate import parse_duration
-
-
-# External libraries
-from lxml import etree
-from curl_cffi import requests
-from rich.console import Console
-from rich.table import Table
-
-
-# Internal utilities
-from StreamingCommunity.Util.config_json import config_manager
-
-
-# Variables
-console = Console()
-max_timeout = config_manager.get_int('REQUESTS', 'timeout')
-FILTER_CUSTOM_RESOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
-DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
-
-
-class CodecQuality:
- VIDEO_CODEC_RANK = {
- 'av01': 5, 'vp9': 4, 'vp09': 4, 'hev1': 3,
- 'hvc1': 3, 'avc1': 2, 'avc3': 2, 'mp4v': 1,
- }
-
- AUDIO_CODEC_RANK = {
- 'opus': 5, 'mp4a.40.2': 4, 'mp4a.40.5': 3,
- 'mp4a': 2, 'ac-3': 2, 'ec-3': 3,
- }
-
- @staticmethod
- def get_video_codec_rank(codec: Optional[str]) -> int:
- if not codec:
- return 0
- codec_lower = codec.lower()
- for key, rank in CodecQuality.VIDEO_CODEC_RANK.items():
- if codec_lower.startswith(key):
- return rank
- return 0
-
- @staticmethod
- def get_audio_codec_rank(codec: Optional[str]) -> int:
- if not codec:
- return 0
- codec_lower = codec.lower()
- for key, rank in CodecQuality.AUDIO_CODEC_RANK.items():
- if codec_lower.startswith(key):
- return rank
- return 0
-
-
-class DurationUtils:
- """Utilities for handling ISO-8601 durations"""
-
- @staticmethod
- def parse_duration(duration_str: Optional[str]) -> int:
- """Parse ISO-8601 duration to seconds using isodate library"""
- if not duration_str:
- return 0
- try:
- duration = parse_duration(duration_str)
- return int(duration.total_seconds())
- except Exception:
- return 0
-
- @staticmethod
- def format_duration(seconds: int) -> str:
- """Format seconds like '~48m55s' or '~1h02m03s'"""
- if not seconds or seconds < 0:
- return ""
-
- h = seconds // 3600
- m = (seconds % 3600) // 60
- s = seconds % 60
-
- if h > 0:
- return f"~{h}h{m:02d}m{s:02d}s"
- return f"~{m}m{s:02d}s"
-
-
-class URLBuilder:
- """Handles URL construction with template substitution"""
-
- @staticmethod
- def build_url(base: str, template: str, rep_id: Optional[str] = None, number: Optional[int] = None, time: Optional[int] = None, bandwidth: Optional[int] = None) -> Optional[str]:
- if not template:
- return None
-
- # Substitute placeholders
- if rep_id is not None:
- template = template.replace('$RepresentationID$', rep_id)
- if bandwidth is not None:
- template = template.replace('$Bandwidth$', str(bandwidth))
- if time is not None:
- template = template.replace('$Time$', str(time))
-
- # Handle $Number$ with optional formatting (e.g., $Number%05d$)
- if '$Number' in template:
- num_str = str(number if number is not None else 0)
-
- # Check for formatting like $Number%05d$
- if '%0' in template and 'd$' in template:
- start = template.find('%0')
- end = template.find('d$', start)
- if start != -1 and end != -1:
- width_str = template[start+2:end]
- try:
- width = int(width_str)
- num_str = str(number if number is not None else 0).zfill(width)
- except ValueError:
- pass
-
- template = template.replace('$Number%05d$', num_str)
- template = template.replace('$Number$', num_str)
-
- return URLBuilder._finalize_url(base, template)
-
- @staticmethod
- def _finalize_url(base: str, template: str) -> str:
- """Finalize URL construction preserving query and fragment"""
- parts = template.split('#', 1)
- path_and_query = parts[0]
- fragment = ('#' + parts[1]) if len(parts) == 2 else ''
-
- if '?' in path_and_query:
- path, query = path_and_query.split('?', 1)
- abs_path = urljoin(base, path)
- return abs_path + '?' + query + fragment
- else:
- return urljoin(base, path_and_query) + fragment
-
-
-class NamespaceManager:
- """Manages XML namespaces for DASH manifests"""
-
- def __init__(self, root: etree._Element):
- self.nsmap = self._extract_namespaces(root)
-
- @staticmethod
- def _extract_namespaces(root: etree._Element) -> Dict[str, str]:
- """Extract namespaces from root element"""
- nsmap = {}
- if root.nsmap:
- # Use 'mpd' as default prefix for the main namespace
- nsmap['mpd'] = root.nsmap.get(None) or 'urn:mpeg:dash:schema:mpd:2011'
- nsmap['cenc'] = 'urn:mpeg:cenc:2013'
-
- # Add other namespaces if present
- for prefix, uri in root.nsmap.items():
- if prefix is not None:
- nsmap[prefix] = uri
-
- else:
- # Fallback to default DASH namespace
- nsmap['mpd'] = 'urn:mpeg:dash:schema:mpd:2011'
- nsmap['cenc'] = 'urn:mpeg:cenc:2013'
- return nsmap
-
- def find(self, element: etree._Element, path: str) -> Optional[etree._Element]:
- """Find element using namespace-aware XPath"""
- return element.find(path, namespaces=self.nsmap)
-
- def findall(self, element: etree._Element, path: str) -> List[etree._Element]:
- """Find all elements using namespace-aware XPath"""
- return element.findall(path, namespaces=self.nsmap)
-
-
-class BaseURLResolver:
- """Resolves base URLs at different MPD hierarchy levels"""
-
- def __init__(self, mpd_url: str, ns_manager: NamespaceManager):
- self.mpd_url = mpd_url
- self.ns = ns_manager
-
- def get_initial_base_url(self, root: etree._Element) -> str:
- """Get base URL from MPD root"""
- base_url = self.mpd_url.rsplit('/', 1)[0] + '/'
-
- base_elem = self.ns.find(root, 'mpd:BaseURL')
- if base_elem is not None and base_elem.text:
- base_text = base_elem.text.strip()
- base_url = base_text if base_text.startswith('http') else urljoin(base_url, base_text)
-
- return base_url
-
- def resolve_base_url(self, element: etree._Element, current_base: str) -> str:
- """Resolve base URL for any element"""
- base_elem = self.ns.find(element, 'mpd:BaseURL')
- if base_elem is not None and base_elem.text:
- base_text = base_elem.text.strip()
- return base_text if base_text.startswith('http') else urljoin(current_base, base_text)
- return current_base
-
-
-class ContentProtectionHandler:
- """Handles DRM and content protection"""
- def __init__(self, ns_manager: NamespaceManager):
- self.ns = ns_manager
-
- def is_protected(self, element: etree._Element) -> bool:
- """Check if element has DRM protection"""
- for cp in self.ns.findall(element, 'mpd:ContentProtection'):
- scheme_id = (cp.get('schemeIdUri') or '').lower()
- value = (cp.get('value') or '').lower()
-
- # Check for CENC or Widevine
- if 'urn:mpeg:dash:mp4protection:2011' in scheme_id and ('cenc' in value or value):
- return True
- if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id: # Widevine UUID
- return True
-
- return False
-
- def extract_default_kid(self, element: etree._Element) -> Optional[str]:
- """Extract default_KID from ContentProtection elements (Widevine/PlayReady/CENC).
- """
- def _extract_kid_from_cp(cp: etree._Element) -> Optional[str]:
- kid = (cp.get('{urn:mpeg:cenc:2013}default_KID') or cp.get('default_KID') or cp.get('cenc:default_KID'))
-
- # Fallback: any attribute key that ends with 'default_KID' (case-insensitive)
- if not kid:
- for k, v in (cp.attrib or {}).items():
- if isinstance(k, str) and k.lower().endswith('default_kid') and v:
- kid = v
- break
-
- if not kid:
- return None
-
- # Normalize UUID -> hex (no dashes), lowercase
- return kid.strip().replace('-', '').lower()
-
- cps = self.ns.findall(element, 'mpd:ContentProtection')
- if not cps:
- return None
-
- # Prefer Widevine KID, then mp4protection, then any other CP that has it.
- preferred = []
- fallback = []
-
- for cp in cps:
- scheme_id = (cp.get('schemeIdUri') or '').lower()
- if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id: # Widevine
- preferred.append(cp)
- elif 'urn:mpeg:dash:mp4protection:2011' in scheme_id:
- preferred.append(cp)
- else:
- fallback.append(cp)
-
- for cp in preferred + fallback:
- kid = _extract_kid_from_cp(cp)
- if kid:
- return kid
-
- return None
-
- def extract_pssh(self, root: etree._Element) -> Optional[str]:
- """Extract PSSH (Protection System Specific Header)"""
- # Try Widevine first
- for cp in self.ns.findall(root, './/mpd:ContentProtection'):
- scheme_id = cp.get('schemeIdUri', '')
- if 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed' in scheme_id:
- pssh = self.ns.find(cp, 'cenc:pssh')
- if pssh is not None and pssh.text:
- return pssh.text.strip()
-
- # Fallback to any PSSH
- for cp in self.ns.findall(root, './/mpd:ContentProtection'):
- pssh = self.ns.find(cp, 'cenc:pssh')
- if pssh is not None and pssh.text:
- console.print("Found PSSH (fallback)")
- return pssh.text.strip()
-
- return None
-
-
-class SegmentTimelineParser:
- """Parses SegmentTimeline elements"""
-
- def __init__(self, ns_manager: NamespaceManager):
- self.ns = ns_manager
-
- def parse(self, seg_template: etree._Element, start_number: int = 1) -> Tuple[List[int], List[int]]:
- """Parse SegmentTimeline and return (number_list, time_list)"""
- seg_timeline = self.ns.find(seg_template, 'mpd:SegmentTimeline')
- if seg_timeline is None:
- return [], []
-
- number_list = []
- time_list = []
- current_time = 0
- current_number = start_number
-
- for s_elem in self.ns.findall(seg_timeline, 'mpd:S'):
- d = s_elem.get('d')
- if d is None:
- continue
-
- d = int(d)
-
- # Explicit time
- if s_elem.get('t') is not None:
- current_time = int(s_elem.get('t'))
-
- # Repeat count
- r = int(s_elem.get('r', 0))
- if r == -1:
- r = 0 # Special case: repeat until end
-
- # Add segments
- for _ in range(r + 1):
- number_list.append(current_number)
- time_list.append(current_time)
- current_number += 1
- current_time += d
-
- return number_list, time_list
-
-
-class SegmentURLBuilder:
- """Builds segment URLs from SegmentTemplate"""
-
- def __init__(self, ns_manager: NamespaceManager):
- self.ns = ns_manager
- self.timeline_parser = SegmentTimelineParser(ns_manager)
-
- def build_urls(self, seg_template: etree._Element, rep_id: str, bandwidth: int, base_url: str, period_duration: int = 0) -> Tuple[Optional[str], List[str], int, float]:
- """Build initialization and segment URLs"""
- init_template = seg_template.get('initialization')
- media_template = seg_template.get('media')
- start_number = int(seg_template.get('startNumber', 1))
- timescale = int(seg_template.get('timescale', 1) or 1)
- duration_attr = seg_template.get('duration')
-
- # Build init URL
- init_url = None
- if init_template:
- init_url = URLBuilder.build_url(base_url, init_template, rep_id=rep_id, bandwidth=bandwidth)
-
- # Parse timeline
- number_list, time_list = self.timeline_parser.parse(seg_template, start_number)
-
- segment_count = 0
- segment_duration = 0.0
-
- # Determine segment count
- if time_list:
- segment_count = len(time_list)
- elif number_list:
- segment_count = len(number_list)
- elif duration_attr:
-
- # Estimate from duration
- d = int(duration_attr)
- segment_duration = d / float(timescale)
-
- if period_duration > 0 and segment_duration > 0:
- segment_count = int((period_duration / segment_duration) + 0.5)
- else:
- segment_count = 100
-
- max_segments = min(segment_count, 20000)
- number_list = list(range(start_number, start_number + max_segments))
- else:
- segment_count = 100
- number_list = list(range(start_number, start_number + 100))
-
- # Build segment URLs
- segment_urls = self._build_segment_urls(
- media_template, base_url, rep_id, bandwidth, number_list, time_list
- )
-
- if not segment_count:
- segment_count = len(segment_urls)
-
- return init_url, segment_urls, segment_count, segment_duration
-
- def _build_segment_urls(self, template: str, base_url: str, rep_id: str, bandwidth: int, number_list: List[int], time_list: List[int]) -> List[str]:
- """Build list of segment URLs"""
- if not template:
- return []
-
- urls = []
-
- if '$Time$' in template and time_list:
- for t in time_list:
- urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, time=t, bandwidth=bandwidth))
- elif '$Number' in template and number_list:
- for n in number_list:
- urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, number=n, bandwidth=bandwidth))
- else:
- urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, bandwidth=bandwidth))
-
- return urls
-
-
-class MetadataExtractor:
- """Extracts metadata from representations"""
-
- def __init__(self, ns_manager: NamespaceManager):
- self.ns = ns_manager
-
- def get_audio_channels(self, rep_elem: etree._Element, adapt_elem: etree._Element) -> int:
- """Extract audio channel count"""
- for parent in (rep_elem, adapt_elem):
- if parent is None:
- continue
-
- for acc in self.ns.findall(parent, 'mpd:AudioChannelConfiguration'):
- val = acc.get('value')
- if val:
- try:
- return int(val)
- except ValueError:
- pass
- return 0
-
- @staticmethod
- def parse_frame_rate(frame_rate: Optional[str]) -> float:
- """Parse frame rate (e.g., '25' or '30000/1001')"""
- if not frame_rate:
- return 0.0
-
- fr = frame_rate.strip()
- if '/' in fr:
- try:
- num, den = fr.split('/', 1)
- return float(num) / float(den)
- except Exception:
- return 0.0
-
- try:
- return float(fr)
- except Exception:
- return 0.0
-
- @staticmethod
- def determine_content_type(mime_type: str, width: int, height: int, audio_sampling_rate: int, codecs: str) -> str:
- """Determine if content is video, audio, or other"""
- if mime_type:
- return mime_type.split('/')[0]
- elif width or height:
- return 'video'
- elif audio_sampling_rate or (codecs and 'mp4a' in codecs.lower()):
- return 'audio'
- return 'unknown'
-
- @staticmethod
- def clean_language(lang: str, content_type: str, rep_id: str, bandwidth: int) -> Optional[str]:
- """Clean and normalize language tag"""
- if lang and lang.lower() not in ['undefined', 'none', '']:
- return lang
- elif content_type == 'audio':
- return f"aud_{rep_id}" if rep_id else f"aud_{bandwidth or 0}"
- return None
-
-
-class RepresentationParser:
- """Parses DASH representations"""
-
- def __init__(self, ns_manager: NamespaceManager, url_resolver: BaseURLResolver):
- self.ns = ns_manager
- self.url_resolver = url_resolver
- self.segment_builder = SegmentURLBuilder(ns_manager)
- self.protection_handler = ContentProtectionHandler(ns_manager)
- self.metadata_extractor = MetadataExtractor(ns_manager)
-
- def parse_adaptation_set(
- self,
- adapt_set: etree._Element,
- base_url: str,
- period_duration: int = 0
- ) -> List[Dict[str, Any]]:
- """Parse all representations in adaptation set"""
- representations = []
-
- # Adaptation set attributes
- mime_type = adapt_set.get('mimeType', '')
- lang = adapt_set.get('lang', '')
- adapt_frame_rate = adapt_set.get('frameRate')
- content_type = adapt_set.get('contentType', '')
-
- # Resolve base URL
- adapt_base = self.url_resolver.resolve_base_url(adapt_set, base_url)
-
- # Check protection and extract default_KID
- adapt_protected = self.protection_handler.is_protected(adapt_set)
- adapt_default_kid = self.protection_handler.extract_default_kid(adapt_set)
-
- # Get segment template
- adapt_seg_template = self.ns.find(adapt_set, 'mpd:SegmentTemplate')
-
- # Parse each representation
- for rep_elem in self.ns.findall(adapt_set, 'mpd:Representation'):
- rep = self._parse_representation(
- rep_elem, adapt_set, adapt_seg_template,
- adapt_base, mime_type, lang, period_duration
- )
-
- if rep:
- rep_frame_rate = rep_elem.get('frameRate') or adapt_frame_rate
- rep['frame_rate'] = self.metadata_extractor.parse_frame_rate(rep_frame_rate)
- rep['channels'] = self.metadata_extractor.get_audio_channels(rep_elem, adapt_set)
- rep_protected = adapt_protected or self.protection_handler.is_protected(rep_elem)
- rep['protected'] = bool(rep_protected)
- rep_default_kid = self.protection_handler.extract_default_kid(rep_elem) or adapt_default_kid
- rep['default_kid'] = rep_default_kid
- if content_type:
- rep['type'] = content_type
-
- representations.append(rep)
-
- return representations
-
- def _parse_representation(self, rep_elem: etree._Element, adapt_set: etree._Element, adapt_seg_template: Optional[etree._Element], base_url: str, mime_type: str, lang: str, period_duration: int) -> Optional[Dict[str, Any]]:
- """Parse single representation"""
- rep_id = rep_elem.get('id')
- bandwidth = int(rep_elem.get('bandwidth', 0))
- codecs = rep_elem.get('codecs')
- width = int(rep_elem.get('width', 0))
- height = int(rep_elem.get('height', 0))
- audio_sampling_rate = int(rep_elem.get('audioSamplingRate', 0))
-
- # Find segment template
- rep_seg_template = self.ns.find(rep_elem, 'mpd:SegmentTemplate')
- seg_template = rep_seg_template if rep_seg_template is not None else adapt_seg_template
-
- # Handle SegmentBase (single file)
- if seg_template is None:
- return self._parse_segment_base(rep_elem, base_url, rep_id, bandwidth, codecs, width, height, audio_sampling_rate, mime_type, lang)
-
- # Build segment URLs
- rep_base = self.url_resolver.resolve_base_url(rep_elem, base_url)
- init_url, segment_urls, seg_count, seg_duration = self.segment_builder.build_urls(
- seg_template, rep_id, bandwidth, rep_base, period_duration
- )
-
- # Determine content type and language
- content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
- clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
-
- rep_data = {
- 'id': rep_id,
- 'type': content_type,
- 'codec': codecs,
- 'bandwidth': bandwidth,
- 'width': width,
- 'height': height,
- 'audio_sampling_rate': audio_sampling_rate,
- 'language': clean_lang,
- 'init_url': init_url,
- 'segment_urls': segment_urls,
- 'segment_count': seg_count,
- }
-
- if seg_duration:
- rep_data['segment_duration_seconds'] = seg_duration
-
- return rep_data
-
- def _parse_segment_base(self, rep_elem: etree._Element, base_url: str, rep_id: str, bandwidth: int, codecs: str, width: int, height: int, audio_sampling_rate: int, mime_type: str, lang: str) -> Optional[Dict[str, Any]]:
- """Parse representation with SegmentBase (single file)"""
- seg_base = self.ns.find(rep_elem, 'mpd:SegmentBase')
- rep_base = self.ns.find(rep_elem, 'mpd:BaseURL')
-
- if seg_base is None or rep_base is None or not (rep_base.text or "").strip():
- return None
-
- media_url = urljoin(base_url, rep_base.text.strip())
- content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
- clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
-
- return {
- 'id': rep_id,
- 'type': content_type,
- 'codec': codecs,
- 'bandwidth': bandwidth,
- 'width': width,
- 'height': height,
- 'audio_sampling_rate': audio_sampling_rate,
- 'language': clean_lang,
- 'init_url': media_url,
- 'segment_urls': [media_url],
- 'segment_count': 1,
- }
-
-
-class RepresentationFilter:
- """Filters and deduplicates representations"""
-
- @staticmethod
- def deduplicate_videos(reps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """Keep best video per resolution"""
- resolution_map = {}
-
- for rep in reps:
- key = (rep['width'], rep['height'])
-
- if key not in resolution_map:
- resolution_map[key] = rep
- else:
- existing = resolution_map[key]
- existing_rank = CodecQuality.get_video_codec_rank(existing['codec'])
- new_rank = CodecQuality.get_video_codec_rank(rep['codec'])
-
- if new_rank > existing_rank or (new_rank == existing_rank and rep['bandwidth'] > existing['bandwidth']):
- resolution_map[key] = rep
-
- return list(resolution_map.values())
-
- @staticmethod
- def deduplicate_audios(reps: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
- """Keep best audio per language"""
- audio_map = {}
-
- for rep in reps:
- key = (rep['language'], rep['audio_sampling_rate'])
-
- if key not in audio_map:
- audio_map[key] = rep
- else:
- existing = audio_map[key]
- existing_rank = CodecQuality.get_audio_codec_rank(existing['codec'])
- new_rank = CodecQuality.get_audio_codec_rank(rep['codec'])
-
- if new_rank > existing_rank or (new_rank == existing_rank and rep['bandwidth'] > existing['bandwidth']):
- audio_map[key] = rep
-
- return list(audio_map.values())
-
-
-class AdPeriodDetector:
- """Detects advertisement periods"""
-
- AD_INDICATORS = ['_ad/', 'ad_bumper', '/creative/', '_OandO/']
-
- @staticmethod
- def is_ad_period(period_id: str, base_url: str) -> bool:
- """Check if period is an advertisement"""
- for indicator in AdPeriodDetector.AD_INDICATORS:
- if indicator in base_url:
- return True
-
- if period_id and '_subclip_' in period_id:
- return False
-
- return False
-
-
-class FileTypeDetector:
- """Detects file types from URLs"""
-
- @staticmethod
- def infer_url_type(url: Optional[str]) -> Optional[str]:
- if not url:
- return None
- try:
- path = urlparse(url).path
- ext = Path(path).suffix
- return ext.lstrip(".").lower() if ext else None
- except Exception:
- return None
-
- @staticmethod
- def infer_segment_urls_type(urls: Optional[List[str]]) -> Optional[str]:
- if not urls:
- return None
-
- types = {FileTypeDetector.infer_url_type(u) for u in urls if u}
- types.discard(None)
-
- if not types:
- return None
- return next(iter(types)) if len(types) == 1 else "mixed"
-
-
-class TablePrinter:
- """Prints representation tables"""
-
- def __init__(self, mpd_duration: int, mpd_sub_list: list = None):
- self.mpd_duration = mpd_duration
- self.mpd_sub_list = mpd_sub_list or []
-
- def print_table(self, representations: List[Dict[str, Any]], selected_video: Optional[Dict[str, Any]] = None, selected_audio: Optional[Dict[str, Any]] = None, selected_subs: list = None):
- """Print tracks table using Rich tables"""
- approx = DurationUtils.format_duration(self.mpd_duration)
-
- videos = sorted([r for r in representations if r['type'] == 'video'],
- key=lambda r: (r['height'], r['width'], r['bandwidth']), reverse=True)
- audios = sorted([r for r in representations if r['type'] == 'audio'],
- key=lambda r: r['bandwidth'], reverse=True)
-
- # Create single table
- table = Table(show_header=True, header_style="bold")
- table.add_column("Type", style="cyan")
- table.add_column("Sel", width=3, style="green bold")
- table.add_column("Info", style="white")
- table.add_column("Resolution/ID", style="yellow")
- table.add_column("Bitrate", style="green")
- table.add_column("Codec", style="white")
- table.add_column("Lang/FPS", style="blue")
- table.add_column("Channels", style="magenta")
- table.add_column("Segments", style="white")
- table.add_column("Duration", style="white")
-
- # Add video tracks
- for vid in videos:
- checked = 'X' if selected_video and vid['id'] == selected_video['id'] else ' '
- cenc = "*CENC" if vid.get('protected') else ""
- fps = f"{vid['frame_rate']:.0f}" if vid.get('frame_rate') else ""
-
- table.add_row("Video", checked, f"Vid {cenc}", f"{vid['width']}x{vid['height']}", f"{vid['bandwidth'] // 1000} Kbps", vid.get('codec', ''), fps, vid['id'], str(vid['segment_count']), approx or "")
-
- # Add audio tracks
- for aud in audios:
- checked = 'X' if selected_audio and aud['id'] == selected_audio['id'] else ' '
- cenc = "*CENC" if aud.get('protected') else ""
- ch = f"{aud['channels']}CH" if aud.get('channels') else ""
-
- table.add_row("Audio", checked, f"Aud {cenc}", aud['id'], f"{aud['bandwidth'] // 1000} Kbps", aud.get('codec', ''), aud.get('language', ''), ch, str(aud['segment_count']), approx or "")
-
- # Add subtitle tracks from mpd_sub_list
- if self.mpd_sub_list:
- for sub in self.mpd_sub_list:
- checked = 'X' if selected_subs and sub in selected_subs else ' '
- language = sub.get('language')
- sub_type = sub.get('format')
-
- table.add_row("Subtitle", checked, f"Sub {sub_type}", language, "", "", language, "", "", approx or "")
-
- console.print(table)
-
-
-class MPD_Parser:
- """Main MPD parser class"""
-
- def __init__(self, mpd_url: str, auto_save: bool = True, save_dir: Optional[str] = None, mpd_sub_list: list = None):
- self.mpd_url = mpd_url
- self.auto_save = auto_save
- self.save_dir = Path(save_dir) if save_dir else None
- self.mpd_sub_list = mpd_sub_list or []
-
- self.root = None
- self.mpd_content = None
- self.pssh = None
- self.representations = []
- self.mpd_duration = 0
-
- # Initialize utility classes (will be set after parsing)
- self.ns_manager = None
- self.url_resolver = None
- self.protection_handler = None
- self.rep_parser = None
- self.table_printer = None
-
- def parse(self, custom_headers: Optional[Dict[str, str]] = None) -> None:
- """Parse the MPD file and extract all representations"""
- self._fetch_and_parse_mpd(custom_headers or {})
-
- # Initialize utility classes
- self.ns_manager = NamespaceManager(self.root)
- self.url_resolver = BaseURLResolver(self.mpd_url, self.ns_manager)
- self.protection_handler = ContentProtectionHandler(self.ns_manager)
- self.rep_parser = RepresentationParser(self.ns_manager, self.url_resolver)
-
- # Extract MPD duration
- duration_str = self.root.get('mediaPresentationDuration')
- self.mpd_duration = DurationUtils.parse_duration(duration_str)
- self.table_printer = TablePrinter(self.mpd_duration, self.mpd_sub_list)
-
- # Extract PSSH and representations
- self.pssh = self.protection_handler.extract_pssh(self.root)
- self._parse_representations()
- self._deduplicate_representations()
-
- # Auto-save if enabled
- if self.auto_save:
- self._auto_save_files()
-
- def _fetch_and_parse_mpd(self, custom_headers: Dict[str, str]) -> None:
- """Fetch MPD content and parse XML"""
- response = requests.get(self.mpd_url, headers=custom_headers, timeout=max_timeout, impersonate="chrome124")
- response.raise_for_status()
-
- logging.info(f"Successfully fetched MPD: {len(response.content)} bytes")
- self.mpd_content = response.content
- self.root = etree.fromstring(response.content)
-
- def _parse_representations(self) -> None:
- """Parse all representations from the MPD"""
- base_url = self.url_resolver.get_initial_base_url(self.root)
- rep_aggregator = {}
-
- periods = self.ns_manager.findall(self.root, './/mpd:Period')
-
- for period_idx, period in enumerate(periods):
- period_id = period.get('id', f'period_{period_idx}')
- period_base_url = self.url_resolver.resolve_base_url(period, base_url)
-
- # Skip ad periods
- if AdPeriodDetector.is_ad_period(period_id, period_base_url):
- continue
-
- # Get period duration
- period_duration_str = period.get('duration')
- period_duration = DurationUtils.parse_duration(period_duration_str) or self.mpd_duration
-
- # Parse adaptation sets
- for adapt_set in self.ns_manager.findall(period, 'mpd:AdaptationSet'):
- representations = self.rep_parser.parse_adaptation_set(
- adapt_set, period_base_url, period_duration
- )
-
- # Aggregate representations by ID
- for rep in representations:
- rep_id = rep['id']
- if rep_id not in rep_aggregator:
- rep_aggregator[rep_id] = rep
- else:
- # Concatenate segment URLs for multi-period content
- existing = rep_aggregator[rep_id]
- if rep['segment_urls']:
- existing['segment_urls'].extend(rep['segment_urls'])
- if not existing['init_url'] and rep['init_url']:
- existing['init_url'] = rep['init_url']
-
- self.representations = list(rep_aggregator.values())
-
- def _deduplicate_representations(self) -> None:
- """Remove duplicate representations"""
- videos = [r for r in self.representations if r['type'] == 'video']
- audios = [r for r in self.representations if r['type'] == 'audio']
- others = [r for r in self.representations if r['type'] not in ['video', 'audio']]
-
- deduplicated_videos = RepresentationFilter.deduplicate_videos(videos)
- deduplicated_audios = RepresentationFilter.deduplicate_audios(audios)
-
- self.representations = deduplicated_videos + deduplicated_audios + others
-
- def get_resolutions(self) -> List[Dict[str, Any]]:
- """Return list of video representations"""
- return [r for r in self.representations if r['type'] == 'video']
-
- def get_audios(self) -> List[Dict[str, Any]]:
- """Return list of audio representations"""
- return [r for r in self.representations if r['type'] == 'audio']
-
- def get_best_video(self) -> Optional[Dict[str, Any]]:
- """Return the best video representation"""
- videos = self.get_resolutions()
- if not videos:
- return None
- return max(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
-
- def get_best_audio(self) -> Optional[Dict[str, Any]]:
- """Return the best audio representation"""
- audios = self.get_audios()
- if not audios:
- return None
- return max(audios, key=lambda r: r['bandwidth'])
-
- @staticmethod
- def get_worst(representations: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
- """Return the worst representation"""
- videos = [r for r in representations if r['type'] == 'video']
- audios = [r for r in representations if r['type'] == 'audio']
-
- if videos:
- return min(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
- elif audios:
- return min(audios, key=lambda r: r['bandwidth'])
- return None
-
- @staticmethod
- def get_list(representations: List[Dict[str, Any]], type_filter: Optional[str] = None) -> List[Dict[str, Any]]:
- """Return filtered list of representations"""
- if type_filter:
- return [r for r in representations if r['type'] == type_filter]
- return representations
-
- def select_video(self, force_resolution: str = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
- """Select video representation based on resolution preference"""
- video_reps = self.get_resolutions()
- available_resolutions = [f"{rep['width']}x{rep['height']}" for rep in video_reps]
-
- # Use parameter or global config
- resolution = (force_resolution or FILTER_CUSTOM_RESOLUTION or "best").lower()
-
- if resolution == "best":
- selected_video = self.get_best_video()
- filter_custom_resolution = "Best"
- elif resolution == "worst":
- selected_video = self.get_worst(video_reps)
- filter_custom_resolution = "Worst"
- else:
- # Try to find specific resolution (e.g., "1080p" -> "1920x1080")
- selected_video = None
- for rep in video_reps:
- rep_res = f"{rep['width']}x{rep['height']}"
- if (resolution in rep_res.lower() or
- resolution.replace('p', '') in str(rep['height']) or
- rep_res.lower() == resolution):
- selected_video = rep
- break
-
- if not selected_video:
- # Fallback to best if specific resolution not found
- selected_video = self.get_best_video()
- filter_custom_resolution = f"{resolution} (fallback to Best)"
- else:
- filter_custom_resolution = resolution
-
- downloadable_video = f"{selected_video['width']}x{selected_video['height']}" if selected_video else "N/A"
- return selected_video, available_resolutions, filter_custom_resolution, downloadable_video
-
- def select_audio(self, preferred_audio_langs: Optional[List[str]] = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
- """Select audio representation based on language preference"""
- audio_reps = self.get_audios()
- available_langs = [rep['language'] for rep in audio_reps if rep['language']]
-
- # Use parameter or global config
- preferred_langs = preferred_audio_langs or DOWNLOAD_SPECIFIC_AUDIO
-
- selected_audio = None
- filter_custom_audio = "First"
-
- if preferred_langs:
- for lang in preferred_langs:
- for rep in audio_reps:
- if rep['language'] and rep['language'].lower() == lang.lower():
- selected_audio = rep
- filter_custom_audio = lang
- break
- if selected_audio:
- break
-
- if not selected_audio:
- selected_audio = self.get_best_audio()
-
- downloadable_audio = selected_audio['language'] if selected_audio else "N/A"
- return selected_audio, available_langs, filter_custom_audio, downloadable_audio
-
- def print_tracks_table(self, selected_video: Optional[Dict[str, Any]] = None, selected_audio: Optional[Dict[str, Any]] = None, selected_subs: list = None) -> None:
- """Print tracks table"""
- if self.table_printer:
- self.table_printer.print_table(self.representations, selected_video, selected_audio, selected_subs)
-
- def save_mpd(self, output_path: str) -> None:
- """Save raw MPD manifest"""
- if self.mpd_content is None:
- raise ValueError("MPD content not available. Call parse() first.")
-
- output_file = Path(output_path)
- output_file.parent.mkdir(parents=True, exist_ok=True)
-
- with open(output_file, 'wb') as f:
- f.write(self.mpd_content)
-
- logging.info(f"MPD manifest saved to: {output_file}")
-
- def save_best_video_json(self, output_path: str) -> None:
- """Save best video representation as JSON"""
- best_video = self.get_best_video()
- if best_video is None:
- raise ValueError("No video representation available.")
-
- video_json = dict(best_video)
- video_json["stream_type"] = "dash"
- video_json["init_url_type"] = FileTypeDetector.infer_url_type(video_json.get("init_url"))
- video_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(video_json.get("segment_urls"))
-
- output_file = Path(output_path)
- output_file.parent.mkdir(parents=True, exist_ok=True)
-
- with open(output_file, 'w', encoding='utf-8') as f:
- json.dump(video_json, f, indent=2, ensure_ascii=False)
-
- logging.info(f"Best video JSON saved to: {output_file}")
-
- def save_best_audio_json(self, output_path: str) -> None:
- """Save best audio representation as JSON"""
- best_audio = self.get_best_audio()
- if best_audio is None:
- raise ValueError("No audio representation available.")
-
- audio_json = dict(best_audio)
- audio_json["stream_type"] = "dash"
- audio_json["init_url_type"] = FileTypeDetector.infer_url_type(audio_json.get("init_url"))
- audio_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(audio_json.get("segment_urls"))
-
- output_file = Path(output_path)
- output_file.parent.mkdir(parents=True, exist_ok=True)
-
- with open(output_file, 'w', encoding='utf-8') as f:
- json.dump(audio_json, f, indent=2, ensure_ascii=False)
-
- logging.info(f"Best audio JSON saved to: {output_file}")
-
- def _auto_save_files(self) -> None:
- """Auto-save MPD files to tmp directory"""
- if not self.save_dir:
- return
-
- try:
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- self.save_dir.mkdir(parents=True, exist_ok=True)
-
- # Save MPD manifest
- mpd_path = self.save_dir / f"manifest_{timestamp}.mpd"
- self.save_mpd(str(mpd_path))
-
- # Save JSON files
- if self.get_best_video():
- video_path = self.save_dir / f"best_video_{timestamp}.json"
- self.save_best_video_json(str(video_path))
-
- if self.get_best_audio():
- audio_path = self.save_dir / f"best_audio_{timestamp}.json"
- self.save_best_audio_json(str(audio_path))
-
- except Exception as e:
- console.print(f"[red]Error during auto-save: {e}")
\ No newline at end of file
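The deleted parser's `URLBuilder` implemented DASH `SegmentTemplate` substitution, including the width-padded `$Number%0Nd$` form. A compact regex-based sketch of that substitution (not the removed implementation verbatim):

```python
import re

def fill_template(template: str, rep_id: str, number: int, bandwidth: int = 0) -> str:
    # $RepresentationID$ and $Bandwidth$ are plain string swaps.
    out = template.replace("$RepresentationID$", rep_id)
    out = out.replace("$Bandwidth$", str(bandwidth))
    # $Number%05d$ -> zero-pad to the requested width; bare $Number$ -> plain digits.
    out = re.sub(r"\$Number%0(\d+)d\$",
                 lambda m: str(number).zfill(int(m.group(1))), out)
    return out.replace("$Number$", str(number))

print(fill_template("$RepresentationID$/seg_$Number%05d$.m4s", "video_1080", 7))
# -> video_1080/seg_00007.m4s
```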
diff --git a/StreamingCommunity/Lib/DASH/segments.py b/StreamingCommunity/Lib/DASH/segments.py
index 48f5e0d4..5ad4bbbb 100644
--- a/StreamingCommunity/Lib/DASH/segments.py
+++ b/StreamingCommunity/Lib/DASH/segments.py
@@ -1,8 +1,9 @@
# 25.07.25
import os
-import asyncio
import time
+import struct
+import asyncio
from typing import Dict, Optional
from urllib.parse import urlparse
from pathlib import Path
@@ -25,21 +26,17 @@
# Config
-REQUEST_MAX_RETRY = config_manager.get_int('REQUESTS', 'max_retry')
-DEFAULT_VIDEO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_video_workers')
-DEFAULT_AUDIO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
-SEGMENT_MAX_TIMEOUT = config_manager.get_int("M3U8_DOWNLOAD", "segment_timeout")
-LIMIT_SEGMENT = config_manager.get_int('M3U8_DOWNLOAD', 'limit_segment')
-ENABLE_RETRY = config_manager.get_bool('M3U8_DOWNLOAD', 'enable_retry')
-CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-
-
-# Variable
console = Console()
+REQUEST_MAX_RETRY = config_manager.config.get_int('REQUESTS', 'max_retry')
+DEFAULT_VIDEO_WORKERS = config_manager.config.get_int('M3U8_DOWNLOAD', 'default_video_workers')
+DEFAULT_AUDIO_WORKERS = config_manager.config.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
+SEGMENT_MAX_TIMEOUT = config_manager.config.get_int("M3U8_DOWNLOAD", "segment_timeout")
+ENABLE_RETRY = config_manager.config.get_bool('M3U8_DOWNLOAD', 'enable_retry')
+CLEANUP_TMP = config_manager.config.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
class MPD_Segments:
- def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, limit_segments: int = None, custom_headers: Optional[Dict[str, str]] = None):
+ def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, custom_headers: Optional[Dict[str, str]] = None):
"""
-        Initialize MPD_Segments with temp folder, representation, optional pssh, and segment limit.
+        Initialize MPD_Segments with temp folder, representation, and optional pssh.
@@ -47,19 +44,12 @@ def __init__(self, tmp_folder: str, representation: dict, pssh: str = None, limi
- tmp_folder (str): Temporary folder to store downloaded segments
- representation (dict): Selected representation with segment URLs
- pssh (str, optional): PSSH string for decryption
- - limit_segments (int, optional): Optional limit for number of segments to download
"""
self.tmp_folder = tmp_folder
self.selected_representation = representation
self.pssh = pssh
self.custom_headers = custom_headers or {}
- # Use LIMIT_SEGMENT from config if limit_segments is not specified or is 0
- if limit_segments is None or limit_segments == 0:
- self.limit_segments = LIMIT_SEGMENT if LIMIT_SEGMENT > 0 else None
- else:
- self.limit_segments = limit_segments
-
self.enable_retry = ENABLE_RETRY
self.download_interrupted = False
self.info_nFailed = 0
@@ -87,14 +77,46 @@ def _infer_url_ext(url: Optional[str]) -> Optional[str]:
ext = Path(path).suffix
return ext.lstrip(".").lower() if ext else None
+ @staticmethod
+ def _has_varying_segment_urls(segment_urls: list) -> bool:
+ """
+ Check if segment URLs represent different files (not just different query params).
+ """
+ if not segment_urls or len(segment_urls) <= 1:
+ return False
+
+        # Compare only the path component: URLs that differ solely in
+        # query params or fragments still point at the same file
+        unique_paths = {urlparse(url).path for url in segment_urls}
+        return len(unique_paths) > 1
+
def _get_segment_url_type(self) -> Optional[str]:
"""Prefer representation field, otherwise infer from first segment URL."""
rep = self.selected_representation or {}
t = (rep.get("segment_url_type") or "").strip().lower()
if t:
return t
- urls = rep.get("segment_urls") or []
- return self._infer_url_ext(urls[0]) if urls else None
+
+ segment_urls = rep.get("segment_urls") or []
+ init_url = rep.get("init_url")
+
+        # If there is only one segment and init_url == segment_url, treat it as a single MP4 file
+ if len(segment_urls) == 1 and init_url and segment_urls[0] == init_url:
+ return "mp4"
+
+ # Check if segment URLs vary (different files vs same file with different params)
+ if self._has_varying_segment_urls(segment_urls):
+ # Different files = treat as segments (m4s-like)
+ return "m4s"
+
+ # Fallback to extension inference
+ return self._infer_url_ext(segment_urls[0]) if segment_urls else None
def _merged_headers(self) -> Dict[str, str]:
"""Ensure UA exists while keeping caller-provided headers."""
@@ -107,7 +129,14 @@ def get_concat_path(self, output_dir: str = None):
Get the path for the concatenated output file.
"""
rep_id = self.selected_representation['id']
- ext = "mp4" if (self._get_segment_url_type() == "mp4") else "m4s"
+ seg_type = self._get_segment_url_type()
+
+ # Use mp4 extension for both single MP4 and MP4 segments
+ if seg_type in ("mp4", "m4s"):
+ ext = "mp4"
+ else:
+ ext = "m4s"
+
return os.path.join(output_dir or self.tmp_folder, f"{rep_id}_encrypted.{ext}")
def get_segments_count(self) -> int:
@@ -173,12 +202,6 @@ def download_streams(self, output_dir: str = None, description: str = "DASH"):
"pssh": self.pssh,
}
- # Apply segment limit if specified
- if self.limit_segments is not None:
- orig_count = len(self.selected_representation.get('segment_urls', []))
- if orig_count > self.limit_segments:
- self.selected_representation['segment_urls'] = self.selected_representation['segment_urls'][:self.limit_segments]
-
# Run async download in sync mode
try:
res = asyncio.run(self.download_segments(output_dir=output_dir, description=description))
@@ -277,7 +300,18 @@ async def download_segments(self, output_dir: str = None, concurrent_downloads:
async def _download_init_segment(self, client, init_url, concat_path, progress_bar):
"""
Download the init segment and update progress/estimator.
+ For MP4 segments, skip init segment as each segment is a complete MP4.
"""
+ seg_type = self._get_segment_url_type()
+
+ # Skip init segment for MP4 segment files
+ if seg_type == "mp4" and self._has_varying_segment_urls(self.selected_representation.get('segment_urls', [])):
+ with open(concat_path, 'wb') as outfile:
+ pass
+
+ progress_bar.update(1)
+ return
+
if not init_url:
with open(concat_path, 'wb') as outfile:
pass
@@ -452,24 +486,71 @@ async def download_single(url, idx):
self.info_nFailed = nFailed_this_round
global_retry_count += 1
+ def _extract_moof_mdat_atoms(self, file_path):
+ """
+ Extracts only 'moof' and 'mdat' atoms from a fragmented MP4 file.
+ Returns a generator of bytes chunks.
+ """
+ with open(file_path, 'rb') as f:
+ while True:
+ header = f.read(8)
+ if len(header) < 8:
+ break
+
+ size, atom_type = struct.unpack(">I4s", header)
+ atom_type = atom_type.decode("ascii", errors="replace")
+ if size < 8:
+ break # Invalid atom
+
+ data = header + f.read(size - 8)
+ if atom_type in ("moof", "mdat"):
+ yield data
+
async def _concatenate_segments_in_order(self, temp_dir, concat_path, total_segments):
"""
Concatenate all segment files IN ORDER to the final output file.
+ For MP4 segments, write full init, then only moof/mdat from others.
+ For m4s segments, use init + segments approach.
"""
- with open(concat_path, 'ab') as outfile:
- for idx in range(total_segments):
- temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
-
- # Only concatenate successfully downloaded segments
- if idx in self.downloaded_segments and os.path.exists(temp_file):
- with open(temp_file, 'rb') as infile:
-
- # Read and write in chunks to avoid memory issues
- while True:
- chunk = infile.read(8192) # 8KB chunks
- if not chunk:
- break
- outfile.write(chunk)
+ seg_type = self._get_segment_url_type()
+ console.print(f"\n[cyan]Detected stream type: [green]{seg_type}")
+ is_mp4_segments = seg_type == "mp4" and self._has_varying_segment_urls(self.selected_representation.get('segment_urls', []))
+
+ if is_mp4_segments:
+ console.print("[cyan]Concatenating MP4 segments with moof/mdat extraction...")
+
+            # Write the first segment in full (it carries ftyp/moov), then only moof/mdat from the rest
+ with open(concat_path, 'wb') as outfile:
+ for idx in range(total_segments):
+ temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
+ if idx in self.downloaded_segments and os.path.exists(temp_file):
+ if idx == 0:
+
+ # Write full init segment
+ with open(temp_file, 'rb') as infile:
+ while True:
+ chunk = infile.read(8192)
+ if not chunk:
+ break
+ outfile.write(chunk)
+ else:
+ # Write only moof/mdat atoms
+ for atom in self._extract_moof_mdat_atoms(temp_file):
+ outfile.write(atom)
+
+ else:
+ console.print("[cyan]Concatenating m4s segments...")
+ with open(concat_path, 'ab') as outfile:
+ for idx in range(total_segments):
+ temp_file = os.path.join(temp_dir, f"seg_{idx:06d}.tmp")
+
+ if idx in self.downloaded_segments and os.path.exists(temp_file):
+ with open(temp_file, 'rb') as infile:
+ while True:
+ chunk = infile.read(8192)
+ if not chunk:
+ break
+ outfile.write(chunk)
def _get_bar_format(self, description: str) -> str:
"""
diff --git a/StreamingCommunity/Lib/FFmpeg/merge.py b/StreamingCommunity/Lib/FFmpeg/merge.py
index 0b7f3ce2..340c19bd 100644
--- a/StreamingCommunity/Lib/FFmpeg/merge.py
+++ b/StreamingCommunity/Lib/FFmpeg/merge.py
@@ -1,5 +1,6 @@
# 31.01.24
+import os
import logging
import subprocess
from typing import List, Dict
@@ -21,17 +22,14 @@
# Config
-DEBUG_MODE = config_manager.get_bool("DEFAULT", "debug")
-DEBUG_FFMPEG = "debug" if DEBUG_MODE else "error"
-USE_GPU = config_manager.get_bool("M3U8_CONVERSION", "use_gpu")
-PARAM_VIDEO = config_manager.get_list("M3U8_CONVERSION", "param_video")
-PARAM_AUDIO = config_manager.get_list("M3U8_CONVERSION", "param_audio")
-PARAM_FINAL = config_manager.get_list("M3U8_CONVERSION", "param_final")
-PARAM_SUBTITLES = config_manager.get_list("M3U8_CONVERSION", "param_subtitles")
-
-
-# Variable
console = Console()
+DEBUG_MODE = config_manager.config.get_bool("DEFAULT", "debug")
+DEBUG_FFMPEG = "debug" if DEBUG_MODE else "error"
+USE_GPU = config_manager.config.get_bool("M3U8_CONVERSION", "use_gpu")
+PARAM_VIDEO = config_manager.config.get_list("M3U8_CONVERSION", "param_video")
+PARAM_AUDIO = config_manager.config.get_list("M3U8_CONVERSION", "param_audio")
+PARAM_FINAL = config_manager.config.get_list("M3U8_CONVERSION", "param_final")
+SUBTITLE_DISPOSITION = config_manager.config.get_bool("M3U8_CONVERSION", "subtitle_disposition")
def add_encoding_params(ffmpeg_cmd: List[str]):
@@ -169,43 +167,52 @@ def join_audios(video_path: str, audio_tracks: List[Dict[str, str]], out_path: s
def join_subtitle(video_path: str, subtitles_list: List[Dict[str, str]], out_path: str):
"""
Joins subtitles with a video file using FFmpeg.
-
+
Parameters:
- - video (str): The path to the video file.
+ - video_path (str): The path to the video file.
- subtitles_list (list[dict[str, str]]): A list of dictionaries containing information about subtitles.
Each dictionary should contain the 'path' key with the path to the subtitle file and the 'name' key with the name of the subtitle.
- out_path (str): The path to save the output file.
"""
ffmpeg_cmd = [get_ffmpeg_path(), "-i", video_path]
-
+ output_ext = os.path.splitext(out_path)[1].lower()
+
+    # Determine subtitle codec based on output format:
+    # MP4 needs mov_text, while MKV (and any other container) can keep the source codec
+    subtitle_codec = 'mov_text' if output_ext == '.mp4' else 'copy'
+
# Add subtitle input files first
for subtitle in subtitles_list:
ffmpeg_cmd += ["-i", subtitle['path']]
-
+
# Add maps for video and audio streams
ffmpeg_cmd += ["-map", "0:v", "-map", "0:a"]
-
+
# Add subtitle maps and metadata
for idx, subtitle in enumerate(subtitles_list):
ffmpeg_cmd += ["-map", f"{idx + 1}:s"]
ffmpeg_cmd += ["-metadata:s:s:{}".format(idx), "title={}".format(subtitle['language'])]
-
- # For subtitles, we always use copy for video/audio and configured encoder for subtitles
- ffmpeg_cmd.extend(['-c:v', 'copy', '-c:a', 'copy'])
- # Add subtitle encoding parameters from config
- if PARAM_SUBTITLES:
- ffmpeg_cmd.extend(PARAM_SUBTITLES)
-
+ # For subtitles, we always use copy for video/audio
+ ffmpeg_cmd.extend(['-c:v', 'copy', '-c:a', 'copy', '-c:s', subtitle_codec])
+
+ # Set disposition for first subtitle if enabled
+ if SUBTITLE_DISPOSITION and len(subtitles_list) > 0:
+ ffmpeg_cmd.extend(['-disposition:s:0', 'default+forced'])
+
# Overwrite
ffmpeg_cmd += [out_path, "-y"]
logging.info(f"FFMPEG Command: {' '.join(ffmpeg_cmd)} \n")
-
+
# Run join
if DEBUG_MODE:
subprocess.run(ffmpeg_cmd, check=True)
else:
capture_ffmpeg_real_time(ffmpeg_cmd, "[yellow]FFMPEG [cyan]Join subtitle")
- print()
-
+
+ print()
return out_path
\ No newline at end of file
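
For reference, a minimal sketch of the command `join_subtitle` now assembles for an `.mkv` output with one subtitle track; the file names and language tag are placeholders:

```python
# Placeholder inputs mirroring join_subtitle's logic for an .mkv output
subtitles = [{"path": "sub_ita.vtt", "language": "ita"}]
out_path = "movie.mkv"

cmd = ["ffmpeg", "-i", "movie_video.mkv"]
for sub in subtitles:
    cmd += ["-i", sub["path"]]
cmd += ["-map", "0:v", "-map", "0:a"]
for idx, sub in enumerate(subtitles):
    cmd += ["-map", f"{idx + 1}:s", f"-metadata:s:s:{idx}", f"title={sub['language']}"]
cmd += ["-c:v", "copy", "-c:a", "copy", "-c:s", "copy"]  # 'mov_text' instead of 'copy' for .mp4
cmd += ["-disposition:s:0", "default+forced"]            # only when subtitle_disposition is enabled
cmd += [out_path, "-y"]
```
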
diff --git a/StreamingCommunity/Lib/FFmpeg/util.py b/StreamingCommunity/Lib/FFmpeg/util.py
index 5700be36..6ea3615a 100644
--- a/StreamingCommunity/Lib/FFmpeg/util.py
+++ b/StreamingCommunity/Lib/FFmpeg/util.py
@@ -165,7 +165,6 @@ def is_png_format_or_codec(file_info):
# Handle None values in format_name gracefully
format_name = file_info.get('format_name')
codec_names = file_info.get('codec_names', [])
-
console.log(f"[cyan]FFMPEG detect format: [green]{format_name}[cyan], codec: [green]{codec_names}")
return format_name == 'png_pipe' or 'png' in codec_names
@@ -177,10 +176,7 @@ def need_to_force_to_ts(file_path):
Parameters:
- file_path (str): Path to the input media file.
"""
-    file_info = get_ffprobe_info(file_path)
-
-    if is_png_format_or_codec(file_info):
-        logging.info(f"File {file_path} is in PNG format or contains a PNG codec. Need to convert to TS format.")
-        return True
-    return False
+    return is_png_format_or_codec(get_ffprobe_info(file_path))
diff --git a/StreamingCommunity/Lib/HLS/__init__.py b/StreamingCommunity/Lib/HLS/__init__.py
index 2a111a9a..c0282f18 100644
--- a/StreamingCommunity/Lib/HLS/__init__.py
+++ b/StreamingCommunity/Lib/HLS/__init__.py
@@ -1,7 +1,6 @@
# 17.12.25
from .downloader import HLS_Downloader
-from .decrypt import M3U8_Decryption
from .estimator import M3U8_Ts_Estimator
from .parser import M3U8_Parser
from .segments import M3U8_Segments
@@ -10,7 +9,6 @@
__all__ = [
"HLS_Downloader",
- "M3U8_Decryption",
"M3U8_Ts_Estimator",
"M3U8_Parser",
"M3U8_Segments",
diff --git a/StreamingCommunity/Lib/HLS/downloader.py b/StreamingCommunity/Lib/HLS/downloader.py
index bdabdfd5..8742f7b9 100644
--- a/StreamingCommunity/Lib/HLS/downloader.py
+++ b/StreamingCommunity/Lib/HLS/downloader.py
@@ -27,13 +27,13 @@
# Config
console = Console()
-DOWNLOAD_SPECIFIC_AUDIO = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
-DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
-MERGE_SUBTITLE = config_manager.get_bool('M3U8_DOWNLOAD', 'merge_subs')
-CLEANUP_TMP = config_manager.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
-GET_ONLY_LINK = config_manager.get_int('M3U8_DOWNLOAD', 'get_only_link')
-FILTER_CUSTOM_RESOLUTION = str(config_manager.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
-EXTENSION_OUTPUT = config_manager.get("M3U8_CONVERSION", "extension")
+DOWNLOAD_SPECIFIC_AUDIO = config_manager.config.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
+DOWNLOAD_SPECIFIC_SUBTITLE = config_manager.config.get_list('M3U8_DOWNLOAD', 'specific_list_subtitles')
+MERGE_SUBTITLE = config_manager.config.get_bool('M3U8_DOWNLOAD', 'merge_subs')
+CLEANUP_TMP = config_manager.config.get_bool('M3U8_DOWNLOAD', 'cleanup_tmp_folder')
+GET_ONLY_LINK = config_manager.config.get_int('M3U8_DOWNLOAD', 'get_only_link')
+FILTER_CUSTOM_RESOLUTION = str(config_manager.config.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
+EXTENSION_OUTPUT = config_manager.config.get("M3U8_CONVERSION", "extension")
class HLSClient:
@@ -356,7 +356,6 @@ def download_audio(self, audio: Dict) -> bool:
url=audio_full_url,
tmp_folder=audio_tmp_dir,
license_url=self.license_url,
- limit_segments=self.video_segments_count if self.video_segments_count > 0 else None,
custom_headers=self.custom_headers
)
diff --git a/StreamingCommunity/Lib/HLS/segments.py b/StreamingCommunity/Lib/HLS/segments.py
index 96413516..5a2b65b7 100644
--- a/StreamingCommunity/Lib/HLS/segments.py
+++ b/StreamingCommunity/Lib/HLS/segments.py
@@ -23,7 +23,7 @@
# Logic class
-from .decrypt import M3U8_Decryption
+from ..DASH.extractor import ClearKey
from .estimator import M3U8_Ts_Estimator
from .parser import M3U8_Parser
from .url_fixer import M3U8_UrlFix
@@ -31,27 +31,23 @@
# External
from ..MP4 import MP4_Downloader
-from ..DASH.cdm_helpher import get_widevine_keys
+from ..DASH.extractor import get_widevine_keys
from ..DASH.decrypt import decrypt_with_mp4decrypt
# Config
-REQUEST_MAX_RETRY = config_manager.get_int('REQUESTS', 'max_retry')
-REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
-DEFAULT_VIDEO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_video_workers')
-DEFAULT_AUDIO_WORKERS = config_manager.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
-MAX_TIMEOUT = config_manager.get_int("REQUESTS", "timeout")
-SEGMENT_MAX_TIMEOUT = config_manager.get_int("M3U8_DOWNLOAD", "segment_timeout")
-LIMIT_SEGMENT = config_manager.get_int('M3U8_DOWNLOAD', 'limit_segment')
-ENABLE_RETRY = config_manager.get_bool('M3U8_DOWNLOAD', 'enable_retry')
-
-
-# Variable
console = Console()
+REQUEST_MAX_RETRY = config_manager.config.get_int('REQUESTS', 'max_retry')
+REQUEST_VERIFY = config_manager.config.get_bool('REQUESTS', 'verify')
+DEFAULT_VIDEO_WORKERS = config_manager.config.get_int('M3U8_DOWNLOAD', 'default_video_workers')
+DEFAULT_AUDIO_WORKERS = config_manager.config.get_int('M3U8_DOWNLOAD', 'default_audio_workers')
+MAX_TIMEOUT = config_manager.config.get_int("REQUESTS", "timeout")
+SEGMENT_MAX_TIMEOUT = config_manager.config.get_int("M3U8_DOWNLOAD", "segment_timeout")
+ENABLE_RETRY = config_manager.config.get_bool('M3U8_DOWNLOAD', 'enable_retry')
class M3U8_Segments:
- def __init__(self, url: str, tmp_folder: str, license_url: Optional[str] = None, is_index_url: bool = True, limit_segments: int = None, custom_headers: Optional[Dict[str, str]] = None):
+ def __init__(self, url: str, tmp_folder: str, license_url: Optional[str] = None, is_index_url: bool = True, custom_headers: Optional[Dict[str, str]] = None):
"""
Initializes the M3U8_Segments object.
@@ -59,7 +55,6 @@ def __init__(self, url: str, tmp_folder: str, license_url: Optional[str] = None,
- url (str): The URL of the M3U8 playlist.
- tmp_folder (str): The temporary folder to store downloaded segments.
-        - is_index_url (bool): Flag indicating if url is a URL (default True).
+        - is_index_url (bool): Whether `url` points to the playlist index (default True).
- - limit_segments (int): Optional limit for number of segments (overrides LIMIT_SEGMENT if provided).
- custom_headers (Dict[str, str]): Optional custom headers to use for all requests.
"""
self.url = url
@@ -70,17 +65,10 @@ def __init__(self, url: str, tmp_folder: str, license_url: Optional[str] = None,
self.final_output_path = os.path.join(self.tmp_folder, "0.ts")
self.drm_method = None
os.makedirs(self.tmp_folder, exist_ok=True)
-
- # Use LIMIT_SEGMENT from config if limit_segments not specified or is 0
- if limit_segments is None or limit_segments == 0:
- self.limit_segments = LIMIT_SEGMENT if LIMIT_SEGMENT > 0 else None
- else:
- self.limit_segments = limit_segments
-
self.enable_retry = ENABLE_RETRY
# Util class
- self.decryption: M3U8_Decryption = None
+ self.decryption: ClearKey = None
self.class_ts_estimator = M3U8_Ts_Estimator(0, self)
self.class_url_fixer = M3U8_UrlFix(url)
@@ -130,18 +118,12 @@ def parse_data(self, m3u8_content: str) -> None:
if m3u8_parser.keys:
key = self.__get_key__(m3u8_parser)
- self.decryption = M3U8_Decryption(key, m3u8_parser.keys.get('iv'), m3u8_parser.keys.get('method'), m3u8_parser.keys.get('pssh'))
+ self.decryption = ClearKey(key, m3u8_parser.keys.get('iv'), m3u8_parser.keys.get('method'), m3u8_parser.keys.get('pssh'))
segments = [
self.class_url_fixer.generate_full_url(seg) if "http" not in seg else seg
for seg in m3u8_parser.segments
]
-
- # Apply segment limit
- if self.limit_segments and len(segments) > self.limit_segments:
- logging.info(f"Limiting segments from {len(segments)} to {self.limit_segments}")
- segments = segments[:self.limit_segments]
-
self.segments = segments
self.stream_type = self.get_type_stream(self.segments)
self.class_ts_estimator.total_segments = len(self.segments)
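
`M3U8_UrlFix.generate_full_url` is not shown in this hunk; as a rough, hypothetical stand-in for what the segment normalization above needs, resolving a relative segment path against the playlist URL can be as simple as:

```python
from urllib.parse import urljoin

# Hypothetical stand-in for M3U8_UrlFix.generate_full_url
playlist_url = "https://cdn.example.com/hls/720p/index.m3u8"
print(urljoin(playlist_url, "seg_001.ts"))
# -> https://cdn.example.com/hls/720p/seg_001.ts
```
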
diff --git a/StreamingCommunity/Lib/MP4/downloader.py b/StreamingCommunity/Lib/MP4/downloader.py
index f2633944..29e55535 100644
--- a/StreamingCommunity/Lib/MP4/downloader.py
+++ b/StreamingCommunity/Lib/MP4/downloader.py
@@ -25,14 +25,10 @@
# Config
-REQUEST_VERIFY = config_manager.get_bool('REQUESTS', 'verify')
-REQUEST_TIMEOUT = config_manager.get_float('REQUESTS', 'timeout')
-
-
-# Variable
msg = Prompt()
console = Console()
-extension_output = config_manager.get("M3U8_CONVERSION", "extension")
+REQUEST_VERIFY = config_manager.config.get_bool('REQUESTS', 'verify')
+REQUEST_TIMEOUT = config_manager.config.get_float('REQUESTS', 'timeout')
class InterruptHandler:
diff --git a/StreamingCommunity/Lib/MPD/__init__.py b/StreamingCommunity/Lib/MPD/__init__.py
new file mode 100644
index 00000000..b6818db5
--- /dev/null
+++ b/StreamingCommunity/Lib/MPD/__init__.py
@@ -0,0 +1,10 @@
+# 29.12.25
+
+from .parser import MPD_Parser
+from .constants import DRMSystem, CodecQuality
+
+__all__ = [
+ 'MPD_Parser',
+ 'DRMSystem',
+ 'CodecQuality',
+]
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/MPD/constants.py b/StreamingCommunity/Lib/MPD/constants.py
new file mode 100644
index 00000000..9f4d45a4
--- /dev/null
+++ b/StreamingCommunity/Lib/MPD/constants.py
@@ -0,0 +1,100 @@
+# 29.12.25
+
+from typing import Optional
+
+
+class DRMSystem:
+ """DRM system constants and utilities"""
+ WIDEVINE = 'widevine'
+ PLAYREADY = 'playready'
+ FAIRPLAY = 'fairplay'
+
+ # UUID mappings
+ UUIDS = {
+ WIDEVINE: 'edef8ba9-79d6-4ace-a3c8-27dcd51d21ed',
+ PLAYREADY: '9a04f079-9840-4286-ab92-e65be0885f95',
+ FAIRPLAY: '94ce86fb-07ff-4f43-adb8-93d2fa968ca2'
+ }
+
+ # Display abbreviations
+ ABBREV = {
+ WIDEVINE: 'WV',
+ PLAYREADY: 'PR',
+ FAIRPLAY: 'FP'
+ }
+
+ # Fallback priority order
+ PRIORITY = [WIDEVINE, PLAYREADY, FAIRPLAY]
+
+ # CENC protection scheme
+ CENC_SCHEME = 'urn:mpeg:dash:mp4protection:2011'
+
+ @classmethod
+ def get_uuid(cls, drm_type: str) -> Optional[str]:
+ """Get UUID for DRM type"""
+ return cls.UUIDS.get(drm_type.lower())
+
+ @classmethod
+ def get_abbrev(cls, drm_type: str) -> str:
+ """Get abbreviation for DRM type"""
+ return cls.ABBREV.get(drm_type.lower(), drm_type.upper()[:2])
+
+ @classmethod
+ def from_uuid(cls, uuid: str) -> Optional[str]:
+ """Get DRM type from UUID"""
+ uuid_lower = uuid.lower()
+ for drm_type, drm_uuid in cls.UUIDS.items():
+ if drm_uuid in uuid_lower:
+ return drm_type
+ return None
+
+
+class CodecQuality:
+ """Codec quality rankings"""
+ VIDEO_CODEC_RANK = {
+ 'av01': 5, 'vp9': 4, 'vp09': 4, 'hev1': 3,
+ 'hvc1': 3, 'avc1': 2, 'avc3': 2, 'mp4v': 1,
+ }
+
+ AUDIO_CODEC_RANK = {
+ 'opus': 5, 'mp4a.40.2': 4, 'mp4a.40.5': 3,
+ 'mp4a': 2, 'ac-3': 2, 'ec-3': 3,
+ }
+
+ @staticmethod
+ def get_video_codec_rank(codec: Optional[str]) -> int:
+ if not codec:
+ return 0
+ codec_lower = codec.lower()
+ for key, rank in CodecQuality.VIDEO_CODEC_RANK.items():
+ if codec_lower.startswith(key):
+ return rank
+ return 0
+
+ @staticmethod
+ def get_audio_codec_rank(codec: Optional[str]) -> int:
+ if not codec:
+ return 0
+ codec_lower = codec.lower()
+ for key, rank in CodecQuality.AUDIO_CODEC_RANK.items():
+ if codec_lower.startswith(key):
+ return rank
+ return 0
+
+
+class AdPeriodDetector:
+ """Detects advertisement periods"""
+
+ AD_INDICATORS = ['_ad/', 'ad_bumper', '/creative/', '_OandO/']
+
+ @staticmethod
+ def is_ad_period(period_id: str, base_url: str) -> bool:
+ """Check if period is an advertisement"""
+        # Periods whose id contains '_subclip_' are main content,
+        # even when the base URL looks ad-like
+        if period_id and '_subclip_' in period_id:
+            return False
+
+        for indicator in AdPeriodDetector.AD_INDICATORS:
+            if indicator in base_url:
+                return True
+
+        return False
\ No newline at end of file
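
A quick illustration of how these constants behave; the outputs follow directly from the dictionaries above, and the `schemeIdUri` value is a typical example:

```python
from StreamingCommunity.Lib.MPD.constants import DRMSystem, CodecQuality

scheme = "urn:uuid:EDEF8BA9-79D6-4ACE-A3C8-27DCD51D21ED"   # common Widevine schemeIdUri
print(DRMSystem.from_uuid(scheme))                          # 'widevine' (case-insensitive)
print(DRMSystem.get_abbrev(DRMSystem.PLAYREADY))            # 'PR'
print(CodecQuality.get_video_codec_rank("hvc1.2.4.L123"))   # 3 (prefix match on 'hvc1')
print(CodecQuality.get_audio_codec_rank("mp4a.40.2"))       # 4
```
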
diff --git a/StreamingCommunity/Lib/MPD/handlers.py b/StreamingCommunity/Lib/MPD/handlers.py
new file mode 100644
index 00000000..f0076797
--- /dev/null
+++ b/StreamingCommunity/Lib/MPD/handlers.py
@@ -0,0 +1,489 @@
+# 29.12.25
+
+from typing import List, Dict, Optional, Tuple, Any
+from urllib.parse import urljoin
+
+
+# External library
+from lxml import etree
+from rich.console import Console
+
+
+# Logic
+from .constants import DRMSystem, CodecQuality
+from .utils import URLBuilder, NamespaceManager, MetadataExtractor
+
+
+# Variable
+console = Console()
+
+
+class ContentProtectionHandler:
+ """Handles DRM and content protection"""
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def is_protected(self, element: etree._Element) -> bool:
+ """Check if element has DRM protection"""
+ for cp in self.ns.findall(element, 'mpd:ContentProtection'):
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ value = (cp.get('value') or '').lower()
+
+            # Check for CENC (any non-empty value, e.g. 'cenc' or 'cbcs')
+            if DRMSystem.CENC_SCHEME in scheme_id and value:
+ return True
+
+ # Check for any DRM UUID
+ if DRMSystem.from_uuid(scheme_id):
+ return True
+
+ return False
+
+ def get_encryption_method(self, element: etree._Element) -> Optional[str]:
+ """
+ Extract encryption method from ContentProtection elements.
+ Returns: 'ctr', 'cbc', 'cenc', 'cbcs', 'cbc1', 'cens' or None
+ """
+ for cp in self.ns.findall(element, 'mpd:ContentProtection'):
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ value = (cp.get('value') or '').lower()
+
+ # Check CENC scheme with value attribute
+ if DRMSystem.CENC_SCHEME in scheme_id and value:
+ if value in ['cenc', 'cens']:
+ return 'ctr' # AES CTR mode
+ elif value in ['cbc1', 'cbcs']:
+ return 'cbc' # AES CBC mode
+ return value
+
+ return None
+
+ def get_drm_types(self, element: etree._Element) -> List[str]:
+ """Determine all DRM types from ContentProtection elements that actually have PSSH data."""
+ drm_types = []
+
+ for cp in self.ns.findall(element, 'mpd:ContentProtection'):
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ drm_type = DRMSystem.from_uuid(scheme_id)
+
+ if drm_type and drm_type not in drm_types:
+ if self._has_pssh_data(cp, drm_type):
+ drm_types.append(drm_type)
+
+ return drm_types
+
+ def _has_pssh_data(self, cp_element: etree._Element, drm_type: str) -> bool:
+ """Check if ContentProtection element has actual PSSH data for the DRM type."""
+ pssh = self.ns.find(cp_element, 'cenc:pssh')
+ if pssh is not None and pssh.text and pssh.text.strip():
+ return True
+
+ # For PlayReady, check mspr:pro
+ if drm_type == DRMSystem.PLAYREADY:
+ pro = self.ns.find(cp_element, 'mspr:pro')
+ if pro is not None and pro.text and pro.text.strip():
+ return True
+ return False
+
+ def get_primary_drm_type(self, element: etree._Element, preferred_drm: str = DRMSystem.WIDEVINE) -> Optional[str]:
+ """
+ Get primary DRM type based on preference.
+
+ Args:
+ element: XML element to check
+ preferred_drm: Preferred DRM system ('widevine', 'playready', 'auto')
+
+ Returns: Primary DRM type to use
+ """
+ drm_types = self.get_drm_types(element)
+
+ if not drm_types:
+ return None
+
+ # If only one DRM, return it
+ if len(drm_types) == 1:
+ return drm_types[0]
+
+ # Multiple DRM systems, apply preference
+ if preferred_drm in drm_types:
+ return preferred_drm
+
+ # Fallback to priority order
+ for fallback in DRMSystem.PRIORITY:
+ if fallback in drm_types:
+ return fallback
+
+ return drm_types[0]
+
+ def extract_default_kid(self, element: etree._Element) -> Optional[str]:
+ """Extract default_KID from ContentProtection elements (Widevine/PlayReady/CENC)."""
+ def _extract_kid_from_cp(cp: etree._Element) -> Optional[str]:
+ kid = (cp.get('{urn:mpeg:cenc:2013}default_KID') or
+ cp.get('default_KID') or
+ cp.get('cenc:default_KID'))
+
+ # Fallback: any attribute key that ends with 'default_KID' (case-insensitive)
+ if not kid:
+ for k, v in (cp.attrib or {}).items():
+ if isinstance(k, str) and k.lower().endswith('default_kid') and v:
+ kid = v
+ break
+
+ if not kid:
+ return None
+
+ # Normalize UUID -> hex (no dashes), lowercase
+ return kid.strip().replace('-', '').lower()
+
+ cps = self.ns.findall(element, 'mpd:ContentProtection')
+ if not cps:
+ return None
+
+ # Prefer Widevine KID, then CENC protection, then any other CP
+ preferred = []
+ fallback = []
+
+ for cp in cps:
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ if DRMSystem.UUIDS[DRMSystem.WIDEVINE] in scheme_id:
+ preferred.append(cp)
+ elif DRMSystem.CENC_SCHEME in scheme_id:
+ preferred.append(cp)
+ else:
+ fallback.append(cp)
+
+ for cp in preferred + fallback:
+ kid = _extract_kid_from_cp(cp)
+ if kid:
+ return kid
+
+ return None
+
+ def extract_pssh(self, root: etree._Element, drm_type: str = DRMSystem.WIDEVINE) -> Optional[str]:
+ """
+ Extract PSSH (Protection System Specific Header) for specific DRM type.
+
+ Args:
+ root: XML root element
+ drm_type: DRM type ('widevine', 'playready', 'fairplay')
+ """
+ target_uuid = DRMSystem.get_uuid(drm_type)
+ if not target_uuid:
+ return None
+
+ # Search in all ContentProtection elements in the entire MPD
+ all_cps = self.ns.findall(root, './/mpd:ContentProtection')
+
+ # Try specific DRM type first
+ for cp in all_cps:
+ scheme_id = (cp.get('schemeIdUri') or '').lower()
+ if target_uuid in scheme_id:
+ pssh = self.ns.find(cp, 'cenc:pssh')
+ if pssh is not None and pssh.text and pssh.text.strip():
+ return pssh.text.strip()
+
+ if drm_type == DRMSystem.PLAYREADY:
+ pro = self.ns.find(cp, 'mspr:pro')
+ if pro is not None and pro.text and pro.text.strip():
+ return pro.text.strip()
+
+ return None
+
+
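
The KID normalization in `extract_default_kid` reduces any UUID-style `default_KID` to bare 32-character lowercase hex, the form typically used when matching content keys downstream. Standalone:

```python
# UUID form with dashes -> 32-char lowercase hex, as extract_default_kid returns it
kid = "1077EFEC-C0B2-4D02-ACE3-3C1E52E2FB4B"
print(kid.strip().replace('-', '').lower())
# -> 1077efecc0b24d02ace33c1e52e2fb4b
```
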
+class SegmentTimelineParser:
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def parse(self, seg_template: etree._Element, start_number: int = 1) -> Tuple[List[int], List[int]]:
+ """Parse SegmentTimeline and return (number_list, time_list)"""
+ seg_timeline = self.ns.find(seg_template, 'mpd:SegmentTimeline')
+ if seg_timeline is None:
+ return [], []
+
+ number_list = []
+ time_list = []
+ current_time = 0
+ current_number = start_number
+
+ for s_elem in self.ns.findall(seg_timeline, 'mpd:S'):
+ d = s_elem.get('d')
+ if d is None:
+ continue
+
+ d = int(d)
+
+ # Explicit time
+ if s_elem.get('t') is not None:
+ current_time = int(s_elem.get('t'))
+
+            # Repeat count: r == -1 means "repeat until the end of the period",
+            # which this parser approximates as a single occurrence
+            r = int(s_elem.get('r', 0))
+            if r == -1:
+                r = 0
+
+ # Add segments
+ for _ in range(r + 1):
+ number_list.append(current_number)
+ time_list.append(current_time)
+ current_number += 1
+ current_time += d
+
+ return number_list, time_list
+
+
+class SegmentURLBuilder:
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+ self.timeline_parser = SegmentTimelineParser(ns_manager)
+
+ def build_urls(self, seg_template: etree._Element, rep_id: str, bandwidth: int, base_url: str, period_duration: int = 0) -> Tuple[Optional[str], List[str], int, float]:
+ """Build initialization and segment URLs"""
+ init_template = seg_template.get('initialization')
+ media_template = seg_template.get('media')
+ start_number = int(seg_template.get('startNumber', 1))
+ timescale = int(seg_template.get('timescale', 1) or 1)
+ duration_attr = seg_template.get('duration')
+
+ # Build init URL
+ init_url = None
+ if init_template:
+ init_url = URLBuilder.build_url(base_url, init_template, rep_id=rep_id, bandwidth=bandwidth)
+
+ # Parse timeline
+ number_list, time_list = self.timeline_parser.parse(seg_template, start_number)
+
+ segment_count = 0
+ segment_duration = 0.0
+
+ # Determine segment count
+ if time_list:
+ segment_count = len(time_list)
+ elif number_list:
+ segment_count = len(number_list)
+ elif duration_attr:
+ d = int(duration_attr)
+ segment_duration = d / float(timescale)
+
+ if period_duration > 0 and segment_duration > 0:
+ segment_count = int((period_duration / segment_duration) + 0.5)
+ else:
+ segment_count = 100
+
+ max_segments = min(segment_count, 20000)
+ number_list = list(range(start_number, start_number + max_segments))
+        else:
+            # No timeline and no duration attribute: fall back to a fixed guess
+            segment_count = 100
+            number_list = list(range(start_number, start_number + segment_count))
+
+ # Build segment URLs
+ segment_urls = self._build_segment_urls(
+ media_template, base_url, rep_id, bandwidth, number_list, time_list
+ )
+
+ if not segment_count:
+ segment_count = len(segment_urls)
+
+ return init_url, segment_urls, segment_count, segment_duration
+
+ def _build_segment_urls(self, template: str, base_url: str, rep_id: str, bandwidth: int, number_list: List[int], time_list: List[int]) -> List[str]:
+ """Build list of segment URLs"""
+ if not template:
+ return []
+
+ urls = []
+
+ if '$Time$' in template and time_list:
+ for t in time_list:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, time=t, bandwidth=bandwidth))
+ elif '$Number' in template and number_list:
+ for n in number_list:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, number=n, bandwidth=bandwidth))
+ else:
+ urls.append(URLBuilder.build_url(base_url, template, rep_id=rep_id, bandwidth=bandwidth))
+
+ return urls
+
+
+class RepresentationParser:
+ def __init__(self, ns_manager: NamespaceManager, url_resolver):
+ self.ns = ns_manager
+ self.url_resolver = url_resolver
+ self.segment_builder = SegmentURLBuilder(ns_manager)
+ self.protection_handler = ContentProtectionHandler(ns_manager)
+ self.metadata_extractor = MetadataExtractor(ns_manager)
+
+ def parse_adaptation_set(self, adapt_set: etree._Element, base_url: str, period_duration: int = 0) -> List[Dict[str, Any]]:
+ """Parse all representations in adaptation set"""
+ representations = []
+
+ # Adaptation set attributes
+ mime_type = adapt_set.get('mimeType', '')
+ lang = adapt_set.get('lang', '')
+ adapt_frame_rate = adapt_set.get('frameRate')
+ content_type = adapt_set.get('contentType', '')
+ adapt_width = int(adapt_set.get('width', 0))
+ adapt_height = int(adapt_set.get('height', 0))
+
+ # Resolve base URL
+ adapt_base = self.url_resolver.resolve_base_url(adapt_set, base_url)
+
+ # Check protection and extract default_KID and encryption method
+ adapt_protected = self.protection_handler.is_protected(adapt_set)
+ adapt_default_kid = self.protection_handler.extract_default_kid(adapt_set)
+ adapt_encryption_method = self.protection_handler.get_encryption_method(adapt_set)
+ adapt_drm_types = self.protection_handler.get_drm_types(adapt_set)
+ adapt_drm_type = self.protection_handler.get_primary_drm_type(adapt_set)
+
+ # Get segment template
+ adapt_seg_template = self.ns.find(adapt_set, 'mpd:SegmentTemplate')
+
+ # Parse each representation
+ for rep_elem in self.ns.findall(adapt_set, 'mpd:Representation'):
+ rep_mime_type = rep_elem.get('mimeType', mime_type)
+ if rep_mime_type and 'webm' in rep_mime_type.lower():
+ continue
+
+ rep = self._parse_representation(
+ rep_elem, adapt_set, adapt_seg_template,
+ adapt_base, mime_type, lang, period_duration,
+ adapt_width, adapt_height
+ )
+
+ if rep:
+ rep_frame_rate = rep_elem.get('frameRate') or adapt_frame_rate
+ rep['frame_rate'] = self.metadata_extractor.parse_frame_rate(rep_frame_rate)
+ rep['channels'] = self.metadata_extractor.get_audio_channels(rep_elem, adapt_set)
+ rep_protected = adapt_protected or self.protection_handler.is_protected(rep_elem)
+ rep['protected'] = bool(rep_protected)
+ rep_default_kid = self.protection_handler.extract_default_kid(rep_elem) or adapt_default_kid
+ rep['default_kid'] = rep_default_kid
+ rep_encryption_method = self.protection_handler.get_encryption_method(rep_elem) or adapt_encryption_method
+ rep['encryption_method'] = rep_encryption_method
+
+ # Get all DRM types and primary DRM type
+ rep_drm_types = self.protection_handler.get_drm_types(rep_elem) or adapt_drm_types
+ rep_drm_type = self.protection_handler.get_primary_drm_type(rep_elem) or adapt_drm_type
+ rep['drm_types'] = rep_drm_types
+ rep['drm_type'] = rep_drm_type
+
+ if content_type:
+ rep['type'] = content_type
+
+ representations.append(rep)
+
+ return representations
+
+ def _parse_representation(self, rep_elem: etree._Element, adapt_set: etree._Element,
+ adapt_seg_template: Optional[etree._Element], base_url: str,
+ mime_type: str, lang: str, period_duration: int,
+ adapt_width: int = 0, adapt_height: int = 0) -> Optional[Dict[str, Any]]:
+ """Parse single representation"""
+ rep_id = rep_elem.get('id')
+ bandwidth = int(rep_elem.get('bandwidth', 0))
+ codecs = rep_elem.get('codecs')
+
+ width = int(rep_elem.get('width') or adapt_width or 0)
+ height = int(rep_elem.get('height') or adapt_height or 0)
+ audio_sampling_rate = int(rep_elem.get('audioSamplingRate', 0))
+
+ # Find segment template
+ rep_seg_template = self.ns.find(rep_elem, 'mpd:SegmentTemplate')
+ seg_template = rep_seg_template if rep_seg_template is not None else adapt_seg_template
+
+ # Handle SegmentBase (single file)
+ if seg_template is None:
+ return self._parse_segment_base(rep_elem, base_url, rep_id, bandwidth, codecs, width, height, audio_sampling_rate, mime_type, lang)
+
+ # Build segment URLs
+ rep_base = self.url_resolver.resolve_base_url(rep_elem, base_url)
+ init_url, segment_urls, seg_count, seg_duration = self.segment_builder.build_urls(
+ seg_template, rep_id, bandwidth, rep_base, period_duration
+ )
+
+ # Determine content type and language
+ content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
+ clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
+
+ rep_data = {
+ 'id': rep_id,
+ 'type': content_type,
+ 'codec': codecs,
+ 'bandwidth': bandwidth,
+ 'width': width,
+ 'height': height,
+ 'audio_sampling_rate': audio_sampling_rate,
+ 'language': clean_lang,
+ 'init_url': init_url,
+ 'segment_urls': segment_urls,
+ 'segment_count': seg_count,
+ }
+
+ if seg_duration:
+ rep_data['segment_duration_seconds'] = seg_duration
+
+ return rep_data
+
+ def _parse_segment_base(self, rep_elem: etree._Element, base_url: str, rep_id: str,
+ bandwidth: int, codecs: str, width: int, height: int,
+ audio_sampling_rate: int, mime_type: str, lang: str) -> Optional[Dict[str, Any]]:
+ """Parse representation with SegmentBase (single file)"""
+ seg_base = self.ns.find(rep_elem, 'mpd:SegmentBase')
+ rep_base = self.ns.find(rep_elem, 'mpd:BaseURL')
+
+ if seg_base is None or rep_base is None or not (rep_base.text or "").strip():
+ return None
+
+ media_url = urljoin(base_url, rep_base.text.strip())
+ content_type = self.metadata_extractor.determine_content_type(mime_type, width, height, audio_sampling_rate, codecs)
+ clean_lang = self.metadata_extractor.clean_language(lang, content_type, rep_id, bandwidth)
+
+ return {
+ 'id': rep_id,
+ 'type': content_type,
+ 'codec': codecs,
+ 'bandwidth': bandwidth,
+ 'width': width,
+ 'height': height,
+ 'audio_sampling_rate': audio_sampling_rate,
+ 'language': clean_lang,
+ 'init_url': media_url,
+ 'segment_urls': [media_url],
+ 'segment_count': 1,
+ }
+
+class RepresentationFilter:
+ @staticmethod
+ def deduplicate_by_quality(reps: List[Dict[str, Any]], content_type: str) -> List[Dict[str, Any]]:
+ """Keep BEST quality representation per resolution/language"""
+ quality_map = {}
+
+ # Define grouping key based on content type
+ def get_grouping_key(rep):
+ if content_type == 'video':
+ return (rep['width'], rep['height'])
+ else: # audio
+ return (rep['language'], rep['audio_sampling_rate'])
+
+ # Define quality comparison
+ def get_quality_rank(rep):
+ if content_type == 'video':
+ return CodecQuality.get_video_codec_rank(rep['codec'])
+ else:
+ return CodecQuality.get_audio_codec_rank(rep['codec'])
+
+ # Group and select best quality
+ for rep in reps:
+ key = get_grouping_key(rep)
+
+ if key not in quality_map:
+ quality_map[key] = rep
+ else:
+ existing = quality_map[key]
+ existing_rank = get_quality_rank(existing)
+ new_rank = get_quality_rank(rep)
+
+ # Select BEST quality (higher codec rank or higher bandwidth)
+ if new_rank > existing_rank or (new_rank == existing_rank and rep['bandwidth'] > existing['bandwidth']):
+ quality_map[key] = rep
+
+ return list(quality_map.values())
\ No newline at end of file
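
To make the timeline math concrete, here is a standalone sketch of the expansion `SegmentTimelineParser.parse` performs; plain (non-namespaced) tags are used so the snippet runs on its own:

```python
from lxml import etree

xml = """
<SegmentTemplate media="seg_$Time$.m4s" timescale="1000" startNumber="1">
  <SegmentTimeline>
    <S t="0" d="4000" r="2"/>
    <S d="2000"/>
  </SegmentTimeline>
</SegmentTemplate>
"""
tmpl = etree.fromstring(xml)

numbers, times = [], []
current_time, current_number = 0, 1
for s in tmpl.find("SegmentTimeline").findall("S"):
    d = int(s.get("d"))
    if s.get("t") is not None:
        current_time = int(s.get("t"))
    for _ in range(int(s.get("r", 0)) + 1):  # r repeats the same duration
        numbers.append(current_number)
        times.append(current_time)
        current_number += 1
        current_time += d

print(numbers)  # [1, 2, 3, 4]
print(times)    # [0, 4000, 8000, 12000] -> three 4s segments, then one 2s segment
```
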
diff --git a/StreamingCommunity/Lib/MPD/parser.py b/StreamingCommunity/Lib/MPD/parser.py
new file mode 100644
index 00000000..9590303e
--- /dev/null
+++ b/StreamingCommunity/Lib/MPD/parser.py
@@ -0,0 +1,480 @@
+# 29.12.25
+
+import json
+import logging
+from urllib.parse import urljoin
+from typing import List, Dict, Optional, Tuple, Any
+from pathlib import Path
+from datetime import datetime
+
+
+# External Libraries
+from lxml import etree
+from curl_cffi import requests
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+
+
+# Logic
+from .constants import DRMSystem
+from .utils import (DurationUtils, NamespaceManager, BaseURLResolver, FileTypeDetector, TablePrinter, URLBuilder)
+from .handlers import (ContentProtectionHandler, RepresentationParser, RepresentationFilter, SegmentTimelineParser)
+
+
+# Variables
+console = Console()
+max_timeout = config_manager.config.get_int('REQUESTS', 'timeout')
+FILTER_CUSTOM_RESOLUTION = str(config_manager.config.get('M3U8_CONVERSION', 'force_resolution')).strip().lower()
+DOWNLOAD_SPECIFIC_AUDIO = config_manager.config.get_list('M3U8_DOWNLOAD', 'specific_list_audio')
+
+
+class MPD_Parser:
+ def __init__(self, mpd_url: str, auto_save: bool = True, save_dir: Optional[str] = None, mpd_sub_list: list = None):
+ self.mpd_url = mpd_url
+ self.auto_save = auto_save
+ self.save_dir = Path(save_dir) if save_dir else None
+ self.mpd_sub_list = mpd_sub_list or []
+
+ self.root = None
+ self.mpd_content = None
+ self.pssh = None
+ self.representations = []
+ self.mpd_duration = 0
+ self.encryption_method = None
+
+ # Initialize utility classes (will be set after parsing)
+ self.ns_manager = None
+ self.url_resolver = None
+ self.protection_handler = None
+ self.rep_parser = None
+ self.table_printer = None
+
+ def parse(self, custom_headers: Optional[Dict[str, str]] = None) -> None:
+ """Parse the MPD file and extract all representations"""
+ self._fetch_and_parse_mpd(custom_headers or {})
+
+ # Initialize utility classes
+ self.ns_manager = NamespaceManager(self.root)
+ self.url_resolver = BaseURLResolver(self.mpd_url, self.ns_manager)
+ self.protection_handler = ContentProtectionHandler(self.ns_manager)
+ self.rep_parser = RepresentationParser(self.ns_manager, self.url_resolver)
+
+ # Extract MPD duration
+ duration_str = self.root.get('mediaPresentationDuration')
+ self.mpd_duration = DurationUtils.parse_duration(duration_str)
+
+ # Extract PSSH for all DRM types
+ self.pssh_widevine = self.protection_handler.extract_pssh(self.root, DRMSystem.WIDEVINE)
+ self.pssh_playready = self.protection_handler.extract_pssh(self.root, DRMSystem.PLAYREADY)
+ self.pssh_fairplay = self.protection_handler.extract_pssh(self.root, DRMSystem.FAIRPLAY)
+ self.encryption_method = self.protection_handler.get_encryption_method(self.root)
+
+ # Get all available DRM types
+ self.available_drm_types = []
+ if self.pssh_widevine:
+ self.available_drm_types.append(DRMSystem.WIDEVINE)
+ if self.pssh_playready:
+ self.available_drm_types.append(DRMSystem.PLAYREADY)
+ if self.pssh_fairplay:
+ self.available_drm_types.append(DRMSystem.FAIRPLAY)
+
+ self.pssh = self.pssh_widevine or self.pssh_playready or self.pssh_fairplay
+
+ self._parse_representations()
+ self._deduplicate_representations()
+ self._extract_and_merge_subtitles()
+ self.table_printer = TablePrinter(self.mpd_duration, self.mpd_sub_list)
+
+ # Auto-save if enabled
+ if self.auto_save:
+ self._auto_save_files()
+
+ def _fetch_and_parse_mpd(self, custom_headers: Dict[str, str]) -> None:
+ """Fetch MPD content and parse XML"""
+ response = requests.get(self.mpd_url, headers=custom_headers, timeout=max_timeout, impersonate="chrome124")
+ response.raise_for_status()
+
+ logging.info(f"Successfully fetched MPD: {len(response.content)} bytes")
+ self.mpd_content = response.content
+ self.root = etree.fromstring(response.content)
+
+ def _parse_representations(self) -> None:
+ """Parse all representations from the MPD"""
+ base_url = self.url_resolver.get_initial_base_url(self.root)
+ rep_aggregator = {}
+
+ periods = self.ns_manager.findall(self.root, './/mpd:Period')
+
+ for period_idx, period in enumerate(periods):
+ period_base_url = self.url_resolver.resolve_base_url(period, base_url)
+
+ # Get period duration and protection info
+ period_duration_str = period.get('duration')
+ period_duration = DurationUtils.parse_duration(period_duration_str) or self.mpd_duration
+ period_protected = self.protection_handler.is_protected(period)
+ period_drm_types = self.protection_handler.get_drm_types(period)
+ period_drm_type = self.protection_handler.get_primary_drm_type(period)
+ period_encryption_method = self.protection_handler.get_encryption_method(period)
+
+ # Parse adaptation sets
+ for adapt_set in self.ns_manager.findall(period, 'mpd:AdaptationSet'):
+ representations = self.rep_parser.parse_adaptation_set(
+ adapt_set, period_base_url, period_duration
+ )
+
+ # Apply Period-level protection if needed
+ for rep in representations:
+ if not rep.get('protected') and period_protected:
+ rep['protected'] = True
+ if not rep.get('drm_types'):
+ rep['drm_types'] = period_drm_types
+ if not rep.get('drm_type'):
+ rep['drm_type'] = period_drm_type
+ if not rep.get('encryption_method') and period_encryption_method:
+ rep['encryption_method'] = period_encryption_method
+
+ # Aggregate representations with unique keys
+ self._aggregate_representations(rep_aggregator, representations)
+
+ self.representations = list(rep_aggregator.values())
+
+ def _aggregate_representations(self, aggregator: dict, representations: List[Dict]) -> None:
+ """Aggregate representations with unique keys (helper method)"""
+ for rep in representations:
+ rep_id = rep['id']
+ unique_key = f"{rep_id}_{rep.get('protected', False)}_{rep.get('width', 0)}x{rep.get('height', 0)}"
+
+ if unique_key not in aggregator:
+ aggregator[unique_key] = rep
+ else:
+ # Concatenate segment URLs for multi-period content
+ existing = aggregator[unique_key]
+ if rep['segment_urls']:
+ existing['segment_urls'].extend(rep['segment_urls'])
+ if not existing['init_url'] and rep['init_url']:
+ existing['init_url'] = rep['init_url']
+
+ def _deduplicate_representations(self) -> None:
+ """Remove duplicate representations - KEEP BEST QUALITY regardless of DRM"""
+ videos = [r for r in self.representations if r['type'] == 'video']
+ audios = [r for r in self.representations if r['type'] == 'audio']
+ others = [r for r in self.representations if r['type'] not in ['video', 'audio']]
+
+ deduplicated_videos = RepresentationFilter.deduplicate_by_quality(videos, 'video')
+ deduplicated_audios = RepresentationFilter.deduplicate_by_quality(audios, 'audio')
+
+ self.representations = deduplicated_videos + deduplicated_audios + others
+
+ def get_resolutions(self) -> List[Dict[str, Any]]:
+ """Return list of video representations"""
+ return [r for r in self.representations if r['type'] == 'video']
+
+ def get_audios(self) -> List[Dict[str, Any]]:
+ """Return list of audio representations"""
+ return [r for r in self.representations if r['type'] == 'audio']
+
+ def get_best_video(self) -> Optional[Dict[str, Any]]:
+ """Return the best video representation"""
+ videos = self.get_resolutions()
+ if not videos:
+ return None
+ return max(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+
+ def get_best_audio(self) -> Optional[Dict[str, Any]]:
+ """Return the best audio representation"""
+ audios = self.get_audios()
+ if not audios:
+ return None
+ return max(audios, key=lambda r: r['bandwidth'])
+
+ @staticmethod
+ def get_worst(representations: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
+ """Return the worst representation"""
+ videos = [r for r in representations if r['type'] == 'video']
+ audios = [r for r in representations if r['type'] == 'audio']
+
+ if videos:
+ return min(videos, key=lambda r: (r['height'], r['width'], r['bandwidth']))
+ elif audios:
+ return min(audios, key=lambda r: r['bandwidth'])
+ return None
+
+ @staticmethod
+ def get_list(representations: List[Dict[str, Any]], type_filter: Optional[str] = None) -> List[Dict[str, Any]]:
+ """Return filtered list of representations"""
+ if type_filter:
+ return [r for r in representations if r['type'] == type_filter]
+ return representations
+
+ def select_video(self, force_resolution: str = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
+ """Select video representation based on resolution preference"""
+ video_reps = self.get_resolutions()
+ available_resolutions = [f"{rep['width']}x{rep['height']}" for rep in video_reps]
+ resolution = (force_resolution or FILTER_CUSTOM_RESOLUTION or "best").lower()
+
+ # Select based on preference
+ if resolution == "best":
+ selected_video = self.get_best_video()
+ filter_custom_resolution = "Best"
+ elif resolution == "worst":
+ selected_video = self.get_worst(video_reps)
+ filter_custom_resolution = "Worst"
+ else:
+ # Try to find specific resolution
+ selected_video = self._find_specific_resolution(video_reps, resolution)
+ filter_custom_resolution = resolution if selected_video else f"{resolution} (fallback to Best)"
+ if not selected_video:
+ selected_video = self.get_best_video()
+
+ downloadable_video = f"{selected_video['width']}x{selected_video['height']}" if selected_video else "N/A"
+ return selected_video, available_resolutions, filter_custom_resolution, downloadable_video
+
+ def _find_specific_resolution(self, video_reps: List[Dict], resolution: str) -> Optional[Dict]:
+ """Find video representation matching specific resolution"""
+ for rep in video_reps:
+ rep_res = f"{rep['width']}x{rep['height']}"
+ if (resolution in rep_res.lower() or
+ resolution.replace('p', '') in str(rep['height']) or
+ rep_res.lower() == resolution):
+ return rep
+ return None
+
+ def select_audio(self, preferred_audio_langs: Optional[List[str]] = None) -> Tuple[Optional[Dict[str, Any]], List[str], str, str]:
+ """Select audio representation based on language preference"""
+ audio_reps = self.get_audios()
+ available_langs = [rep['language'] for rep in audio_reps if rep['language']]
+ preferred_langs = preferred_audio_langs or DOWNLOAD_SPECIFIC_AUDIO
+
+ # Try to find preferred language
+ selected_audio = None
+ filter_custom_audio = "First"
+
+ if preferred_langs:
+ for lang in preferred_langs:
+ for rep in audio_reps:
+ if rep['language'] and rep['language'].lower() == lang.lower():
+ selected_audio = rep
+ filter_custom_audio = lang
+ break
+ if selected_audio:
+ break
+
+ if not selected_audio:
+ selected_audio = self.get_best_audio()
+
+ downloadable_audio = selected_audio['language'] if selected_audio else "N/A"
+ return selected_audio, available_langs, filter_custom_audio, downloadable_audio
+
+ def print_tracks_table(self, selected_video: Optional[Dict[str, Any]] = None, selected_audio: Optional[Dict[str, Any]] = None, selected_subs: list = None) -> None:
+ """Print tracks table"""
+ if self.table_printer:
+ self.table_printer.print_table(self.representations, selected_video, selected_audio, selected_subs, self.available_drm_types)
+
+ def save_mpd(self, output_path: str) -> None:
+ """Save raw MPD manifest"""
+ if self.mpd_content is None:
+ raise ValueError("MPD content not available. Call parse() first.")
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'wb') as f:
+ f.write(self.mpd_content)
+
+ logging.info(f"MPD manifest saved to: {output_file}")
+
+ def save_best_video_json(self, output_path: str) -> None:
+ """Save best video representation as JSON"""
+ best_video = self.get_best_video()
+ if best_video is None:
+ raise ValueError("No video representation available.")
+
+ video_json = dict(best_video)
+ video_json["stream_type"] = "dash"
+ video_json["init_url_type"] = FileTypeDetector.infer_url_type(video_json.get("init_url"))
+ video_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(video_json.get("segment_urls"))
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(video_json, f, indent=2, ensure_ascii=False)
+
+ logging.info(f"Best video JSON saved to: {output_file}")
+
+ def save_best_audio_json(self, output_path: str) -> None:
+ """Save best audio representation as JSON"""
+ best_audio = self.get_best_audio()
+ if best_audio is None:
+ raise ValueError("No audio representation available.")
+
+ audio_json = dict(best_audio)
+ audio_json["stream_type"] = "dash"
+ audio_json["init_url_type"] = FileTypeDetector.infer_url_type(audio_json.get("init_url"))
+ audio_json["segment_url_type"] = FileTypeDetector.infer_segment_urls_type(audio_json.get("segment_urls"))
+
+ output_file = Path(output_path)
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+
+ with open(output_file, 'w', encoding='utf-8') as f:
+ json.dump(audio_json, f, indent=2, ensure_ascii=False)
+
+ logging.info(f"Best audio JSON saved to: {output_file}")
+
+ def _auto_save_files(self) -> None:
+ """Auto-save MPD files to tmp directory"""
+ if not self.save_dir:
+ return
+
+ try:
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ self.save_dir.mkdir(parents=True, exist_ok=True)
+
+ # Save MPD manifest
+ mpd_path = self.save_dir / f"manifest_{timestamp}.mpd"
+ self.save_mpd(str(mpd_path))
+
+ # Save JSON files
+ if self.get_best_video():
+ video_path = self.save_dir / f"best_video_{timestamp}.json"
+ self.save_best_video_json(str(video_path))
+
+ if self.get_best_audio():
+ audio_path = self.save_dir / f"best_audio_{timestamp}.json"
+ self.save_best_audio_json(str(audio_path))
+
+ except Exception as e:
+ console.print(f"[red]Error during auto-save: {e}")
+
+ def _extract_and_merge_subtitles(self) -> None:
+ """Extract subtitles from MPD manifest and merge with external mpd_sub_list"""
+ base_url = self.url_resolver.get_initial_base_url(self.root)
+ extracted_subs = []
+ seen_subs = set()
+
+ periods = self.ns_manager.findall(self.root, './/mpd:Period')
+
+ for period in periods:
+ period_base_url = self.url_resolver.resolve_base_url(period, base_url)
+
+ for adapt_set in self.ns_manager.findall(period, 'mpd:AdaptationSet'):
+ if adapt_set.get('contentType', '') != 'text':
+ continue
+
+ self._extract_subtitle_from_adaptation_set(
+ adapt_set, period_base_url, extracted_subs, seen_subs
+ )
+
+ # Merge with external subtitles
+ self._merge_external_subtitles(extracted_subs)
+
+ def _extract_subtitle_from_adaptation_set(self, adapt_set, period_base_url, extracted_subs, seen_subs):
+ """Extract subtitle from a single adaptation set (helper method)"""
+ language = adapt_set.get('lang', 'unknown')
+ label_elem = self.ns_manager.find(adapt_set, 'mpd:Label')
+ label = label_elem.text.strip() if label_elem is not None and label_elem.text else None
+
+ for rep_elem in self.ns_manager.findall(adapt_set, 'mpd:Representation'):
+ mime_type = rep_elem.get('mimeType', '')
+ rep_id = rep_elem.get('id', '')
+
+ # Determine format
+ sub_format = self._determine_subtitle_format(mime_type)
+
+ # Try SegmentTemplate first, then BaseURL
+ seg_template = self.ns_manager.find(rep_elem, 'mpd:SegmentTemplate') or self.ns_manager.find(adapt_set, 'mpd:SegmentTemplate')
+
+ if seg_template is not None:
+ self._process_subtitle_template(seg_template, rep_elem, rep_id, period_base_url, language, label, sub_format, extracted_subs, seen_subs)
+ else:
+ self._process_subtitle_baseurl(rep_elem, period_base_url, language, label, sub_format, rep_id, extracted_subs, seen_subs)
+
+ def _determine_subtitle_format(self, mime_type: str) -> str:
+ """Determine subtitle format from mimeType"""
+ mime_lower = mime_type.lower()
+ if 'vtt' in mime_lower:
+ return 'vtt'
+ elif 'ttml' in mime_lower or 'xml' in mime_lower:
+ return 'ttml'
+ elif 'srt' in mime_lower:
+ return 'srt'
+ return 'vtt'
+
+ def _process_subtitle_template(self, seg_template, rep_elem, rep_id, period_base_url, language, label, sub_format, extracted_subs, seen_subs):
+ """Process subtitle with SegmentTemplate"""
+ media_template = seg_template.get('media')
+ if not media_template:
+ return
+
+ number_list, time_list = SegmentTimelineParser(self.ns_manager).parse(seg_template, 1)
+ rep_base = self.url_resolver.resolve_base_url(rep_elem, period_base_url)
+
+        # Build segment URLs
+        segment_urls = []
+ if '$Time$' in media_template and time_list:
+ segment_urls = [URLBuilder.build_url(rep_base, media_template, rep_id=rep_id, time=t) for t in time_list]
+ elif '$Number' in media_template and number_list:
+ segment_urls = [URLBuilder.build_url(rep_base, media_template, rep_id=rep_id, number=n) for n in number_list]
+ else:
+ segment_urls = [URLBuilder.build_url(rep_base, media_template, rep_id=rep_id)]
+
+ if not segment_urls:
+ return
+
+ # Create subtitle entry
+ first_url = segment_urls[0]
+ unique_key = f"{language}_{label}_{first_url}"
+
+ if unique_key not in seen_subs:
+ seen_subs.add(unique_key)
+ extracted_subs.append({
+ 'language': language,
+ 'label': label or language,
+ 'format': sub_format,
+ 'url': segment_urls[0] if len(segment_urls) == 1 else None,
+ 'segment_urls': segment_urls if len(segment_urls) > 1 else None,
+ 'id': rep_id
+ })
+
+ def _process_subtitle_baseurl(self, rep_elem, period_base_url, language, label, sub_format, rep_id, extracted_subs, seen_subs):
+ """Process subtitle with BaseURL"""
+ base_url_elem = self.ns_manager.find(rep_elem, 'mpd:BaseURL')
+ if base_url_elem is None or not base_url_elem.text:
+ return
+
+ url = urljoin(period_base_url, base_url_elem.text.strip())
+ unique_key = f"{language}_{label}_{url}"
+
+ if unique_key not in seen_subs:
+ seen_subs.add(unique_key)
+ extracted_subs.append({
+ 'language': language,
+ 'label': label or language,
+ 'format': sub_format,
+ 'url': url,
+ 'id': rep_id
+ })
+
+ def _merge_external_subtitles(self, extracted_subs):
+ """Merge extracted subtitles with external list"""
+ existing_keys = set()
+
+ # Track existing subtitles
+ for sub in self.mpd_sub_list:
+ if sub.get('language'):
+ first_url = sub.get('segment_urls', [None])[0] if sub.get('segment_urls') else sub.get('url', '')
+ sub_key = f"{sub['language']}_{sub.get('label')}_{first_url}"
+ existing_keys.add(sub_key)
+
+ # Add new subtitles
+ for sub in extracted_subs:
+ first_url = sub.get('segment_urls', [None])[0] if sub.get('segment_urls') else sub.get('url', '')
+ sub_key = f"{sub['language']}_{sub.get('label')}_{first_url}"
+
+ if sub_key not in existing_keys:
+ self.mpd_sub_list.append(sub)
+ existing_keys.add(sub_key)
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/MPD/utils.py b/StreamingCommunity/Lib/MPD/utils.py
new file mode 100644
index 00000000..28c698d3
--- /dev/null
+++ b/StreamingCommunity/Lib/MPD/utils.py
@@ -0,0 +1,325 @@
+# 29.12.25
+
+from typing import Optional, Dict, List
+from urllib.parse import urljoin, urlparse
+from pathlib import Path
+
+
+# External Libraries
+from isodate import parse_duration
+from lxml import etree
+from rich.console import Console
+from rich.table import Table
+
+
+# Logic
+from .constants import DRMSystem
+
+
+# Variables
+console = Console()
+
+
+class DurationUtils:
+ @staticmethod
+ def parse_duration(duration_str: Optional[str]) -> int:
+ """Parse ISO-8601 duration to seconds using isodate library"""
+ if not duration_str:
+ return 0
+ try:
+ duration = parse_duration(duration_str)
+ return int(duration.total_seconds())
+ except Exception:
+ return 0
+
+ @staticmethod
+ def format_duration(seconds: int) -> str:
+ """Format seconds like '~48m55s' or '~1h02m03s'"""
+ if not seconds or seconds < 0:
+ return ""
+
+ h = seconds // 3600
+ m = (seconds % 3600) // 60
+ s = seconds % 60
+
+ if h > 0:
+ return f"~{h}h{m:02d}m{s:02d}s"
+ return f"~{m}m{s:02d}s"
+
+
+class URLBuilder:
+ @staticmethod
+ def build_url(base: str, template: str, rep_id: Optional[str] = None, number: Optional[int] = None, time: Optional[int] = None, bandwidth: Optional[int] = None) -> Optional[str]:
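+        """
+        Expand DASH SegmentTemplate placeholders and join the result with base.
+
+        Illustrative example (hypothetical URL):
+            build_url('https://cdn.example/v/', 'seg_$RepresentationID$_$Number%03d$.m4s',
+                      rep_id='video_1', number=7)
+            -> 'https://cdn.example/v/seg_video_1_007.m4s'
+        """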
+ if not template:
+ return None
+
+ # Substitute placeholders
+ if rep_id is not None:
+ template = template.replace('$RepresentationID$', rep_id)
+ if bandwidth is not None:
+ template = template.replace('$Bandwidth$', str(bandwidth))
+ if time is not None:
+ template = template.replace('$Time$', str(time))
+
+ # Handle $Number$ with optional formatting (e.g., $Number%05d$)
+ if '$Number' in template:
+ num_str = str(number if number is not None else 0)
+
+            # Check for width formatting like $Number%05d$ and replace the
+            # whole token, whatever width it declares
+            token_start = template.find('$Number%0')
+            if token_start != -1:
+                token_end = template.find('d$', token_start)
+                if token_end != -1:
+                    width_str = template[token_start + len('$Number%0'):token_end]
+                    try:
+                        width = int(width_str)
+                        num_str = num_str.zfill(width)
+                        template = template.replace(template[token_start:token_end + 2], num_str)
+                    except ValueError:
+                        pass
+
+ template = template.replace('$Number$', num_str)
+
+ return URLBuilder._finalize_url(base, template)
+
+ @staticmethod
+ def _finalize_url(base: str, template: str) -> str:
+ """Finalize URL construction preserving query and fragment"""
+ parts = template.split('#', 1)
+ path_and_query = parts[0]
+ fragment = ('#' + parts[1]) if len(parts) == 2 else ''
+
+ if '?' in path_and_query:
+ path, query = path_and_query.split('?', 1)
+ abs_path = urljoin(base, path)
+ return abs_path + '?' + query + fragment
+ else:
+ return urljoin(base, path_and_query) + fragment
+
+
+class FileTypeDetector:
+ @staticmethod
+ def infer_url_type(url: Optional[str]) -> Optional[str]:
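+        # e.g. 'https://cdn.example/sub/track.vtt?sig=abc' -> 'vtt' (illustrative URL)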
+ if not url:
+ return None
+ try:
+ path = urlparse(url).path
+ ext = Path(path).suffix
+ return ext.lstrip(".").lower() if ext else None
+ except Exception:
+ return None
+
+ @staticmethod
+ def infer_segment_urls_type(urls: Optional[List[str]]) -> Optional[str]:
+ if not urls:
+ return None
+
+ types = {FileTypeDetector.infer_url_type(u) for u in urls if u}
+ types.discard(None)
+
+ if not types:
+ return None
+ return next(iter(types)) if len(types) == 1 else "mixed"
+
+
+class NamespaceManager:
+ def __init__(self, root: etree._Element):
+ self.nsmap = self._extract_namespaces(root)
+
+ @staticmethod
+ def _extract_namespaces(root: etree._Element) -> Dict[str, str]:
+ """Extract namespaces from root element"""
+ nsmap = {}
+ if root.nsmap:
+ # Use 'mpd' as default prefix for the main namespace
+ nsmap['mpd'] = root.nsmap.get(None) or 'urn:mpeg:dash:schema:mpd:2011'
+ nsmap['cenc'] = 'urn:mpeg:cenc:2013'
+ nsmap['mspr'] = 'urn:microsoft:playready'
+
+ # Add other namespaces if present
+ for prefix, uri in root.nsmap.items():
+ if prefix is not None:
+ nsmap[prefix] = uri
+
+ else:
+ # Fallback to default DASH namespace
+ nsmap['mpd'] = 'urn:mpeg:dash:schema:mpd:2011'
+ nsmap['cenc'] = 'urn:mpeg:cenc:2013'
+ nsmap['mspr'] = 'urn:microsoft:playready'
+ return nsmap
+
+ def find(self, element: etree._Element, path: str) -> Optional[etree._Element]:
+ """Find element using namespace-aware XPath"""
+ return element.find(path, namespaces=self.nsmap)
+
+ def findall(self, element: etree._Element, path: str) -> List[etree._Element]:
+ """Find all elements using namespace-aware XPath"""
+ return element.findall(path, namespaces=self.nsmap)
+
+
+class BaseURLResolver:
+ def __init__(self, mpd_url: str, ns_manager: NamespaceManager):
+ self.mpd_url = mpd_url
+ self.ns = ns_manager
+
+ def get_initial_base_url(self, root: etree._Element) -> str:
+ """Get base URL from MPD root"""
+ base_url = self.mpd_url.rsplit('/', 1)[0] + '/'
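+        # e.g. 'https://cdn.example/path/manifest.mpd' -> 'https://cdn.example/path/' (illustrative)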
+
+ base_elem = self.ns.find(root, 'mpd:BaseURL')
+ if base_elem is not None and base_elem.text:
+ base_text = base_elem.text.strip()
+ base_url = base_text if base_text.startswith('http') else urljoin(base_url, base_text)
+
+ return base_url
+
+ def resolve_base_url(self, element: etree._Element, current_base: str) -> str:
+ """Resolve base URL for any element"""
+ base_elem = self.ns.find(element, 'mpd:BaseURL')
+ if base_elem is not None and base_elem.text:
+ base_text = base_elem.text.strip()
+ return base_text if base_text.startswith('http') else urljoin(current_base, base_text)
+ return current_base
+
+
+class MetadataExtractor:
+ def __init__(self, ns_manager: NamespaceManager):
+ self.ns = ns_manager
+
+ def get_audio_channels(self, rep_elem: etree._Element, adapt_elem: etree._Element) -> int:
+ """Extract audio channel count"""
+ for parent in (rep_elem, adapt_elem):
+ if parent is None:
+ continue
+
+ for acc in self.ns.findall(parent, 'mpd:AudioChannelConfiguration'):
+ val = acc.get('value')
+ if val:
+ try:
+ return int(val)
+ except ValueError:
+ pass
+ return 0
+
+ @staticmethod
+ def parse_frame_rate(frame_rate: Optional[str]) -> float:
+ """Parse frame rate (e.g., '25' or '30000/1001')"""
+ if not frame_rate:
+ return 0.0
+
+ fr = frame_rate.strip()
+ if '/' in fr:
+ try:
+ num, den = fr.split('/', 1)
+ return float(num) / float(den)
+ except Exception:
+ return 0.0
+
+ try:
+ return float(fr)
+ except Exception:
+ return 0.0
+
+ @staticmethod
+ def determine_content_type(mime_type: str, width: int, height: int, audio_sampling_rate: int, codecs: str) -> str:
+ """Determine if content is video, audio, or other"""
+ if mime_type:
+ return mime_type.split('/')[0]
+ elif width or height:
+ return 'video'
+ elif audio_sampling_rate or (codecs and 'mp4a' in codecs.lower()):
+ return 'audio'
+ return 'unknown'
+
+ @staticmethod
+ def clean_language(lang: str, content_type: str, rep_id: str, bandwidth: int) -> Optional[str]:
+ """Clean and normalize language tag"""
+ if lang and lang.lower() not in ['undefined', 'none', '']:
+ return lang
+ elif content_type == 'audio':
+ return f"aud_{rep_id}" if rep_id else f"aud_{bandwidth or 0}"
+ return None
+
+
+class TablePrinter:
+ def __init__(self, mpd_duration: int, mpd_sub_list: list = None):
+ self.mpd_duration = mpd_duration
+ self.mpd_sub_list = mpd_sub_list or []
+
+ def print_table(self, representations: List[Dict], selected_video: Optional[Dict] = None, selected_audio: Optional[Dict] = None, selected_subs: list = None, available_drm_types: list = None):
+ """Print tracks table using Rich tables"""
+ approx = DurationUtils.format_duration(self.mpd_duration)
+
+ videos = sorted([r for r in representations if r['type'] == 'video'],
+ key=lambda r: (r['height'], r['width'], r['bandwidth']), reverse=True)
+ audios = sorted([r for r in representations if r['type'] == 'audio'],
+ key=lambda r: r['bandwidth'], reverse=True)
+
+ # Create main tracks table with DRM column
+ table = Table(show_header=True, header_style="bold")
+ table.add_column("Type", style="cyan")
+ table.add_column("Sel", width=3, style="green bold")
+ table.add_column("Info", style="white")
+ table.add_column("Resolution/ID", style="yellow")
+ table.add_column("Bitrate", style="green")
+ table.add_column("Codec", style="white")
+ table.add_column("Lang/FPS", style="blue")
+ table.add_column("Channels", style="magenta")
+ table.add_column("Segments", style="white")
+ table.add_column("Duration", style="white")
+ table.add_column("DRM", style="red")
+
+ # Add video tracks
+ for vid in videos:
+ checked = 'X' if selected_video and vid['id'] == selected_video['id'] else ' '
+ drm_info = self._get_drm_display(vid)
+ drm_systems = self._get_drm_systems_display(vid)
+ fps = f"{vid['frame_rate']:.0f}" if vid.get('frame_rate') else ""
+
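+            # Note: video rows reuse the Channels column to show the representation ID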
+ table.add_row("Video", checked, drm_info, f"{vid['width']}x{vid['height']}", f"{vid['bandwidth'] // 1000} Kbps", vid.get('codec', ''), fps, vid['id'], str(vid['segment_count']), approx or "", drm_systems)
+
+ # Add audio tracks
+ for aud in audios:
+ checked = 'X' if selected_audio and aud['id'] == selected_audio['id'] else ' '
+ drm_info = self._get_drm_display(aud)
+ drm_systems = self._get_drm_systems_display(aud)
+ ch = f"{aud['channels']}CH" if aud.get('channels') else ""
+
+ table.add_row("Audio", checked, drm_info, aud['id'], f"{aud['bandwidth'] // 1000} Kbps", aud.get('codec', ''), aud.get('language', ''), ch, str(aud['segment_count']), approx or "", drm_systems)
+
+ # Add subtitle tracks from mpd_sub_list
+ if self.mpd_sub_list:
+ for sub in self.mpd_sub_list:
+ checked = 'X' if selected_subs and sub in selected_subs else ' '
+ language = sub.get('language')
+                sub_type = str(sub.get('format') or '').upper()
+ table.add_row("Subtitle", checked, f"Sub ({sub_type})", language, "", "", language, "", "", approx or "", "")
+
+ console.print(table)
+
+ def _get_drm_display(self, rep: Dict) -> str:
+ """Generate DRM display string for table (only shows CENC)"""
+ content_type = "Vid" if rep['type'] == 'video' else "Aud"
+
+ if not rep.get('protected'):
+ return content_type
+
+ return f"{content_type} *CENC"
+
+ def _get_drm_systems_display(self, rep: Dict) -> str:
+ """Generate DRM systems display for the DRM column"""
+ if not rep.get('protected'):
+ return ""
+
+ # Get all DRM types available for this stream
+ drm_types = rep.get('drm_types', [])
+ if not drm_types:
+ drm_type = rep.get('drm_type', '').lower()
+ if drm_type:
+ drm_types = [drm_type]
+
+ if not drm_types:
+ return "DRM"
+
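+        # e.g. ['widevine', 'playready'] -> 'WV+PR' (illustrative; the actual
+        # abbreviations come from DRMSystem.get_abbrev)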
+ drm_abbrevs = [DRMSystem.get_abbrev(drm) for drm in drm_types]
+ return '+'.join(drm_abbrevs)
\ No newline at end of file
diff --git a/StreamingCommunity/Lib/TMDB/tmdb.py b/StreamingCommunity/Lib/TMDB/tmdb.py
new file mode 100644
index 00000000..6ce5af3f
--- /dev/null
+++ b/StreamingCommunity/Lib/TMDB/tmdb.py
@@ -0,0 +1,100 @@
+# 24.08.24
+
+import re
+import unicodedata
+
+
+# External libraries
+import httpx
+from rich.console import Console
+
+
+# Internal utilities
+from StreamingCommunity.Util.config_json import config_manager
+
+
+# Variable
+console = Console()
+api_key = config_manager.login.get("TMDB", "api_key", default="")
+MAX_TIMEOUT = config_manager.config.get_int("REQUESTS", "timeout")
+
+
+class TheMovieDB:
+ def __init__(self, api_key):
+ """
+ Initialize the class with the API key.
+
+ Parameters:
+ - api_key (str): The API key for authenticating requests to TheMovieDB.
+ """
+ self.api_key = api_key
+ self.base_url = "https://api.themoviedb.org/3"
+
+ def _make_request(self, endpoint, params=None):
+ """
+ Make a request to the given API endpoint with optional parameters.
+
+ Parameters:
+ - endpoint (str): The API endpoint to hit.
+ - params (dict): Additional parameters for the request.
+
+ Returns:
+ dict: JSON response as a dictionary.
+ """
+ try:
+ if params is None:
+ params = {}
+
+ params['api_key'] = self.api_key
+ url = f"{self.base_url}/{endpoint}"
+ response = httpx.get(url, params=params, timeout=MAX_TIMEOUT)
+ response.raise_for_status()
+
+ return response.json()
+ except Exception as e:
+ console.log(f"[red]Error making request to {endpoint}: {e}[/red]")
+ return {}
+
+ def _slugify(self, text):
+ """Normalize and slugify a given text."""
+ text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
+ text = re.sub(r'[^\w\s-]', '', text).strip().lower()
+ text = re.sub(r'[-\s]+', '-', text)
+ return text
+
+ def get_type_and_id_by_slug_year(self, slug: str, year: int):
+ """
+ Get the type (movie or tv) and ID from TMDB based on slug and year.
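+
+        Illustrative example (ids depend on live TMDB data):
+            get_type_and_id_by_slug_year('breaking-bad', 2008) -> {'type': 'tv', 'id': 1396}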
+ """
+ movie_results = self._make_request("search/movie", {"query": slug.replace('-', ' ')}).get("results", [])
+ for movie in movie_results:
+ title = movie.get('title', '')
+ release_date = movie.get('release_date', '')
+
+            if release_date:
+                try:
+                    movie_year = int(release_date[:4])
+                except ValueError:
+                    continue
+            else:
+                continue
+
+ movie_slug = self._slugify(title)
+ if movie_slug == slug and movie_year == year:
+ return {'type': "movie", 'id': movie['id']}
+
+ tv_results = self._make_request("search/tv", {"query": slug.replace('-', ' ')}).get("results", [])
+ for show in tv_results:
+ name = show.get('name', '')
+ first_air_date = show.get('first_air_date', '')
+
+            if first_air_date:
+                try:
+                    show_year = int(first_air_date[:4])
+                except ValueError:
+                    continue
+            else:
+                continue
+
+ show_slug = self._slugify(name)
+ if show_slug == slug and show_year == year:
+ return {'type': "tv", 'id': show['id']}
+
+ return None
+
+
+tmdb = TheMovieDB(api_key)
\ No newline at end of file
diff --git a/StreamingCommunity/Upload/update.py b/StreamingCommunity/Upload/update.py
index 7b36df93..03e7f625 100644
--- a/StreamingCommunity/Upload/update.py
+++ b/StreamingCommunity/Upload/update.py
@@ -31,7 +31,7 @@ async def fetch_github_data(client, url):
response = await client.get(
url=url,
headers={'user-agent': get_userAgent()},
- timeout=config_manager.get_int("REQUESTS", "timeout"),
+ timeout=config_manager.config.get_int("REQUESTS", "timeout"),
follow_redirects=True
)
return response.json()
diff --git a/StreamingCommunity/Util/config_json.py b/StreamingCommunity/Util/config_json.py
index e605dc5d..b653bc08 100644
--- a/StreamingCommunity/Util/config_json.py
+++ b/StreamingCommunity/Util/config_json.py
@@ -15,73 +15,253 @@
# Variable
console = Console()
CONFIG_FILENAME = 'config.json'
+LOGIN_FILENAME = 'login.json'
DOMAINS_FILENAME = 'domains.json'
GITHUB_DOMAINS_PATH = '.github/script/domains.json'
CONFIG_DOWNLOAD_URL = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/config.json'
+CONFIG_LOGIN_DOWNLOAD_URL = 'https://raw.githubusercontent.com/Arrowar/StreamingCommunity/refs/heads/main/login.json'
DOMAINS_DOWNLOAD_URL = 'https://raw.githubusercontent.com/Arrowar/SC_Domains/refs/heads/main/domains.json'
+
+
+class ConfigAccessor:
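+    """
+    Thin reader over a single configuration dictionary with shared caching.
+
+    Illustrative usage (keys are examples, not fixed names):
+        accessor = ConfigAccessor({"REQUESTS": {"timeout": "20"}}, {}, "config")
+        accessor.get_int("REQUESTS", "timeout")   # -> 20, cached as 'config.REQUESTS.timeout'
+    """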
+ def __init__(self, config_dict: Dict, cache: Dict, cache_prefix: str, cache_enabled: bool = True):
+ self._config_dict = config_dict
+ self._cache = cache
+ self._cache_prefix = cache_prefix
+ self._cache_enabled = cache_enabled
+
+ def get(self, section: str, key: str, data_type: type = str, default: Any = None) -> Any:
+ """
+ Read a value from the configuration with caching.
+
+ Args:
+ section (str): Section in the configuration
+ key (str): Key to read
+ data_type (type, optional): Expected data type. Default: str
+ default (Any, optional): Default value if key is not found. Default: None
+
+ Returns:
+ Any: The key value converted to the specified data type, or default if not found
+ """
+ cache_key = f"{self._cache_prefix}.{section}.{key}"
+
+ # Check if the value is in the cache
+ if self._cache_enabled and cache_key in self._cache:
+ return self._cache[cache_key]
+
+ # Log only if not in cache
+ logging.info(f"Reading key: {cache_key}")
+
+ # Check if the section and key exist
+ if section not in self._config_dict:
+ if default is not None:
+ logging.info(f"Section '{section}' not found. Returning default value.")
+ return default
+ raise ValueError(f"Section '{section}' not found in {self._cache_prefix} configuration")
+
+ if key not in self._config_dict[section]:
+ if default is not None:
+ logging.info(f"Key '{key}' not found in section '{section}'. Returning default value.")
+ return default
+ raise ValueError(f"Key '{key}' not found in section '{section}' of {self._cache_prefix} configuration")
+
+ # Get and convert the value
+ value = self._config_dict[section][key]
+ converted_value = self._convert_to_data_type(value, data_type)
+
+ # Save in cache
+ if self._cache_enabled:
+ self._cache[cache_key] = converted_value
+
+ return converted_value
+
+ def _convert_to_data_type(self, value: Any, data_type: type) -> Any:
+ """
+ Convert the value to the specified data type.
+
+ Args:
+ value (Any): Value to convert
+ data_type (type): Target data type
+
+ Returns:
+ Any: Converted value
+ """
+ try:
+ if data_type is int:
+ return int(value)
+
+ elif data_type is float:
+ return float(value)
+
+ elif data_type is bool:
+ if isinstance(value, str):
+ return value.lower() in ("yes", "true", "t", "1")
+ return bool(value)
+
+ elif data_type is list:
+ if isinstance(value, list):
+ return value
+ if isinstance(value, str):
+ return [item.strip() for item in value.split(',')]
+ return [value]
+
+ elif data_type is dict:
+ if isinstance(value, dict):
+ return value
+
+ raise ValueError(f"Cannot convert {type(value).__name__} to dict")
+ else:
+ return value
+
+ except Exception as e:
+ error_msg = f"Error converting: {data_type.__name__} to value '{value}' with error: {e}"
+ console.print(f"[red]{error_msg}")
+ raise ValueError(f"Error converting: {data_type.__name__} to value '{value}' with error: {e}")
+
+ def get_int(self, section: str, key: str, default: int = None) -> int:
+ """Read an integer from the configuration."""
+ return self.get(section, key, int, default=default)
+
+ def get_float(self, section: str, key: str, default: float = None) -> float:
+ """Read a float from the configuration."""
+ return self.get(section, key, float, default=default)
+
+ def get_bool(self, section: str, key: str, default: bool = None) -> bool:
+ """Read a boolean from the configuration."""
+ return self.get(section, key, bool, default=default)
+
+ def get_list(self, section: str, key: str, default: List[str] = None) -> List[str]:
+ """Read a list from the configuration."""
+ return self.get(section, key, list, default=default)
+
+ def get_dict(self, section: str, key: str, default: dict = None) -> dict:
+ """Read a dictionary from the configuration."""
+ return self.get(section, key, dict, default=default)
+
+ def set_key(self, section: str, key: str, value: Any) -> None:
+ """
+ Set a key in the configuration and update cache.
+
+ Args:
+ section (str): Section in the configuration
+ key (str): Key to set
+ value (Any): Value to associate with the key
+ """
+ try:
+ if section not in self._config_dict:
+ self._config_dict[section] = {}
+
+ self._config_dict[section][key] = value
+
+ # Update the cache
+ cache_key = f"{self._cache_prefix}.{section}.{key}"
+ self._cache[cache_key] = value
+
+ logging.info(f"Key '{key}' set in section '{section}' of {self._cache_prefix} configuration")
+
+ except Exception as e:
+ error_msg = f"Error setting key '{key}' in section '{section}' of {self._cache_prefix} configuration: {e}"
+ console.print(f"[red]{error_msg}")
+
+
class ConfigManager:
def __init__(self) -> None:
"""Initialize the ConfigManager with caching."""
- file_name = CONFIG_FILENAME
-
+
self.base_path = None
if getattr(sys, 'frozen', False):
- self.base_path = os.path.dirname(sys.executable) # PyInstaller
+ self.base_path = os.path.dirname(sys.executable) # PyInstaller
else:
self.base_path = os.getcwd()
# Initialize file paths using static variables
- self.file_path = os.path.join(self.base_path, file_name)
+ self.config_file_path = os.path.join(self.base_path, CONFIG_FILENAME)
+ self.login_file_path = os.path.join(self.base_path, LOGIN_FILENAME)
self.domains_path = os.path.join(self.base_path, DOMAINS_FILENAME)
self.github_domains_path = os.path.join(self.base_path, GITHUB_DOMAINS_PATH)
- # Display the actual file path for debugging
- console.print(f"[cyan]Config path: [green]{self.file_path}")
+ # Display the actual file paths for debugging
+ console.print(f"[cyan]Config path: [green]{self.config_file_path}")
+ console.print(f"[cyan]Login path: [green]{self.login_file_path}")
# Initialize data structures
- self.config = {}
- self.configSite = {}
+ self._config_data = {}
+ self._login_data = {}
+ self._domains_data = {}
# Enhanced caching system
self.cache: Dict[str, Any] = {}
self._cache_enabled = True
+ # Create accessors
+ self.config = ConfigAccessor(self._config_data, self.cache, "config", self._cache_enabled)
+ self.login = ConfigAccessor(self._login_data, self.cache, "login", self._cache_enabled)
+ self.domain = ConfigAccessor(self._domains_data, self.cache, "domain", self._cache_enabled)
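+        # Callers read through the accessors, e.g.:
+        #   config_manager.config.get_int("REQUESTS", "timeout")
+        #   config_manager.domain.get("streamingcommunity", "full_url")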
+
# Load the configuration
self.fetch_domain_online = True
- self.load_config()
+ self.load_all_configs()
- def load_config(self) -> None:
- """Load the configuration and initialize all settings."""
- if not os.path.exists(self.file_path):
- console.print(f"[red]WARNING: Configuration file not found: {self.file_path}")
+ def load_all_configs(self) -> None:
+ """Load all configuration files."""
+ self._load_config()
+ self._load_login()
+ self._update_settings_from_config()
+ self._load_site_data()
+
+ def _load_config(self) -> None:
+ """Load the main configuration file."""
+ if not os.path.exists(self.config_file_path):
+ console.print(f"[red]WARNING: Configuration file not found: {self.config_file_path}")
console.print("[yellow]Downloading from repository...")
- self._download_reference_config()
+ self._download_file(CONFIG_DOWNLOAD_URL, self.config_file_path, "config.json")
- # Load the configuration file
try:
- with open(self.file_path, 'r') as f:
- self.config = json.load(f)
+ with open(self.config_file_path, 'r') as f:
+ self._config_data.clear()
+ self._config_data.update(json.load(f))
# Pre-cache commonly used configuration values
- self._precache_common_configs()
-
- # Update settings from the configuration
- self._update_settings_from_config()
-
- # Load site data based on fetch_domain_online setting
- self._load_site_data()
+ self._precache_config_values()
except json.JSONDecodeError as e:
- console.print(f"[red]Error parsing JSON: {str(e)}")
+ console.print(f"[red]Error parsing config JSON: {str(e)}")
self._handle_config_error()
except Exception as e:
console.print(f"[red]Error loading configuration: {str(e)}")
self._handle_config_error()
- def _precache_common_configs(self) -> None:
+ def _load_login(self) -> None:
+ """Load the login configuration file."""
+ if not os.path.exists(self.login_file_path):
+ console.print(f"[yellow]WARNING: Login file not found: {self.login_file_path}")
+ console.print("[yellow]Downloading from repository...")
+ try:
+ self._download_file(CONFIG_LOGIN_DOWNLOAD_URL, self.login_file_path, "login.json")
+ except Exception as e:
+ console.print(f"[yellow]Could not download login.json: {str(e)}")
+ console.print("[yellow]Creating empty login configuration...")
+ self._login_data.clear()
+ return
+
+ try:
+ with open(self.login_file_path, 'r') as f:
+ self._login_data.clear()
+ self._login_data.update(json.load(f))
+
+ console.print("[green]Login configuration loaded successfully")
+
+ except json.JSONDecodeError as e:
+ console.print(f"[red]Error parsing login JSON: {str(e)}")
+ self._login_data.clear()
+
+ except Exception as e:
+ console.print(f"[red]Error loading login configuration: {str(e)}")
+ self._login_data.clear()
+
+ def _precache_config_values(self) -> None:
+ """Pre-cache commonly used configuration values."""
common_keys = [
('DEFAULT', 'debug', bool),
('M3U8_CONVERSION', 'use_gpu', bool),
@@ -104,9 +284,9 @@ def _precache_common_configs(self) -> None:
try:
cache_key = f"config.{section}.{key}"
- if section in self.config and key in self.config[section]:
- value = self.config[section][key]
- converted_value = self._convert_to_data_type(value, data_type)
+ if section in self._config_data and key in self._config_data[section]:
+ value = self._config_data[section][key]
+ converted_value = self.config._convert_to_data_type(value, data_type)
self.cache[cache_key] = converted_value
cached_count += 1
@@ -116,15 +296,16 @@ def _precache_common_configs(self) -> None:
def _handle_config_error(self) -> None:
"""Handle configuration errors by downloading the reference version."""
console.print("[yellow]Attempting to retrieve reference configuration...")
- self._download_reference_config()
+ self._download_file(CONFIG_DOWNLOAD_URL, self.config_file_path, "config.json")
# Reload the configuration
try:
- with open(self.file_path, 'r') as f:
- self.config = json.load(f)
+ with open(self.config_file_path, 'r') as f:
+ self._config_data.clear()
+ self._config_data.update(json.load(f))
# Pre-cache after reload
- self._precache_common_configs()
+ self._precache_config_values()
self._update_settings_from_config()
console.print("[green]Reference configuration loaded successfully")
@@ -135,28 +316,28 @@ def _handle_config_error(self) -> None:
def _update_settings_from_config(self) -> None:
"""Update internal settings from loaded configurations."""
- default_section = self.config.get('DEFAULT', {})
+ default_section = self._config_data.get('DEFAULT', {})
# Get fetch_domain_online setting (True by default)
self.fetch_domain_online = default_section.get('fetch_domain_online', True)
-
- def _download_reference_config(self) -> None:
- """Download the reference configuration from GitHub."""
+
+ def _download_file(self, url: str, file_path: str, file_name: str) -> None:
+ """Download a file from a URL."""
try:
- response = requests.get(CONFIG_DOWNLOAD_URL, timeout=8, headers={'User-Agent': "Mozilla/5.0"})
+ response = requests.get(url, timeout=8, headers={'User-Agent': "Mozilla/5.0"})
if response.status_code == 200:
- with open(self.file_path, 'wb') as f:
+ with open(file_path, 'wb') as f:
f.write(response.content)
file_size = len(response.content) / 1024
- console.print(f"[green]Download complete: {os.path.basename(self.file_path)} ({file_size:.2f} KB)")
+ console.print(f"[green]Download complete: {file_name} ({file_size:.2f} KB)")
else:
error_msg = f"HTTP Error: {response.status_code}, Response: {response.text[:100]}"
console.print(f"[red]Download failed: {error_msg}")
raise Exception(error_msg)
except Exception as e:
- console.print(f"[red]Download error: {str(e)} for url: {CONFIG_DOWNLOAD_URL}")
+ console.print(f"[red]Download error: {str(e)} for url: {url}")
raise
def _load_site_data(self) -> None:
@@ -175,7 +356,8 @@ def _load_site_data_online(self) -> None:
response = requests.get(DOMAINS_DOWNLOAD_URL, timeout=8, headers=headers)
if response.ok:
- self.configSite = response.json()
+ self._domains_data.clear()
+ self._domains_data.update(response.json())
# Determine which file to save to
self._save_domains_to_appropriate_location()
@@ -205,17 +387,14 @@ def _save_domains_to_appropriate_location(self) -> None:
try:
if not os.path.exists(target_path):
with open(target_path, 'w', encoding='utf-8') as f:
- json.dump(self.configSite, f, indent=4, ensure_ascii=False)
- else:
- console.print(f"[yellow]Local domains.json already exists, not overwriting: {target_path}")
- console.print("[yellow]Tip: Delete the file if you want to recreate it from GitHub")
+ json.dump(self._domains_data, f, indent=4, ensure_ascii=False)
except Exception as save_error:
console.print(f"[yellow]Warning: Could not save domains to file: {str(save_error)}")
if target_path != self.domains_path and not os.path.exists(self.domains_path):
try:
with open(self.domains_path, 'w', encoding='utf-8') as f:
- json.dump(self.configSite, f, indent=4, ensure_ascii=False)
+ json.dump(self._domains_data, f, indent=4, ensure_ascii=False)
console.print(f"[green]Domains saved to fallback location: {self.domains_path}")
except Exception as fallback_error:
console.print(f"[red]Failed to save to fallback location: {str(fallback_error)}")
@@ -226,25 +405,27 @@ def _load_site_data_from_file(self) -> None:
if os.path.exists(self.github_domains_path):
console.print(f"[cyan]Domain path: [green]{self.github_domains_path}")
with open(self.github_domains_path, 'r', encoding='utf-8') as f:
- self.configSite = json.load(f)
+ self._domains_data.clear()
+ self._domains_data.update(json.load(f))
- site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+ site_count = len(self._domains_data) if isinstance(self._domains_data, dict) else 0
elif os.path.exists(self.domains_path):
console.print(f"[cyan]Reading domains from root: {self.domains_path}")
with open(self.domains_path, 'r', encoding='utf-8') as f:
- self.configSite = json.load(f)
+ self._domains_data.clear()
+ self._domains_data.update(json.load(f))
- site_count = len(self.configSite) if isinstance(self.configSite, dict) else 0
+ site_count = len(self._domains_data) if isinstance(self._domains_data, dict) else 0
console.print(f"[green]Domains loaded from root file: {site_count} streaming services")
else:
- console.print(f"[cyan]Domain path: [red]Disabled")
- self.configSite = {}
+ console.print("[cyan]Domain path: [red]Disabled")
+ self._domains_data.clear()
except Exception as e:
console.print(f"[red]Local domain file error: {str(e)}")
- self.configSite = {}
+ self._domains_data.clear()
def _handle_site_data_fallback(self) -> None:
"""Handle site data fallback in case of error."""
@@ -252,7 +433,8 @@ def _handle_site_data_fallback(self) -> None:
console.print("[yellow]Attempting fallback to GitHub structure domains.json file...")
try:
with open(self.github_domains_path, 'r', encoding='utf-8') as f:
- self.configSite = json.load(f)
+ self._domains_data.clear()
+ self._domains_data.update(json.load(f))
console.print("[green]Fallback to GitHub structure successful")
return
except Exception as fallback_error:
@@ -262,173 +444,54 @@ def _handle_site_data_fallback(self) -> None:
console.print("[yellow]Attempting fallback to root domains.json file...")
try:
with open(self.domains_path, 'r', encoding='utf-8') as f:
- self.configSite = json.load(f)
+ self._domains_data.clear()
+ self._domains_data.update(json.load(f))
console.print("[green]Fallback to root domains successful")
return
except Exception as fallback_error:
console.print(f"[red]Root domains fallback failed: {str(fallback_error)}")
console.print("[red]No local domains.json file available for fallback")
- self.configSite = {}
+ self._domains_data.clear()
- def get(self, section: str, key: str, data_type: type = str, from_site: bool = False, default: Any = None) -> Any:
- """
- Read a value from the configuration with caching.
-
- Args:
- section (str): Section in the configuration
- key (str): Key to read
- data_type (type, optional): Expected data type. Default: str
- from_site (bool, optional): Whether to read from the site configuration. Default: False
- default (Any, optional): Default value if key is not found. Default: None
-
- Returns:
- Any: The key value converted to the specified data type, or default if not found
- """
- cache_key = f"{'site' if from_site else 'config'}.{section}.{key}"
-
- # Check if the value is in the cache
- if self._cache_enabled and cache_key in self.cache:
- return self.cache[cache_key]
-
- # Log only if not in cache
- logging.info(f"Reading key: {cache_key}")
-
- # Choose the appropriate source
- config_source = self.configSite if from_site else self.config
-
- # Check if the section and key exist
- if section not in config_source:
- if default is not None:
- logging.info(f"Section '{section}' not found. Returning default value.")
- return default
- raise ValueError(f"Section '{section}' not found in {'site' if from_site else 'main'} configuration")
-
- if key not in config_source[section]:
- if default is not None:
- logging.info(f"Key '{key}' not found in section '{section}'. Returning default value.")
- return default
- raise ValueError(f"Key '{key}' not found in section '{section}' of {'site' if from_site else 'main'} configuration")
-
- # Get and convert the value
- value = config_source[section][key]
- converted_value = self._convert_to_data_type(value, data_type)
-
- # Save in cache
- if self._cache_enabled:
- self.cache[cache_key] = converted_value
-
- return converted_value
-
- def _convert_to_data_type(self, value: Any, data_type: type) -> Any:
- """
- Convert the value to the specified data type.
-
- Args:
- value (Any): Value to convert
- data_type (type): Target data type
-
- Returns:
- Any: Converted value
- """
+ def save_config(self) -> None:
+ """Save the main configuration to file."""
try:
- if data_type is int:
- return int(value)
-
- elif data_type is float:
- return float(value)
-
- elif data_type is bool:
- if isinstance(value, str):
- return value.lower() in ("yes", "true", "t", "1")
- return bool(value)
-
- elif data_type is list:
- if isinstance(value, list):
- return value
- if isinstance(value, str):
- return [item.strip() for item in value.split(',')]
- return [value]
+ with open(self.config_file_path, 'w') as f:
+ json.dump(self._config_data, f, indent=4)
+
+ logging.info(f"Configuration saved to: {self.config_file_path}")
- elif data_type is dict:
- if isinstance(value, dict):
- return value
-
- raise ValueError(f"Cannot convert {type(value).__name__} to dict")
- else:
- return value
-
except Exception as e:
- logging.error(f"Error converting: {data_type.__name__} to value '{value}' with error: {e}")
- raise ValueError(f"Error converting: {data_type.__name__} to value '{value}' with error: {e}")
+ console.print(f"[red]Error saving configuration: {e}")
- # Getters for main configuration
- def get_int(self, section: str, key: str, default: int = None) -> int:
- """Read an integer from the main configuration."""
- return self.get(section, key, int, default=default)
-
- def get_float(self, section: str, key: str, default: float = None) -> float:
- """Read a float from the main configuration."""
- return self.get(section, key, float, default=default)
-
- def get_bool(self, section: str, key: str, default: bool = None) -> bool:
- """Read a boolean from the main configuration."""
- return self.get(section, key, bool, default=default)
-
- def get_list(self, section: str, key: str, default: List[str] = None) -> List[str]:
- """Read a list from the main configuration."""
- return self.get(section, key, list, default=default)
+ def save_login(self) -> None:
+ """Save the login configuration to file."""
+ try:
+ with open(self.login_file_path, 'w') as f:
+ json.dump(self._login_data, f, indent=4)
- def get_dict(self, section: str, key: str, default: dict = None) -> dict:
- """Read a dictionary from the main configuration."""
- return self.get(section, key, dict, default=default)
+ logging.info(f"Login configuration saved to: {self.login_file_path}")
- # Getters for site configuration
- def get_site(self, section: str, key: str) -> Any:
- """Read a value from the site configuration."""
- return self.get(section, key, str, True)
-
- def set_key(self, section: str, key: str, value: Any, to_site: bool = False) -> None:
- """
- Set a key in the configuration and update cache.
-
- Args:
- section (str): Section in the configuration
- key (str): Key to set
- value (Any): Value to associate with the key
- to_site (bool, optional): Whether to set in the site configuration. Default: False
- """
- try:
- config_target = self.configSite if to_site else self.config
-
- if section not in config_target:
- config_target[section] = {}
-
- config_target[section][key] = value
-
- # Update the cache
- cache_key = f"{'site' if to_site else 'config'}.{section}.{key}"
- self.cache[cache_key] = value
-
- logging.info(f"Key '{key}' set in section '{section}' of {'site' if to_site else 'main'} configuration")
-
except Exception as e:
- error_msg = f"Error setting key '{key}' in section '{section}' of {'site' if to_site else 'main'} configuration: {e}"
- logging.error(error_msg)
- console.print(f"[red]{error_msg}")
+ console.print(f"[red]Error saving login configuration: {e}")
- def save_config(self) -> None:
- """Save the main configuration to file."""
+ def save_domains(self) -> None:
+ """Save the domains configuration to file."""
try:
- with open(self.file_path, 'w') as f:
- json.dump(self.config, f, indent=4)
+ script_dir = os.path.join(self.base_path, ".github", "script")
+ if os.path.isdir(script_dir):
+ target_path = os.path.join(script_dir, DOMAINS_FILENAME)
+ else:
+ target_path = self.domains_path
+
+ with open(target_path, 'w', encoding='utf-8') as f:
+ json.dump(self._domains_data, f, indent=4, ensure_ascii=False)
- logging.info(f"Configuration saved to: {self.file_path}")
+ logging.info(f"Domains configuration saved to: {target_path}")
except Exception as e:
- error_msg = f"Error saving configuration: {e}"
- console.print(f"[red]{error_msg}")
- logging.error(error_msg)
+ console.print(f"[red]Error saving domains configuration: {e}")
# Initialize the ConfigManager when the module is imported
diff --git a/StreamingCommunity/Util/http_client.py b/StreamingCommunity/Util/http_client.py
index bd840760..3991b250 100644
--- a/StreamingCommunity/Util/http_client.py
+++ b/StreamingCommunity/Util/http_client.py
@@ -23,21 +23,21 @@
# Defaults from config
def _get_timeout() -> int:
try:
- return int(config_manager.get_int("REQUESTS", "timeout"))
+ return int(config_manager.config.get_int("REQUESTS", "timeout"))
except Exception:
return 20
def _get_max_retry() -> int:
try:
- return int(config_manager.get_int("REQUESTS", "max_retry"))
+ return int(config_manager.config.get_int("REQUESTS", "max_retry"))
except Exception:
return 3
def _get_verify() -> bool:
try:
- return bool(config_manager.get_bool("REQUESTS", "verify"))
+ return bool(config_manager.config.get_bool("REQUESTS", "verify"))
except Exception:
return True
@@ -45,7 +45,7 @@ def _get_verify() -> bool:
def _get_proxies() -> Optional[Dict[str, str]]:
"""Return proxies dict if present in config and non-empty, else None."""
try:
- proxies = config_manager.get_dict("REQUESTS", "proxy")
+ proxies = config_manager.config.get_dict("REQUESTS", "proxy")
if not isinstance(proxies, dict):
return None
# Normalize empty strings to None (httpx ignores None)
diff --git a/StreamingCommunity/Util/installer/__init__.py b/StreamingCommunity/Util/installer/__init__.py
index 94907c4e..d5a36b09 100644
--- a/StreamingCommunity/Util/installer/__init__.py
+++ b/StreamingCommunity/Util/installer/__init__.py
@@ -2,12 +2,13 @@
from .ffmpeg_install import check_ffmpeg
from .bento4_install import check_mp4decrypt
-from .device_install import check_device_wvd_path
+from .device_install import check_device_wvd_path, check_device_prd_path
from .megatool_installer import check_megatools
__all__ = [
"check_ffmpeg",
"check_mp4decrypt",
"check_device_wvd_path",
+ "check_device_prd_path",
"check_megatools"
]
\ No newline at end of file
diff --git a/StreamingCommunity/Util/installer/bento4_install.py b/StreamingCommunity/Util/installer/bento4_install.py
index 9f66e9b8..7b773508 100644
--- a/StreamingCommunity/Util/installer/bento4_install.py
+++ b/StreamingCommunity/Util/installer/bento4_install.py
@@ -2,180 +2,50 @@
import os
import shutil
-import zipfile
-import logging
from typing import Optional
# External library
-import requests
from rich.console import Console
-# Internal utilities
+# Logic
from .binary_paths import binary_paths
# Variable
console = Console()
-BENTO4_CONFIGURATION = {
- 'windows': {
- 'download_url': 'https://www.bok.net/Bento4/binaries/Bento4-SDK-{version}.{platform}.zip',
- 'versions': {
- 'x64': 'x86_64-microsoft-win32',
- 'x86': 'x86-microsoft-win32-vs2010',
- },
- 'executables': ['mp4decrypt.exe']
- },
- 'darwin': {
- 'download_url': 'https://www.bok.net/Bento4/binaries/Bento4-SDK-{version}.{platform}.zip',
- 'versions': {
- 'x64': 'universal-apple-macosx',
- 'arm64': 'universal-apple-macosx'
- },
- 'executables': ['mp4decrypt']
- },
- 'linux': {
- 'download_url': 'https://www.bok.net/Bento4/binaries/Bento4-SDK-{version}.{platform}.zip',
- 'versions': {
- 'x64': 'x86_64-unknown-linux',
- 'x86': 'x86-unknown-linux',
- 'arm64': 'x86_64-unknown-linux'
- },
- 'executables': ['mp4decrypt']
- }
-}
-
-
-class Bento4Downloader:
- def __init__(self):
- self.os_name = binary_paths.system
- self.arch = binary_paths.arch
- self.home_dir = binary_paths.home_dir
- self.base_dir = binary_paths.ensure_binary_directory()
- self.version = "1-6-0-641" # Latest stable version as of Nov 2023
-
- def _download_file(self, url: str, destination: str) -> bool:
- try:
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(destination, 'wb') as file:
- for chunk in response.iter_content(chunk_size=8192):
- file.write(chunk)
-
- return True
-
- except Exception as e:
- logging.error(f"Download error: {e}")
- return False
-
- def _extract_executables(self, zip_path: str) -> list:
- try:
- extracted_files = []
- config = BENTO4_CONFIGURATION[self.os_name]
- executables = config['executables']
-
- with zipfile.ZipFile(zip_path, 'r') as zip_ref:
- for zip_info in zip_ref.filelist:
- for executable in executables:
- if zip_info.filename.endswith(executable):
-
- # Extract to base directory
- zip_ref.extract(zip_info, self.base_dir)
- src_path = os.path.join(self.base_dir, zip_info.filename)
- dst_path = os.path.join(self.base_dir, executable)
-
- # Move to final location
- shutil.move(src_path, dst_path)
- os.chmod(dst_path, 0o755)
- extracted_files.append(dst_path)
-
- # Clean up intermediate directories
- parts = zip_info.filename.split('/')
- if len(parts) > 1:
- shutil.rmtree(os.path.join(self.base_dir, parts[0]))
-
- return extracted_files
-
- except Exception as e:
- logging.error(f"Extraction error: {e}")
- return []
-
- def download(self) -> list:
- try:
- config = BENTO4_CONFIGURATION[self.os_name]
- platform_str = config['versions'].get(self.arch)
-
- if not platform_str:
- raise ValueError(f"Unsupported architecture: {self.arch}")
-
- download_url = config['download_url'].format(
- version=self.version,
- platform=platform_str
- )
-
- zip_path = os.path.join(self.base_dir, "bento4.zip")
- console.print(f"[blue]Downloading Bento4 from {download_url}")
-
- if self._download_file(download_url, zip_path):
- extracted_files = self._extract_executables(zip_path)
- os.remove(zip_path)
-
- if extracted_files:
- return extracted_files
-
- raise Exception("Failed to install Bento4")
-
- except Exception as e:
- logging.error(f"Error downloading Bento4: {e}")
- console.print(f"[red]Error downloading Bento4: {str(e)}")
- return []
-
def check_mp4decrypt() -> Optional[str]:
"""
- Check for mp4decrypt in the system and download if not found.
- Order: binary directory -> system PATH -> download
+ Check for mp4decrypt and download if not found.
+ Order: system PATH -> binary directory -> download from GitHub
Returns:
- Optional[str]: Path to mp4decrypt executable or None if not found/downloaded
+ Optional[str]: Path to mp4decrypt executable or None if not found
"""
- try:
- system_platform = binary_paths.system
- mp4decrypt_name = "mp4decrypt.exe" if system_platform == "windows" else "mp4decrypt"
-
- # STEP 1: Check binary directory FIRST (fastest - single file check)
- binary_dir = binary_paths.get_binary_directory()
- local_path = os.path.join(binary_dir, mp4decrypt_name)
-
- if os.path.isfile(local_path):
-
- # Only check execution permissions on Unix systems
- if system_platform != 'windows' and not os.access(local_path, os.X_OK):
- try:
- os.chmod(local_path, 0o755)
- except Exception:
- pass # Ignore chmod errors
-
- logging.info("mp4decrypt found in binary directory")
- return local_path
-
- # STEP 2: Check system PATH (slower - searches multiple directories)
- mp4decrypt_path = shutil.which(mp4decrypt_name)
-
- if mp4decrypt_path:
- logging.info("mp4decrypt found in system PATH")
- return mp4decrypt_path
-
- # STEP 3: Download if not found anywhere
- console.print("[cyan]mp4decrypt not found. Downloading...")
- downloader = Bento4Downloader()
- extracted_files = downloader.download()
-
- return extracted_files[0] if extracted_files else None
-
- except Exception as e:
- logging.error(f"Error checking or downloading mp4decrypt: {e}")
- return None
\ No newline at end of file
+ system_platform = binary_paths.system
+ mp4decrypt_name = "mp4decrypt.exe" if system_platform == "windows" else "mp4decrypt"
+
+ # STEP 1: Check system PATH
+ mp4decrypt_path = shutil.which(mp4decrypt_name)
+
+ if mp4decrypt_path:
+ return mp4decrypt_path
+
+ # STEP 2: Check binary directory
+ mp4decrypt_local = binary_paths.get_binary_path("bento4", mp4decrypt_name)
+
+ if mp4decrypt_local and os.path.isfile(mp4decrypt_local):
+ return mp4decrypt_local
+
+ # STEP 3: Download from GitHub repository
+ console.print("[red]mp4decrypt not found. Downloading ...")
+ mp4decrypt_downloaded = binary_paths.download_binary("bento4", mp4decrypt_name)
+
+ if mp4decrypt_downloaded:
+ return mp4decrypt_downloaded
+
+ console.print("Failed to download mp4decrypt", style="red")
+ return None
\ No newline at end of file
diff --git a/StreamingCommunity/Util/installer/binary_paths.py b/StreamingCommunity/Util/installer/binary_paths.py
index 7557888b..9a50ea8e 100644
--- a/StreamingCommunity/Util/installer/binary_paths.py
+++ b/StreamingCommunity/Util/installer/binary_paths.py
@@ -2,6 +2,11 @@
import os
import platform
+from typing import Optional
+
+
+# External library
+import requests
class BinaryPaths:
@@ -9,75 +14,117 @@ def __init__(self):
self.system = self._detect_system()
self.arch = self._detect_arch()
self.home_dir = os.path.expanduser('~')
+ self.github_repo = "https://raw.githubusercontent.com/Arrowar/SC_Binary/main"
+ self.paths_cache = None
def _detect_system(self) -> str:
- """
- Detect and normalize the operating system name.
-
- Returns:
- str: Normalized operating system name ('windows', 'darwin', or 'linux')
-
- Raises:
- ValueError: If the operating system is not supported
- """
+ """Detect and normalize the operating system name."""
system = platform.system().lower()
supported_systems = ['windows', 'darwin', 'linux']
if system not in supported_systems:
- raise ValueError(f"Unsupported operating system: {system}. Supported: {supported_systems}")
+ raise ValueError(f"Unsupported OS: {system}")
return system
def _detect_arch(self) -> str:
- """
- Detect and normalize the system architecture.
-
- Returns:
- str: Normalized architecture name
- """
+ """Detect and normalize the system architecture."""
machine = platform.machine().lower()
arch_map = {
'amd64': 'x64',
'x86_64': 'x64',
- 'x64': 'x64',
'arm64': 'arm64',
'aarch64': 'arm64',
- 'armv7l': 'arm',
- 'i386': 'ia32',
- 'i686': 'ia32',
- 'x86': 'x86'
}
- return arch_map.get(machine, machine)
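+        # Unrecognized architectures fall back to the x64 binaries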
+ return arch_map.get(machine, 'x64')
def get_binary_directory(self) -> str:
- """
- Get the binary directory path based on the operating system.
-
- Returns:
- str: Path to the binary directory
- """
+ """Get the binary directory path based on the operating system."""
if self.system == 'windows':
return os.path.join(os.path.splitdrive(self.home_dir)[0] + os.path.sep, 'binary')
-
elif self.system == 'darwin':
return os.path.join(self.home_dir, 'Applications', 'binary')
-
else: # linux
return os.path.join(self.home_dir, '.local', 'bin', 'binary')
def ensure_binary_directory(self, mode: int = 0o755) -> str:
+ """Create the binary directory if it doesn't exist."""
+ binary_dir = self.get_binary_directory()
+ os.makedirs(binary_dir, mode=mode, exist_ok=True)
+ return binary_dir
+
+ def _load_paths_json(self) -> dict:
+ """Load binary paths from GitHub repository."""
+ if self.paths_cache is not None:
+ return self.paths_cache
+
+ try:
+ url = f"{self.github_repo}/binary_paths.json"
+ response = requests.get(url, timeout=10)
+ response.raise_for_status()
+ self.paths_cache = response.json()
+ return self.paths_cache
+ except Exception:
+ return {}
+
+ def get_binary_path(self, tool: str, binary_name: str) -> Optional[str]:
"""
- Create the binary directory if it doesn't exist and return its path.
+ Get the full path to a binary from the repository.
Args:
- mode (int, optional): Directory permissions. Defaults to 0o755.
+            tool: Tool name (ffmpeg, bento4, megatools); currently unused for the local lookup
+ binary_name: Binary name (ffmpeg.exe, mp4decrypt, etc.)
Returns:
- str: Path to the binary directory
+ Full local path to the binary or None if not found
"""
binary_dir = self.get_binary_directory()
- os.makedirs(binary_dir, mode=mode, exist_ok=True)
- return binary_dir
+ local_path = os.path.join(binary_dir, binary_name)
+
+ if os.path.isfile(local_path):
+ return local_path
+
+ return None
+
+ def download_binary(self, tool: str, binary_name: str) -> Optional[str]:
+ """
+ Download a specific binary from GitHub repository directly to binary directory.
+
+ Args:
+ tool: Tool name (ffmpeg, bento4, megatools)
+ binary_name: Binary name to download
+
+ Returns:
+ Full local path to the downloaded binary or None if failed
+ """
+ paths_json = self._load_paths_json()
+ key = f"{self.system}_{self.arch}_{tool}"
+
+ if key not in paths_json:
+ return None
+
+ for rel_path in paths_json[key]:
+ if rel_path.endswith(binary_name):
+ url = f"{self.github_repo}/binaries/{rel_path}"
+ local_path = os.path.join(self.get_binary_directory(), binary_name)
+
+ try:
+ response = requests.get(url, stream=True, timeout=60)
+ response.raise_for_status()
+
+ with open(local_path, 'wb') as f:
+ for chunk in response.iter_content(chunk_size=8192):
+ f.write(chunk)
+
+ # Set executable permission on Unix systems
+ if self.system != 'windows':
+ os.chmod(local_path, 0o755)
+
+ return local_path
+ except Exception:
+ return None
+
+ return None
binary_paths = BinaryPaths()
\ No newline at end of file
diff --git a/StreamingCommunity/Util/installer/device_install.py b/StreamingCommunity/Util/installer/device_install.py
index 238d1961..60b3a875 100644
--- a/StreamingCommunity/Util/installer/device_install.py
+++ b/StreamingCommunity/Util/installer/device_install.py
@@ -2,7 +2,6 @@
import os
import struct
-import logging
from typing import Optional
@@ -19,79 +18,99 @@
console = Console()
-class DeviceDownloader:
+class DeviceSearcher:
def __init__(self):
self.base_dir = binary_paths.ensure_binary_directory()
- self.github_png_url = "https://github.com/Arrowar/StreamingCommunity/raw/main/.github/doc/img/crunchyroll_etp_rt.png"
- def extract_png_chunk(self, png_with_wvd: str, out_wvd_path: str) -> bool:
- """Extract WVD data"""
+ def _check_existing(self, ext: str) -> Optional[str]:
+ """Check for existing files with given extension in binary directory."""
try:
- with open(png_with_wvd, "rb") as f:
- data = f.read()
- pos = 8
-
- while pos < len(data):
- length = struct.unpack(">I", data[pos:pos+4])[0]
- chunk_type = data[pos+4:pos+8]
- chunk_data = data[pos+8:pos+8+length]
-
- if chunk_type == b"stEg":
- with open(out_wvd_path, "wb") as f:
- f.write(chunk_data)
- return True
-
- pos += 12 + length
-
- return False
-
- except Exception as e:
- logging.error(f"Error extracting PNG chunk: {e}")
- return False
-
- def _check_existing_wvd(self) -> Optional[str]:
- """Check for existing WVD files in binary directory."""
- try:
- if not os.path.exists(self.base_dir):
- return None
-
- # Look for any .wvd file first
for file in os.listdir(self.base_dir):
- if file.lower().endswith('.wvd'):
- wvd_path = os.path.join(self.base_dir, file)
- if os.path.exists(wvd_path) and os.path.getsize(wvd_path) > 0:
- logging.info(f"Found existing .wvd file: {file}")
- return wvd_path
+                if file.lower().endswith(ext):
+                    path = os.path.join(self.base_dir, file)
+                    if os.path.getsize(path) > 0:
+                        return path
return None
-
+
except Exception as e:
- logging.error(f"Error checking existing WVD files: {e}")
+ console.print(f"[red]Error checking existing {ext} files: {e}")
return None
- def _find_png_recursively(self, start_dir: str = ".") -> Optional[str]:
- """Find crunchyroll_etp_rt.png recursively starting from start_dir."""
- target_filename = "crunchyroll_etp_rt.png"
-
+ def _find_recursively(self, ext: str = None, start_dir: str = ".", filename: str = None) -> Optional[str]:
+ """
+ Find file recursively by extension or exact filename starting from start_dir.
+ If filename is provided, search for that filename. Otherwise, search by extension.
+ """
try:
for root, dirs, files in os.walk(start_dir):
- if target_filename in files:
- png_path = os.path.join(root, target_filename)
- logging.info(f"Found PNG file at: {png_path}")
- return png_path
-
- logging.warning(f"PNG file '{target_filename}' not found in '{start_dir}' and subdirectories")
+ for file in files:
+ if filename:
+ if file == filename:
+ path = os.path.join(root, file)
+ return path
+
+ elif ext:
+ if file.lower().endswith(ext):
+ path = os.path.join(root, file)
+ return path
+
return None
-
except Exception as e:
- logging.error(f"Error during recursive PNG search: {e}")
+ console.print(f"[red]Error during recursive search for filename {filename}: {e}")
return None
+ def search(self, ext: str = None, filename: str = None) -> Optional[str]:
+ """
+ Search for file with given extension or exact filename in binary directory or recursively.
+ If filename is provided, search for that filename. Otherwise, search by extension.
+ """
+ if filename:
+ try:
+ target_path = os.path.join(self.base_dir, filename)
+ if os.path.exists(target_path) and os.path.getsize(target_path) > 0:
+ return target_path
+
+ except Exception as e:
+ console.print(f"[red]Error checking for existing file {filename}: {e}")
+ return None
+
+ return self._find_recursively(filename=filename)
+
+ else:
+ path = self._check_existing(ext)
+ if path:
+ return path
+ return self._find_recursively(ext=ext)
+
+
+class DeviceDownloader:
+ def __init__(self):
+ self.base_dir = binary_paths.ensure_binary_directory()
+ self.github_png_url = "https://github.com/Arrowar/StreamingCommunity/raw/main/.github/doc/img/crunchyroll_etp_rt.png"
+
+ def extract_png_chunk(self, png_with_wvd: str, out_wvd_path: str) -> bool:
+ """Extract WVD data"""
+ with open(png_with_wvd, "rb") as f:
+ data = f.read()
+ pos = 8
+
+ while pos < len(data):
+ length = struct.unpack(">I", data[pos:pos+4])[0]
+ chunk_type = data[pos+4:pos+8]
+ chunk_data = data[pos+8:pos+8+length]
+
+ if chunk_type == b"stEg":
+ with open(out_wvd_path, "wb") as f:
+ f.write(chunk_data)
+ return True
+
+ pos += 12 + length
+
+ return False
+
def _download_png_from_github(self, output_path: str) -> bool:
"""Download PNG file from GitHub repository."""
- try:
- logging.info(f"Downloading PNG from GitHub: {self.github_png_url}")
-
+ try:
with httpx.Client(timeout=30.0, follow_redirects=True) as client:
response = client.get(self.github_png_url)
response.raise_for_status()
@@ -99,14 +118,10 @@ def _download_png_from_github(self, output_path: str) -> bool:
with open(output_path, "wb") as f:
f.write(response.content)
- logging.info(f"Successfully downloaded PNG to: {output_path}")
return True
- except httpx.HTTPError as e:
- logging.error(f"HTTP error downloading PNG from GitHub: {e}")
- return False
except Exception as e:
- logging.error(f"Error downloading PNG from GitHub: {e}")
+ console.print(f"[red]Error downloading PNG from GitHub: {e}")
return False
def download(self) -> Optional[str]:
@@ -115,65 +130,51 @@ def download(self) -> Optional[str]:
Downloads PNG from GitHub if not found locally.
"""
try:
- # Try to find PNG locally first
- png_path = self._find_png_recursively()
+ searcher = DeviceSearcher()
+ target_filename = "crunchyroll_etp_rt.png"
+ png_path = searcher.search(filename=target_filename)
temp_png_path = None
-
- # If not found locally, download from GitHub
+
if not png_path:
- logging.info("PNG not found locally, downloading from GitHub")
- temp_png_path = os.path.join(self.base_dir, 'crunchyroll_etp_rt.png')
-
+ temp_png_path = os.path.join(self.base_dir, target_filename)
if not self._download_png_from_github(temp_png_path):
- logging.error("Failed to download PNG from GitHub")
return None
png_path = temp_png_path
device_wvd_path = os.path.join(self.base_dir, 'device.wvd')
-
- # Extract WVD from PNG
extraction_success = self.extract_png_chunk(png_path, device_wvd_path)
-
- # Clean up temporary PNG file if it was downloaded
+
if temp_png_path and os.path.exists(temp_png_path):
- try:
- os.remove(temp_png_path)
- logging.info("Removed temporary PNG file")
- except Exception as e:
- logging.warning(f"Could not remove temporary PNG file: {e}")
-
- # Check extraction result
+ os.remove(temp_png_path)
+
if extraction_success:
if os.path.exists(device_wvd_path) and os.path.getsize(device_wvd_path) > 0:
- logging.info("Successfully extracted device.wvd from PNG")
return device_wvd_path
- else:
- logging.error("Extraction completed but resulting file is invalid")
- return None
- else:
- logging.error("Failed to extract device.wvd from PNG")
- return None
-
- except Exception as e:
- logging.error(f"Error during WVD extraction: {e}")
+
+ except Exception:
return None
def check_device_wvd_path() -> Optional[str]:
- """
- Check for device.wvd file in binary directory and extract from PNG if not found.
- """
+ """Check for device.wvd file in binary directory and extract from PNG if not found."""
try:
- downloader = DeviceDownloader()
-
- existing_wvd = downloader._check_existing_wvd()
+ searcher = DeviceSearcher()
+ existing_wvd = searcher.search('.wvd')
if existing_wvd:
return existing_wvd
-
- logging.info("device.wvd not found, attempting extraction from PNG")
+
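+        # Fall back to extracting device.wvd from the PNG bundled on GitHub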
+ downloader = DeviceDownloader()
return downloader.download()
- except Exception as e:
- logging.error(f"Error checking for device.wvd: {e}")
+ except Exception:
+ return None
+
+def check_device_prd_path() -> Optional[str]:
+ """Check for device.prd file in binary directory and search recursively if not found."""
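+    # A .prd file is a PlayReady device file, consumed here via pyplayready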
+ try:
+ searcher = DeviceSearcher()
+ return searcher.search('.prd')
+
+ except Exception:
return None
\ No newline at end of file
diff --git a/StreamingCommunity/Util/installer/ffmpeg_install.py b/StreamingCommunity/Util/installer/ffmpeg_install.py
index 12081432..b2f8f458 100644
--- a/StreamingCommunity/Util/installer/ffmpeg_install.py
+++ b/StreamingCommunity/Util/installer/ffmpeg_install.py
@@ -1,20 +1,15 @@
# 24.01.2024
import os
-import glob
-import gzip
import shutil
-import logging
-import subprocess
from typing import Optional, Tuple
# External library
-import requests
from rich.console import Console
-# Internal utilities
+# Logic
from .binary_paths import binary_paths
@@ -22,282 +17,39 @@
console = Console()
-FFMPEG_CONFIGURATION = {
- 'windows': {
- 'download_url': 'https://github.com/eugeneware/ffmpeg-static/releases/latest/download/ffmpeg-win32-{arch}.gz',
- 'file_extension': '.gz',
- 'executables': ['ffmpeg-win32-{arch}', 'ffprobe-win32-{arch}']
- },
- 'darwin': {
- 'download_url': 'https://github.com/eugeneware/ffmpeg-static/releases/latest/download/ffmpeg-darwin-{arch}.gz',
- 'file_extension': '.gz',
- 'executables': ['ffmpeg-darwin-{arch}', 'ffprobe-darwin-{arch}']
- },
- 'linux': {
- 'download_url': 'https://github.com/eugeneware/ffmpeg-static/releases/latest/download/ffmpeg-linux-{arch}.gz',
- 'file_extension': '.gz',
- 'executables': ['ffmpeg-linux-{arch}', 'ffprobe-linux-{arch}']
- }
-}
-
-
-class FFMPEGDownloader:
- def __init__(self):
- self.os_name = binary_paths.system
- self.arch = binary_paths.arch
- self.home_dir = binary_paths.home_dir
- self.base_dir = binary_paths.ensure_binary_directory()
-
- def _check_existing_binaries(self) -> Tuple[Optional[str], Optional[str], Optional[str]]:
- """
- Check if FFmpeg binaries already exist.
- Order: system PATH (where/which) -> binary directory
-
- Returns:
- Tuple[Optional[str], Optional[str], Optional[str]]: Paths to ffmpeg, ffprobe, ffplay
- """
- try:
-
- # STEP 1: Check system PATH first
- if self.os_name == 'windows':
- try:
- ffmpeg_path = subprocess.check_output(
- ['where', 'ffmpeg'], stderr=subprocess.DEVNULL, text=True
- ).strip().split('\n')[0]
-
- ffprobe_path = subprocess.check_output(
- ['where', 'ffprobe'], stderr=subprocess.DEVNULL, text=True
- ).strip().split('\n')[0]
-
- try:
- ffplay_path = subprocess.check_output(
- ['where', 'ffplay'], stderr=subprocess.DEVNULL, text=True
- ).strip().split('\n')[0]
- except subprocess.CalledProcessError:
- ffplay_path = None
-
- return ffmpeg_path, ffprobe_path, ffplay_path
-
- except subprocess.CalledProcessError:
- pass
-
- else:
- ffmpeg_path = shutil.which('ffmpeg')
- ffprobe_path = shutil.which('ffprobe')
- ffplay_path = shutil.which('ffplay')
-
- if ffmpeg_path and ffprobe_path:
- return ffmpeg_path, ffprobe_path, ffplay_path
-
- # STEP 2: Check in binary directory
- console.print("[cyan]Checking for FFmpeg in binary directory...")
- config = FFMPEG_CONFIGURATION[self.os_name]
- executables = [exe.format(arch=self.arch) for exe in config['executables']]
- found_executables = []
-
- for executable in executables:
-
- # Check for exact match first
- exe_paths = glob.glob(os.path.join(self.base_dir, executable))
- if exe_paths:
- found_executables.append(exe_paths[0])
-
- else:
- # Check for standard names
- if self.os_name == 'windows':
- if 'ffmpeg' in executable:
- standard_path = os.path.join(self.base_dir, 'ffmpeg.exe')
- elif 'ffprobe' in executable:
- standard_path = os.path.join(self.base_dir, 'ffprobe.exe')
- else:
- standard_path = None
- else:
- if 'ffmpeg' in executable:
- standard_path = os.path.join(self.base_dir, 'ffmpeg')
- elif 'ffprobe' in executable:
- standard_path = os.path.join(self.base_dir, 'ffprobe')
- else:
- standard_path = None
-
- if standard_path and os.path.exists(standard_path):
- found_executables.append(standard_path)
- else:
- found_executables.append(None)
-
- # Return found executables if we have at least ffmpeg and ffprobe
- if len(found_executables) >= 2 and found_executables[0] and found_executables[1]:
- ffplay_path = found_executables[2] if len(found_executables) > 2 else None
- return found_executables[0], found_executables[1], ffplay_path
-
- return (None, None, None)
-
- except Exception as e:
- logging.error(f"Error checking existing binaries: {e}")
- return (None, None, None)
-
- def _download_file(self, url: str, destination: str) -> bool:
- """
- Download a file from URL.
-
- Parameters:
- url (str): The URL to download the file from. Should be a direct download link.
- destination (str): Local file path where the downloaded file will be saved.
-
- Returns:
- bool: True if download was successful, False otherwise.
- """
- try:
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(destination, 'wb') as file:
- for chunk in response.iter_content(chunk_size=8192):
- file.write(chunk)
- return True
-
- except Exception as e:
- logging.error(f"Download error: {e}")
- return False
-
- def _extract_file(self, gz_path: str, final_path: str) -> bool:
- """
- Extract a gzipped file and set proper permissions.
-
- Parameters:
- gz_path (str): Path to the gzipped file
- final_path (str): Path where the extracted file should be saved
-
- Returns:
- bool: True if extraction was successful, False otherwise
- """
- try:
- logging.info(f"Attempting to extract {gz_path} to {final_path}")
-
- # Check if source file exists and is readable
- if not os.path.exists(gz_path):
- logging.error(f"Source file {gz_path} does not exist")
- return False
-
- if not os.access(gz_path, os.R_OK):
- logging.error(f"Source file {gz_path} is not readable")
- return False
-
- # Extract the file
- with gzip.open(gz_path, 'rb') as f_in:
- # Test if the gzip file is valid
- try:
- f_in.read(1)
- f_in.seek(0)
- except Exception as e:
- logging.error(f"Invalid gzip file {gz_path}: {e}")
- return False
-
- # Extract the file
- with open(final_path, 'wb') as f_out:
- shutil.copyfileobj(f_in, f_out)
-
- # Set executable permissions
- os.chmod(final_path, 0o755)
- logging.info(f"Successfully extracted {gz_path} to {final_path}")
-
- # Remove the gzip file
- os.remove(gz_path)
- return True
-
- except Exception as e:
- logging.error(f"Extraction error for {gz_path}: {e}")
- return False
-
- def download(self) -> Tuple[Optional[str], Optional[str], Optional[str]]:
- """
- Main method to download and set up FFmpeg executables.
-
- Returns:
- Tuple[Optional[str], Optional[str], Optional[str]]: Paths to ffmpeg, ffprobe, and ffplay executables.
- """
- if self.os_name == 'linux':
- try:
- # Attempt to install FFmpeg using apt
- console.print("[blue]Trying to install FFmpeg using 'sudo apt install ffmpeg'")
- result = subprocess.run(
- ['sudo', 'apt', 'install', '-y', 'ffmpeg'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE,
- text=True
- )
- if result.returncode == 0:
- ffmpeg_path = shutil.which('ffmpeg')
- ffprobe_path = shutil.which('ffprobe')
-
- if ffmpeg_path and ffprobe_path:
- return ffmpeg_path, ffprobe_path, None
- else:
- console.print("[yellow]Failed to install FFmpeg via apt. Proceeding with static download.")
-
- except Exception as e:
- logging.error(f"Error during 'sudo apt install ffmpeg': {e}")
- console.print("[red]Error during 'sudo apt install ffmpeg'. Proceeding with static download.")
-
- # Proceed with static download if apt installation fails or is not applicable
- config = FFMPEG_CONFIGURATION[self.os_name]
- executables = [exe.format(arch=self.arch) for exe in config['executables']]
- successful_extractions = []
-
- for executable in executables:
- try:
- download_url = f"https://github.com/eugeneware/ffmpeg-static/releases/latest/download/{executable}.gz"
- download_path = os.path.join(self.base_dir, f"{executable}.gz")
- final_path = os.path.join(self.base_dir, executable)
-
- # Log the current operation
- logging.info(f"Processing {executable}")
- console.print(f"[blue]Downloading {executable} from GitHub")
-
- # Download the file
- if not self._download_file(download_url, download_path):
- console.print(f"[red]Failed to download {executable}")
- continue
-
- # Extract the file
- if self._extract_file(download_path, final_path):
- successful_extractions.append(final_path)
- else:
- console.print(f"[red]Failed to extract {executable}")
-
- except Exception as e:
- logging.error(f"Error processing {executable}: {e}")
- console.print(f"[red]Error processing {executable}: {str(e)}")
- continue
-
- # Return the results based on successful extractions
- return (
- successful_extractions[0] if len(successful_extractions) > 0 else None,
- successful_extractions[1] if len(successful_extractions) > 1 else None,
- None # ffplay is not included in the current implementation
- )
-
-def check_ffmpeg() -> Tuple[Optional[str], Optional[str], Optional[str]]:
+def check_ffmpeg() -> Tuple[Optional[str], Optional[str]]:
"""
- Check for FFmpeg executables in the system and download them if not found.
- Order: system PATH (where/which) -> binary directory -> download
+ Check for FFmpeg executables and download if not found.
+ Order: system PATH -> binary directory -> download from GitHub
Returns:
- Tuple[Optional[str], Optional[str], Optional[str]]: Paths to ffmpeg, ffprobe, and ffplay
+ Tuple[Optional[str], Optional[str]]: Paths to ffmpeg and ffprobe
"""
- try:
- # Create downloader instance to use its existing check method
- downloader = FFMPEGDownloader()
-
- # STEP 1 & 2: Check existing binaries (system PATH + binary directory)
- ffmpeg_path, ffprobe_path, ffplay_path = downloader._check_existing_binaries()
-
- # If found, return them
- if ffmpeg_path and ffprobe_path:
- return ffmpeg_path, ffprobe_path, ffplay_path
-
- # STEP 3: Download if not found
- return downloader.download()
-
- except Exception as e:
- logging.error(f"Error checking or downloading FFmpeg executables: {e}")
- return None, None, None
\ No newline at end of file
+ system_platform = binary_paths.system
+ ffmpeg_name = "ffmpeg.exe" if system_platform == "windows" else "ffmpeg"
+ ffprobe_name = "ffprobe.exe" if system_platform == "windows" else "ffprobe"
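+    # shutil.which() already consults PATHEXT on Windows, so the explicit
+    # ".exe" suffix mainly keeps the binary-directory lookups below exact.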
+
+ # STEP 1: Check system PATH
+ ffmpeg_path = shutil.which(ffmpeg_name)
+ ffprobe_path = shutil.which(ffprobe_name)
+
+ if ffmpeg_path and ffprobe_path:
+ return ffmpeg_path, ffprobe_path
+
+ # STEP 2: Check binary directory
+ ffmpeg_local = binary_paths.get_binary_path("ffmpeg", ffmpeg_name)
+ ffprobe_local = binary_paths.get_binary_path("ffmpeg", ffprobe_name)
+
+ if ffmpeg_local and os.path.isfile(ffmpeg_local) and ffprobe_local and os.path.isfile(ffprobe_local):
+ return ffmpeg_local, ffprobe_local
+
+ # STEP 3: Download from GitHub repository
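+    # download_binary() is assumed to fetch a prebuilt executable maintained
+    # in the project's own repository.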
+    console.print("[red]FFmpeg not found. Downloading...")
+ ffmpeg_downloaded = binary_paths.download_binary("ffmpeg", ffmpeg_name)
+ ffprobe_downloaded = binary_paths.download_binary("ffmpeg", ffprobe_name)
+
+ if ffmpeg_downloaded and ffprobe_downloaded:
+ return ffmpeg_downloaded, ffprobe_downloaded
+
+    console.print("[red]Failed to download FFmpeg")
+ return None, None
\ No newline at end of file
diff --git a/StreamingCommunity/Util/installer/megatool_installer.py b/StreamingCommunity/Util/installer/megatool_installer.py
index cb3c8ffe..fcdb9993 100644
--- a/StreamingCommunity/Util/installer/megatool_installer.py
+++ b/StreamingCommunity/Util/installer/megatool_installer.py
@@ -2,18 +2,14 @@
import os
import shutil
-import tarfile
-import zipfile
-import logging
from typing import Optional
# External library
-import requests
from rich.console import Console
-# Internal utilities
+# Logic
from .binary_paths import binary_paths
@@ -21,247 +17,35 @@
console = Console()
-MEGATOOLS_CONFIGURATION = {
- 'windows': {
- 'download_url': 'https://xff.cz/megatools/builds/builds/megatools-{version}-{platform}.zip',
- 'versions': {
- 'x64': 'win64',
- 'x86': 'win32',
- },
- 'executables': ['megatools.exe']
- },
- 'darwin': {
- 'download_url': 'https://xff.cz/megatools/builds/builds/megatools-{version}-{platform}.tar.gz',
- 'versions': {
- 'x64': 'linux-x86_64',
- 'arm64': 'linux-aarch64'
- },
- 'executables': ['megatools']
- },
- 'linux': {
- 'download_url': 'https://xff.cz/megatools/builds/builds/megatools-{version}-{platform}.tar.gz',
- 'versions': {
- 'x64': 'linux-x86_64',
- 'x86': 'linux-i686',
- 'arm64': 'linux-aarch64'
- },
- 'executables': ['megatools']
- }
-}
-
-
-class MegatoolsDownloader:
- def __init__(self):
- self.os_name = binary_paths.system
- self.arch = binary_paths.arch
- self.home_dir = binary_paths.home_dir
- self.base_dir = binary_paths.ensure_binary_directory()
- self.version = "1.11.5.20250706"
-
- def _download_file(self, url: str, destination: str) -> bool:
- try:
- response = requests.get(url, stream=True)
- response.raise_for_status()
-
- with open(destination, 'wb') as file:
- for chunk in response.iter_content(chunk_size=8192):
- file.write(chunk)
-
- return True
-
- except Exception as e:
- logging.error(f"Download error: {e}")
- return False
-
- def _extract_executables(self, archive_path: str) -> list:
- try:
- extracted_files = []
- config = MEGATOOLS_CONFIGURATION[self.os_name]
- executables = config['executables']
-
- # Determine if it's a zip or tar.gz
- is_zip = archive_path.endswith('.zip')
-
- if is_zip:
- with zipfile.ZipFile(archive_path, 'r') as archive:
-
- # Extract all contents to a temporary location
- temp_extract_dir = os.path.join(self.base_dir, 'temp_megatools')
- archive.extractall(temp_extract_dir)
-
- # Find executables in the extracted folder (search recursively)
- for executable in executables:
- found = False
- for root, dirs, files in os.walk(temp_extract_dir):
- if executable in files:
- src_path = os.path.join(root, executable)
- dst_path = os.path.join(self.base_dir, executable)
-
- shutil.copy2(src_path, dst_path)
- extracted_files.append(dst_path)
- found = True
- break
-
- if not found:
- logging.warning(f"Executable {executable} not found in archive")
-
- # Clean up temporary extraction directory
- if os.path.exists(temp_extract_dir):
- shutil.rmtree(temp_extract_dir)
-
- else:
- with tarfile.open(archive_path, 'r:gz') as archive:
-
- # Extract all contents to a temporary location
- temp_extract_dir = os.path.join(self.base_dir, 'temp_megatools')
- archive.extractall(temp_extract_dir)
-
- # Find executables in the extracted folder (search recursively)
- for executable in executables:
- found = False
- for root, dirs, files in os.walk(temp_extract_dir):
- if executable in files:
- src_path = os.path.join(root, executable)
- dst_path = os.path.join(self.base_dir, executable)
-
- shutil.copy2(src_path, dst_path)
- os.chmod(dst_path, 0o755)
- extracted_files.append(dst_path)
- found = True
- break
-
- if not found:
- logging.warning(f"Executable {executable} not found in archive")
-
- # Clean up temporary extraction directory
- if os.path.exists(temp_extract_dir):
- shutil.rmtree(temp_extract_dir)
-
- return extracted_files
-
- except Exception as e:
- logging.error(f"Extraction error: {e}")
- return []
-
- def _verify_executable(self, executable_path: str) -> bool:
- """Verify that the executable works by running --version."""
- try:
- import subprocess
-
- result = subprocess.run(
- [executable_path, '--version'],
- capture_output=True,
- text=True,
- timeout=5
- )
-
- # megatools returns exit code 1 when showing version/help, but still outputs correctly
- if result.returncode in [0, 1] and ('megatools' in result.stdout.lower() or 'megatools' in result.stderr.lower()):
- version_output = result.stdout or result.stderr
- logging.info(f"Megatools executable verified: {version_output.splitlines()[0] if version_output else 'OK'}")
- return True
-
- else:
- logging.error(f"Executable verification failed with code: {result.returncode}")
- return False
-
- except Exception as e:
- logging.error(f"Failed to verify executable: {e}")
- return False
-
- def download(self) -> list:
- try:
- config = MEGATOOLS_CONFIGURATION[self.os_name]
- platform_str = config['versions'].get(self.arch)
-
- if not platform_str:
- raise ValueError(f"Unsupported architecture: {self.arch}")
-
- download_url = config['download_url'].format(
- version=self.version,
- platform=platform_str
- )
-
- # Determine file extension
- extension = '.zip' if self.os_name == 'windows' else '.tar.gz'
- archive_path = os.path.join(self.base_dir, f"megatools{extension}")
-
- console.print(f"[blue]Downloading Megatools {self.version}")
-
- if self._download_file(download_url, archive_path):
- extracted_files = self._extract_executables(archive_path)
-
- # Verify each extracted executable
- if extracted_files:
- verified_files = []
-
- for exe_path in extracted_files:
- if self._verify_executable(exe_path):
- verified_files.append(exe_path)
-
- if verified_files:
- console.print("[green]Successfully installed Megatools")
- os.remove(archive_path)
- return verified_files
- else:
- logging.error("No executables were verified successfully")
- else:
- logging.error("No executables were extracted")
-
- # Clean up archive
- if os.path.exists(archive_path):
- os.remove(archive_path)
-
- raise Exception("Failed to install Megatools")
-
- except Exception as e:
- logging.error(f"Error downloading Megatools: {e}")
- console.print(f"[red]Error downloading Megatools: {str(e)}")
- return []
-
-
def check_megatools() -> Optional[str]:
"""
- Check for megatools in the system and download if not found.
- Order: binary directory -> system PATH -> download
+ Check for megatools and download if not found.
+ Order: system PATH -> binary directory -> download from GitHub
Returns:
- Optional[str]: Path to megatools executable or None if not found/downloaded
+ Optional[str]: Path to megatools executable or None if not found
"""
- try:
- system_platform = binary_paths.system
- megatools_name = "megatools.exe" if system_platform == "windows" else "megatools"
-
- # STEP 1: Check binary directory FIRST
- binary_dir = binary_paths.get_binary_directory()
- local_path = os.path.join(binary_dir, megatools_name)
-
- if os.path.isfile(local_path):
-
- # Only check execution permissions on Unix systems
- if system_platform != 'windows' and not os.access(local_path, os.X_OK):
- try:
- os.chmod(local_path, 0o755)
- except Exception:
- pass
-
- logging.info("megatools found in binary directory")
- return local_path
-
- # STEP 2: Check system PATH
- megatools_path = shutil.which(megatools_name)
-
- if megatools_path:
- logging.info("megatools found in system PATH")
- return megatools_path
-
- # STEP 3: Download if not found anywhere
- console.print("[cyan]megatools not found. Downloading...")
- downloader = MegatoolsDownloader()
- extracted_files = downloader.download()
-
- return extracted_files[0] if extracted_files else None
-
- except Exception as e:
- logging.error(f"Error checking or downloading megatools: {e}")
- return None
\ No newline at end of file
+ system_platform = binary_paths.system
+ megatools_name = "megatools.exe" if system_platform == "windows" else "megatools"
+
+ # STEP 1: Check system PATH
+ megatools_path = shutil.which(megatools_name)
+
+ if megatools_path:
+ return megatools_path
+
+ # STEP 2: Check binary directory
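+    # get_binary_path() presumably returns the expected install location even
+    # when nothing is there yet, hence the os.path.isfile() check below.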
+ megatools_local = binary_paths.get_binary_path("megatools", megatools_name)
+
+ if megatools_local and os.path.isfile(megatools_local):
+ return megatools_local
+
+ # STEP 3: Download from GitHub repository
+    console.print("[red]megatools not found. Downloading...")
+ megatools_downloaded = binary_paths.download_binary("megatools", megatools_name)
+
+ if megatools_downloaded:
+ return megatools_downloaded
+
+    console.print("[red]Failed to download megatools")
+ return None
\ No newline at end of file
diff --git a/StreamingCommunity/Util/logger.py b/StreamingCommunity/Util/logger.py
index b2f59ded..e5a72c86 100644
--- a/StreamingCommunity/Util/logger.py
+++ b/StreamingCommunity/Util/logger.py
@@ -24,7 +24,7 @@ def __init__(self):
return
# Configure root logger
- self.debug_mode = config_manager.get_bool('DEFAULT', "debug")
+ self.debug_mode = config_manager.config.get_bool('DEFAULT', "debug")
self.logger = logging.getLogger('')
# Remove any existing handlers to avoid duplication
diff --git a/StreamingCommunity/Util/message.py b/StreamingCommunity/Util/message.py
index 0d2d8ac0..76120dc5 100644
--- a/StreamingCommunity/Util/message.py
+++ b/StreamingCommunity/Util/message.py
@@ -14,14 +14,13 @@
# Variable
console = Console()
-CLEAN = config_manager.get_bool('DEFAULT', 'show_message')
-SHOW = config_manager.get_bool('DEFAULT', 'show_message')
+CLEAN = config_manager.config.get_bool('DEFAULT', 'show_message')
+SHOW = config_manager.config.get_bool('DEFAULT', 'show_message')
def start_message(clean: bool=True):
"""Display a stylized start message in the console."""
msg = r'''
-[red]+[cyan]=======================================================================================[red]+[purple]
___ ______ _
/ _ | ___________ _ _____ _____[yellow] __ __[purple] / __/ /________ ___ ___ _ (_)__ ___ _
/ __ |/ __/ __/ _ \ |/|/ / _ `/ __/[yellow] \ \ /[purple] _\ \/ __/ __/ -_) _ `/ ' \/ / _ \/ _ `/
diff --git a/StreamingCommunity/Util/os.py b/StreamingCommunity/Util/os.py
index 4fa583bd..b70f4e4e 100644
--- a/StreamingCommunity/Util/os.py
+++ b/StreamingCommunity/Util/os.py
@@ -14,8 +14,9 @@
# Internal utilities
-from .installer import check_ffmpeg, check_mp4decrypt, check_device_wvd_path, check_megatools
-from StreamingCommunity.Lib.DASH.cdm_helpher import get_info_wvd
+from .installer import check_ffmpeg, check_mp4decrypt, check_device_wvd_path, check_device_prd_path, check_megatools
+from StreamingCommunity.Lib.DASH.extractor.ex_widevine import get_info_wvd
+from StreamingCommunity.Lib.DASH.extractor.ex_playready import get_info_prd
# Variable
@@ -209,20 +210,10 @@ def format_transfer_speed(self, bytes: float) -> str:
class OsSummary:
def __init__(self):
- self.ffmpeg_path = None
- self.ffprobe_path = None
- self.ffplay_path = None
- self.mp4decrypt_path = None
- self.wvd_path = None
- self.megatools_path = None
- self.init()
-
- def init(self):
-
- # Check for binaries
- self.ffmpeg_path, self.ffprobe_path, _ = check_ffmpeg()
+ self.ffmpeg_path, self.ffprobe_path = check_ffmpeg()
self.mp4decrypt_path = check_mp4decrypt()
self.wvd_path = check_device_wvd_path()
+ self.prd_path = check_device_prd_path()
self.megatools_path = check_megatools()
self._display_binary_paths()
@@ -242,7 +233,10 @@ def _display_binary_paths(self):
path_strings.append(f"[red]{name} [yellow]{path_str}")
console.print(f"[cyan]Utilities: {', [white]'.join(path_strings)}")
- get_info_wvd(self.wvd_path)
+ if self.wvd_path:
+ get_info_wvd(self.wvd_path)
+ if self.prd_path:
+ get_info_prd(self.prd_path)
# Initialize the os_summary, internet_manager, and os_manager when the module is imported
@@ -267,6 +261,10 @@ def get_wvd_path():
"""Returns the path of wvd."""
return os_summary.wvd_path
+def get_prd_path():
+    """Returns the path of prd (PlayReady device file)."""
+ return os_summary.prd_path
+
def get_megatools_path():
"""Returns the path of megatools."""
return os_summary.megatools_path
\ No newline at end of file
diff --git a/StreamingCommunity/global_search.py b/StreamingCommunity/global_search.py
index 19a2615d..04beeb0d 100644
--- a/StreamingCommunity/global_search.py
+++ b/StreamingCommunity/global_search.py
@@ -21,7 +21,6 @@
msg = Prompt()
-
def global_search(search_terms: str = None, selected_sites: list = None):
"""
Perform a search across multiple sites based on selection.
diff --git a/StreamingCommunity/run.py b/StreamingCommunity/run.py
index 507e3529..6f47d43f 100644
--- a/StreamingCommunity/run.py
+++ b/StreamingCommunity/run.py
@@ -26,6 +26,8 @@
# Config
+console = Console()
+msg = Prompt()
COLOR_MAP = {
"anime": "red",
"film_&_serie": "yellow",
@@ -34,11 +36,6 @@
CATEGORY_MAP = {1: "anime", 2: "film_&_serie", 3: "serie"}
-# Variable
-console = Console()
-msg = Prompt()
-
-
def run_function(func: Callable[..., None], close_console: bool = False, search_terms: str = None) -> None:
"""Run function once or indefinitely based on close_console flag."""
if close_console:
@@ -145,7 +142,7 @@ def _build_command_for_hook(hook: dict) -> Tuple[list, dict]:
def _iter_hooks(stage: str):
"""Yield hook dicts for a given stage ('pre_run' | 'post_run')."""
try:
- hooks_section = config_manager.config.get('HOOKS', {})
+ hooks_section = config_manager.config.get('HOOKS')
hooks_list = hooks_section.get(stage, []) or []
if not isinstance(hooks_list, list):
return
@@ -331,7 +328,7 @@ def apply_config_updates(args):
# Apply updates
for key, value in config_updates.items():
section, option = key.split('.')
- config_manager.set_key(section, option, value)
+ config_manager.config.set_key(section, option, value)
if config_updates:
config_manager.save_config()
diff --git a/config.json b/config.json
index 4c3cc593..b6bef384 100644
--- a/config.json
+++ b/config.json
@@ -18,14 +18,14 @@
"segment_timeout": 6,
"enable_retry": true,
"specific_list_audio": [
- "ita"
+ "ita",
+ "it-IT"
],
"merge_subs": true,
"specific_list_subtitles": [
"ita",
"it-IT"
],
- "limit_segment": 0,
"cleanup_tmp_folder": true,
"get_only_link": false
},
@@ -33,7 +33,7 @@
"use_gpu": false,
"param_video": ["-c:v", "libx265", "-crf", "28", "-preset", "medium"],
"param_audio": ["-c:a", "libopus", "-b:a", "128k"],
- "param_subtitles": ["-c:s", "copy"],
+ "subtitle_disposition": false,
"param_final": ["-c", "copy"],
"force_resolution": "Best",
"extension": "mkv"
@@ -46,15 +46,5 @@
"HOOKS": {
"pre_run": [],
"post_run": []
- },
- "SITE_LOGIN": {
- "crunchyroll": {
- "device_id": "",
- "etp_rt": ""
- },
- "tubi": {
- "email": "",
- "password": ""
- }
}
}
\ No newline at end of file
diff --git a/login.json b/login.json
new file mode 100644
index 00000000..9e08f36a
--- /dev/null
+++ b/login.json
@@ -0,0 +1,13 @@
+{
+ "TMDB": {
+ "api_key": ""
+ },
+ "crunchyroll": {
+ "device_id": "",
+ "etp_rt": ""
+ },
+ "tubi": {
+ "email": "",
+ "password": ""
+ }
+}
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ca7d202c..94cf9ab6 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,8 +2,8 @@ httpx
bs4
rich
tqdm
-m3u8
-lxml
+m3u8
+lxml
isodate
psutil
unidecode
@@ -12,4 +12,5 @@ jsbeautifier
pathvalidate
pycryptodomex
ua-generator
-pywidevine
\ No newline at end of file
+pywidevine
+pyplayready
\ No newline at end of file