From a4523c64674d2938fbb6a076be3448394d493059 Mon Sep 17 00:00:00 2001
From: MoojMidge <56883549+MoojMidge@users.noreply.github.com>
Date: Mon, 30 Dec 2024 15:05:58 +1100
Subject: [PATCH] Use multiple Representations rather than single
Representation with multiple BaseURLs in MPD manifest #527 #1040

- ISA doesn't support Representations with multiple BaseURLs
- Don't block adaptive formats for clients that may need PO tokens
- Some URLs for adaptive formats may still be blocked if the client does need a PO token
- As not all URLs may be blocked, allow ISA to try any that are available for playback
---
.../youtube/client/request_client.py | 1 -
.../youtube/helper/stream_info.py | 56 ++++++++++---------
2 files changed, 29 insertions(+), 28 deletions(-)
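
Note for reviewers: a rough sketch of the reshaped data flow follows. It is
illustrative only, with placeholder host names, and is not the plugin's actual
helper (the real _process_url_params also rewrites query parameters).

from urllib.parse import urlsplit

def process_url_params(url, primary_host=None, secondary_host=None):
    # Old return shape: (primary_url or url, secondary_url or None)
    # New return shape: (url, primary_url or None, secondary_url or None)
    parts = urlsplit(url)
    return (
        parts.geturl(),
        parts._replace(netloc=primary_host).geturl() if primary_host else None,
        parts._replace(netloc=secondary_host).geturl() if secondary_host else None,
    )

# One details dict, and hence one Representation, per usable URL, instead of
# a single dict carrying an optional 'baseUrlSecondary' key:
urls = process_url_params(
    'https://rr1.example.invalid/videoplayback?itag=251',  # placeholder URL
    primary_host='rr2.example.invalid',  # placeholder alternate host
)
details = [{'baseUrl': url} for url in urls if url]
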
diff --git a/resources/lib/youtube_plugin/youtube/client/request_client.py b/resources/lib/youtube_plugin/youtube/client/request_client.py
index 963457c5c..e1906700f 100644
--- a/resources/lib/youtube_plugin/youtube/client/request_client.py
+++ b/resources/lib/youtube_plugin/youtube/client/request_client.py
@@ -177,7 +177,6 @@ class YouTubeRequestClient(BaseRequestsClass):
'ios': {
'_id': 5,
'_auth_type': False,
- '_use_adaptive': False,
'_os': {
'major': '18',
'minor': '1',
diff --git a/resources/lib/youtube_plugin/youtube/helper/stream_info.py b/resources/lib/youtube_plugin/youtube/helper/stream_info.py
index aead0f65f..658943c12 100644
--- a/resources/lib/youtube_plugin/youtube/helper/stream_info.py
+++ b/resources/lib/youtube_plugin/youtube/helper/stream_info.py
@@ -1245,7 +1245,7 @@ def _process_progressive_streams(self,
if not new_url:
continue
- new_url, _ = self._process_url_params(new_url)
+ new_url, _, _ = self._process_url_params(new_url)
stream_map['itag'] = itag
yt_format = self._get_stream_format(
@@ -1344,7 +1344,7 @@ def _process_url_params(self,
update_primary=False,
digits_re=re_compile(r'\d+')):
if not url:
- return url, None
+ return url, None, None
parts = urlsplit(url)
params = parse_qs(parts.query)
@@ -1403,13 +1403,14 @@ def _process_url_params(self,
elif primary_update_url or secondary_update_url:
query_str = parts.query
else:
- return url, None
+ return url, None, None
parts._replace(query=query_str)
return (
+ parts.geturl(),
parts._replace(netloc=primary_update_url).geturl()
if primary_update_url else
- parts.geturl(),
+ None,
parts._replace(netloc=secondary_update_url).geturl()
if secondary_update_url else
None,
@@ -2163,12 +2164,11 @@ def _process_adaptive_streams(self,
if quality_group not in data:
data[quality_group] = {}
- url = unquote(url)
- primary_url, secondary_url = self._process_url_params(url)
+ urls = self._process_url_params(unquote(url))
- details = {
+ details = [{
'mimeType': mime_type,
- 'baseUrl': entity_escape(primary_url),
+ 'baseUrl': entity_escape(url),
'mediaType': media_type,
'container': container,
'codecs': codecs,
@@ -2193,18 +2193,17 @@ def _process_adaptive_streams(self,
'roleOrder': role_order,
'sampleRate': sample_rate,
'channels': channels,
- }
- if secondary_url:
- details['baseUrlSecondary'] = entity_escape(secondary_url)
+ } for url in urls if url]
data[mime_group][itag] = data[quality_group][itag] = details
if not video_data and not audio_only:
context.log_debug('Generate MPD: No video mime-types found')
return None, None
- def _stream_sort(stream, alt_sort=('alt_sort' in stream_features)):
- if not stream:
+ def _stream_sort(streams, alt_sort=('alt_sort' in stream_features)):
+ if not streams:
return (1,)
+ stream = streams[0]
preferred = stream['preferred_codec']
return (
@@ -2223,7 +2222,7 @@ def _stream_sort(stream, alt_sort=('alt_sort' in stream_features)):
def _group_sort(item):
group, streams = item
- main_stream = streams[0]
+ main_stream = streams[0][0]
key = (
not group.startswith(main_stream['mimeType']),
@@ -2233,7 +2232,7 @@ def _group_sort(item):
main_stream['langName'],
main_stream['roleOrder'],
)
- return key + _stream_sort(main_stream)
+ return key + _stream_sort(streams[0])
video_data = sorted((
(group, sorted(streams.values(), key=_stream_sort))
@@ -2273,7 +2272,7 @@ def _filter_group(previous_group, previous_stream, item):
return not skip_group
new_group = item[0]
- new_stream = item[1][0]
+ new_stream = item[1][0][0]
media_type = new_stream['mediaType']
if media_type != previous_stream['mediaType']:
@@ -2317,12 +2316,12 @@ def _filter_group(previous_group, previous_stream, item):
localize = context.localize
main_stream = {
- 'audio': audio_data[0][1][0],
+ 'audio': audio_data[0][1][0][0],
'multi_audio': False,
'multi_language': False,
}
if video_data:
- main_stream['video'] = video_data[0][1][0]
+ main_stream['video'] = video_data[0][1][0][0]
duration = main_stream['video']['duration']
else:
duration = main_stream['audio']['duration']
@@ -2351,7 +2350,7 @@ def _filter_group(previous_group, previous_stream, item):
if do_filter and _filter_group(group, stream, item):
continue
group, streams = item
- stream = streams[0]
+ stream = streams[0][0]
container = stream['container']
media_type = stream['mediaType']
mime_type = stream['mimeType']
@@ -2453,9 +2452,7 @@ def _filter_group(previous_group, previous_stream, item):
'/>\n'
# Representation Label element is not used by ISA
# '\t\t\t\t<Label>{label}</Label>\n'
- '\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n' +
- ('\t\t\t\t<BaseURL>{baseUrlSecondary}</BaseURL>\n'
- if 'baseUrlSecondary' in stream else '') +
+ '\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n'
'\t\t\t\t<SegmentBase indexRange="{indexRange}">\n'
'\t\t\t\t\t<Initialization range="{initRange}"/>\n'
'\t\t\t\t</SegmentBase>\n'
@@ -2464,7 +2461,11 @@ def _filter_group(previous_group, previous_stream, item):
quality=(idx + 1),
priority=(num_streams - idx),
**stream
- ) for idx, stream in enumerate(streams)])
+ )
+ for idx, stream_list in enumerate(streams)
+ for stream in stream_list
+ ])
+
elif media_type == 'video':
output.extend([(
'\t\t\t<Representation '
# Representation Label element is not used by ISA
# '\t\t\t\t<Label>{label}</Label>\n'
- '\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n' +
- ('\t\t\t\t<BaseURL>{baseUrlSecondary}</BaseURL>\n'
- if 'baseUrlSecondary' in stream else '') +
+ '\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n'
'\t\t\t\t<SegmentBase indexRange="{indexRange}">\n'
'\t\t\t\t\t<Initialization range="{initRange}"/>\n'
'\t\t\t\t</SegmentBase>\n'
@@ -2492,7 +2491,10 @@ def _filter_group(previous_group, previous_stream, item):
quality=(idx + 1),
priority=(num_streams - idx),
**stream
- ) for idx, stream in enumerate(streams)])
+ )
+ for idx, stream_list in enumerate(streams)
+ for stream in stream_list
+ ])
output.append('\t\t\n')
set_id += 1
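
The net effect on the generated MPD, as a sketch only (attributes trimmed,
hosts are placeholders): each usable URL now becomes its own Representation
with a single BaseURL, rather than one Representation carrying extra BaseURL
elements that InputStream Adaptive does not support, so ISA can try whichever
URL is playable.

def render_representations(stream_list, itag='251'):
    # One <Representation> per usable URL, each with a single <BaseURL>.
    return ''.join(
        ('\t\t\t<Representation id="{id}">\n'
         '\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n'
         '\t\t\t</Representation>\n').format(id=itag, **stream)
        for stream in stream_list
    )

print(render_representations([
    {'baseUrl': 'https://rr1.example.invalid/videoplayback'},
    {'baseUrl': 'https://rr2.example.invalid/videoplayback'},
]))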