Use multiple Representations rather than single Representation with multiple BaseURLs in MPD manifest anxdpanic#527 anxdpanic#1040

- ISA doesn't support Representations with multiple BaseURLs (see the sketch below)
- Don't block adaptive formats for clients that may need PO tokens
- Some URLs for adaptive formats may be blocked even if the client does need a PO token
- As not all URLs may be blocked, allow ISA to try any available URL for playback
MoojMidge committed Dec 30, 2024
1 parent 74e91ac commit a4523c6
Showing 2 changed files with 29 additions and 28 deletions.
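To illustrate the structural change in the generated MPD: ISA only honours a single BaseURL per Representation, so instead of stacking a primary and an optional secondary BaseURL inside one Representation, the generator now emits a separate Representation for each usable URL. Below is a minimal sketch of that approach; the Representation template is simplified and the itag, bandwidth, and URL values are hypothetical, not taken from the plugin.

```python
# Minimal sketch (not the plugin's actual helper): emit one <Representation>
# per usable URL, because ISA (inputstream.adaptive) reads only a single
# BaseURL from each Representation.
from xml.sax.saxutils import escape


def representations_for(itag, urls, bandwidth, codecs='mp4a.40.2'):
    """Build one Representation element per URL that survived filtering."""
    output = []
    for idx, url in enumerate(u for u in urls if u):
        output.append(
            '\t\t\t<Representation'
            ' id="{itag}.{idx}"'
            ' codecs="{codecs}"'
            ' bandwidth="{bandwidth}">\n'
            '\t\t\t\t<BaseURL>{base_url}</BaseURL>\n'
            '\t\t\t</Representation>\n'.format(
                itag=itag,
                idx=idx,
                codecs=codecs,
                bandwidth=bandwidth,
                base_url=escape(url),
            )
        )
    return ''.join(output)


# Hypothetical example: the original stream URL plus one alternate host.
print(representations_for(
    itag=140,
    urls=['https://example.test/videoplayback?itag=140',
          'https://alt.example.test/videoplayback?itag=140',
          None],
    bandwidth=131072,
))
```

Because each URL now lives in its own Representation, ISA can skip one that is blocked and try another in the same AdaptationSet, which is the fallback behaviour described in the commit message.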
@@ -177,7 +177,6 @@ class YouTubeRequestClient(BaseRequestsClass):
'ios': {
'_id': 5,
'_auth_type': False,
-'_use_adaptive': False,
'_os': {
'major': '18',
'minor': '1',
56 changes: 29 additions & 27 deletions resources/lib/youtube_plugin/youtube/helper/stream_info.py
@@ -1245,7 +1245,7 @@ def _process_progressive_streams(self,

if not new_url:
continue
-new_url, _ = self._process_url_params(new_url)
+new_url, _, _ = self._process_url_params(new_url)

stream_map['itag'] = itag
yt_format = self._get_stream_format(
@@ -1344,7 +1344,7 @@ def _process_url_params(self,
update_primary=False,
digits_re=re_compile(r'\d+')):
if not url:
-return url, None
+return url, None, None

parts = urlsplit(url)
params = parse_qs(parts.query)
@@ -1403,13 +1403,14 @@
elif primary_update_url or secondary_update_url:
query_str = parts.query
else:
-return url, None
+return url, None, None

parts._replace(query=query_str)
return (
+parts.geturl(),
parts._replace(netloc=primary_update_url).geturl()
if primary_update_url else
-parts.geturl(),
+None,
parts._replace(netloc=secondary_update_url).geturl()
if secondary_update_url else
None,
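For readers following the diff: the hunk above changes `_process_url_params` so that it always returns three elements, the processed URL plus primary and secondary host variants (or None where a variant is unavailable). A minimal standalone sketch of that contract, with a simplified signature and hypothetical host names rather than the plugin's real logic:

```python
# Sketch of the new three-element return contract:
# (processed URL, primary-host variant or None, secondary-host variant or None).
from urllib.parse import urlsplit


def process_url_params(url, primary_host=None, secondary_host=None):
    # Hypothetical hosts; the real method computes its primary/secondary
    # update hosts internally.
    if not url:
        return url, None, None
    parts = urlsplit(url)
    return (
        parts.geturl(),
        parts._replace(netloc=primary_host).geturl() if primary_host else None,
        parts._replace(netloc=secondary_host).geturl() if secondary_host else None,
    )


# Progressive-stream path: only the processed URL is needed.
new_url, _, _ = process_url_params('https://example.test/videoplayback?itag=22')

# Adaptive-stream path: keep every non-empty URL so each one can become
# its own Representation (one BaseURL each) in the MPD.
urls = [u for u in process_url_params(
    'https://example.test/videoplayback?itag=140',
    primary_host='alt1.example.test',
) if u]
```

Callers that only need the processed URL unpack and discard the extras, while the adaptive path keeps every non-empty entry, as shown in the later hunks of this file.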
@@ -2163,12 +2164,11 @@ def _process_adaptive_streams(self,
if quality_group not in data:
data[quality_group] = {}

-url = unquote(url)
-primary_url, secondary_url = self._process_url_params(url)
+urls = self._process_url_params(unquote(url))

-details = {
+details = [{
'mimeType': mime_type,
-'baseUrl': entity_escape(primary_url),
+'baseUrl': entity_escape(url),
'mediaType': media_type,
'container': container,
'codecs': codecs,
@@ -2193,18 +2193,17 @@
'roleOrder': role_order,
'sampleRate': sample_rate,
'channels': channels,
-}
-if secondary_url:
-details['baseUrlSecondary'] = entity_escape(secondary_url)
+} for url in urls if url]
data[mime_group][itag] = data[quality_group][itag] = details

if not video_data and not audio_only:
context.log_debug('Generate MPD: No video mime-types found')
return None, None

-def _stream_sort(stream, alt_sort=('alt_sort' in stream_features)):
-if not stream:
+def _stream_sort(streams, alt_sort=('alt_sort' in stream_features)):
+if not streams:
return (1,)
+stream = streams[0]

preferred = stream['preferred_codec']
return (
@@ -2223,7 +2222,7 @@ def _stream_sort(stream, alt_sort=('alt_sort' in stream_features)):

def _group_sort(item):
group, streams = item
-main_stream = streams[0]
+main_stream = streams[0][0]

key = (
not group.startswith(main_stream['mimeType']),
@@ -2233,7 +2232,7 @@ def _group_sort(item):
main_stream['langName'],
- main_stream['roleOrder'],
)
-return key + _stream_sort(main_stream)
+return key + _stream_sort(streams[0])

video_data = sorted((
(group, sorted(streams.values(), key=_stream_sort))
@@ -2273,7 +2272,7 @@ def _filter_group(previous_group, previous_stream, item):
return not skip_group

new_group = item[0]
-new_stream = item[1][0]
+new_stream = item[1][0][0]

media_type = new_stream['mediaType']
if media_type != previous_stream['mediaType']:
@@ -2317,12 +2316,12 @@ def _filter_group(previous_group, previous_stream, item):
localize = context.localize

main_stream = {
-'audio': audio_data[0][1][0],
+'audio': audio_data[0][1][0][0],
'multi_audio': False,
'multi_language': False,
}
if video_data:
-main_stream['video'] = video_data[0][1][0]
+main_stream['video'] = video_data[0][1][0][0]
duration = main_stream['video']['duration']
else:
duration = main_stream['audio']['duration']
@@ -2351,7 +2350,7 @@ def _filter_group(previous_group, previous_stream, item):
if do_filter and _filter_group(group, stream, item):
continue
group, streams = item
-stream = streams[0]
+stream = streams[0][0]
container = stream['container']
media_type = stream['mediaType']
mime_type = stream['mimeType']
@@ -2453,9 +2452,7 @@ def _filter_group(previous_group, previous_stream, item):
'/>\n'
# Representation Label element is not used by ISA
'\t\t\t\t<Label>{label}</Label>\n'
-'\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n' +
-('\t\t\t\t<BaseURL>{baseUrlSecondary}</BaseURL>\n'
-if 'baseUrlSecondary' in stream else '') +
+'\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n'
'\t\t\t\t<SegmentBase indexRange="{indexRange}">\n'
'\t\t\t\t\t<Initialization range="{initRange}"/>\n'
'\t\t\t\t</SegmentBase>\n'
@@ -2464,7 +2461,11 @@ def _filter_group(previous_group, previous_stream, item):
quality=(idx + 1),
priority=(num_streams - idx),
**stream
-) for idx, stream in enumerate(streams)])
+)
+for idx, stream_list in enumerate(streams)
+for stream in stream_list
+])
+
elif media_type == 'video':
output.extend([(
'\t\t\t<Representation'
@@ -2481,9 +2482,7 @@ def _filter_group(previous_group, previous_stream, item):
'>\n'
# Representation Label element is not used by ISA
'\t\t\t\t<Label>{label}</Label>\n'
-'\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n' +
-('\t\t\t\t<BaseURL>{baseUrlSecondary}</BaseURL>\n'
-if 'baseUrlSecondary' in stream else '') +
+'\t\t\t\t<BaseURL>{baseUrl}</BaseURL>\n'
'\t\t\t\t<SegmentBase indexRange="{indexRange}">\n'
'\t\t\t\t\t<Initialization range="{initRange}"/>\n'
'\t\t\t\t</SegmentBase>\n'
@@ -2492,7 +2491,10 @@ def _filter_group(previous_group, previous_stream, item):
quality=(idx + 1),
priority=(num_streams - idx),
**stream
-) for idx, stream in enumerate(streams)])
+)
+for idx, stream_list in enumerate(streams)
+for stream in stream_list
+])

output.append('\t\t</AdaptationSet>\n')
set_id += 1
