diff --git a/script.module.resolveurl/lib/resolveurl/plugins/googlevideo.py b/script.module.resolveurl/lib/resolveurl/plugins/googlevideo.py
index b978cda3..1a749b19 100644
--- a/script.module.resolveurl/lib/resolveurl/plugins/googlevideo.py
+++ b/script.module.resolveurl/lib/resolveurl/plugins/googlevideo.py
@@ -1,6 +1,6 @@
"""
Plugin for ResolveURL
- Copyright (C) 2014 smokdpi
+ Copyright (C) 2026 icarok99
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -16,14 +16,13 @@
    along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-from resolveurl import common, hmf
+from resolveurl import common
from resolveurl.resolver import ResolveUrl, ResolverError
from resolveurl.lib import helpers
import re
-from kodi_six import xbmc, xbmcaddon, xbmcvfs
import json
-from six.moves import urllib_error, urllib_parse, urllib_request
-import six
+from urllib import error as urllib_error, parse as urllib_parse, request as urllib_request
+from kodi_six import xbmc, xbmcaddon, xbmcvfs
import sqlite3
@@ -61,10 +60,7 @@ def get_media_url(self, host, media_id):
if xbmc.getCondVisibility('System.HasAddon(plugin.googledrive)') and self.get_setting('use_gdrive') == "true":
addon = xbmcaddon.Addon('plugin.googledrive')
- if six.PY3:
- db = xbmcvfs.translatePath(addon.getAddonInfo('profile')) + 'accounts.db'
- else:
- db = xbmc.translatePath(addon.getAddonInfo('profile')) + 'accounts.db'
+ db = xbmcvfs.translatePath(addon.getAddonInfo('profile')) + 'accounts.db'
conn = sqlite3.connect(db)
c = conn.cursor()
c.execute("SELECT key FROM store;")
@@ -97,7 +93,7 @@ def get_media_url(self, host, media_id):
video = self._parse_redirect(video, hdrs=self.headers)
if video:
- if 'plugin://' in video: # google plus embedded videos may result in this
+ if 'plugin://' in video:
return video
else:
return video + helpers.append_headers(self.headers)
@@ -113,7 +109,7 @@ def http_response(self, request, response):
return response
opener = urllib_request.build_opener(NoRedirection)
- urllib_request.install_opener(opener) # @ big change
+ urllib_request.install_opener(opener)
request = urllib_request.Request(url, headers=hdrs)
try:
response = urllib_request.urlopen(request)
@@ -154,21 +150,152 @@ def _parse_google(self, link):
response = self.net.http_GET(link)
sources = self._parse_gdocs(response.content)
elif 'blogger.com/video.g?token=' in link:
- # Quick hack till I figure the direction to take this plugin
- response = self.net.http_GET(link)
- source = re.search(r'''['"]play_url["']\s*:\s*["']([^"']+)''', response.content)
- if source:
- sources = [("Unknown Quality", source.group(1).decode('unicode-escape') if six.PY2 else source.group(1))]
+ sources = self._parse_blogger_batchexecute(link)
return response, sources
+ def _parse_blogger_batchexecute(self, blogger_url):
+ token_match = re.search(r'token=([A-Za-z0-9_-]+)', blogger_url)
+ if not token_match:
+ raise ResolverError('Could not extract token from Blogger URL: %s' % blogger_url)
+ token = token_match.group(1)
+
+ try:
+ req = urllib_request.Request(blogger_url, headers=self.headers)
+ resp = urllib_request.urlopen(req)
+ page_text = resp.read().decode('utf-8', errors='ignore')
+ except urllib_error.HTTPError as e:
+ raise ResolverError('Failed to load Blogger page (HTTP %s): %s' % (e.code, blogger_url))
+ except Exception as e:
+ raise ResolverError('Failed to load Blogger page: %s' % str(e))
+
+ sid_match = re.search(r'"FdrFJe"\s*:\s*"([^"]+)"', page_text)
+ bh_match = re.search(r'"cfb2h"\s*:\s*"([^"]+)"', page_text)
+ at_match = re.search(r'"SNlM0e"\s*:\s*"([^"]+)"', page_text)
+
+ if not sid_match or not bh_match:
+ raise ResolverError('Failed to extract session params (FdrFJe/cfb2h) from Blogger page')
+
+ sid = sid_match.group(1)
+ bh = bh_match.group(1)
+ at = at_match.group(1) if at_match else ''
+
+ inner = json.dumps([token, '', 0], separators=(',', ':'))
+ freq = json.dumps([[['WcwnYd', inner, None, 'generic']]], separators=(',', ':'))
+ post_body = 'f.req=' + urllib_parse.quote(freq)
+ if at:
+ post_body += '&at=' + urllib_parse.quote(at)
+
+ batch_url = (
+ 'https://www.blogger.com/_/BloggerVideoPlayerUi/data/batchexecute'
+ '?rpcids=WcwnYd&source-path=%2Fvideo.g'
+ '&f.sid={sid}&bl={bh}&hl=en-US&_reqid=100001&rt=c'
+ ).format(sid=urllib_parse.quote(sid), bh=urllib_parse.quote(bh))
+
+ batch_headers = dict(self.headers)
+ batch_headers.update({
+ 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
+ 'X-Same-Domain': '1',
+ 'Origin': 'https://www.blogger.com',
+ 'Referer': blogger_url,
+ })
+
+ try:
+ req2 = urllib_request.Request(batch_url, data=post_body.encode('utf-8'), headers=batch_headers)
+ batch_resp = urllib_request.urlopen(req2)
+ batch_body = batch_resp.read().decode('utf-8', errors='ignore')
+ except urllib_error.HTTPError as e:
+ raise ResolverError('batchexecute request failed (HTTP %s)' % e.code)
+ except Exception as e:
+ raise ResolverError('batchexecute request failed: %s' % str(e))
+
+ video_url = self._parse_batchexecute_response(batch_body)
+ if not video_url:
+ raise ResolverError('No video URL found in Blogger batchexecute response')
+
+ if 'itag=22' in video_url:
+ quality = '720p'
+ elif 'itag=18' in video_url:
+ quality = '360p'
+ else:
+ quality = 'Unknown Quality'
+
+ return [(quality, video_url)]
+
+ def _parse_batchexecute_response(self, body):
+ video_url = None
+
+ for line in body.splitlines():
+ if 'wrb.fr' not in line:
+ continue
+ try:
+ outer = json.loads(line)
+ except ValueError:
+ continue
+
+ for entry in outer:
+ if not isinstance(entry, list) or len(entry) < 3:
+ continue
+ if entry[0] != 'wrb.fr' or entry[1] != 'WcwnYd':
+ continue
+ try:
+ data = json.loads(entry[2])
+ except (ValueError, TypeError):
+ continue
+
+ streams = None
+ for elem in data:
+ if isinstance(elem, list) and elem and isinstance(elem[0], list):
+ streams = elem
+ break
+
+ if not streams:
+ continue
+
+ mp4_urls = []
+ for stream in streams:
+ if not isinstance(stream, list) or not stream:
+ continue
+ url = stream[0]
+ if not isinstance(url, str):
+ continue
+ if 'mime=video%2Fmp4' in url or 'mime=video/mp4' in url:
+ mp4_urls.append(url)
+
+ for u in mp4_urls:
+ if 'itag=22' in u:
+ video_url = u
+ break
+
+ if not video_url:
+ for u in mp4_urls:
+ if 'itag=18' in u:
+ video_url = u
+ break
+
+ if not video_url and mp4_urls:
+ video_url = mp4_urls[0]
+
+ if not video_url and streams and isinstance(streams[0], list) and streams[0]:
+ candidate = streams[0][0]
+ if isinstance(candidate, str):
+ video_url = candidate
+
+ if video_url:
+ break
+
+ if not video_url:
+ gv_match = re.search(r'https://[^"\\]+\.googlevideo\.com/[^"\\]+', body)
+ if gv_match:
+ video_url = gv_match.group(0)
+
+ return video_url
+
def __parse_gplus(self, html):
sources = []
match = re.search(r'(?P<link>[^&]+).*?&itag=(?P<itag>[^&]+)', item4)
link = match.group('link')
itag = match.group('itag')
@@ -224,8 +349,6 @@ def _parse_gdocs(self, html):
items = value.split(',')
for item in items:
_source_itag, source_url = item.split('|')
- if isinstance(source_url, six.text_type) and six.PY2: # @big change
- source_url = source_url.decode('unicode_escape').encode('utf-8')
quality = self.itag_map.get(_source_itag, 'Unknown Quality [%s]' % _source_itag)
source_url = urllib_parse.unquote(source_url)
urls.append((quality, source_url))
@@ -235,11 +358,6 @@ def _parse_gdocs(self, html):
def parse_json(html):
if html:
try:
- if not isinstance(html, six.text_type):
- if html.startswith('\xef\xbb\xbf'):
- html = html[3:]
- elif html.startswith('\xfe\xff'):
- html = html[2:]
js_data = json.loads(html)
if js_data is None:
return {}