diff --git a/README.md b/README.md
index 5ee9bea..48073c0 100644
--- a/README.md
+++ b/README.md
@@ -7,6 +7,9 @@ Requires Python modules:
- lxml (https://pypi.python.org/pypi/lxml/3.2.5)
- m3u8 (https://pypi.python.org/pypi/m3u8/)
- cfscrape (https://pypi.python.org/pypi/cfscrape/)
+- Node.js is required for (safe) JavaScript execution.
+  Your computer or server may already have it (check with node -v). If not, you can install it with apt-get install
+  nodejs on Ubuntu and Debian. Otherwise, please read Node's installation instructions (https://nodejs.org/en/download/package-manager/).
crunchy-xml-decoder will try to install PyCrypto and lxml automatically,
if they are missing. m3u8 and crscrape can be installed using PIP.
diff --git a/crunchy-xml-decoder.bat b/crunchy-xml-decoder.bat
index 4c2753d..cfe0524 100644
--- a/crunchy-xml-decoder.bat
+++ b/crunchy-xml-decoder.bat
@@ -1,5 +1,9 @@
@echo off
+RD "%PUBLIC%\Crunchyroll-XML-Decoder_link" 1>NUL 2>NUL
+mklink /j "%PUBLIC%\Crunchyroll-XML-Decoder_link" %cd% 1>NUL 2>NUL
+cd "%PUBLIC%\Crunchyroll-XML-Decoder_link"
:sratre
crunchy-xml-decoder.py %1 %2 %3 %4 %5 %6 %7 %8 %9
+RD "%PUBLIC%\Crunchyroll-XML-Decoder_link" 1>NUL 2>NUL
pause
diff --git a/crunchy-xml-decoder.py b/crunchy-xml-decoder.py
index 6a5ad16..786eb12 100644
--- a/crunchy-xml-decoder.py
+++ b/crunchy-xml-decoder.py
@@ -11,28 +11,76 @@
import login
import decode
import altfuncs
-import re, urllib2
+import re
from collections import deque
import time
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(autocatch)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def autocatch():
+ import requests, pickle
+ with open('cookies') as f:
+ cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
+ session = requests.session()
+ session.cookies = cookies
+ del session.cookies['c_visitor']
+ data = {'Referer': 'http://crunchyroll.com/', 'Host': 'www.crunchyroll.com',
+ 'User-Agent': 'Mozilla/5.0 Windows NT 6.1; rv:26.0 Gecko/20100101 Firefox/26.0'}
+ aList = []
print 'indicate the url : '
url=raw_input()
- mykey = urllib2.urlopen(url)
- take = open("queue_.txt", "w")
+ #RSS
+ rescash = session.get(url+'.rss', params=data)
+ rescash.encoding = 'UTF-8'
+ mykey = rescash.text
+ session_num_ = [int(word.replace('season ','')) for word in re.findall('season [0-9]+', mykey)]
+ if not session_num_ == []:
+ if not min(session_num_) == 1:
+ print 'RSS Auto-cash Failed\ntrying Cashing the Site'
+ #
+ rescash = session.get(url, params=data)
+ rescash.encoding = 'UTF-8'
+ mykey = rescash.text
+ aList_t = re.findall(''+url+'/(.+?)', mykey)
+ for i in aList_t:
+ aList.append(url+'/'+i)
+ if rescash.status_code == 404:
+ print 'RSS Auto-cash Failed\ntrying Cashing the Site'
+ #
+ rescash = session.get(url, params=data)
+ rescash.encoding = 'UTF-8'
+ mykey = rescash.text
+ aList_t = re.findall('> take, i
+ take.close()
+ else:
+ print 'Site Auto-cash Failed\nPlease Build List Manually'
+ subprocess.call('notepad.exe '+"queue.txt")
#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(CHECKING)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if not os.path.exists("export"):
os.makedirs("export")
@@ -43,8 +91,9 @@ def autocatch():
iforcesub = False
iforceusa = False
ilocalizecookies = False
-ionlymainsub=False
-def defaultsettings(vvquality, vlang1, vlang2, vforcesub, vforceusa, vlocalizecookies, onlymainsub):
+ionlymainsub = False
+iconnection_n_ = 1
+def defaultsettings(vvquality, vlang1, vlang2, vforcesub, vforceusa, vlocalizecookies, onlymainsub, vconnection_n_):
dsettings='''[SETTINGS]
# Set this to the preferred quality. Possible values are: "android" (hard-subbed), "360p", "480p", "720p", "1080p", or "highest" for highest available.
# Note that any quality higher than 360p still requires premium, unless it's available that way for free (some first episodes).
@@ -63,11 +112,13 @@ def defaultsettings(vvquality, vlang1, vlang2, vforcesub, vforceusa, vlocalizeco
localizecookies = '''+str(vlocalizecookies)+'''
# Set this if you only want to mux one subtitle only (this so make easy for some devices like TVs to play subtitle)
onlymainsub='''+str(onlymainsub)+'''
+# Set this to increase the number of simultaneous download connections.
+connection_n_='''+str(vconnection_n_)+'''
'''
open('.\\settings.ini', 'w').write(dsettings.encode('utf-8'))
if not os.path.exists(".\\settings.ini"):
- defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
+ defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub, iconnection_n_)
if not os.path.exists(".\\cookies"):
if raw_input(u'Do you have an account [Y/N]?').lower() == 'y':
@@ -160,7 +211,7 @@ def Languages_(Varname_):
Languages_()
def videoquality_():
- slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
+ slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub, vconnection_n_ = altfuncs.config()
seleccion = 5
print '''Set This To The Preferred Quality:
0.- android (hard-subbed)
@@ -193,7 +244,7 @@ def videoquality_():
print "ERROR: Invalid option."
videoquality_()
def settings_():
- slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub = altfuncs.config()
+ slang1, slang2, sforcesub, sforceusa, slocalizecookies, vquality, vonlymainsub, vconnection_n_ = altfuncs.config()
slang1 = {u'Español (Espana)' : 'Espanol_Espana', u'Français (France)' : 'Francais', u'Português (Brasil)' : 'Portugues',
u'English' : 'English', u'Español' : 'Espanol', u'Türkçe' : 'Turkce', u'Italiano' : 'Italiano',
u'العربية' : 'Arabic', u'Deutsch' : 'Deutsch'}[slang1]
@@ -218,7 +269,8 @@ def settings_():
5.- USA Proxy = '''+str(sforceusa)+''' #use a US session ID
6.- Localize cookies = '''+str(slocalizecookies)+''' #Localize the cookies (Experiment)
7.- Only One Subtitle = '''+str(vonlymainsub)+''' #Only download Primary Language
-8.- Restore Default Settings
+8.- Change the Number of The Download Connection = '''+str(vconnection_n_)+'''
+9.- Restore Default Settings
'''
try:
seleccion = int(input("> "))
@@ -227,46 +279,50 @@ def settings_():
settings_()
if seleccion == 1 :
vquality = videoquality_()
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 2 :
slang1 = Languages_('slang1')
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 3 :
slang2 = Languages_('slang2')
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 4 :
if sforcesub:
sforcesub = False
else:
sforcesub = True
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 5 :
if sforceusa:
sforceusa = False
else:
sforceusa = True
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 6 :
if slocalizecookies:
slocalizecookies = False
else:
slocalizecookies = True
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 7 :
if vonlymainsub:
vonlymainsub = False
else:
vonlymainsub = True
- defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub)
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
settings_()
elif seleccion == 8 :
- defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub)
+        vconnection_n_ = raw_input(u'Please Input The Download Connection Number: ')
+ defaultsettings(vquality, slang1, slang2, sforcesub, sforceusa, slocalizecookies, vonlymainsub, vconnection_n_)
+ settings_()
+ elif seleccion == 9 :
+ defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub, iconnection_n_)
settings_()
elif seleccion == 0 :
pass
@@ -359,7 +415,7 @@ def makechoise():
decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
sys.exit()
if arg.default_settings:
- defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies)
+ defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies, ionlymainsub, iconnection_n_)
sys.exit()
if arg.queue:
queueu(arg.queue)
diff --git a/crunchy-xml-decoder/altfuncs.py b/crunchy-xml-decoder/altfuncs.py
index 6249a41..03734f6 100644
--- a/crunchy-xml-decoder/altfuncs.py
+++ b/crunchy-xml-decoder/altfuncs.py
@@ -35,7 +35,8 @@ def config():
global localizecookies
localizecookies = configr.getboolean('SETTINGS', 'localizecookies')
onlymainsub = configr.getboolean('SETTINGS', 'onlymainsub')
- return [lang, lang2, forcesub, forceusa, localizecookies, quality, onlymainsub]
+ connection_n_ = int(configr.get('SETTINGS', 'connection_n_'))
+ return [lang, lang2, forcesub, forceusa, localizecookies, quality, onlymainsub, connection_n_]
#def playerrev(url):
@@ -67,10 +68,12 @@ def gethtml(url):
u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
if forceusa:
try:
- session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
+ session.cookies['sess_id'] = requests.get('http://rssfeedfilter.netne.net/').json()['sessionId'].encode('ascii', 'ignore')
+ #print 'I recommend to re-login so we don\'t overload crunchyroll unblocker'
except:
sleep(10) # sleep so we don't overload crunblocker
- session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
+ session.cookies['sess_id'] = requests.get('http://crunchyrollus.netne.net/').json()['sessionId'].encode('ascii', 'ignore')
+ #print 'I recommend to re-login so we don\'t overload crunchyroll unblocker'
parts = urlparse.urlsplit(url)
if not parts.scheme or not parts.netloc:
print 'Apparently not a URL'
@@ -103,15 +106,21 @@ def getxml(req, med_id):
u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
if forceusa:
try:
- session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
+ session.cookies['sess_id'] = session.cookies['usa_sess_id']
except:
- sleep(10) # sleep so we don't overload crunblocker
- session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
+ try:
+ session.cookies['sess_id'] = requests.get('http://rssfeedfilter.netne.net/').json()['sessionId'].encode('ascii', 'ignore')
+ #print 'I recommend to re-login so we don\'t overload crunchyroll unblocker'
+ except:
+ sleep(10) # sleep so we don't overload crunblocker
+ session.cookies['sess_id'] = requests.get('http://crunchyrollus.netne.net/').json()['sessionId'].encode('ascii', 'ignore')
+ #print 'I recommend to re-login so we don\'t overload crunchyroll unblocker'
headers = {'Referer': 'http://static.ak.crunchyroll.com/versioned_assets/ChromelessPlayerApp.17821a0e.swf',
'Host': 'www.crunchyroll.com', 'Content-type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0)'}
res = session.post(url, params=payload, headers=headers)
res.encoding = 'UTF-8'
+ #print session.cookies
return res.text
diff --git a/crunchy-xml-decoder/decode.py b/crunchy-xml-decoder/decode.py
index 144e639..ed10797 100644
--- a/crunchy-xml-decoder/decode.py
+++ b/crunchy-xml-decoder/decode.py
@@ -39,7 +39,7 @@ def decode(page_url):
if page_url == '':
page_url = raw_input('Please enter Crunchyroll video URL:\n')
- lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
+ lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_ = altfuncs.config()
#player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)
diff --git a/crunchy-xml-decoder/functtest.py b/crunchy-xml-decoder/functtest.py
index 2c690da..5641b9d 100644
--- a/crunchy-xml-decoder/functtest.py
+++ b/crunchy-xml-decoder/functtest.py
@@ -92,12 +92,24 @@ def unzip_(filename_,out):
except ImportError:
print 'Installing Cfscrape...',
try:
- if python_bit_=="32 bit" or python_bit_=="64 bit":
+ #if python_bit_=="32 bit" or python_bit_=="64 bit":
pip.main(['install', '--quiet', 'cfscrape'])
print "Installed"
except KeyError:
print "Something Has Gone Wrong While Retrieving Cfscrape link\nPlease Download Cfscrape Manually"
+try:
+ import m3u8
+ print('m3u8 installed')
+except ImportError:
+ print 'Installing m3u8...',
+ try:
+ #if python_bit_=="32 bit" or python_bit_=="64 bit":
+ pip.main(['install', '--quiet', 'm3u8'])
+ print "Installed"
+ except KeyError:
+ print "Something Has Gone Wrong While Retrieving m3u8 link\nPlease Download m3u8 Manually"
+
if Crypto_link_ or lxml_link_:
if not os.path.exists("temp"):
os.makedirs("temp")
diff --git a/crunchy-xml-decoder/hls.py b/crunchy-xml-decoder/hls.py
index c60c091..b9b306b 100644
--- a/crunchy-xml-decoder/hls.py
+++ b/crunchy-xml-decoder/hls.py
@@ -6,6 +6,12 @@
import StringIO
import socket
import os
+from threading import Thread
+from time import sleep
+import math
+import time
+import subprocess
+import requests
blocksize = 16384
@@ -16,7 +22,11 @@ def __init__(self, uri, cur, total):
self.total = total
self.offset = 0
self._restart()
- self.file_size = int(self.stream.info().get('Content-Length', -1))
+ try:
+ self.file_size = int(self.stream.info().get('Content-Length', -1))
+ except:
+ self.file_size = int(self.stream.headers['content-length'])
+ #print self.file_size
if self.file_size <= 0:
print "Invalid file size"
sys.exit()
@@ -26,21 +36,45 @@ def _progress(self):
sys.stdout.flush()
def _restart(self):
- req = urllib2.Request(self.uri)
- if self.offset:
- req.headers['Range'] = 'bytes=%s-' % (self.offset, )
- while True:
- try:
- self.stream = urllib2.urlopen(req, timeout = 180)
- break
- except socket.timeout:
- continue
- except socket.error, e:
- if e.errno != errno.ECONNRESET:
- raise
+ try:
+ #import requstss2
+ req = urllib2.Request(self.uri)
+ if self.offset:
+ req.headers['Range'] = 'bytes=%s-' % (self.offset, )
+
+ while True:
+ try:
+ self.stream = urllib2.urlopen(req, timeout = 180)
+ #print self.stream
+ break
+ except socket.timeout:
+ continue
+ except socket.error, e:
+ if e.errno != errno.ECONNRESET:
+ raise
+ except:
+ if self.offset:
+ headers = {'Range': 'bytes=%s-' % (self.offset, )}
+ req = requests.get(self.uri, headers=headers, stream=True, timeout = 180)
+ else:
+ req = requests.get(self.uri, stream=True, timeout = 180)
+
+ while True:
+ try:
+ self.stream = req.raw
+ break
+ except socket.timeout:
+ continue
+ except socket.error, e:
+ if e.errno != errno.ECONNRESET:
+ raise
def read(self, n):
buffer = []
+ global download_size_
+ if not 'download_size_' in globals():
+ download_size_ = [0]*self.total
+ download_size_[self.cur-1] = self.offset
while self.offset < self.file_size:
try:
data = self.stream.read(min(n, self.file_size - self.offset))
@@ -59,6 +93,22 @@ def read(self, n):
self._restart()
return "".join(buffer)
+def compute_total_size(video):
+ global total_size
+ global total_size_l_
+ total_size = 0
+ total_size_l_=[]
+ for n, seg in enumerate(video.segments):
+ try:
+ req_1 = urllib2.Request(seg.uri)
+ stream_1 = urllib2.urlopen(req_1, timeout = 180)
+ total_size += int(stream_1.info().get('Content-Length', -1))
+ total_size_l_.append(stream_1.info().get('Content-Length', -1))
+ except:
+ stream_1r = requests.head(seg.uri, timeout = 180).headers['content-length']
+ total_size += int(stream_1r)
+ total_size_l_.append(stream_1r)
+
def copy_with_decrypt(input, output, key, media_sequence):
if key.iv is not None:
iv = str(key.iv)[2:]
@@ -71,30 +121,173 @@ def copy_with_decrypt(input, output, key, media_sequence):
break
output.write(aes.decrypt(data))
-def fetch_streams(output, video):
- output = open(output, 'wb')
- for n, seg in enumerate(video.segments):
- sys.stdout.write('\x1b[2K\r%d/%d' % (n + 1, len(video.segments)))
- sys.stdout.flush()
- raw = resumable_fetch(seg.uri, n+1, len(video.segments))
- if hasattr(video, 'key'):
- copy_with_decrypt(raw, output, video.key, video.media_sequence + n)
+def size_adj(size_, x_):
+ if x_ == 'harddisk':
+ if size_/1024 > 1:
+ if (size_/1024)/1024 > 1:
+ if ((size_/1024)/1024)/1024 > 1:
+ size_out_ = str(((size_/1024)/1024)/1024)+'GB'
+ else:
+ size_out_ = str((size_/1024)/1024)+'MB'
+ else:
+ size_out_ = str(size_/1024)+'KB'
+ else:
+ size_out_ = str(size_)+'bytes'
+ if x_ == 'internet':
+ if size_/1024 > 1:
+ if (size_/1024)/1024 > 1:
+ if ((size_/1024)/1024)/1024 > 1:
+ size_out_ = format(((size_/1024)/1024)/1024, '.2f')+'Gb/s'
+ else:
+ size_out_ = format((size_/1024)/1024, '.2f')+'Mb/s'
+ else:
+ size_out_ = format(size_/1024, '.2f')+'Kb/s'
else:
- copy_with_decrypt(raw, output, video.keys[0], video.media_sequence + n)
- size = output.tell()
- if size % 188 != 0:
- size = size // 188 * 188
- output.seek(size)
- output.truncate(size)
+ size_out_ = format(size_, '.2f')+'b/s'
+ return size_out_
+
+def download(video, output, Url, seg_n, connection_n):
+ global start_t
+ if not 'start_t' in globals():
+ start_t = time.clock()
+ raw = resumable_fetch(Url, seg_n, len(video.segments))
+ progress_.append(seg_n)
+ percentage = ((len(progress_)) * 100)/len(video.segments)
+ avail_dots = 30
+ if total_size_t_.is_alive():
+ total_size_s_ = 'C.Size'
+ else:
+ total_size_s_ = size_adj(total_size, 'harddisk')
+ shaded_dots = int(math.floor(float(len(progress_) + 1) / len(video.segments) * avail_dots))
+ global max_output_len
+ if not 'max_output_len' in globals():
+ max_output_len = 0
+ for i in range(0,connection_n):
+ try:
+ os.path.getsize(os.path.join(os.getcwd(), output.name[:-1]+str(i)))
+ except:
+ pass
+ global download_size_
+ if not 'download_size_' in globals():
+ download_size_ = [0]*len(video.segments)
+ download_size_1 = 0
+ for i in download_size_:
+ download_size_1 += i
+
+ output_len = len("\r" + '[' + '.'*shaded_dots + ' '*(avail_dots-shaded_dots) + '] %'+str(percentage)+' (%d/%d) %s/%s @ %s' % (len(progress_), len(video.segments),size_adj(download_size_1, 'harddisk'), total_size_s_, str(size_adj(download_size_1/(time.clock()-start_t), 'internet'))))
+ max_output_len = max(max_output_len, output_len)
+ sys.stdout.write("\r" + '[' + '.'*shaded_dots + ' '*(avail_dots-shaded_dots) + '] %'+str(percentage)+' (%d/%d) %s/%s @ %s' % (len(progress_), len(video.segments),size_adj(download_size_1, 'harddisk'), total_size_s_, str(size_adj(download_size_1/(time.clock()-start_t), 'internet'))) + ' '*(max_output_len-output_len))
+ sys.stdout.flush()
+ if hasattr(video, 'key'):
+ copy_with_decrypt(raw, output, video.key, video.media_sequence + seg_n-1)
+ else:
+ copy_with_decrypt(raw, output, video.keys[0], video.media_sequence + seg_n-1)
+ size = output.tell()
+ if size % 188 != 0:
+ size = size // 188 * 188
+ output.seek(size)
+ output.truncate(size)
+
+def down_thread(video, output, start, end, seg_url, connection_n):
+ #this function is for debug
+ for i in range(start, end+1):
+ download(video, output, seg_url[i-1], i, connection_n)
+
+def fetch_streams(output_dir, video, connection_n):
+ global total_size
+ global total_size_t_
+
+ total_size_t_ = Thread(target=compute_total_size,args=[video])
+ total_size_t_.start()
+ if len(video.segments)/connection_n <2:
+ connection_n = len(video.segments)/2
+ connection_dist = []
+ seg_arr_list_=[]
+ seg_arr_list_2_=[]
+ for i in range(1, len(video.segments)+1):
+ seg_arr_list_.append(i)
+ seg_arr_list_2_.append(i)
+ seg_arr_list_.reverse()
+ seg_arr_list_2_.reverse()
+ for l in range(0, len(video.segments)):
+ for i in range(0,connection_n):
+ if not 'seg_arr_list_len_{0}'.format(i) in locals():
+ locals()['seg_arr_list_len_{0}'.format(i)]=[]
+ try:
+ locals()['seg_arr_list_len_{0}'.format(i)].append(seg_arr_list_.pop())
+ except:
+ pass
+ for i in range(0, connection_n):
+ for l in range(0, len(locals()['seg_arr_list_len_{0}'.format(i)])):
+ if not 'thread_dis_{0}'.format(i) in locals():
+ locals()['thread_dis_{0}'.format(i)]=[]
+ try:
+ locals()['thread_dis_{0}'.format(i)].append(seg_arr_list_2_.pop())
+ except:
+ pass
+ connection_dist = []
+ for i in range(0, connection_n):
+ connection_dist.append(min(locals()['thread_dis_{0}'.format(i)]))
+ connection_dist.append(max(locals()['thread_dis_{0}'.format(i)]))
+
+ seg_url = []
+ for n, seg in enumerate(video.segments):
+ seg_url.append(seg.uri)
+ connection_dist.reverse()
+ threads = []
+ global progress_
+ progress_ = []
+ for i in range (1, connection_n+1):
+ locals()['file_seg_{0}'.format(i)] = open(output_dir+str(i), 'wb')
+ threads.append(Thread(target=down_thread,args=(video, locals()['file_seg_{0}'.format(i)], connection_dist.pop(), connection_dist.pop(), seg_url, connection_n)))
+ # Start all threads
+ for x in threads:
+ x.start()
+
+ # Wait for all of them to finish
+ for x in threads:
+ x.join()
+ #print locals()
+ if connection_n==1:
+ locals()['file_seg_1'].close()
+ os.rename(output_dir+'1',output_dir)
+ else:
+ #final_file_ = open(output_dir, 'wb')
+ cmd_appd = ['copy /b ','cat ']
+ for i in range (1, connection_n+1):
+ cmd_appd[0] += '"'+output_dir+str(i)+'"+'
+ cmd_appd[1] += '"'+output_dir+str(i)+'" '
+ #temp_file_ = open(output_dir+str(i), 'rb')
+ #final_file_.write(temp_file_.read())
+ #temp_file_.close()
+ locals()['file_seg_{0}'.format(i)].close()
+ #final_file_.close()
+ cmd_appd[0] = cmd_appd[0][:-1]
+ cmd_appd[0] += ' "'+output_dir+'"'
+ cmd_appd[1] += '> "'+output_dir+'"'
+ #print cmd_appd
+ try:
+ subprocess.call(cmd_appd[0], shell=True)
+ except:
+ subprocess.call(cmd_appd[1], shell=True)
+ for i in range (1, connection_n+1):
+ os.remove(output_dir+str(i))
print '\n'
+
def fetch_encryption_key(video):
if hasattr(video, 'key'):
assert video.key.method == 'AES-128'
- video.key.key_value = urllib2.urlopen(url = video.key.uri).read()
+ try:
+ video.key.key_value = urllib2.urlopen(url = video.key.uri).read()
+ except:
+ video.key.key_value = requests.get(video.keys[0].uri).text.encode('windows-1252')
else:
assert video.keys[0].method == 'AES-128'
- video.keys[0].key_value = urllib2.urlopen(url = video.keys[0].uri).read()
+ try:
+ video.keys[0].key_value = urllib2.urlopen(url = video.keys[0].uri).read()
+ except:
+ video.keys[0].key_value = requests.get(video.keys[0].uri).text.encode('windows-1252')
def find_best_video(uri):
playlist = m3u8.load(uri)
@@ -106,7 +299,35 @@ def find_best_video(uri):
best_stream = stream
return find_best_video(best_stream.absolute_uri)
-def video_hls(uri, output):
+def video_hls(uri, output, connection_n):
video = find_best_video(uri)
+ connection_n = connection_n
fetch_encryption_key(video)
- fetch_streams(output, video)
+ fetch_streams(output, video, connection_n)
+
+if __name__ == '__main__':
+ connection_n = 1
+ try:
+ uri = sys.argv[1]
+ except:
+ print "invalid url"
+ try:
+ if int(sys.argv[2]):
+ connection_n = int(sys.argv[2])
+ except:
+ try:
+ output = sys.argv[2]
+ except:
+ output = "download.ts"
+ try:
+ if int(sys.argv[3]):
+ connection_n = int(sys.argv[3])
+ except:
+ try:
+ output = sys.argv[3]
+
+ except:
+ if not 'output' in locals():
+ output = "download.ts"
+
+ video_hls(uri, output, connection_n)
\ No newline at end of file
diff --git a/crunchy-xml-decoder/hls_old.py b/crunchy-xml-decoder/hls_old.py
new file mode 100644
index 0000000..4b518f4
--- /dev/null
+++ b/crunchy-xml-decoder/hls_old.py
@@ -0,0 +1,119 @@
+import sys
+import errno
+import m3u8
+import urllib2
+from Crypto.Cipher import AES
+import StringIO
+import socket
+import os
+
+import math
+
+blocksize = 16384
+
+class resumable_fetch:
+ def __init__(self, uri, cur, total):
+ self.uri = uri
+ self.cur = cur
+ self.total = total
+ self.offset = 0
+ self._restart()
+ self.file_size = int(self.stream.info().get('Content-Length', -1))
+ if self.file_size <= 0:
+ print "Invalid file size"
+ sys.exit()
+
+ def _progress(self):
+ sys.stdout.write('\x1b[2K\r%d/%d' % (self.cur, self.total))
+ sys.stdout.flush()
+
+ def _restart(self):
+ req = urllib2.Request(self.uri)
+ if self.offset:
+ req.headers['Range'] = 'bytes=%s-' % (self.offset, )
+
+ while True:
+ try:
+ self.stream = urllib2.urlopen(req, timeout = 180)
+ break
+ except socket.timeout:
+ continue
+ except socket.error, e:
+ if e.errno != errno.ECONNRESET:
+ raise
+
+ def read(self, n):
+ buffer = []
+ while self.offset < self.file_size:
+ try:
+ data = self.stream.read(min(n, self.file_size - self.offset))
+ self.offset += len(data)
+ n -= len(data)
+ buffer.append(data)
+ if n == 0 or data:
+ break
+ except socket.timeout:
+ self._progress()
+ self._restart()
+ except socket.error as e:
+ if e.errno != errno.ECONNRESET:
+ raise
+ self._progress()
+ self._restart()
+ return "".join(buffer)
+
+
+def copy_with_decrypt(input, output, key, media_sequence):
+ if key.iv is not None:
+ iv = str(key.iv)[2:]
+ else:
+ iv = "%032x" % media_sequence
+ aes = AES.new(key.key_value, AES.MODE_CBC, iv.decode('hex'))
+ while True:
+ data = input.read(blocksize)
+ if not data:
+ break
+ output.write(aes.decrypt(data))
+
+def fetch_streams(output, video):
+ output = open(output, 'wb')
+ for n, seg in enumerate(video.segments):
+ percentage = ((n + 1) * 100)/len(video.segments)
+ avail_dots = 50
+ shaded_dots = int(math.floor(float(n + 1) / len(video.segments) * avail_dots))
+ sys.stdout.write("\r" + '[' + '.'*shaded_dots + ' '*(avail_dots-shaded_dots) + '] %'+str(percentage)+' (%d/%d)\x1b[2K' % (n + 1, len(video.segments)))
+ sys.stdout.flush()
+ raw = resumable_fetch(seg.uri, n+1, len(video.segments))
+ if hasattr(video, 'key'):
+ copy_with_decrypt(raw, output, video.key, video.media_sequence + n)
+ else:
+ copy_with_decrypt(raw, output, video.keys[0], video.media_sequence + n)
+ size = output.tell()
+ if size % 188 != 0:
+ size = size // 188 * 188
+ output.seek(size)
+ output.truncate(size)
+ print '\n'
+
+def fetch_encryption_key(video):
+ if hasattr(video, 'key'):
+ assert video.key.method == 'AES-128'
+ video.key.key_value = urllib2.urlopen(url = video.key.uri).read()
+ else:
+ assert video.keys[0].method == 'AES-128'
+ video.keys[0].key_value = urllib2.urlopen(url = video.keys[0].uri).read()
+
+def find_best_video(uri):
+ playlist = m3u8.load(uri)
+ if not playlist.is_variant:
+ return playlist
+ best_stream = playlist.playlists[0]
+ for stream in playlist.playlists:
+ if stream.stream_info.bandwidth == 'max' or stream.stream_info.bandwidth > best_stream.stream_info.bandwidth:
+ best_stream = stream
+ return find_best_video(best_stream.absolute_uri)
+
+def video_hls(uri, output):
+ video = find_best_video(uri)
+ fetch_encryption_key(video)
+ fetch_streams(output, video)
diff --git a/crunchy-xml-decoder/login.py b/crunchy-xml-decoder/login.py
index 94edd86..6a04660 100644
--- a/crunchy-xml-decoder/login.py
+++ b/crunchy-xml-decoder/login.py
@@ -55,6 +55,11 @@ def login(username, password):
c.expires = 9999999999 # Saturday, November 20th 2286, 17:46:39 (GMT)
del session.cookies['c_visitor']
+ #try:
+ #session.cookies['usa_sess_id'] = re.split('"',requests.get('https://cr.onestay.moe/getid').text)[5]
+ #except:
+ #sleep(10) # sleep so we don't overload crunblocker
+ #session.cookies['usa_sess_id'] = re.split('"',requests.get('https://rubbix.net/crunchyroll/').text)[5]
userstatus = getuserstatus(session)
if username != '' and userstatus[1] == 'Guest':
diff --git a/crunchy-xml-decoder/ultimate.py b/crunchy-xml-decoder/ultimate.py
index bbd8327..cc07c5b 100644
--- a/crunchy-xml-decoder/ultimate.py
+++ b/crunchy-xml-decoder/ultimate.py
@@ -75,6 +75,9 @@ def subtitles(eptitle):
xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
xmllist = unidecode(xmllist).replace('><', '>\n<')
+ #xmllist = open('xmllist.xml', 'wb')
+ #xmllist.write(xmllist.encode('utf-8'))
+ #xmllist.close()
global hardcoded
if 'None' in xmllist:
@@ -218,12 +221,17 @@ def ultimate(page_url, seasonnum, epnum):
#lang1, lang2 = altfuncs.config()
#lang1, lang2, forcesub = altfuncs.config()
- lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
+ lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub, connection_n_ = altfuncs.config()
#player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)
-
+ #htmlfile = open('html.html', 'wb')
+ #htmlfile.write(html.encode('utf-8'))
+ #htmlfile.close()
#h = HTMLParser.HTMLParser()
title = re.findall('(.+?)', html)[0].replace('Crunchyroll - Watch ', '')
+ #print os.path.join('export', '{}.flv'.format(title))
+ print format('Now Downloading - '+title.encode('utf-8'))
+ #print title
if len(os.path.join('export', title+'.flv')) > 255:
title = re.findall('^(.+?) \- ', title)[0]
@@ -245,7 +253,9 @@ def ultimate(page_url, seasonnum, epnum):
media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')
-
+ #xmlfile = open('xmlfile.xml', 'wb')
+ #xmlfile.write(xmlconfig.encode('utf-8'))
+ #xmlfile.close()
try:
if '4' in xmlconfig.find_all('code')[0]:
print xmlconfig.find_all('msg')[0].text
@@ -290,7 +300,7 @@ def ultimate(page_url, seasonnum, epnum):
video_input = os.path.join("export", title + '.flv')
else:
video_input = os.path.join("export", title + '.ts')
- video_hls(filen, video_input)
+ video_hls(filen, video_input, connection_n_)
heightp = '360p' if xmlconfig.height.string == '368' else '{0}p'.format(xmlconfig.height.string) # This is less likely to fail
subtitles(title)