26 changes: 17 additions & 9 deletions crunchy-xml-decoder.py
@@ -13,26 +13,34 @@
import altfuncs
import re, urllib2
from collections import deque

import cfscrape
from bs4 import BeautifulSoup
import time

#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(autocatch)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
def autocatch():
print 'indicate the url : '
url=raw_input()
mykey = urllib2.urlopen(url)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Connection': 'keep-alive'}
session = cfscrape.create_scraper()
mykey = session.get(url, headers=headers)
take = open("queue_.txt", "w")

for text in mykey:
match = re.search('<a href="/(.+?)" title=', text)
if match:
print >> take, 'http://www.crunchyroll.com/'+match.group(1)

soup = BeautifulSoup(mykey.content, 'html.parser')

# queue every episode link on the page; skip anchors without an href
for link in soup.find_all('a'):
    href = link.get('href')
    if href and "episode" in href:
        print href
        print >> take, 'http://www.crunchyroll.com' + href

take.close()

with open('queue_.txt') as f, open('queue.txt', 'w') as fout:
fout.writelines(reversed(f.readlines()))
os.remove('queue_.txt')
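
For reference, the new autocatch flow condenses to the sketch below (a minimal sketch only, assuming cfscrape and beautifulsoup4 are installed; the 'episode' substring filter and URL prefix mirror the code above):

import cfscrape
from bs4 import BeautifulSoup

def scrape_episode_links(url):
    # cfscrape solves the Cloudflare JS challenge, then behaves like a requests session
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
               'Connection': 'keep-alive'}
    session = cfscrape.create_scraper()
    soup = BeautifulSoup(session.get(url, headers=headers).content, 'html.parser')
    links = []
    for a in soup.find_all('a'):
        href = a.get('href')
        if href and 'episode' in href:
            links.append('http://www.crunchyroll.com' + href)
    return links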


#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#(CHECKING)#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#-#
if not os.path.exists("export"):
os.makedirs("export")
@@ -51,7 +59,7 @@ def defaultsettings(vvquality, vlang1, vlang2, vforcesub, vforceusa, vlocalizeco
# We're not miracle workers.
video_quality = '''+vvquality+'''
# Set this to the desired subtitle language. If the subtitles aren't available in that language, it reverts to the second language option (below).
# Available languages: English, Espanol, Espanol_Espana, Francais, Portugues, Turkce, Italiano, Arabic, Deutsch, Russian
# Available languages: English, Espanol, Espanol_Espana, Francais, Portugues, Turkce, Italiano, Arabic, Deutsch
language = '''+vlang1+'''
# If the first language isn't available, which language would you like as a backup? If neither is found, it falls back to English by default
language2 = '''+vlang2+'''
38 changes: 22 additions & 16 deletions crunchy-xml-decoder/altfuncs.py
@@ -6,6 +6,7 @@
import urlparse
from ConfigParser import ConfigParser
import pickle
import cfscrape

import requests

@@ -65,12 +66,12 @@ def gethtml(url):
session.cookies['c_locale']={u'Español (Espana)' : 'esES', u'Français (France)' : 'frFR', u'Português (Brasil)' : 'ptBR',
u'English' : 'enUS', u'Español' : 'esLA', u'Türkçe' : 'enUS', u'Italiano' : 'itIT',
u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
if forceusa:
try:
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
except:
sleep(10) # sleep so we don't overload crunblocker
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
parts = urlparse.urlsplit(url)
if not parts.scheme or not parts.netloc:
print 'Apparently not a URL'
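
The forceusa branch above retries the crunblocker lookup once after a ten-second back-off. A reusable version of that pattern might look like this (a sketch only; fetch_sess_id is a hypothetical helper, not part of this PR):

import requests
from time import sleep

def fetch_sess_id(retries=2, backoff=10):
    # try a few times, sleeping between attempts so we don't overload crunblocker
    for attempt in range(retries):
        try:
            return requests.get('http://www.crunblocker.com/sess_id.php', timeout=30).text
        except requests.RequestException:
            if attempt == retries - 1:
                raise
            sleep(backoff)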
@@ -94,23 +95,28 @@ def getxml(req, med_id):
payload = {'req': req, 'media_id': med_id, 'video_format': video_format, 'video_encode_quality': resolution}
with open('cookies') as f:
cookies = requests.utils.cookiejar_from_dict(pickle.load(f))
session = requests.session()
session.cookies = cookies
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Connection': 'keep-alive'}
session = cfscrape.create_scraper()
res_get = session.get('https://www.crunchyroll.com/login', headers=headers)
s = re.search('name="login_form\\[_token\\]" value="([^"]*)"', res_get.text)
if s is None:
print 'CSRF token not found'
sys.exit()
token = s.group(1)
session.cookies['c_d'] = cookies['c_d']
session.cookies['c_userid'] = cookies['c_userid']
session.cookies['c_userkey'] = cookies['c_userkey']
del session.cookies['c_visitor']
if not forceusa and localizecookies:
session.cookies['c_locale']={u'Español (Espana)' : 'esES', u'Français (France)' : 'frFR', u'Português (Brasil)' : 'ptBR',
u'English' : 'enUS', u'Español' : 'esLA', u'Türkçe' : 'enUS', u'Italiano' : 'itIT',
u'العربية' : 'arME' , u'Deutsch' : 'deDE'}[lang]
if forceusa:
try:
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
except:
sleep(10) # sleep so we don't overload crunblocker
session.cookies['sess_id'] = requests.get('http://www.crunblocker.com/sess_id.php').text
headers = {'Referer': 'http://static.ak.crunchyroll.com/versioned_assets/ChromelessPlayerApp.17821a0e.swf',
'Host': 'www.crunchyroll.com', 'Content-type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0)'}
res = session.post(url, params=payload, headers=headers)
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:26.0) Gecko/20100101 Firefox/26.0'}
payload['login_form[_token]'] = token  # the CSRF token is a form field, not an HTTP header
res = session.post(url, data=payload, headers=headers)
res.encoding = 'UTF-8'
return res.text
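
The functional core of this hunk is the switch from params=payload to data=payload: requests puts params in the query string, while data is form-encoded into the POST body, which is where the RPC arguments and CSRF token belong. A quick illustration (example.com is a placeholder):

import requests

query = requests.Request('POST', 'http://example.com/rpc', params={'req': 'x'}).prepare()
print query.url   # http://example.com/rpc?req=x  (payload leaked into the URL)
print query.body  # None

form = requests.Request('POST', 'http://example.com/rpc', data={'req': 'x'}).prepare()
print form.url    # http://example.com/rpc
print form.body   # req=x  (payload sent as a form body)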

44 changes: 39 additions & 5 deletions crunchy-xml-decoder/decode.py
@@ -39,38 +39,72 @@ def decode(page_url):
if page_url == '':
page_url = raw_input('Please enter Crunchyroll video URL:\n')

try:
int(page_url)
page_url = 'http://www.crunchyroll.com/media-' + page_url
except ValueError:
if not page_url.startswith('http://') and not page_url.startswith('https://'):
page_url = 'http://' + page_url
try:
int(page_url[-6:])
except ValueError:
if seasonnum and epnum:
    page_url = altfuncs.vidurl(page_url, seasonnum, epnum)
elif epnum:
    page_url = altfuncs.vidurl(page_url, 1, epnum)
else:
    page_url = altfuncs.vidurl(page_url, False, False)



# ----------

lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
#player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)

#h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.path.join('export', title+'.ass')) > 255:
title = re.findall('^(.+?) \- ', title)[0]
# title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
# if len(os.path.join('export', title+'.ass')) > 255:
# title = re.findall('^(.+?) \- ', title)[0]

### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings ###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...'}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
# title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

### End stolen code ###
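
The borrowed snippet compiles one alternation regex from the escaped dict keys, so every substitution happens in a single pass over the title. Usage in isolation:

import re

rep = {' / ': ' - ', ':': '-', '?': '.'}
rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile('|'.join(rep.keys()))
print pattern.sub(lambda m: rep[re.escape(m.group(0))], 'Show: Episode? 1 / 2')
# -> 'Show- Episode. 1 - 2'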

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')


try:
if '4' in xmlconfig.find_all('code')[0].text:
print xmlconfig.find_all('msg')[0].text
sys.exit()
except IndexError:
pass


xmllist = altfuncs.getxml('RpcApiSubtitle_GetListing', media_id)
xmllist = unidecode(xmllist).replace('><', '>\n<')


#modif
title = xmlconfig.find('series_title').string
title = pattern.sub(lambda m: rep[re.escape(m.group(0))], title)
if int(xmlconfig.find('episode_number').string) < 10:
title = unidecode(title + ' - 0' + xmlconfig.find('episode_number').string)
else:
title = unidecode(title + ' - ' + xmlconfig.find('episode_number').string)
if len(os.path.join('export', title+'.ass')) > 255:
title = re.findall('^(.+?) \- ', title)[0]
#modif
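
The zero-padding in the #modif block can also be written with zfill, which treats single- and double-digit numbers uniformly (an equivalent sketch, not a drop-in change; it assumes episode_number is a plain integer string):

from unidecode import unidecode

def episode_title(series, episode_number):
    # zfill pads '7' to '07' but leaves '12' untouched, matching the if/else above
    return unidecode('%s - %s' % (series, episode_number.zfill(2)))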



if '<media_id>None</media_id>' in xmllist:
print 'The video has hardcoded subtitles.'
@@ -124,7 +158,7 @@ def decode(page_url):
if formattedsubs is None:
continue
#subfile = open(eptitle + '.ass', 'wb')
subfile = open(os.path.join('export', title+'['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
subfile = open(os.path.join('export', title+' ['+sub_id3.pop(0)+']'+sub_id4.pop(0)+'.ass'), 'wb')
subfile.write(formattedsubs.encode('utf-8-sig'))
subfile.close()
#shutil.move(title + '.ass', os.path.join(os.getcwd(), 'export', ''))
14 changes: 13 additions & 1 deletion crunchy-xml-decoder/functtest.py
@@ -92,12 +92,24 @@ def unzip_(filename_,out):
except ImportError:
print 'Installing Cfscrape...',
try:
if python_bit_=="32 bit" or python_bit_=="64 bit":
#if python_bit_=="32 bit" or python_bit_=="64 bit":
pip.main(['install', '--quiet', 'cfscrape'])
print "Installed"
except KeyError:
print "Something Has Gone Wrong While Retrieving Cfscrape link\nPlease Download Cfscrape Manually"

try:
import m3u8
print 'm3u8 installed'
except ImportError:
print 'Installing m3u8...',
try:
#if python_bit_=="32 bit" or python_bit_=="64 bit":
pip.main(['install', '--quiet', 'm3u8'])
print "Installed"
except KeyError:
print "Something Has Gone Wrong While Retrieving m3u8 link\nPlease Download m3u8 Manually"

if Crypto_link_ or lxml_link_:
if not os.path.exists("temp"):
os.makedirs("temp")
16 changes: 13 additions & 3 deletions crunchy-xml-decoder/login.py
@@ -33,8 +33,12 @@ def login(username, password):
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; WOW64; rv:34.0) Gecko/20100101 Firefox/34.0',
'Connection': 'keep-alive'}
session = cfscrape.create_scraper()

res_get = session.get('https://www.crunchyroll.com/login', headers=headers)


cookies = res_get.cookies
cookies.set('c_locale', 'enUS', domain='.crunchyroll.com', path='/')

s = re.search('name="login_form\\[_token\\]" value="([^"]*)"', res_get.text)
if s is None:
print 'CSRF token not found'
@@ -46,14 +50,20 @@
'login_form[password]': password,
'login_form[_token]': token}

res_post = session.post('https://www.crunchyroll.com/login', data=payload, headers=headers, allow_redirects = False)
res_post = session.post('https://www.crunchyroll.com/login', data=payload, headers=headers, cookies=cookies, allow_redirects = False)

session.cookies.set('token', token, domain='.crunchyroll.com', path='/')
#print token, session.cookies  # debug output, disabled
if not (res_post.status_code == 302 or (res_post.status_code == 200 and username == '')):
print 'Login failed'
sys.exit()

for c in session.cookies:
c.expires = 9999999999 # Saturday, November 20th 2286, 17:46:39 (GMT)

del session.cookies['c_visitor']

userstatus = getuserstatus(session)
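
Once the expiries are pinned, the jar can be persisted the same way getxml later reloads it (a sketch, assuming the pickle-based 'cookies' file already used elsewhere in this repo):

import pickle
import requests

def save_cookies(session, path='cookies'):
    # mirror of the loading side: requests.utils.cookiejar_from_dict(pickle.load(f))
    with open(path, 'w') as f:
        pickle.dump(requests.utils.dict_from_cookiejar(session.cookies), f)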
38 changes: 24 additions & 14 deletions crunchy-xml-decoder/ultimate.py
@@ -29,7 +29,7 @@ def video():
cmd = [os.path.join('video-engine', 'rtmpdump.exe'),
'-r', url1, '-a', url2,
'-f', 'WIN 11,8,800,50',
'-m', '15',
'-m', '600',
'-W', 'http://www.crunchyroll.com/vendor/ChromelessPlayerApp-c0d121b.swf',
'-p', page_url2,
'-y', filen,
@@ -109,7 +109,6 @@ def subtitles(eptitle):
lang = lang2
except IndexError:
lang = 'English'
sub_id3 = [word.replace('[Russkii]','rus') for word in sub_id3]
sub_id3 = [word.replace('[English (US)]','eng') for word in sub_id3]
sub_id3 = [word.replace('[Deutsch]','deu') for word in sub_id3]
sub_id3 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id3]
@@ -118,15 +117,16 @@
sub_id3 = [word.replace('[Espanol]','spa') for word in sub_id3]
sub_id3 = [word.replace('[Italiano]','ita') for word in sub_id3]
sub_id3 = [word.replace('[l`rby@]','ara') for word in sub_id3]
sub_id3 = [word.replace('[Russkii]','rus') for word in sub_id3]
# sub_id4 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id4]
sub_id4 = [word.replace('[l`rby@]',u'[Arabic]') for word in sub_id4]
sub_id5 = [word.replace('[Russkii]','rus') for word in sub_id5]
sub_id5 = [word.replace('[English (US)]','eng') for word in sub_id5]
sub_id5 = [word.replace('[Deutsch]','deu') for word in sub_id5]
sub_id5 = [word.replace('[Portugues (Brasil)]','por') for word in sub_id5]
sub_id5 = [word.replace('[Francais (France)]','fre') for word in sub_id5]
sub_id5 = [word.replace('[Espanol (Espana)]','spa_spa') for word in sub_id5]
sub_id5 = [word.replace('[Espanol]','spa') for word in sub_id5]
sub_id5 = [word.replace('[Russkii]','rus') for word in sub_id5]
sub_id5 = [word.replace('[Italiano]','ita') for word in sub_id5]
sub_id5 = [word.replace('[l`rby@]','ara') for word in sub_id5]
# sub_id6 = [word.replace('[l`rby@]',u'[العربية]') for word in sub_id6]
@@ -157,7 +157,7 @@ def subtitles(eptitle):

sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
u'العربية': 'ara', u'Deutsch': 'deu', u'Russian': 'rus'}[lang]
u'العربية': 'ara', u'[Russkii]': 'rus', u'Deutsch': 'deu'}[lang]

for i in sub_id2:
sublangc = sub_id3.pop(0)
@@ -186,7 +186,7 @@ def ultimate(page_url, seasonnum, epnum):
---- Start New Export ----
--------------------------

CrunchyRoll Downloader Toolkit DX v0.98b
CrunchyRoll Downloader Toolkit DX v0.98d

Crunchyroll hasn't changed anything.

@@ -222,22 +222,22 @@ def ultimate(page_url, seasonnum, epnum):
#lang1, lang2, forcesub = altfuncs.config()
lang1, lang2, forcesub, forceusa, localizecookies, vquality, onlymainsub = altfuncs.config()
#player_revision = altfuncs.playerrev(page_url)
html = altfuncs.gethtml(page_url)
#html = altfuncs.gethtml(page_url)

#h = HTMLParser.HTMLParser()
title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
if len(os.path.join('export', title+'.flv')) > 255:
title = re.findall('^(.+?) \- ', title)[0]
#title = re.findall('<title>(.+?)</title>', html)[0].replace('Crunchyroll - Watch ', '')
#if len(os.path.join('export', title+'.flv')) > 255:
# title = re.findall('^(.+?) \- ', title)[0]

# title = h.unescape(unidecode(title)).replace('/', ' - ').replace(':', '-').
# replace('?', '.').replace('"', "''").replace('|', '-').replace('&quot;',"''").strip()

### Taken from http://stackoverflow.com/questions/6116978/python-replace-multiple-strings and improved to include the backslash###
rep = {' / ': ' - ', '/': ' - ', ':': '-', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...', ' \ ': ' - '}
rep = {' / ': ' - ', '/': ' - ', ':': '', '?': '.', '"': "''", '|': '-', '&quot;': "''", 'a*G':'a G', '*': '#', u'\u2026': '...', ' \ ': ' - '}

rep = dict((re.escape(k), v) for k, v in rep.iteritems())
pattern = re.compile("|".join(rep.keys()))
title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))
#title = unidecode(pattern.sub(lambda m: rep[re.escape(m.group(0))], title))

### End stolen code ###

@@ -247,7 +247,11 @@ def ultimate(page_url, seasonnum, epnum):

media_id = page_url[-6:]
xmlconfig = BeautifulSoup(altfuncs.getxml('RpcApiVideoPlayer_GetStandardConfig', media_id), 'xml')

#print xmlconfig
if "banned" in xmlconfig:
print xmlconfig
print "You are Banned"
sys.exit()
try:
if '4' in xmlconfig.find_all('code')[0].text:
print xmlconfig.find_all('msg')[0].text
@@ -256,6 +260,11 @@ def ultimate(page_url, seasonnum, epnum):
pass

vid_id = xmlconfig.find('media_id').string
title = xmlconfig.find('series_title').string
title = pattern.sub(lambda m: rep[re.escape(m.group(0))], title)
title = unidecode(title + ' - S01E' + xmlconfig.find('episode_number').string)
exclusive = xmlconfig.find('exclusive')
if exclusive and exclusive.string:  # tag may be missing on some titles
    print exclusive.string.replace('Stream', 'Download')
print title

# ----------

@@ -299,7 +308,7 @@ def ultimate(page_url, seasonnum, epnum):

print 'Starting mkv merge'
mkvmerge = os.path.join("video-engine", "mkvmerge.exe")
filename_output = os.path.join("export", title + '[' + heightp.strip() +'].mkv')
filename_output = os.path.join("export", title + '.mkv')
subtitle_input = []
if os.path.isfile(mkvmerge):
with_wine = os.name != 'nt'
@@ -312,7 +321,8 @@ def ultimate(page_url, seasonnum, epnum):
if not hardcoded:
sublang = {u'Español (Espana)': 'spa_spa', u'Français (France)': 'fre', u'Português (Brasil)': 'por',
u'English': 'eng', u'Español': 'spa', u'Türkçe': 'tur', u'Italiano': 'ita',
u'العربية': 'ara', u'Deutsch': 'deu'}[lang]
u'العربية': 'ara', u'[Russkii]': 'rus', u'Deutsch': 'deu'}[lang]

for i in sub_id2:
sublangc = sub_id5.pop(0)
sublangn = sub_id6.pop(0)
Expand Down
Binary file modified video-engine/mkvmerge.exe
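
For context, the language codes assembled above become per-track flags on the mkvmerge command line, roughly as follows (a sketch, assuming the bundled video-engine mkvmerge.exe; subs is a list of (path, code) pairs such as [('ep.ass', 'eng')]):

import os
import subprocess

def mux(video, subs, output):
    cmd = [os.path.join('video-engine', 'mkvmerge.exe'), '-o', output, video]
    for path, code in subs:
        cmd += ['--language', '0:%s' % code, path]  # tag track 0 of each .ass input
    return subprocess.call(cmd)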