Openload vuelve a fallar
Publicado: 26 Jun 2016, 00:04
Hola, buenas: me vuelven a salir las famosas palomitas en openload; si es posible, necesito de vuestra ayuda. Gracias.
Soporte oficial de pelisalacarta y tvalacarta - Ayuda con tu media center
https://www.mimediacenter.info/foro/
Código: Seleccionar todo
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    # Resolve the downloadable media URL for an openload page.
    # Returns a list of [label, url] pairs (pelisalacarta connector contract);
    # premium/user/password/video_password are unused here but part of the
    # shared connector signature.
    logger.info("[openload.py] url=" + page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url)
    # All AAEncoded ("aaencode") script bodies embedded in the page.
    text_encodes = scrapertools.find_multiple_matches(data,">(゚ω゚ノ=.*?)</")
    # The first encoded script decodes to an expression "(i)" whose value is
    # the index of the script that actually carries the stream URL.
    # NOTE(review): eval runs on scraped text — presumably a small integer
    # expression; confirm it cannot carry arbitrary code.
    text_decode = aadecode(
        text_encodes[
            eval(
                scrapertools.find_single_match(
                    aadecode(text_encodes[0]),
                    "\(([^\)]+)\)"
                )
            )
        ]
    )
    # The decoded text contains a redirector URL ending in "...true".
    pre_media_url = scrapertools.find_single_match(text_decode, "(http.*?true)")
    # Follow the redirect: the Location header carries the final media URL.
    media_url = scrapertools.get_header_from_response(pre_media_url, header_to_get="location")
    # Label = last 4 chars of the filename (the extension) + server tag.
    video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + " [openload]", media_url ] )
    return video_urls
Código: Seleccionar todo
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector for openload.co
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import logger
from core import scrapertools
# Default request headers (desktop Firefox 46 User-Agent).
# NOTE(review): not referenced by the functions visible in this file.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'}
def test_video_exists(page_url):
    """Check whether the openload file is still available.

    Returns a (exists, message) pair; message is empty when the file exists.
    """
    logger.info("pelisalacarta.servers.openload test_video_exists(page_url='%s')" % page_url)
    page = scrapertools.downloadpageWithoutCookies(page_url)
    missing = 'We are sorry!' in page
    if missing:
        return False, "[Openload] El archivo no existe o ha sido borrado"
    return True, ""
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the downloadable media URL for an openload page.

    Returns a list of [label, url] pairs (pelisalacarta connector contract).

    Fix: the original body returned after the first resolution attempt and was
    followed by an unreachable second implementation that referenced undefined
    names (header_down, subtitle); that dead code is removed.  The reachable
    path called aadecode(), which is not defined in this file — it now uses
    the local decode() helper, which performs the same AAdecode plus the
    toString post-processing.
    """
    logger.info("[openload.py] url=" + page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url)
    # Every AAEncoded script blob embedded in the page.
    text_encodes = scrapertools.find_multiple_matches(data, ">(゚ω゚ノ=.*?)</")
    # The first blob decodes to an expression "(i)" selecting the blob that
    # really carries the stream URL.
    index_expr = scrapertools.find_single_match(decode(text_encodes[0]), "\(([^\)]+)\)")
    # NOTE(review): eval of scraped text — presumably a small integer
    # expression; confirm it cannot carry arbitrary code.
    text_decode = decode(text_encodes[eval(index_expr)])
    # Redirector URL ends in "...true"; its Location header is the media URL.
    pre_media_url = scrapertools.find_single_match(text_decode, "(http.*?true)")
    media_url = scrapertools.get_header_from_response(pre_media_url, header_to_get="location")
    # Label = last 4 chars of the filename (the extension) + server tag.
    video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [openload]", media_url])
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(text):
    """Find openload embed/file links in *text*, one entry per unique id."""
    encontrados = set()
    devuelve = []
    patronvideos = '//(?:www.)?openload.../(?:embed|f)/([0-9a-zA-Z-_]+)'
    logger.info("pelisalacarta.servers.openload find_videos #" + patronvideos + "#")
    for media_id in re.findall(patronvideos, text, re.DOTALL):
        url = 'https://openload.co/embed/%s/' % media_id
        if url in encontrados:
            logger.info(" url duplicada=" + url)
            continue
        logger.info(" url=" + url)
        devuelve.append(["[Openload]", url, 'openload'])
        encontrados.add(url)
    return devuelve
def decode(text):
    # Decode an AAEncoded ("aaencode") JavaScript blob back into plain text.
    # Each character of the payload is an octal code built from kaomoji
    # arithmetic: the replace chain reduces the kaomoji to digits, the eval
    # loops fold the remaining small arithmetic groups, and the final join
    # reads the octal codes.  Also resolves "N.toString(base)" obfuscation
    # left in the decoded JS, via toString().
    text = re.sub(r"\s+", "", text)
    # Payload begins after the first marker; per-character expressions are
    # separated by the second marker.
    data = text.split("+(゚Д゚)[゚o゚]")[1]
    chars = data.split("+(゚Д゚)[゚ε゚]+")[1:]
    txt = ""
    for char in chars:
        # NOTE: replacement order matters — e.g. "c" is rewritten to "0"
        # before "(゚Д゚)['0']" is turned back into a literal "c".
        char = char \
            .replace("(o゚ー゚o)","u") \
            .replace("c", "0") \
            .replace("(゚Д゚)['0']", "c") \
            .replace("゚Θ゚", "1") \
            .replace("!+[]", "1") \
            .replace("-~", "1+") \
            .replace("o", "3") \
            .replace("_", "3") \
            .replace("゚ー゚", "4") \
            .replace("(+", "(")
        # Drop redundant parentheses around single digits.
        char = re.sub(r'\((\d)\)', r'\1', char)
        # Fold small arithmetic groups — (a+b), (a^b^c), (a+b+c), (a-b) —
        # into their numeric value until only digits remain.
        for x in scrapertools.find_multiple_matches(char,'(\(\d\+\d\))'):
            char = char.replace( x, str(eval(x)) )
        for x in scrapertools.find_multiple_matches(char,'(\(\d\^\d\^\d\))'):
            char = char.replace( x, str(eval(x)) )
        for x in scrapertools.find_multiple_matches(char,'(\(\d\+\d\+\d\))'):
            char = char.replace( x, str(eval(x)) )
        for x in scrapertools.find_multiple_matches(char,'(\(\d\+\d\))'):
            char = char.replace( x, str(eval(x)) )
        for x in scrapertools.find_multiple_matches(char,'(\(\d\-\d\))'):
            char = char.replace( x, str(eval(x)) )
        # Chunks still containing 'u' are markers, not character codes.
        if 'u' not in char: txt+= char + "|"
    # Drop trailing separator and leftover '+' signs; chunks are octal codes.
    txt = txt[:-1].replace('+','')
    txt_result = "".join([ chr(int(n, 8)) for n in txt.split('|') ])
    # --- resolve "N.toString(base)" patterns left in the decoded JS ---
    sum_base = ""
    m3 = False
    if ".toString(" in txt_result:
        if "+(" in txt_result:
            # Variant "(base,number)" with a constant offset added to the base.
            m3 = True
            sum_base = "+"+scrapertools.find_single_match(txt_result,".toString...(\d+).")
            txt_pre_temp = scrapertools.find_multiple_matches(txt_result,"..(\d),(\d+).")
            # Matches come out as (base, number); swap to (number, base).
            txt_temp = [ (n, b) for b ,n in txt_pre_temp ]
        else:
            # Variant "number.0.toString(base)".
            txt_temp = scrapertools.find_multiple_matches(txt_result, '(\d+)\.0.\w+.([^\)]+).')
        for numero, base in txt_temp:
            # Convert the number to the (possibly offset) base.
            code = toString( int(numero), eval(base+sum_base) )
            if m3:
                txt_result = re.sub( r'"|\+', '', txt_result.replace("("+base+","+numero+")", code) )
            else:
                txt_result = re.sub( r"'|\+", '', txt_result.replace(numero+".0.toString("+base+")", code) )
    return txt_result
def toString(number, base):
    """Render a non-negative *number* in *base* (2-36) using 0-9a-z digits."""
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    out = ""
    while number >= base:
        out = digits[number % base] + out
        number //= base
    return digits[number] + out
Código: Seleccionar todo
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector for openload.co
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core import logger
from core import scrapertools
from lib.aadecode import decode as aadecode
# Default request headers (desktop Firefox 46 User-Agent).
# NOTE(review): not referenced by the functions visible in this file.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'}
def test_video_exists(page_url):
    """Return (True, "") if the openload file exists, else (False, message)."""
    logger.info("pelisalacarta.servers.openload test_video_exists(page_url='%s')" % page_url)
    response = scrapertools.downloadpageWithoutCookies(page_url)
    if 'We are sorry!' not in response:
        return True, ""
    return False, "[Openload] El archivo no existe o ha sido borrado"
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    """Resolve the final media URL for an openload embed page.

    Returns a list of [label, url] entries (pelisalacarta connector contract).
    """
    logger.info("[openload.py] url=" + page_url)
    video_urls = []
    data = scrapertools.cache_page(page_url)
    # Every AAEncoded script body found in the page.
    text_encodes = scrapertools.find_multiple_matches(data, ">(゚ω゚ノ=.*?)</")
    # The first blob decodes to an expression "(i)": the index of the blob
    # that really contains the stream URL.
    index_expr = scrapertools.find_single_match(aadecode(text_encodes[0]), "\(([^\)]+)\)")
    # NOTE(review): eval of scraped text — assumed to be a small integer
    # expression; confirm it cannot carry arbitrary code.
    selected = text_encodes[eval(index_expr)]
    text_decode = aadecode(selected)
    # The redirector URL ends in "...true"; its Location header points at the
    # actual media file.
    pre_media_url = scrapertools.find_single_match(text_decode, "(http.*?true)")
    media_url = scrapertools.get_header_from_response(pre_media_url, header_to_get="location")
    extension = scrapertools.get_filename_from_url(media_url)[-4:]
    video_urls.append([extension + " [openload]", media_url])
    return video_urls
# Encuentra vídeos del servidor en el texto pasado
def find_videos(text):
    """Scan *text* for openload links; return [titulo, url, server] entries."""
    seen = set()
    results = []
    patronvideos = '//(?:www.)?openload.../(?:embed|f)/([0-9a-zA-Z-_]+)'
    logger.info("pelisalacarta.servers.openload find_videos #" + patronvideos + "#")
    pattern = re.compile(patronvideos, re.DOTALL)
    for media_id in pattern.findall(text):
        url = 'https://openload.co/embed/%s/' % media_id
        if url not in seen:
            logger.info(" url=" + url)
            results.append(["[Openload]", url, 'openload'])
            seen.add(url)
        else:
            logger.info(" url duplicada=" + url)
    return results
Código: Seleccionar todo
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Lib AADecode JS and scraper of the possible cases in the returns decoded.
# File: /lib/aadecode.py
# Use:
# from aadecode import decode as aadecode
# text_decode = aadecode(text_encode)
# @robalo & @Cmos
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------
import re
from core.scrapertools import *
def decode(text):
    # Decode an AAEncoded ("aaencode") JavaScript blob into plain text, then
    # post-process JS "toString(base)" obfuscation via toStringCases().
    # Strip whitespace and /* ... */ comments first.
    text = re.sub(r"\s+|/\*.*?\*/", "", text)
    # Payload begins after the first marker; per-character expressions are
    # separated by the second marker.
    data = text.split("+(゚Д゚)[゚o゚]")[1]
    chars = data.split("+(゚Д゚)[゚ε゚]+")[1:]
    txt = ""
    for char in chars:
        # NOTE: replacement order matters — "c" becomes "0" before
        # "(゚Д゚)['0']" is turned back into a literal "c".
        char = char \
            .replace("(o゚ー゚o)","u") \
            .replace("c", "0") \
            .replace("(゚Д゚)['0']", "c") \
            .replace("゚Θ゚", "1") \
            .replace("!+[]", "1") \
            .replace("-~", "1+") \
            .replace("o", "3") \
            .replace("_", "3") \
            .replace("゚ー゚", "4") \
            .replace("(+", "(")
        # Drop redundant parentheses around single digits.
        char = re.sub(r'\((\d)\)', r'\1', char)
        # Greedy left-to-right reduction: grow c one character at a time and
        # eval it as soon as it parses, appending the numeric result.
        # NOTE(review): eval + bare except on scraped data — best-effort
        # parser; anything unparsable is silently skipped.
        c = ""; subchar = ""
        for v in char:
            c+= v
            try: x = c; subchar+= str(eval(x)); c = ""
            except: pass
        if subchar != '': txt+= subchar + "|"
    # Drop trailing separator and leftover '+' signs; chunks are '|'-separated
    # octal character codes.
    txt = txt[:-1].replace('+','')
    txt_result = "".join([ chr(int(n, 8)) for n in txt.split('|') ])
    return toStringCases(txt_result)
def toStringCases(txt_result):
    # Resolve JavaScript "N.toString(base)" obfuscation in decoded text.
    # Two shapes are handled:
    #   m3 variant: "(base,number)" pairs plus a constant offset added to
    #               every base;
    #   plain:      "number.0.toString(base)".
    sum_base = ""
    m3 = False
    if ".toString(" in txt_result:
        if "+(" in txt_result:
            m3 = True
            # Constant added to every base in the m3 variant.
            sum_base = "+"+find_single_match(txt_result,".toString...(\d+).")
            txt_pre_temp = find_multiple_matches(txt_result,"..(\d),(\d+).")
            # Matches come out as (base, number); swap to (number, base).
            txt_temp = [ (n, b) for b ,n in txt_pre_temp ]
        else:
            txt_temp = find_multiple_matches(txt_result, '(\d+)\.0.\w+.([^\)]+).')
        for numero, base in txt_temp:
            # Convert the number to the (possibly offset) base, then splice
            # the digits back in place of the obfuscated expression.
            code = toString( int(numero), eval(base+sum_base) )
            if m3:
                txt_result = re.sub( r'"|\+', '', txt_result.replace("("+base+","+numero+")", code) )
            else:
                txt_result = re.sub( r"'|\+", '', txt_result.replace(numero+".0.toString("+base+")", code) )
    return txt_result
def toString(number, base):
    """Return *number* written in *base*, using digits 0-9a-z (like JS)."""
    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
    if number < base:
        return alphabet[number]
    quotient, remainder = divmod(number, base)
    return toString(quotient, base) + alphabet[remainder]