# ---- File: nowvideo.py ----
# (forum-paste artifact removed: "Código: Seleccionar todo" = "Code: Select all")
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Conector para nowvideo
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
def test_video_exists( page_url ):
    """Return (exists, message) for the video hosted at page_url.

    The second element is a user-facing (Spanish) message explaining
    why the video is not available; it is empty when the video exists.
    """
    logger.info("[nowvideo.py] test_video_exists(page_url='%s')" % page_url)
    html = scrapertools.cache_page(page_url)
    # nowvideo shows this marker text while an upload is still being processed.
    if "The file is being converted" not in html:
        return True,""
    return False,"El fichero está en proceso"
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    """Resolve the playable URL(s) for a nowvideo video page.

    Returns a list of [label, url] pairs (pelisalacarta server convention).
    premium/user/password/video_password are accepted for interface
    compatibility but not used by this server.
    """
    logger.info("[nowvideo.py] get_video_url(page_url='%s')" % page_url)

    html = scrapertools.cache_page( page_url )
    # The player page exposes flashvars such as:
    #   flashvars.domain="http://www.nowvideo.eu";
    #   flashvars.file="3695bce6e6288";
    #   flashvars.filekey="83.44.253.73-64a25e17853b4b19586841e04b0d9382";
    #   flashvars.cid="1";
    domain = scrapertools.get_match(html,'flashvars.domain="([^"]+)"')
    video_id = scrapertools.get_match(html,'flashvars.file="([^"]+)"')
    filekey = scrapertools.get_match(html,'flashvars.filekey="([^"]+)"')
    cid = scrapertools.get_match(html,'flashvars.cid="([^"]+)"')

    # The key travels with '.' and '-' percent-encoded.
    escaped_key = filekey.replace(".","%2E").replace("-","%2D")
    api_url = "%s/api/player.api.php?file=%s&user=undefined&codes=%s&pass=undefined&key=%s" % (domain, video_id, cid, escaped_key)

    api_data = scrapertools.cache_page( api_url )
    logger.info("data="+api_data)
    # The API answers something like:
    #   url=http://fNN.nowvideo.eu/dl/<hash>/<ts>/<name>.flv&title=...&site_url=...
    location = scrapertools.get_match(api_data,'url=([^\&]+)&') + "?client=FLASH"

    video_urls = [ [ scrapertools.get_filename_from_url(location)[-4:] + " [nowvideo]", location ] ]
    for video_url in video_urls:
        logger.info("[nowvideo.py] %s - %s" % (video_url[0],video_url[1]))
    return video_urls
# Finds this server's videos inside the given text
def find_videos(data):
    """Scan raw HTML/text for nowvideo links and return found videos.

    Returns a list of [title, url, server] triples with 'nowvideo' as the
    server id.  Duplicate URLs are reported only once.  This replaces five
    copy-pasted pattern loops with one dedup helper plus a rule table;
    the regexes themselves are unchanged.
    """
    encontrados = set()
    devuelve = []

    def _agrega(titulo, url):
        # Register (title, url) once per distinct url; log duplicates.
        if url not in encontrados:
            logger.info(" url="+url)
            devuelve.append( [ titulo , url , 'nowvideo' ] )
            encontrados.add(url)
        else:
            logger.info(" url duplicada="+url)

    # 1) Full anchor tags: the title comes from the link text.
    # <a href="http://www.nowvideo.eu/video/3695bce6e6288" target="_blank">1° Tempo</a>
    patronvideos = '<a href="(http://www.nowvideo.eu/video/[a-z0-9]+)"[^>]+>([^<]+)</a>'
    logger.info("[nowvideo.py] find_videos #"+patronvideos+"#")
    for url, texto in re.compile(patronvideos,re.DOTALL).findall(data):
        _agrega(texto+" [nowvideo]", url)

    # 2) Bare / embed / redirector URLs: fixed "[nowvideo]" title; each rule
    #    is (regex, prefix) and the final URL is prefix + captured group.
    #    Examples matched:
    #      http://www.nowvideo.eu/video/3695bce6e6288
    #      http://www.player3k.info/nowvideo/?id=t1hkrf1bnf2ek
    #      http://embed.nowvideo.eu/embed.php?v=obkqt27q712s9&width=600&height=480
    #      http://embed.nowvideo.eu/embed.php?width=600&height=480&v=9fb588463b2c8
    reglas = [
        ('(nowvideo.co/video/[a-z0-9]+)', 'http://www2.'),
        ('(nowvideo.eu/video/[a-z0-9]+)', 'http://www.'),
        ('player3k.info/nowvideo/\?id\=([a-z0-9]+)', 'http://www.nowvideo.eu/video/'),
        ('nowvideo.eu/embed.php\?v\=([a-z0-9]+)', 'http://www.nowvideo.eu/video/'),
        ('nowvideo.eu/embed.php\?.+?v\=([a-z0-9]+)', 'http://www.nowvideo.eu/video/'),
    ]
    for patronvideos, prefijo in reglas:
        logger.info("[nowvideo.py] find_videos #"+patronvideos+"#")
        for match in re.compile(patronvideos,re.DOTALL).findall(data):
            _agrega("[nowvideo]", prefijo+match)

    return devuelve
# ---- File: italiafilm.py ----
# (forum-paste artifact removed: "Código: Seleccionar todo" = "Code: Select all")
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para italiafilm
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os,sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
# Channel metadata consumed by the pelisalacarta launcher.
__channel__ = "italiafilm"            # internal channel id
__category__ = "F,S,A"                # category flags -- presumably Film/Series/Anime; TODO confirm
__type__ = "generic"                  # generic channel: items driven by action= callbacks
__title__ = "Italia film (IT)"        # label shown in the UI
__language__ = "IT"
# DEBUG hard-coded to True; the commented-out call would read it from config.
DEBUG = True #config.get_setting("debug")
EVIDENCE = " "
def isGeneric():
    # Flag read by the launcher: this channel follows the generic item workflow.
    return True
def mainlist(item):
    """Root menu of the italiafilm channel.

    Returns the three top-level entries (latest movies, categories, search);
    each Item's action= names the function the launcher calls next.
    """
    # Fixed: the log tag said "[gnula.py]" -- a copy/paste leftover from
    # another channel; every other log line in this file uses "[italiafilm.py]".
    logger.info("[italiafilm.py] mainlist")
    itemlist = []
    itemlist.append( Item(channel=__channel__, title="Novità" , action="peliculas", url="http://italiafilm.tv/"))
    itemlist.append( Item(channel=__channel__, title="Categorie" , action="categorias", url="http://italiafilm.tv/"))
    itemlist.append( Item(channel=__channel__, title="Cerca Film", action="search"))
    return itemlist
def categorias(item):
    """List the film categories scraped from the page at item.url.

    Returns Items whose action='peliculas' so selecting a category lists
    its movies.  Removed a leftover debug statement (logger.error("io")).
    """
    logger.info("[italiafilm.py] categorias")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    # Restrict the regex search to the "Categorie Film" box of the page.
    data = scrapertools.get_match(data,"<h2>Categorie Film</h2>(.*?)</div>")
    patron = '<li class="[^"]+"><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for url,title in matches:
        scrapedtitle = title
        scrapedurl = urlparse.urljoin(item.url,url)
        scrapedplot = ""
        scrapedthumbnail = ""
        if DEBUG: logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        itemlist.append( Item(channel=__channel__, action='peliculas', title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    return itemlist
# Because this function is named "search", the launcher prompts for a query string and passes it as a parameter
def search(item,texto):
    """Search entry point: query the site for *texto* and list the results.

    Builds the site's search POST body into item.extra and delegates to
    peliculas().  Any failure is logged and an empty list returned so one
    broken channel cannot abort the launcher's global search.
    Changes: dropped the redundant local `import sys` (imported at module
    top) and an unused `itemlist` local; narrowed `except:` to
    `except Exception:` so Ctrl-C / SystemExit still propagate.
    """
    logger.info("[italiafilm.py] search "+texto)
    texto = texto.replace(" ","%20")
    item.url = "http://italiafilm.tv"
    item.extra = "do=search&subaction=search&story="+texto+"&x=0&y=0"
    try:
        return peliculas(item)
    except Exception:
        # Deliberate best-effort: log the exception info and report no results.
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []
def peliculas(item):
    """List the movies found at item.url (plus a next-page entry if any).

    When item.extra is non-empty it is sent as the POST body (the search
    function relies on this); otherwise a plain GET is issued.
    """
    logger.info("[italiafilm.py] peliculas")
    itemlist = []

    # Send item.extra as POST data only when the caller filled it in.
    post = item.extra if item.extra != "" else None
    data = scrapertools.cachePage(item.url,post)

    # Movie entries: link, title, thumbnail and short plot.
    patron = ('<a href="([^"]+)"><img class="news-item-image" title="([^"]+)" alt="[^"]+" src="([^"]+)"></a>[^<]+'
              '<span class="shortstoryintro">[^<]+'
              '<div id="news[^>]+>([^<]+)</div>')
    entradas = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(entradas)
    for enlace, titulo, miniatura, sinopsis in entradas:
        url_abs = urlparse.urljoin(item.url,enlace)
        thumb_abs = urlparse.urljoin(item.url,miniatura)
        if DEBUG:
            logger.info("title=["+titulo+"], url=["+url_abs+"], thumbnail=["+thumb_abs+"]")
        itemlist.append( Item(channel=__channel__, action='findvideos', title=titulo , url=url_abs , thumbnail=thumb_abs , plot=sinopsis , folder=True) )

    # "Avanti" (next page) link, re-entering this same function.
    patron = '<a href="([^"]+)"><span class="thide pnext">Avanti</span></a>'
    paginas = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(paginas)
    for enlace in paginas:
        url_abs = urlparse.urljoin(item.url,enlace)
        if DEBUG:
            logger.info("title=[>> Pagina seguente], url=["+url_abs+"], thumbnail=[]")
        itemlist.append( Item(channel=__channel__, action='peliculas', title=">> Pagina seguente" , url=url_abs , thumbnail="" , plot="" , folder=True) )

    return itemlist
def to_ita(text):
    """Normalize scraped Italian text to plain ASCII.

    Decodes the HTML entities the site emits and rewrites accented vowels
    as vowel+apostrophe (e.g. "perché" -> "perche'").
    Fixes: the original had `replace('&', '&')` (a no-op) and a bare `'''`
    (a syntax error) -- both are the HTML entities `&amp;` and `&#39;`
    that were decoded when the code was pasted on the forum.
    """
    replacements = (
        ('&amp;', '&'),
        ('à', "a'"),
        ('è', "e'"),
        ('é', "e'"),
        ('ì', "i'"),
        ('ò', "o'"),
        ('ù', "u'"),
        ('×', 'x'),
        ('&#39;', "'"),
    )
    for old, new in replacements:
        text = text.replace(old, new)
    return text
# Automatic channel verification: this function must return True when the channel is OK.
def test():
    """Automatic channel check.

    The channel passes as soon as one movie from the first main-menu entry
    ("Novità") resolves to at least one video mirror.
    """
    from servers import servertools

    primeros = mainlist(Item())
    for pelicula in peliculas(primeros[0]):
        if len(servertools.find_video_items(item=pelicula)) > 0:
            return True
    return False