Te paso algo con lo que trabajar. Intenta saber qué hace.
Código: Seleccionar todo
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para asiansubita
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import scrapertools
from core import logger
from core import config
from core.item import Item
from servers import servertools
from servers import adfly
# Channel registration metadata read by the pelisalacarta core
# (NOTE(review): exact meaning of __category__ "F" not visible here — confirm
# against the core's channel loader).
__channel__ = "asiansubita"
__category__ = "F"
__type__ = "generic"
__title__ = "asiansubita"
__language__ = "IT"
# Base URL of the scraped site; every listing/search URL is built from it.
host = "http://asiansubita.altervista.org"
def isGeneric():
    """Tell the launcher this is a generic channel (driven via mainlist/search)."""
    return True
def mainlist(item):
    """Build the channel's root menu: a Home listing and a search entry."""
    logger.info("[asiansubita.py] mainlist")
    # (action, title) pairs for the two root entries; both start from the host URL.
    entries = [
        ("fichas", "Home"),
        ("search", "Buscar..."),
    ]
    return [
        Item(channel=__channel__, action=action, title=title, url=host)
        for action, title in entries
    ]
## Al llamarse "search" la función, el launcher pide un texto a buscar y lo añade como parámetro
def search(item, texto):
    """Search the site for *texto* and return the matching show Items.

    Because this function is named "search", the launcher prompts the user
    for a query and passes it in as ``texto``; it is appended to the site
    URL as the ``s`` query parameter and the normal listing scraper is
    reused on the results page.

    Returns an empty list on any scraping error so that one failing channel
    does not abort the global multi-channel search.
    """
    logger.info("[asiansubita.py] " + item.url + " search " + texto)
    item.url += "/?s=" + texto
    try:
        return fichas(item)
    # Narrowed from a bare ``except:`` — Exception still covers every scraping
    # failure but no longer swallows SystemExit/KeyboardInterrupt.
    except Exception:
        # ``sys`` is already imported at module level; the redundant local
        # ``import sys`` was removed.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def fichas(item):
    """Scrape a listing page and return one Item per show card.

    Each ``<article>`` block yields (url, thumbnail, title); a ">> Página
    siguiente" entry is appended when a next-page link is present.
    """
    # Fixed: previously logged "mainlist" (copy-paste error from mainlist()).
    logger.info("[asiansubita.py] fichas")
    itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    # Extract the entries
    patron = '<article.*?'
    patron += 'href="([^"]+)".*?'
    patron += 'src="([^"]+)".*?'
    patron += 'title="([^"]+)".*?'
    patron += '/article>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(Item(channel=__channel__, action="findvideos", title=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, show=scrapedtitle))
    # Pagination
    next_page = scrapertools.find_single_match(data, '<a href="(http://asiansubita.altervista.org/page/\\d+/)" >')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="fichas", title=">> Página siguiente", url=next_page))
    return itemlist
def findvideos(item):
    """List the adf.ly-protected video links found on a show's page."""
    logger.info("[asiansubita.py] findvideos")
    # Download the page once and mine all metadata and links from it.
    data = scrapertools.cache_page(item.url)
    # Page-level metadata shared by every link entry.
    thumbnail = scrapertools.find_single_match(data, 'src="([^"]+)"[^<]+</p>')
    plot = scrapertools.decodeHtmlentities(
        scrapertools.find_single_match(data, '<p style="text-align: justify;">(.*?)</p>')
    )
    link_pattern = re.compile(
        'href="(http://adf.ly/[^"]+)" target="_blank">([^<]+)</a>', re.DOTALL
    )
    results = []
    for url, raw_label in link_pattern.findall(data):
        label = scrapertools.decodeHtmlentities(raw_label)
        results.append(
            Item(
                channel=__channel__,
                action="play",
                title="[" + label + "] " + item.fulltitle,
                url=url,
                thumbnail=thumbnail,
                plot=plot,
                fulltitle=item.fulltitle,
                show=item.show,
            )
        )
    return results
def play(item):
    """Resolve the adf.ly short link and hand the target to the server detector."""
    logger.info("[asiansubita.py] play")
    # Follow the adf.ly redirect to reach the real hoster page/URL.
    resolved = adfly.get_long_url(item.url)
    videos = servertools.find_video_items(data=resolved)
    # Propagate this show's metadata onto every detected video entry.
    for video in videos:
        video.title = item.show
        video.fulltitle = item.fulltitle
        video.thumbnail = item.thumbnail
        video.channel = __channel__
    return videos