Hola, buenos días, Jesús.
No sé de dónde has podido sacar que estaba enfadado; no tengo ninguna razón para estarlo. Perdona si te he dado esa sensación, no era mi intención.
Siguiendo con el tema del login, ahora sí que estoy liado: al mezclar partes de código de filenium.py y cinetube.py me has matado, ahora sí que no me sale.
Te paso el fichero con el login hecho por si me lo puedes terminar.
Código: Seleccionar todo
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal Descarregadirecta Carles Carmona
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
import sys
import base64

from core import scrapertools
from core import config
from core import logger
from core.item import Item
from servers import servertools

CHANNELNAME = "apiezas"
DEBUG = True

# Channel settings persisted in <data_path>/apiezas.xml (see login/logout).
SESION = config.get_setting("session","apiezas")
LOGIN = config.get_setting("login","apiezas")
PASSWORD = config.get_setting("password","apiezas")

# HTTP Basic auth header built from the stored credentials.
# FIX: the original built this from undefined names (username/password)
# and called logger *before* importing it, raising NameError at import time.
base64string = base64.encodestring('%s:%s' % (LOGIN, PASSWORD))[:-1]
# NOTE(review): this logs base64-encoded credentials — consider removing.
logger.info("[apiezas.py] Authorization="+base64string)
authorization_header = "Basic %s" % base64string
def isGeneric():
    # This channel uses the generic item-list interface of pelisalacarta.
    return True
def mainlist(item):
    """Build the channel's root menu of actions."""
    logger.info("[apiezas.py] mainlist")
    itemlist = [
        Item(channel=CHANNELNAME, action="search", title="Buscador", url="http://yamuza.com/apiezas/Pelis/"),
        Item(channel=CHANNELNAME, action="top", title="Mas Vistas", url="http://yamuza.com/apiezas/Pelis/&Vistas"),
        Item(channel=CHANNELNAME, action="top", title="Mas Votadas", url="http://yamuza.com/apiezas/Pelis/&Votadas"),
        Item(channel=CHANNELNAME, action="generico", title="Novedades", url="http://yamuza.com/apiezas/Pelis/"),
        Item(channel=CHANNELNAME, action="abecedario", title="Orden Alfabético", url="http://yamuza.com/apiezas/Pelis/"),
        Item(channel=CHANNELNAME, action="generos", title="Por Géneros", url="http://yamuza.com/apiezas/Pelis/"),
    ]
    if SESION == "true":
        # Already logged in: refresh session cookies and offer logout.
        perform_login(LOGIN, PASSWORD)
        itemlist.append(Item(channel=CHANNELNAME, title="Cerrar sesion ("+LOGIN+")", action="logout"))
    else:
        itemlist.append(Item(channel=CHANNELNAME, title="Iniciar sesion", action="login"))
    return itemlist
def perform_login(login,password):
    """Log in to the site via HTTP Basic auth.

    Requesting the page with an Authorization header makes scrapertools
    keep the session cookies needed by later requests.

    FIX: the header is now built from the credentials passed in, instead
    of a module-level global computed from undefined names. The old
    '@' -> '%40' substitution (copied from a form-based login) is not
    needed for Basic auth, where the raw login must be used.
    """
    base64string = base64.encodestring('%s:%s' % (login, password))[:-1]
    headers = []
    headers.append(["Authorization", "Basic %s" % base64string])
    data = scrapertools.cache_page("http://www.yamuza.com/apiezas/Pelis", headers=headers)
def logout(item):
    """Close the session: reset the channel settings file.

    Overwrites <data_path>/apiezas.xml with session=false and empty
    credentials, then returns a single confirmation item.

    FIX: uses a 'with' block so the file is closed even if the write fails.
    """
    nombre_fichero_config_canal = os.path.join(config.get_data_path(), CHANNELNAME + ".xml")
    with open(nombre_fichero_config_canal, "w") as config_canal:
        config_canal.write("<settings>\n<session>false</session>\n<login></login>\n<password></password>\n</settings>")
    itemlist = []
    itemlist.append(Item(channel=CHANNELNAME, title="Sesión finalizada", action="mainlist"))
    return itemlist
def login(item):
    """Ask the user for credentials and store them in the channel settings.

    Shows two XBMC keyboards (login, then password). Only when BOTH are
    confirmed is <data_path>/apiezas.xml rewritten with session=true and
    the confirmation item appended; cancelling either dialog stores
    nothing and returns an empty list.

    FIX: uses 'with' so the file is closed even on a write error, and the
    "Sesión iniciada" confirmation is no longer shown when the user cancels.
    """
    import xbmc
    itemlist = []
    keyboard = xbmc.Keyboard("", "Login")
    keyboard.doModal()
    if keyboard.isConfirmed():
        login = keyboard.getText()
        keyboard = xbmc.Keyboard("", "Password")
        keyboard.doModal()
        if keyboard.isConfirmed():
            password = keyboard.getText()
            nombre_fichero_config_canal = os.path.join(config.get_data_path(), CHANNELNAME + ".xml")
            with open(nombre_fichero_config_canal, "w") as config_canal:
                # NOTE(review): login/password are written unescaped; XML
                # special characters (&, <, >) would corrupt the file —
                # confirm how config.get_setting parses it before escaping.
                config_canal.write("<settings>\n<session>true</session>\n<login>" + login + "</login>\n<password>" + password + "</password>\n</settings>")
            itemlist.append(Item(channel=CHANNELNAME, title="Sesión iniciada", action="mainlist"))
    return itemlist
def prueba(item):
    """Placeholder action: returns an empty item list.

    FIX: the original stub built a list but never returned it (implicitly
    returning None); every channel action is expected to return a list.
    """
    return []
def search(item, texto, categoria="*"):
    """Search the site for *texto* and list the results via generico()."""
    logger.info("[apiezas.py] Search")
    query_item = Item(
        channel=CHANNELNAME,
        action="generico",
        title="Novedades",
        url="http://yamuza.com/apiezas/Pelis/buscar/?q=" + texto,
    )
    return generico(query_item)
def generico(item):
    """Scrape a movie-listing page into Items with action 'detail'."""
    logger.info("[apiezas.py] Generico")
    data = scrapertools.cachePage(item.url)

    # Isolate the section of the page holding the movie entries.
    section_pattern = '<ul class="peliculas clf cntclsx4 f_left_li">(.*?)<div id="ft" class="white">'
    sections = re.compile(section_pattern, re.DOTALL).findall(data)
    logger.info("hay %d matches" % len(sections))

    # One entry per movie: (url, title, thumbnail, plot).
    entry_pattern = '<h2 class="titpeli bold ico_b"><a href="(.*?)" title=".*?">(.*?)</a></h2>.*?'
    entry_pattern += '<img src="(.*?)".*?</a>.*?'
    entry_pattern += '<p>(.*?)</p>'
    entry_re = re.compile(entry_pattern, re.DOTALL)

    itemlist = []
    for section in sections:
        entries = entry_re.findall(section)
        logger.info("hay %d matches2" % len(entries))
        for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedplot in entries:
            if DEBUG:
                logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            # Add to the XBMC listing.
            itemlist.append(Item(channel=item.channel, action="detail", title=scrapedtitle,
                                 url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))
    return itemlist
def generos(item):
    """List the category (genre) links found on the page."""
    logger.info("[apiezas.py] Generos")
    data = scrapertools.cachePage(item.url)

    # The category menu lives between these two page markers.
    section_re = re.compile('Selecciona tu categoria</span></div>(.*?)Peliculas Destacadas</span>', re.DOTALL)
    sections = section_re.findall(data)
    logger.info("hay %d matches" % len(sections))

    link_re = re.compile('<li><a title="(.*?)" href="(.*?)">.*?</li>', re.DOTALL)
    itemlist = []
    for section in sections:
        links = link_re.findall(section)
        logger.info("hay %d matches2" % len(links))
        for scrapedtitle, scrapedurl in links:
            # Categories have no thumbnail or plot on this page.
            itemlist.append(Item(channel=item.channel, action="generico", title=scrapedtitle,
                                 url=scrapedurl, thumbnail="", plot="", fanart=""))
    return itemlist
def abecedario(item):
    """List the alphabetical-index links (one item per letter)."""
    logger.info("[apiezas.py] Abecedario")
    data = scrapertools.cachePage(item.url)

    # The alphabetical index lives between these two page markers.
    section_re = re.compile('Peliculas por Orden Alfabetico</div>(.*?)Buscador</div>', re.DOTALL)
    sections = section_re.findall(data)
    logger.info("hay %d matches" % len(sections))

    link_re = re.compile('<li><a href="(.*?)".*?>(.*?)</a></li>', re.DOTALL)
    itemlist = []
    for section in sections:
        links = link_re.findall(section)
        logger.info("hay %d matches2" % len(links))
        for scrapedurl, scrapedtitle in links:
            # Letters have no thumbnail or plot.
            itemlist.append(Item(channel=item.channel, action="generico", title=scrapedtitle,
                                 url=scrapedurl, thumbnail="", plot="", fanart=""))
    return itemlist
def top(item):
    """List the 'most viewed' or 'most voted' ranking.

    item.url is "<page-url>&<tipo>" where tipo selects the section:
    "Vistas" for most viewed, anything else for most voted.
    """
    logger.info("[apiezas.py] Top")
    parts = item.url.split("&")
    url = parts[0]
    tipo = parts[1]
    data = scrapertools.cachePage(url)

    # Pick the page section matching the requested ranking.
    if tipo == "Vistas":
        logger.info("Mas Vistas")
        section_pattern = '<span class="d_block">Peliculas Destacadas</span>(.*?)<div class='
    else:
        logger.info("Mas Votadas")
        section_pattern = '<span class="d_block">Peliculas Votadas</span>(.*?)<div id="cn" class="f_right">'

    sections = re.compile(section_pattern, re.DOTALL).findall(data)
    logger.info("hay %d matches" % len(sections))
    scrapertools.printMatches(sections)

    entry_re = re.compile('<li><a href="(.*?)".*?>(.*?)<span>(.*?)</span></a></li>', re.DOTALL)
    itemlist = []
    for section in sections:
        entries = entry_re.findall(section)
        logger.info("hay %d matches2" % len(entries))
        for scrapedurl, name, count in entries:
            # The title shows the view/vote count in parentheses.
            itemlist.append(Item(channel=item.channel, action="generico",
                                 title=name + "(" + count + ")", url=scrapedurl,
                                 thumbnail="", plot="", fanart=""))
    return itemlist
def detail(item):
    """Find every playable video link on a movie's page.

    Downloads the page and lets servertools locate known video-hoster
    links inside the HTML; one playable Item is returned per link found.
    """
    logger.info("[apiezas.py] detail")
    data = scrapertools.cachePage(item.url)
    itemlist = []
    for video in servertools.findvideos(data):
        server = video[2]
        itemlist.append(Item(channel=CHANNELNAME, action="play",
                             title=item.title + " [" + server + "]",
                             url=video[1], thumbnail=item.thumbnail,
                             plot=item.plot, server=server, folder=False))
    return itemlist
Como siempre un saludo y gracias.