Página 3 de 5

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 11 Jun 2016, 01:03
por makal
este es el código

Código: Seleccionar todo

# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------

import urlparse,urllib2,urllib,re
import os, sys

from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"
__creationdate__ = "20111014"

DEBUG = config.get_setting("debug")
    
def isGeneric():
    """Indica al núcleo de pelisalacarta que este canal usa el modo genérico."""
    return True

def mainlist(item):
    """Construye el menú principal del canal (secciones + buscador)."""
    logger.info("channels.peliculasaudiolatino mainlist")

    host = "http://peliculasaudiolatino.com"
    # (título, acción, url) de cada sección del menú, en orden de aparición
    secciones = [
        ("Recién agregadas", "peliculas", host + "/ultimas-agregadas.html"),
        ("Recién actualizadas", "peliculas", host + "/recien-actualizadas.html"),
        ("Las más vistas", "peliculas", host + "/las-mas-vistas.html"),
        ("Listado por géneros", "generos", host),
        ("Listado por años", "anyos", host),
    ]

    itemlist = [Item(channel=__channel__, title=titulo, action=accion, url=direccion)
                for titulo, accion, direccion in secciones]
    itemlist.append(Item(channel=__channel__, title="Buscar...", action="search"))
    return itemlist

def peliculas(item):
    """Lista las películas de una página del sitio y el enlace a la siguiente."""
    logger.info("channels.peliculasaudiolatino peliculas")

    # Descarga la página
    data = scrapertools.cachePage(item.url)

    # Cada ficha: enlace, carátula y título
    patron = ('<div class="top"[^<]+'
              '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">')
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)

    itemlist = []
    for enlace, caratula, titulo in matches:
        titulo = titulo.strip()
        itemlist.append(Item(channel=__channel__, action="findvideos",
                             title=titulo, fulltitle=titulo,
                             url=urlparse.urljoin(item.url, enlace),
                             thumbnail=urlparse.urljoin(item.url, caratula),
                             plot="", viewmode="movie", folder=True))

    # Marca de paginación, si existe
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        url_siguiente = urlparse.urljoin(item.url, next_page).replace("/../../", "/")
        itemlist.append(Item(channel=__channel__, action="peliculas",
                             title=">> Página siguiente", url=url_siguiente, folder=True))

    return itemlist

def generos(item):
    """Devuelve el listado de géneros, ordenado alfabéticamente por título.

    Descarga la portada, recorta el bloque del desplegable de géneros y
    extrae cada enlace como un item de acción "peliculas".
    """
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Descarga la página
    data = scrapertools.cachePage(item.url)

    # Limita el bloque donde buscar
    data = scrapertools.find_single_match(data,'span>Generos<span(.*?)</ul>')

    # Extrae las entradas
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if (DEBUG): scrapertools.printMatches(matches)

    for match in matches:
        scrapedurl = urlparse.urljoin(item.url,match[0])
        scrapedtitle = match[1].strip()
        logger.info(scrapedtitle)

        itemlist.append( Item(channel=__channel__, action="peliculas", title=scrapedtitle , url=scrapedurl , thumbnail="" , plot="" , folder=True) )

    # FIX: la lambda usaba "Item" como nombre del parámetro, ocultando la clase
    # Item importada dentro de la expresión
    itemlist = sorted(itemlist, key=lambda it: it.title)
    return itemlist
    
def anyos(item):
    """Lista los años disponibles a partir del desplegable de la portada."""
    logger.info("channels.peliculasaudiolatino anyos")

    # Descarga la página y recorta el bloque del desplegable
    data = scrapertools.cachePage(item.url)
    data = scrapertools.find_single_match(data, "span>Ultimo A(.*?)</ul>")
    logger.info("channels.peliculasaudiolatino data=" + data)

    matches = re.compile('<li><a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data)
    if (DEBUG):
        scrapertools.printMatches(matches)

    itemlist = []
    for enlace, titulo in matches:
        direccion = urlparse.urljoin(item.url, enlace)
        if (DEBUG):
            logger.info("title=[" + titulo + "], url=[" + direccion + "], thumbnail=[]")
        itemlist.append(Item(channel=__channel__, action="peliculas", title=titulo,
                             url=direccion, thumbnail="", plot="", folder=True))

    return itemlist

def search(item,texto):
    """Busca "texto" en la web y devuelve los resultados ordenados por título.

    Nunca propaga excepciones: el buscador global recorre todos los canales
    y un fallo aquí no debe interrumpirlo; en ese caso devuelve [].
    """
    logger.info("channels.peliculasaudiolatino search")

    texto = texto.replace(" ","+")
    try:
        item.url = "http://peliculasaudiolatino.com/busqueda.php?q=%s" % texto
        item.extra = ""
        itemlist = peliculas(item)
        # FIX: la lambda usaba "Item" como parámetro, ocultando la clase Item
        itemlist = sorted(itemlist, key=lambda it: it.title)

        return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        # FIX: "sys" ya está importado a nivel de módulo; el reimport era redundante
        for line in sys.exc_info():
            logger.error( "%s" % line )
        return []

def findvideos(item):
    """Extrae la tabla de enlaces de la ficha y crea un item "play" por mirror.

    Cada fila de la tabla contiene uploader, servidor, bandera de idioma,
    calidad y el enlace "Ver" (a veces un window.open de javascript).
    """
    logger.info("channels.peliculasaudiolatino videos")

    # Descarga la página de la ficha
    data = scrapertools.cachePage(item.url)

    patron = '<tr><th class="headtable" align="left"><a href=.*?><img class=.*?<th class="headmovil" align="left"><img src=.*?alt="(.*?)"/>.*?<span class="headtable">.*?<th class="headmovil".*?alt="(.*?)" align=absmiddle>.*?<th class="headmovil".*?<img src=.*?alt="(.*?)".*?a href="(.*?)><span class="headmovil">Ver'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if (DEBUG):
        scrapertools.printMatches(matches)

    itemlist = []
    for servidor, imgidioma, calidad, scrapedurl in matches:
        # La bandera de idioma se traduce a un texto legible
        idioma = img_idioma_to_img_name(imgidioma)
        titulo = "Ver en " + servidor + " [" + idioma + "][" + calidad + "]"
        itemlist.append(Item(channel=__channel__, action="play", title=titulo,
                             fulltitle=item.fulltitle, url=scrapedurl,
                             thumbnail=item.thumbnail, folder=False))

    return itemlist

def img_idioma_to_img_name(imgidioma):
    """Traduce la url de la bandera de idioma a una etiqueta legible."""
    # Pares (fragmento de la url, etiqueta), comprobados en este orden
    conocidos = [("la_la.png", "Latino"),
                 ("es_es", "Español"),
                 ("en_es", "Subtitulado")]
    for fragmento, etiqueta in conocidos:
        if fragmento in imgidioma:
            return etiqueta
    # Idioma desconocido: se devuelve el nombre del fichero de la imagen
    return scrapertools.get_filename_from_url(imgidioma)

def play(item):
    """Resuelve la url intermedia del canal y localiza los vídeos reales."""
    logger.info("channels.peliculasaudiolatino play")

    data2 = item.url

    # Los enlaces "Ver" llegan como javascript:window.open('...'): se extrae
    # esa url y se descarga su contenido
    if data2.startswith("javascript"):
        item.url = scrapertools.find_single_match(data2,"window.open\('([^']+)'")
        data2 = scrapertools.cache_page(item.url)

    logger.info("data2="+data2)

    # Tabla de reescritura: redirector del canal -> url real en el servidor.
    # Se aplica en el mismo orden que las sustituciones originales.
    reescrituras = [
        ("http://peliculasaudiolatino.com/show/vidbux.php?url=", "http://www.vidbux.com/"),
        ("http://peliculasaudiolatino.com/show/vidxden.php?url=", "http://www.vidxden.com/"),
        ("http://peliculasaudiolatino.com/v/pl/play.php?url=", "http://www.putlocker.com/embed/"),
        ("http://peliculasaudiolatino.com/v/mv/play.php?url=", "http://www.modovideo.com/frame.php?v="),
        ("http://peliculasaudiolatino.com/v/ss/play.php?url=", "http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/v/vb/play.php?url=", "http://vidbull.com/"),
        ("http://peliculasaudiolatino.com/v/vk/play.php?url=", "http://vk.com/video_ext.php?oid="),
        ("http://peliculasaudiolatino.com/v/ttv/play.php?url=", "http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/sockshare.php?url=", "http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/show/moevide.php?url=", "http://moevideo.net/?page=video&uid="),
        ("http://peliculasaudiolatino.com/show/novamov.php?url=", "http://www.novamov.com/video/"),
        ("http://peliculasaudiolatino.com/show/movshare.php?url=", "http://www.movshare.net/video/"),
        ("http://peliculasaudiolatino.com/show/divxstage.php?url=", "http://www.divxstage.net/video/"),
        ("http://peliculasaudiolatino.com/show/tumi.php?url=", "http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/playerto.php?url=", "http://played.to/"),
        ("http://peliculasaudiolatino.com/show/videoweed.php?url=", "http://www.videoweed.es/file/"),
        ("http://peliculasaudiolatino.com/show/netu.php?url=", "http://netu.tv/watch_video.php?v="),
        ("http://peliculasaudiolatino.com/show/powvideo.php?url=", "http://powvideo.net/"),
        ("http://peliculasaudiolatino.com/show/streamin.php?url=", "http://streamin.to/"),
        ("http://peliculasaudiolatino.com/show/vidspot.php?url=", "http://vidspot.net/"),
        ("http://peliculasaudiolatino.com/show/allmy.php?url=", "http://allmyvideos.net/"),
        ('http://peliculasaudiolatino.com/show/r"></iframe>url=', "http://realvid.net/"),
        ("http://peliculasaudiolatino.com/show/roc.php?url=", "http://rocvideo.net/"),
        ("http://peliculasaudiolatino.com/show/vide.php?url=", "http://thevideo.me/"),
        ("http://peliculasaudiolatino.com/show/vidto.php?url=", "http://vidto.me/"),
        ("http://peliculasaudiolatino.com/show/vodlocker.php?url=", "http://vodlocker.com/"),
        ("http://peliculasaudiolatino.com/show/videomega.php?url=", "http://videomega.tv/?ref="),
        ("http://peliculasaudiolatino.com/show/gamo.php?url=", "http://gamovideo.com/"),
        ("%26", "&"),
    ]
    for origen, destino in reescrituras:
        data2 = data2.replace(origen, destino)
    logger.info("data2="+data2)

    itemlist = []
    for video in servertools.findvideos(data2):
        titulo = item.title + video[0]
        if (DEBUG):
            logger.info("title=[" + titulo + "], url=[" + video[1] + "]")

        # Añade al listado de XBMC
        itemlist.append(Item(channel=__channel__, action="play", title=titulo,
                             fulltitle=item.fulltitle, url=video[1],
                             server=video[2], folder=False))

    return itemlist

# Verificación automática de canales: Esta función debe devolver "True" si está ok el canal.
def test():
    from servers import servertools

    # Da por bueno el canal si algún vídeo de la primera sección del menú
    # ("Recién agregadas") devuelve mirrors
    primera_seccion = mainlist(Item())[0]
    for candidato in peliculas(primera_seccion):
        if len(findvideos(item=candidato)) > 0:
            return True
    return False

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 11 Jun 2016, 01:57
por Hernan_Ar_c
Al parecer hicieron cambios en la página principal, y además en la página que contiene el vídeo. Perdón, a lo mejor mi poco conocimiento no me deja ver la simpleza que por ahí otros más experimentados pueden ver en la página; les dejo el código de la página a ver qué se puede hacer...

Código: Seleccionar todo

<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Estas Viendo Antinatural 2015 Online | Peliculasaudiolatino.com</title>
<script type="text/javascript">
//<![CDATA[
try{if (!window.CloudFlare) {var CloudFlare=[{verbose:0,p:1465447238,byc:0,owlid:"cf",bag2:1,mirage2:0,oracle:0,paths:{cloudflare:"/cdn-cgi/nexp/dok3v=1613a3a185/"},atok:"5dc2d00697838c2942b1957e3a78dc4e",petok:"edb34f6b6517f46e5867280e436376e15014b4b4-1465606404-1800",zone:"peliculasaudiolatino.com",rocket:"a",apps:{},sha2test:0}];document.write('<script type="text/javascript" src="//ajax.cloudflare.com/cdn-cgi/nexp/dok3v=e982913d31/cloudflare.min.js"><'+'\/script>');}}catch(e){};
//]]>
</script>
<style>body{background:#111;}</style>
</head>
<body>
<div class="header" align="center">
<table>
<tr><td>
<iframe align="left" width="900" height="700" frameborder="0" hspace="0" marginheight="0" marginwidth="0" vspace="0" src="http://peliculasaudiolatino.org/show/vidx.php?url=VjFaamVFMHlWbFpPVldoVVltMVNhRnBYZEVwa01XUlZWR3hPYkZKV1NsTlZSbEYzVUZFOVBRPT0rUA=="></iframe>
</td>
</tr></table>
</div>
<div style="visibility:hidden">
<script id="_waungo" type="text/rocketscript">var _wau = _wau || []; _wau.push(["small", "au4nxbf5764o", "ngo"]);
(function() {var s=document.createElement("script"); s.async=true;
s.src="http://widgets.amung.us/small.js";
document.getElementsByTagName("head")[0].appendChild(s);
})();</script>
</div>
<script type="text/rocketscript">
  (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
  (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
  m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
  })(window,document,'script','//www.google-analytics.com/analytics.js','ga');

  ga('create', 'UA-12564223-1', 'auto');
  ga('send', 'pageview');

</script>
</body>
</html>
Segun creo yo el problema esta aca

Código: Seleccionar todo

src="http://peliculasaudiolatino.org/show/vidx.php?url=VjFaamVFMHlWbFpPVldoVVltMVNhRnBYZEVwa01XUlZWR3hPYkZKV1NsTlZSbEYzVUZFOVBRPT0rUA==">

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 11 Jun 2016, 04:09
por SeiTaN
Están haciendo cambios, no he podido ver ninguna pelicula desde su web, hace unos días si que podía.

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 11 Jun 2016, 18:07
por SeiTaN
Parece que la web ya va, en un ratillo subo los nuevos cambios del canal.

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 11 Jun 2016, 20:09
por SeiTaN
Bueno he tardado un poco más de la cuenta, sigo la senda del maestro darth Cmos siendo su humilde aprendiz :twisted:

Coñas aparte, he probado un par de pelis y va online, ya me direis :)

EDIT: corregido thumbnails de mejor calidad, búsqueda, reestructuración del menú y añadida la nueva parte de series.

Código: Seleccionar todo

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import re
import sys
import urlparse

from core import config
from core import channeltools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"

CHANNEL_HOST = "http://peliculasaudiolatino.com/"
CHANNEL_DEFAULT_HEADERS = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:33.0) Gecko/20100101 Firefox/33.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]

DEBUG = config.get_setting("debug")

parameters = channeltools.get_channel_parameters(__channel__)
thumbnail_host = parameters['thumbnail']


def isGeneric():
    """El canal funciona con el conector genérico de pelisalacarta."""
    return True


def mainlist(item):
    """Menú principal: sección de películas y sección de series."""
    logger.info("channels.peliculasaudiolatino mainlist")

    def entrada(titulo, accion, ruta):
        # Construye un item de menú apuntando a una ruta del host del canal
        return Item(channel=__channel__, title=titulo, action=accion,
                    url=urlparse.urljoin(CHANNEL_HOST, ruta))

    itemlist = [Item(channel=__channel__, title="[B]Películas[/B]", folder=False,
                     thumbnail=thumbnail_host)]
    itemlist.append(entrada("      Estrenos", "peliculas", "/estrenos-2016.html"))
    itemlist.append(entrada("      Recién agregadas", "peliculas", "/ultimas-agregadas.html"))
    itemlist.append(entrada("      Recién actualizadas", "peliculas", "/recien-actualizadas.html"))
    itemlist.append(entrada("      Las más vistas", "peliculas", "/las-mas-vistas.html"))
    itemlist.append(entrada("      Listado por géneros", "generos", "/lista-completa.html"))
    itemlist.append(entrada("      Listado por años", "anyos", "/lista-completa.html"))
    itemlist.append(entrada("      Todas", "peliculas", "/lista-completa.html"))
    itemlist.append(Item(channel=__channel__, title="      Buscar...", action="search"))

    itemlist.append(Item(channel=__channel__, title="[B]Series TV[/B]", folder=False,
                         thumbnail=thumbnail_host))
    itemlist.append(entrada("      Todas", "peliculas", "/series-completas.html"))
    return itemlist


def peliculas(item):
    """Lista películas o series de una página, con soporte de paginación."""
    logger.info("channels.peliculasaudiolatino peliculas")

    # Descarga la página
    data = scrapertools.cache_page(item.url)

    # Enlace, carátula y título de cada ficha
    patron = ('<div class="top"[^<]+'
              '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">')
    matches = re.compile(patron, re.DOTALL).findall(data)

    es_serie = "series-completas" in item.url
    itemlist = []
    for enlace, caratula, titulo in matches:
        titulo = titulo.strip()
        if es_serie:
            # cogemos la imagen buena si venimos de series
            thumbnail = caratula.replace("poster/85x115/series/", "")
            action = "temporadas"
        else:
            # cogemos la imagen buena de peliculas
            thumbnail = caratula.replace("poster/85x115/peliculas/", "")
            action = "findvideos"

        itemlist.append(Item(channel=__channel__, action=action, title=titulo, fulltitle=titulo,
                             url=urlparse.urljoin(item.url, enlace), thumbnail=thumbnail,
                             folder=True))

    # Marca de siguiente página
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), folder=True))

    return itemlist


def temporadas(item):
    """Para una serie, lista todos los episodios agrupados por temporada."""
    logger.info("channels.peliculasaudiolatino temporadas")

    itemlist = []
    # Descarga la página y elimina saltos de línea, tabs y restos de marcado
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    plot = scrapertools.find_single_match(data, "Sinopsis:</b>(.*?)</p>")

    # Un bloque "accord-header" por temporada
    bloques = re.compile('<div class="accord-header">(.*?)</li></ul></div>', re.DOTALL).findall(data)

    for bloque in bloques:
        logger.info("yeah {}".format(bloque))
        num_temporada = scrapertools.find_single_match(bloque, 'Temporada (\d+)[\s]<span')
        logger.info("yeah {}".format(num_temporada))

        episodios = re.compile("<ul><li><a href='([^']+)'.+?>(.*?)[\s]<span", re.DOTALL).findall(bloque)
        for enlace, nombre in episodios:
            num_episodio = scrapertools.find_single_match(nombre, '.+? (\d+)')
            titulo = "{0}x{1} {2}".format(num_temporada, num_episodio, nombre)

            itemlist.append(Item(channel=__channel__, action="findvideos", title=titulo, fulltitle=titulo,
                                 url=enlace, thumbnail=item.thumbnail, plot=plot, folder=True))

    return itemlist


def generos(item):
    """Listado de géneros, ordenado alfabéticamente por título."""
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Descarga la página
    data = scrapertools.cache_page(item.url)

    # Limita el bloque donde buscar
    data = scrapertools.find_single_match(data, 'span.+?>Generos<span(.*?)</ul>')

    # Extrae las entradas
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedtitle = match[1].strip()

        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle, url=scrapedurl))

    # FIX: la lambda usaba "Item" como nombre del parámetro, ocultando la clase
    # Item importada dentro de la expresión
    itemlist = sorted(itemlist, key=lambda it: it.title)
    return itemlist


def anyos(item):
    """Listado por año de estreno, a partir del desplegable de la web."""
    logger.info("channels.peliculasaudiolatino anyos")

    # Descarga la página y limita el bloque donde buscar
    data = scrapertools.cache_page(item.url)
    data = scrapertools.find_single_match(data, "span.+?>Años<span(.*?)</ul>")

    matches = re.compile('<li><a href="([^"]+)">([^<]+)<', re.DOTALL).findall(data)

    itemlist = []
    for enlace, titulo in matches:
        itemlist.append(Item(channel=__channel__, action="peliculas", title=titulo,
                             url=urlparse.urljoin(item.url, enlace), thumbnail="",
                             plot="", folder=True))

    return itemlist


def search(item, texto):
    """Busca "texto" vía el endpoint de autocompletado del sitio.

    Devuelve los resultados como items "findvideos"; si algo falla devuelve
    una lista vacía para no interrumpir al buscador global.
    """
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []

    texto = texto.replace(" ", "+")
    try:
        params = "search={0}".format(texto)
        data = scrapertools.cachePagePost(CHANNEL_HOST + 'autoplete.php', params)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

        patron = '<a href="([^"]+)"><img src="([^"]+)".+?</a>.+?<b>(.*?)</b>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            title = scrapertools.htmlclean(scrapedtitle.strip())
            plot = ""

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=plot, viewmode="movie", folder=True))

        return itemlist

    # Se captura la excepción, para no interrumpir al buscador global si un canal falla
    except:
        # FIX: "sys" ya está importado a nivel de módulo; el reimport era redundante
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def findvideos(item):
    """Extrae la tabla de enlaces (mirrors) de la ficha de una película/episodio."""
    logger.info("channels.peliculasaudiolatino videos")
    itemlist = []

    # Descarga la página y elimina saltos de línea y restos de marcado
    data = scrapertools.cache_page(item.url)
    scrapedthumbnail = item.thumbnail
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    logger.info("dataaa {0}".format(data))

    patron = '<table class="table_links">(.*?)</table>'
    bloques = re.compile(patron, re.DOTALL).findall(data)
    logger.info("bloques {}".format(bloques))

    if bloques:
        # Por fila: uploader, servidor, idioma, calidad, url y tipo (Ver/Descargar)
        patron = '<span class="infotx">(.*?)</span>.*?<span class="infotx">(.*?)</span>.+?<img .+? alt="([^"]+)".+?' \
                 '<img .+? alt="([^"]+)".+?<a href="([^"]+)".+?</span>(.*?)</a>'

        matches = re.compile(patron, re.DOTALL).findall(bloques[0])
        for uploader, scrapedservidor, imgidioma, calidad, scrapedurl, scrapedtipo in matches:
            url = scrapedurl
            # FIX: index('.') lanzaba ValueError si el nombre del servidor no
            # contenía ningún punto; split('.') es equivalente cuando sí lo hay
            servidor = scrapedservidor.split('.')[0]
            title = "{0} en {1} [{2}] [{3}] ({4})".format(scrapedtipo.strip(), servidor, imgidioma, calidad,
                                                          uploader)
            itemlist.append(Item(channel=__channel__, action="play", title=title, fulltitle=item.fulltitle, url=url,
                                 thumbnail=scrapedthumbnail))

    return itemlist


def play(item):
    """Sigue la cadena de iframes hasta la url final y delega en servertools."""
    logger.info("channels.peliculasaudiolatino play")

    # Primer salto: iframe dentro de la página del enlace
    primera = scrapertools.cache_page(item.url)
    url = scrapertools.find_single_match(primera, '<iframe .*?src="([^"]+)"')

    # Segundo salto: la página intermedia está protegida por CloudFlare
    segunda = scrapertools.anti_cloudflare(url, host=CHANNEL_HOST, headers=CHANNEL_DEFAULT_HEADERS)
    url = scrapertools.find_single_match(segunda, '<IFRAME .*?SRC="([^"]+)"')

    itemlist = servertools.find_video_items(data=url)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist


# Verificación automática de canales: Esta función debe devolver "True" si está ok el canal.
def test():
    # Da por bueno el canal si algún vídeo de la primera sección del menú
    # devuelve mirrors
    primera_seccion = mainlist(Item())[0]
    for candidato in peliculas(primera_seccion):
        if len(findvideos(item=candidato)) > 0:
            return True
    return False


Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 12 Jun 2016, 00:28
por makal
SeiTaN escribió:Bueno he tardado un poco más de la cuenta, sigo la senda del maestro darth Cmos siendo su humilde aprendiz :twisted:

Coñas aparte, he probado un par de pelis y va online, ya me direis :)

EDIT: corregido thumbnails de mejor calidad, búsqueda, reestructuración del menú y añadida la nueva parte de series.

Código: Seleccionar todo

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import re
import sys
import urlparse

from core import config
from core import channeltools
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"

CHANNEL_HOST = "http://peliculasaudiolatino.com/"
CHANNEL_DEFAULT_HEADERS = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:33.0) Gecko/20100101 Firefox/33.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]

DEBUG = config.get_setting("debug")

parameters = channeltools.get_channel_parameters(__channel__)
thumbnail_host = parameters['thumbnail']


def isGeneric():
    """El canal funciona con el conector genérico de pelisalacarta."""
    return True


def mainlist(item):
    """Menú principal: sección de películas y sección de series."""
    logger.info("channels.peliculasaudiolatino mainlist")

    def entrada(titulo, accion, ruta):
        # Construye un item de menú apuntando a una ruta del host del canal
        return Item(channel=__channel__, title=titulo, action=accion,
                    url=urlparse.urljoin(CHANNEL_HOST, ruta))

    itemlist = [Item(channel=__channel__, title="[B]Películas[/B]", folder=False,
                     thumbnail=thumbnail_host)]
    itemlist.append(entrada("      Estrenos", "peliculas", "/estrenos-2016.html"))
    itemlist.append(entrada("      Recién agregadas", "peliculas", "/ultimas-agregadas.html"))
    itemlist.append(entrada("      Recién actualizadas", "peliculas", "/recien-actualizadas.html"))
    itemlist.append(entrada("      Las más vistas", "peliculas", "/las-mas-vistas.html"))
    itemlist.append(entrada("      Listado por géneros", "generos", "/lista-completa.html"))
    itemlist.append(entrada("      Listado por años", "anyos", "/lista-completa.html"))
    itemlist.append(entrada("      Todas", "peliculas", "/lista-completa.html"))
    itemlist.append(Item(channel=__channel__, title="      Buscar...", action="search"))

    itemlist.append(Item(channel=__channel__, title="[B]Series TV[/B]", folder=False,
                         thumbnail=thumbnail_host))
    itemlist.append(entrada("      Todas", "peliculas", "/series-completas.html"))
    return itemlist


def peliculas(item):
    """Scrape one listing page (movies or series) into menu items.

    Series pages ("series-completas") link to `temporadas`; movie pages
    link straight to `findvideos`. Appends a "next page" item when the
    pagination chevron is present.
    """
    logger.info("channels.peliculasaudiolatino peliculas")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Each entry: link, poster and title
    patron = ('<div class="top"[^<]+'
              '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">')
    entries = re.findall(patron, data, re.DOTALL)

    itemlist = []
    is_series = "series-completas" in item.url

    for scrapedurl, scrapedthumbnail, scrapedtitle in entries:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()
        if is_series:
            # use the full-size poster when coming from the series listing
            thumbnail = scrapedthumbnail.replace("poster/85x115/series/", "")
            action = "temporadas"
        else:
            # use the full-size movie poster
            thumbnail = scrapedthumbnail.replace("poster/85x115/peliculas/", "")
            action = "findvideos"

        itemlist.append(Item(channel=__channel__, action=action, title=title, fulltitle=title, url=url,
                             thumbnail=thumbnail, folder=True))

    # Pagination marker
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), folder=True))

    return itemlist


def temporadas(item):
    """List every episode of a series, one item per episode.

    Parses the seasons accordion of a series page and emits "SxE title"
    entries whose action is `findvideos`. The shared plot is taken from
    the page synopsis.
    """
    logger.info("channels.peliculasaudiolatino temporadas")

    itemlist = []
    # Download the page and flatten the HTML so the patterns stay simple
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    plot = scrapertools.find_single_match(data, "Sinopsis:</b>(.*?)</p>")

    # One block per season: accordion header plus its episode list
    patron = '<div class="accord-header">(.*?)</li></ul></div>'
    list_temporadas = re.compile(patron, re.DOTALL).findall(data)

    for temporada in list_temporadas:
        # NOTE: the leftover 'yeah {}' debug logs that dumped every season
        # block at info level have been removed.
        num_temporada = scrapertools.find_single_match(temporada, 'Temporada (\d+)[\s]<span')

        patron = "<ul><li><a href='([^']+)'.+?>(.*?)[\s]<span"
        matches = re.compile(patron, re.DOTALL).findall(temporada)

        for scrapedurl, scrapedtitle in matches:
            num_episodio = scrapertools.find_single_match(scrapedtitle, '.+? (\d+)')
            title = "{0}x{1} {2}".format(num_temporada, num_episodio, scrapedtitle)

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=plot, folder=True))

    return itemlist


def generos(item):
    """List the genres menu, sorted alphabetically by title."""
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Restrict the search to the "Generos" dropdown block
    data = scrapertools.find_single_match(data, 'span.+?>Generos<span(.*?)</ul>')

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()

        itemlist.append(Item(channel=__channel__, action="peliculas", title=title, url=url))

    # FIX: the sort lambda used to name its parameter `Item`, shadowing the
    # Item class inside the lambda body.
    itemlist.sort(key=lambda it: it.title)
    return itemlist


def anyos(item):
    """List the per-year filter entries from the years dropdown."""
    logger.info("channels.peliculasaudiolatino anyos")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Restrict the search to the "Años" dropdown block
    data = scrapertools.find_single_match(data, "span.+?>Años<span(.*?)</ul>")

    # Extract the entries
    matches = re.findall('<li><a href="([^"]+)">([^<]+)<', data, re.DOTALL)

    itemlist = [Item(channel=__channel__, action="peliculas", title=year_title,
                     url=urlparse.urljoin(item.url, year_url), thumbnail="",
                     plot="", folder=True)
                for year_url, year_title in matches]

    return itemlist


def search(item, texto):
    """Query the site's autocomplete endpoint and list the results.

    Failures are logged and swallowed on purpose so a broken channel does
    not abort the global search, but the handler is narrowed to Exception
    (the old bare `except:` also swallowed KeyboardInterrupt/SystemExit).
    """
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []

    texto = texto.replace(" ", "+")
    try:
        params = "search={0}".format(texto)
        data = scrapertools.cachePagePost(CHANNEL_HOST + 'autoplete.php', params)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

        patron = '<a href="([^"]+)"><img src="([^"]+)".+?</a>.+?<b>(.*?)</b>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            title = scrapertools.htmlclean(scrapedtitle.strip())
            plot = ""

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=plot, viewmode="movie", folder=True))

        return itemlist

    # Catch the exception so one failing channel does not break the global search
    except Exception:
        # sys is already imported at module level; the redundant local import is gone
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def findvideos(item):
    """List the available mirrors (server links) for a movie/episode page.

    Parses the first links table and builds one playable item per row:
    "type en server [language] [quality] (uploader)".
    """
    logger.info("channels.peliculasaudiolatino videos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    scrapedthumbnail = item.thumbnail
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    # NOTE: the leftover info-level logs that dumped the whole page and the
    # raw table blocks have been removed.
    patron = '<table class="table_links">(.*?)</table>'
    bloques = re.compile(patron, re.DOTALL).findall(data)

    if bloques:
        patron = '<span class="infotx">(.*?)</span>.*?<span class="infotx">(.*?)</span>.+?<img .+? alt="([^"]+)".+?' \
                 '<img .+? alt="([^"]+)".+?<a href="([^"]+)".+?</span>(.*?)</a>'

        matches = re.compile(patron, re.DOTALL).findall(bloques[0])
        for uploader, scrapedservidor, imgidioma, calidad, scrapedurl, scrapedtipo in matches:
            # FIX: server name is the text up to the first dot; partition()
            # returns the whole string when there is no dot, whereas the old
            # index('.') raised ValueError and aborted the listing.
            servidor = scrapedservidor.partition('.')[0]
            title = "{0} en {1} [{2}] [{3}] ({4})".format(scrapedtipo.strip(), servidor, imgidioma, calidad,
                                                          uploader)
            itemlist.append(Item(channel=__channel__, action="play", title=title, fulltitle=item.fulltitle,
                                 url=scrapedurl, thumbnail=scrapedthumbnail))

    return itemlist


def play(item):
    """Resolve the final video URL for *item* and return playable items.

    NOTE(review): this version calls ``scrapertools.anti_cloudflare``,
    which the pelisalacarta 4.0.9 scrapertools does not provide (users in
    this thread report ``AttributeError: 'module' object has no attribute
    'anti_cloudflare'`` on this exact line) — confirm the target
    scrapertools version or inline the helper as the later revision does.
    """
    logger.info("channels.peliculasaudiolatino play")

    # First page: extract the intermediate iframe URL
    data = scrapertools.cache_page(item.url)
    # logger.info("data1="+data)
    url = scrapertools.find_single_match(data, '<iframe .*?src="([^"]+)"')

    # fetch the page behind the Cloudflare check to get the real URL
    data = scrapertools.anti_cloudflare(url, host=CHANNEL_HOST, headers=CHANNEL_DEFAULT_HEADERS)
    # logger.info("data2 {}".format(data))

    # Second iframe (uppercase tag on the embed page) holds the video URL
    url = scrapertools.find_single_match(data, '<IFRAME .*?SRC="([^"]+)"')
    # logger.info("url2="+url)

    itemlist = servertools.find_video_items(data=url)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist


# Automatic channel check: this function must return True when the channel works.
def test():
    """Accept the channel as soon as one 'Novedades' entry yields mirrors."""
    menu_items = mainlist(Item())
    for entry in peliculas(menu_items[0]):
        if findvideos(item=entry):
            return True
    return False

Saludos. Muchas gracias, SeiTaN, pero no me funciona: me sale un error cuando trato de abrirlo.

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 12 Jun 2016, 01:01
por Hernan_Ar_c
Los menus funcionan, pero a la hora de reproducir un video...

Código: Seleccionar todo

kodi/addons/plugin.video.pelisalacarta/channels/peliculasaudiolatino.py", line 269, in play
                                                data = scrapertools.anti_cloudflare(url, host=CHANNEL_HOST, headers=CHANNEL_DEFAULT_HEADERS)
                                            AttributeError: 'module' object has no attribute 'anti_cloudflare'
                                            -->End of Python script error report<--

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 12 Jun 2016, 01:02
por SeiTaN
Mea culpa, hice los cambios sobre una versión que tengo beta de pelisalacarta, cuando lo adapte a la 4.0.9 lo subo.

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 12 Jun 2016, 16:08
por SeiTaN
a ver ahora... versión para 4.0.9, me queda pendiente arreglar el tema de las series.

Imagen

Imagen

Imagen

Código: Seleccionar todo

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import urllib2
import re
import sys
import urlparse

from core import config
from core import channeltools
from core import logger
from core import scrapertools
from servers import servertools
from core.item import Item

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"

# Site base URL; every listing path is resolved against it with urljoin.
CHANNEL_HOST = "http://peliculasaudiolatino.com/"
# Default HTTP headers, reused by anti_cloudflare() for every request.
headers = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:33.0) Gecko/20100101 Firefox/33.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]


DEBUG = config.get_setting("debug")

# Channel metadata; thumbnail_host is used as the icon for the menu headers.
parameters = channeltools.get_channel_parameters(__channel__)
thumbnail_host = parameters['thumbnail']


def isGeneric():
    """Tell pelisalacarta this channel follows the generic workflow."""
    return True


def mainlist(item):
    """Build the channel's main menu (movies sections, search, series)."""
    logger.info("channels.peliculasaudiolatino mainlist")

    itemlist = [Item(channel=__channel__, title="[B]Películas[/B]", folder=False,
                     thumbnail=thumbnail_host, action="mainlist")]

    # (title, action, relative path) for every movie section, in menu order.
    sections = [
        ("      Estrenos", "peliculas", "/estrenos-2016.html"),
        ("      Recién agregadas", "peliculas", "/ultimas-agregadas.html"),
        ("      Recién actualizadas", "peliculas", "/recien-actualizadas.html"),
        ("      Las más vistas", "peliculas", "/las-mas-vistas.html"),
        ("      Listado por géneros", "generos", "/lista-completa.html"),
        ("      Listado por años", "anyos", "/lista-completa.html"),
        ("      Todas", "peliculas", "/lista-completa.html"),
    ]
    for menu_title, menu_action, path in sections:
        itemlist.append(Item(channel=__channel__, title=menu_title, action=menu_action,
                             url=urlparse.urljoin(CHANNEL_HOST, path)))

    itemlist.append(Item(channel=__channel__, title="      Buscar...", action="search"))

    # TV series header + its single entry.
    itemlist.append(Item(channel=__channel__, title="[B]Series TV[/B]", folder=False,
                         thumbnail=thumbnail_host, action="mainlist"))
    itemlist.append(Item(channel=__channel__, title="      Todas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/series-completas.html")))
    return itemlist


def peliculas(item):
    """Scrape one listing page (movies or series) into menu items.

    Series pages ("series-completas") link to `temporadas`; movie pages
    link straight to `findvideos`. Appends a "next page" item when the
    pagination chevron is present.
    """
    logger.info("channels.peliculasaudiolatino peliculas")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Each entry: link, poster and title
    patron = ('<div class="top"[^<]+'
              '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">')
    entries = re.findall(patron, data, re.DOTALL)

    itemlist = []
    is_series = "series-completas" in item.url

    for scrapedurl, scrapedthumbnail, scrapedtitle in entries:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()
        if is_series:
            # use the full-size poster when coming from the series listing
            thumbnail = scrapedthumbnail.replace("poster/85x115/series/", "")
            action = "temporadas"
        else:
            # use the full-size movie poster
            thumbnail = scrapedthumbnail.replace("poster/85x115/peliculas/", "")
            action = "findvideos"

        itemlist.append(Item(channel=__channel__, action=action, title=title, fulltitle=title, url=url,
                             thumbnail=thumbnail, folder=True))

    # Pagination marker
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), folder=True))

    return itemlist


def temporadas(item):
    """List every episode of a series, one item per episode.

    Parses the seasons accordion of a series page and emits "SxE title"
    entries whose action is `findvideos`. The shared plot is taken from
    the page synopsis.
    """
    logger.info("channels.peliculasaudiolatino temporadas")

    itemlist = []
    # Download the page and flatten the HTML so the patterns stay simple
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    plot = scrapertools.find_single_match(data, "Sinopsis:</b>(.*?)</p>")

    # One block per season: accordion header plus its episode list
    patron = '<div class="accord-header">(.*?)</li></ul></div>'
    list_temporadas = re.compile(patron, re.DOTALL).findall(data)

    for temporada in list_temporadas:
        # NOTE: the leftover 'yeah {}' debug logs that dumped every season
        # block at info level have been removed.
        num_temporada = scrapertools.find_single_match(temporada, 'Temporada (\d+)[\s]<span')

        patron = "<ul><li><a href='([^']+)'.+?>(.*?)[\s]<span"
        matches = re.compile(patron, re.DOTALL).findall(temporada)

        for scrapedurl, scrapedtitle in matches:
            num_episodio = scrapertools.find_single_match(scrapedtitle, '.+? (\d+)')
            title = "{0}x{1} {2}".format(num_temporada, num_episodio, scrapedtitle)

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=plot, folder=True))

    return itemlist


def generos(item):
    """List the genres menu, sorted alphabetically by title."""
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Restrict the search to the "Generos" dropdown block
    data = scrapertools.find_single_match(data, 'span.+?>Generos<span(.*?)</ul>')

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()

        itemlist.append(Item(channel=__channel__, action="peliculas", title=title, url=url))

    # FIX: the sort lambda used to name its parameter `Item`, shadowing the
    # Item class inside the lambda body.
    itemlist.sort(key=lambda it: it.title)
    return itemlist


def anyos(item):
    """List the per-year filter entries from the years dropdown."""
    logger.info("channels.peliculasaudiolatino anyos")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Restrict the search to the "Años" dropdown block
    data = scrapertools.find_single_match(data, "span.+?>Años<span(.*?)</ul>")

    # Extract the entries
    matches = re.findall('<li><a href="([^"]+)">([^<]+)<', data, re.DOTALL)

    itemlist = [Item(channel=__channel__, action="peliculas", title=year_title,
                     url=urlparse.urljoin(item.url, year_url), thumbnail="",
                     plot="", folder=True)
                for year_url, year_title in matches]

    return itemlist


def search(item, texto):
    """Query the site's autocomplete endpoint and list the results.

    Failures are logged and swallowed on purpose so a broken channel does
    not abort the global search, but the handler is narrowed to Exception
    (the old bare `except:` also swallowed KeyboardInterrupt/SystemExit).
    """
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []

    texto = texto.replace(" ", "+")
    try:
        params = "search={0}".format(texto)
        data = scrapertools.cachePagePost(CHANNEL_HOST + 'autoplete.php', params)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

        patron = '<a href="([^"]+)"><img src="([^"]+)".+?</a>.+?<b>(.*?)</b>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            title = scrapertools.htmlclean(scrapedtitle.strip())
            plot = ""

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=plot, viewmode="movie", folder=True))

        return itemlist

    # Catch the exception so one failing channel does not break the global search
    except Exception:
        # sys is already imported at module level; the redundant local import is gone
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def findvideos(item):
    """List the available mirrors (server links) for a movie/episode page.

    Parses the first links table and builds one playable item per row:
    "type en server [language] [quality] (uploader)".
    """
    logger.info("channels.peliculasaudiolatino videos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    scrapedthumbnail = item.thumbnail
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    # NOTE: the leftover info-level logs that dumped the whole page and the
    # raw table blocks have been removed.
    patron = '<table class="table_links">(.*?)</table>'
    bloques = re.compile(patron, re.DOTALL).findall(data)

    if bloques:
        patron = '<span class="infotx">(.*?)</span>.*?<span class="infotx">(.*?)</span>.+?<img .+? alt="([^"]+)".+?' \
                 '<img .+? alt="([^"]+)".+?<a href="([^"]+)".+?</span>(.*?)</a>'

        matches = re.compile(patron, re.DOTALL).findall(bloques[0])
        for uploader, scrapedservidor, imgidioma, calidad, scrapedurl, scrapedtipo in matches:
            # FIX: server name is the text up to the first dot; partition()
            # returns the whole string when there is no dot, whereas the old
            # index('.') raised ValueError and aborted the listing.
            servidor = scrapedservidor.partition('.')[0]
            title = "{0} en {1} [{2}] [{3}] ({4})".format(scrapedtipo.strip(), servidor, imgidioma, calidad,
                                                          uploader)
            itemlist.append(Item(channel=__channel__, action="play", title=title, fulltitle=item.fulltitle,
                                 url=scrapedurl, thumbnail=scrapedthumbnail))

    return itemlist


def play(item):
    """Resolve the final video URL for *item* and return playable items."""
    logger.info("channels.peliculasaudiolatino play")

    # First page: extract the intermediate iframe URL
    page = scrapertools.cache_page(item.url)
    iframe_url = scrapertools.find_single_match(page, '<iframe .*?src="([^"]+)"')

    # Fetch it through the Cloudflare workaround to reach the real embed
    embed_page = anti_cloudflare(iframe_url)

    # Second iframe (uppercase tag on the embed page) holds the video URL
    video_url = scrapertools.find_single_match(embed_page, '<IFRAME .*?SRC="([^"]+)"')

    itemlist = servertools.find_video_items(data=video_url)
    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist


# Automatic channel check: this function must return True when the channel works.
def test():
    """Accept the channel as soon as one 'Novedades' entry yields mirrors."""
    menu_items = mainlist(Item())
    for entry in peliculas(menu_items[0]):
        if findvideos(item=entry):
            return True
    return False

def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))
        scrapertools.get_headers_from_response(CHANNEL_HOST + '/' + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)

Re: Peliculasaudiolatino dejo de funcionar.

Publicado: 12 Jun 2016, 17:07
por isaac18
Buenas tardes SeiTaN, gracias por tu trabajo, pero en XBMC no me va: no salen las películas.
Saludos.