Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 18:39
by SeiTaN
isaac18, what does the log show?

Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 19:07
by robalo
Well, it looks like there's a bit of a mess with the channel, so I'm passing you my TfM so you can see the basic behaviour and try to adapt the idea. It doesn't have as many options, and some are copies of others. I think the functionality is consistent with how the site works, except for user options, if the site has any.

The TfM can coexist with the version you already have, so you can compare the behaviour of both versions.
I hope it helps you improve the channel.

Copy the files into the channels folder; it doesn't overwrite anything.
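
For anyone unsure about that step, here is a minimal sketch of the copy, assuming a hypothetical layout where the plugin's channels live in a pelisalacarta/channels folder (both paths below are example placeholders, not real locations):

Code: Select all

# -*- coding: utf-8 -*-
# Minimal sketch of "copy the files into the channels folder".
# SRC and DST are hypothetical example paths; point them at your own setup.
import os
import shutil

SRC = "/tmp/tfm"                          # where the TfM files were unpacked
DST = "/path/to/pelisalacarta/channels"   # the plugin's channels folder

for name in os.listdir(SRC):
    target = os.path.join(DST, name)
    if os.path.exists(target):
        # never overwrite the channel files already installed
        print "skipped %s (already exists)" % name
    else:
        shutil.copy(os.path.join(SRC, name), target)
        print "copied %s" % name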

Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 21:41
by isaac18
It won't let me upload it. "The extension log is not allowed" is what it tells me.

Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 22:43
by SeiTaN
isaac18, nothing shows up in the log you uploaded.

Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 22:58
by makal
SeiTaN wrote: let's see now... version for 4.0.9; I still need to fix the series section.


Code: Select all

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import urllib2
import re
import sys
import time
import urlparse

from core import config
from core import channeltools
from core import logger
from core import scrapertools
from servers import servertools
from core.item import Item

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"

CHANNEL_HOST = "http://peliculasaudiolatino.com/"
headers = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:33.0) Gecko/20100101 Firefox/33.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]


DEBUG = config.get_setting("debug")

parameters = channeltools.get_channel_parameters(__channel__)
thumbnail_host = parameters['thumbnail']


def isGeneric():
    return True


def mainlist(item):
    logger.info("channels.peliculasaudiolatino mainlist")

    itemlist = list()
    itemlist.append(Item(channel=__channel__, title="[B]Películas[/B]", folder=False,
                         thumbnail=thumbnail_host, action="mainlist"))

    itemlist.append(Item(channel=__channel__, title="      Estrenos", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/estrenos-2016.html")))
    itemlist.append(Item(channel=__channel__, title="      Recién agregadas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/ultimas-agregadas.html")))
    itemlist.append(Item(channel=__channel__, title="      Recién actualizadas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/recien-actualizadas.html")))
    itemlist.append(Item(channel=__channel__, title="      Las más vistas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/las-mas-vistas.html")))
    itemlist.append(Item(channel=__channel__, title="      Listado por géneros", action="generos",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Listado por años", action="anyos",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Todas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Buscar...", action="search"))

    itemlist.append(Item(channel=__channel__, title="[B]Series TV[/B]", folder=False,
                         thumbnail=thumbnail_host, action="mainlist"))
    itemlist.append(Item(channel=__channel__, title="      Todas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/series-completas.html")))
    return itemlist


def peliculas(item):
    logger.info("channels.peliculasaudiolatino peliculas")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries from the selected page
    patron = '<div class="top"[^<]+'
    patron += '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)
    itemlist = []

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()
        if "series-completas" in item.url:
            # pick the good image when coming from series
            thumbnail = scrapedthumbnail.replace("poster/85x115/series/", "")
            action = "temporadas"
        else:
            # pick the good image for movies
            thumbnail = scrapedthumbnail.replace("poster/85x115/peliculas/", "")
            action = "findvideos"

        itemlist.append(Item(channel=__channel__, action=action, title=title, fulltitle=title, url=url,
                             thumbnail=thumbnail, folder=True))

    # Extract the next-page marker
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), folder=True))

    return itemlist


def temporadas(item):
    logger.info("channels.peliculasaudiolatino temporadas")

    itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    plot = scrapertools.find_single_match(data, "Sinopsis:</b>(.*?)</p>")

    patron = '<div class="accord-header">(.*?)</li></ul></div>'
    list_temporadas = re.compile(patron, re.DOTALL).findall(data)

    for temporada in list_temporadas:
        logger.info("yeah {}".format(temporada))
        num_temporada = scrapertools.find_single_match(temporada, 'Temporada (\d+)[\s]<span')
        logger.info("yeah {}".format(num_temporada))

        patron = "<ul><li><a href='([^']+)'.+?>(.*?)[\s]<span"
        matches = re.compile(patron, re.DOTALL).findall(temporada)

        for scrapedurl, scrapedtitle in matches:
            num_episodio = scrapertools.find_single_match(scrapedtitle, '.+? (\d+)')
            title = "{0}x{1} {2}".format(num_temporada, num_episodio, scrapedtitle)

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=plot, folder=True))

    return itemlist


def generos(item):
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Limit the block to search in
    data = scrapertools.find_single_match(data, 'span.+?>Generos<span(.*?)</ul>')

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)

    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedtitle = match[1].strip()
        # logger.info(scrapedtitle)

        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle, url=scrapedurl))

    itemlist = sorted(itemlist, key=lambda i: i.title)
    return itemlist


def anyos(item):
    logger.info("channels.peliculasaudiolatino anyos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Limit the block to search in
    data = scrapertools.find_single_match(data, "span.+?>Años<span(.*?)</ul>")
    # logger.info("channels.peliculasaudiolatino data="+data)

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle
        thumbnail = ""
        plot = ""
        # if DEBUG:
        #     logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append(Item(channel=__channel__, action="peliculas", title=title, url=url, thumbnail=thumbnail,
                             plot=plot, folder=True))

    return itemlist


def search(item, texto):
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []

    texto = texto.replace(" ", "+")
    try:
        params = "search={0}".format(texto)
        data = scrapertools.cachePagePost(CHANNEL_HOST + 'autoplete.php', params)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
        # logger.info("dataaa {}".format(data))

        patron = '<a href="([^"]+)"><img src="([^"]+)".+?</a>.+?<b>(.*?)</b>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            title = scrapertools.htmlclean(scrapedtitle.strip())
            plot = ""

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=plot, viewmode="movie", folder=True))

        return itemlist

    # Catch the exception so a failing channel does not interrupt the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def findvideos(item):
    logger.info("channels.peliculasaudiolatino videos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    scrapedthumbnail = item.thumbnail
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    logger.info("dataaa {0}".format(data))
    patron = '<table class="table_links">(.*?)</table>'
    bloques = re.compile(patron, re.DOTALL).findall(data)
    logger.info("bloques {}".format(bloques))

    if bloques:
        patron = '<span class="infotx">(.*?)</span>.*?<span class="infotx">(.*?)</span>.+?<img .+? alt="([^"]+)".+?' \
                 '<img .+? alt="([^"]+)".+?<a href="([^"]+)".+?</span>(.*?)</a>'

        matches = re.compile(patron, re.DOTALL).findall(bloques[0])
        for uploader, scrapedservidor, imgidioma, calidad, scrapedurl, scrapedtipo in matches:
            url = scrapedurl
            pos_dot = scrapedservidor.index('.')
            servidor = scrapedservidor[:pos_dot]
            title = "{0} en {1} [{2}] [{3}] ({4})".format(scrapedtipo.strip(), servidor, imgidioma, calidad,
                                                          uploader)
            itemlist.append(Item(channel=__channel__, action="play", title=title, fulltitle=item.fulltitle, url=url,
                                 thumbnail=scrapedthumbnail))

    return itemlist


def play(item):
    logger.info("channels.peliculasaudiolatino play")

    data = scrapertools.cache_page(item.url)
    # logger.info("data1="+data)
    url = scrapertools.find_single_match(data, '<iframe .*?src="([^"]+)"')

    # get the real url
    data = anti_cloudflare(url)
    # logger.info("data2 {}".format(data))

    url = scrapertools.find_single_match(data, '<IFRAME .*?SRC="([^"]+)"')
    # logger.info("url2="+url)

    itemlist = servertools.find_video_items(data=url)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist


# Automatic channel check: this function must return "True" if the channel is OK.
def test():
    # mainlist
    mainlist_items = mainlist(Item())
    # Consider the channel good if any of the "Novedades" videos returns mirrors
    novedades_items = peliculas(mainlist_items[0])
    bien = False
    for novedades_item in novedades_items:
        mirrors = findvideos(item=novedades_item)
        if len(mirrors) > 0:
            bien = True
            break

    return bien


def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))
        scrapertools.get_headers_from_response(CHANNEL_HOST + '/' + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)

Thank you very much SeiTaN, it works great now.

Re: Peliculasaudiolatino stopped working.

Posted: 12 Jun 2016, 23:08
by isaac18
isaac18 wrote: It won't let me upload it. "The extension log is not allowed" is what it tells me.
Let's see now....

Re: Peliculasaudiolatino stopped working.

Posted: 13 Jun 2016, 06:30
by SeiTaN
isaac18, it shows that you opened pelisalacarta, but I don't see that you entered the channel.

Re: Peliculasaudiolatino stopped working.

Posted: 13 Jun 2016, 09:51
by isaac18
SeiTaN wrote: isaac18, it shows that you opened pelisalacarta, but I don't see that you entered the channel.
Let's see now, I think the error does show up this time.

Re: Peliculasaudiolatino stopped working.

Posted: 13 Jun 2016, 10:13
by SeiTaN

Code: Select all

# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Canal para peliculasaudiolatino
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
# ------------------------------------------------------------

import urllib2
import re
import sys
import time
import urlparse

from core import config
from core import channeltools
from core import logger
from core import scrapertools
from servers import servertools
from core.item import Item

__channel__ = "peliculasaudiolatino"
__category__ = "F"
__type__ = "generic"
__title__ = "Peliculasaudiolatino"
__language__ = "ES"

CHANNEL_HOST = "http://peliculasaudiolatino.com/"
headers = [
    ["User-Agent", "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:33.0) Gecko/20100101 Firefox/33.0"],
    ["Accept-Encoding", "gzip, deflate"],
    ["Referer", CHANNEL_HOST]
]


DEBUG = config.get_setting("debug")

parameters = channeltools.get_channel_parameters(__channel__)
thumbnail_host = parameters['thumbnail']


def isGeneric():
    return True


def mainlist(item):
    logger.info("channels.peliculasaudiolatino mainlist")

    itemlist = list()
    itemlist.append(Item(channel=__channel__, title="[B]Películas[/B]", folder=False,
                         thumbnail=thumbnail_host, action="mainlist"))

    itemlist.append(Item(channel=__channel__, title="      Estrenos", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/estrenos-2016.html")))
    itemlist.append(Item(channel=__channel__, title="      Recién agregadas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/ultimas-agregadas.html")))
    itemlist.append(Item(channel=__channel__, title="      Recién actualizadas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/recien-actualizadas.html")))
    itemlist.append(Item(channel=__channel__, title="      Las más vistas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/las-mas-vistas.html")))
    itemlist.append(Item(channel=__channel__, title="      Listado por géneros", action="generos",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Listado por años", action="anyos",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Todas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/lista-completa.html")))
    itemlist.append(Item(channel=__channel__, title="      Buscar...", action="search"))

    itemlist.append(Item(channel=__channel__, title="[B]Series TV[/B]", folder=False,
                         thumbnail=thumbnail_host, action="mainlist"))
    itemlist.append(Item(channel=__channel__, title="      Todas", action="peliculas",
                         url=urlparse.urljoin(CHANNEL_HOST, "/series-completas.html")))
    return itemlist


def peliculas(item):
    logger.info("channels.peliculasaudiolatino peliculas")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Extract the entries from the selected page
    patron = '<div class="top"[^<]+'
    patron += '<a href="([^"]+)"><img src="([^"]+)" alt="([^"]+)">'

    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)
    itemlist = []

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle.strip()
        if "series-completas" in item.url:
            # pick the good image when coming from series
            thumbnail = scrapedthumbnail.replace("poster/85x115/series/", "")
            action = "temporadas"
        else:
            # pick the good image for movies
            thumbnail = scrapedthumbnail.replace("poster/85x115/peliculas/", "")
            action = "findvideos"

        itemlist.append(Item(channel=__channel__, action=action, title=title, fulltitle=title, url=url,
                             thumbnail=thumbnail, folder=True))

    # Extract the next-page marker
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"><span class="icon-chevron-right">')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, action="peliculas", title=">> Página siguiente",
                             url=urlparse.urljoin(item.url, next_page).replace("/../../", "/"), folder=True))

    return itemlist


def temporadas(item):
    logger.info("channels.peliculasaudiolatino temporadas")

    itemlist = []
    # Download the page
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)

    plot = scrapertools.find_single_match(data, "Sinopsis:</b>(.*?)</p>")

    patron = '<div class="accord-header">(.*?)</li></ul></div>'
    list_temporadas = re.compile(patron, re.DOTALL).findall(data)

    for temporada in list_temporadas:
        num_temporada = scrapertools.find_single_match(temporada, 'Temporada (\d+)[\s]<span')

        patron = "<ul><li><a href='([^']+)'.+?>(.*?)[\s]<span"
        matches = re.compile(patron, re.DOTALL).findall(temporada)

        for scrapedurl, scrapedtitle in matches:
            num_episodio = scrapertools.find_single_match(scrapedtitle, '.+? (\d+)')
            title = "{0}x{1} {2}".format(num_temporada, num_episodio, scrapedtitle)

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=item.thumbnail, plot=plot, folder=True))

    return itemlist


def generos(item):
    logger.info("channels.peliculasaudiolatino generos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Limit the block to search in
    data = scrapertools.find_single_match(data, 'span.+?>Generos<span(.*?)</ul>')

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)

    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedtitle = match[1].strip()
        # logger.info(scrapedtitle)

        itemlist.append(Item(channel=__channel__, action="peliculas", title=scrapedtitle, url=scrapedurl))

    itemlist = sorted(itemlist, key=lambda i: i.title)
    return itemlist


def anyos(item):
    logger.info("channels.peliculasaudiolatino anyos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Limit the block to search in
    data = scrapertools.find_single_match(data, "span.+?>Años<span(.*?)</ul>")
    # logger.info("channels.peliculasaudiolatino data="+data)

    # Extract the entries
    patron = '<li><a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    # if DEBUG:
    #     scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:
        url = urlparse.urljoin(item.url, scrapedurl)
        title = scrapedtitle
        thumbnail = ""
        plot = ""
        # if DEBUG:
        #     logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")

        itemlist.append(Item(channel=__channel__, action="peliculas", title=title, url=url, thumbnail=thumbnail,
                             plot=plot, folder=True))

    return itemlist


def search(item, texto):
    logger.info("channels.peliculasaudiolatino search")
    itemlist = []

    texto = texto.replace(" ", "+")
    try:
        params = "search={0}".format(texto)
        data = scrapertools.cachePagePost(CHANNEL_HOST + 'autoplete.php', params)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
        # logger.info("dataaa {}".format(data))

        patron = '<a href="([^"]+)"><img src="([^"]+)".+?</a>.+?<b>(.*?)</b>'
        matches = re.compile(patron, re.DOTALL).findall(data)

        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            title = scrapertools.htmlclean(scrapedtitle.strip())
            plot = ""

            itemlist.append(Item(channel=__channel__, action="findvideos", title=title, fulltitle=title, url=scrapedurl,
                                 thumbnail=scrapedthumbnail, plot=plot, viewmode="movie", folder=True))

        return itemlist

    # Catch the exception so a failing channel does not interrupt the global search
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []


def findvideos(item):
    logger.info("channels.peliculasaudiolatino videos")
    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)
    scrapedthumbnail = item.thumbnail
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    logger.info("dataaa {0}".format(data))
    patron = '<table class="table_links">(.*?)</table>'
    bloques = re.compile(patron, re.DOTALL).findall(data)

    if bloques:
        patron = '<span class="infotx">(.*?)</span>.*?<span class="infotx">(.*?)</span>.+?<img .+? alt="([^"]+)".+?' \
                 '<img .+? alt="([^"]+)".+?<a href="([^"]+)".+?</span>(.*?)</a>'

        matches = re.compile(patron, re.DOTALL).findall(bloques[0])
        for uploader, scrapedservidor, imgidioma, calidad, scrapedurl, scrapedtipo in matches:
            url = scrapedurl
            pos_dot = scrapedservidor.index('.')
            servidor = scrapedservidor[:pos_dot]
            title = "{0} en {1} [{2}] [{3}] ({4})".format(scrapedtipo.strip(), servidor, imgidioma, calidad,
                                                          uploader)
            itemlist.append(Item(channel=__channel__, action="play", title=title, fulltitle=item.fulltitle, url=url,
                                 thumbnail=scrapedthumbnail))

    return itemlist


def play(item):
    logger.info("channels.peliculasaudiolatino play")

    data = scrapertools.cache_page(item.url)
    # logger.info("data1="+data)
    url = scrapertools.find_single_match(data, '<iframe .*?src="([^"]+)"')

    # get the real url
    data = anti_cloudflare(url)
    # logger.info("data2 {}".format(data))

    url = scrapertools.find_single_match(data, '<IFRAME .*?SRC="([^"]+)"')
    # logger.info("url2="+url)

    itemlist = servertools.find_video_items(data=url)

    for videoitem in itemlist:
        videoitem.title = item.title
        videoitem.channel = __channel__

    return itemlist


# Automatic channel check: this function must return "True" if the channel is OK.
def test():
    # mainlist
    mainlist_items = mainlist(Item())
    # Consider the channel good if any of the "Novedades" videos returns mirrors
    novedades_items = peliculas(mainlist_items[0])
    bien = False
    for novedades_item in novedades_items:
        mirrors = findvideos(item=novedades_item)
        if len(mirrors) > 0:
            bien = True
            break

    return bien


def anti_cloudflare(url):
    # global headers

    try:
        resp_headers = scrapertools.get_headers_from_response(url, headers=headers)
        resp_headers = dict(resp_headers)
    except urllib2.HTTPError, e:
        resp_headers = e.headers

    if 'refresh' in resp_headers:
        time.sleep(int(resp_headers['refresh'][:1]))
        scrapertools.get_headers_from_response(CHANNEL_HOST + '/' + resp_headers['refresh'][7:], headers=headers)

    return scrapertools.cache_page(url, headers=headers)
I removed the loggers that could be giving you more failures; let's see if it works for you that way.
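
If you ever want to keep some of that output, a safer pattern (just a sketch on my part, reusing the DEBUG flag and logger the file already defines) is to guard the dumps and use indexed format fields, since bare "{}" placeholders need Python 2.7 while "{0}" also works on the older interpreters some XBMC builds ship:

Code: Select all

# Hypothetical sketch, not part of the channel file: guarded debug output.
if DEBUG:
    # indexed fields like {0} work on Python < 2.7; bare {} does not
    logger.info("bloques {0}".format(bloques))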

Re: Peliculasaudiolatino stopped working.

Posted: 13 Jun 2016, 10:32
by isaac18
SeiTaN wrote: [code quoted above] I removed the loggers that could be giving you more failures; let's see if it works for you that way.

SeiTaN, thanks a lot, mate, but I don't know what it could be now. The movies show up, but I get a script error. I'm using XBMC; in Kodi it works perfectly, SeiTaN. I don't know what it could be.