Posting what I have so far; the multi-episode handling is still missing, I'll look into it when I have more time.
Code:
# -*- coding: utf-8 -*-
from core.libs import *
QLT = Qualities({
    Qualities.rip: ['DVDRip', 'HDRip'],
    Qualities.hd_full: ['1080p', 'BluRay-1080p', 'BDremux-1080p',  'MicroHD-1080p', 'FullBluRay'],
    Qualities.hd: ['BluRay-720p', 'hdtv', '720p'],
    Qualities.scr: ['dvdscr'],
    Qualities.sd: ['sd']
})
HOST = 'http://www.mejortorrent.com'
def mainlist(item):
    logger.trace()
    itemlist = list()
    itemlist.append(item.clone(
        action="newest_movies",
        label="Peliculas",
        url=HOST + '/torrents-de-peliculas.html',
        type="item",
        group=True,
        content_type='movies'
    ))
    itemlist.append(item.clone(
        action="newest_movies",
        label="Peliculas HD",
        url=HOST + '/torrents-de-peliculas-hd-alta-definicion.html',
        type="item",
        group=True,
        content_type='movies'
    ))
    itemlist.append(item.clone(
        action="newest_episodes",
        label="Series",
        url=HOST + '/torrents-de-series.html',
        type="item",
        group=True,
        quality='sd',
        content_type='episodes'
    ))
    
    itemlist.append(item.clone(
        action="newest_episodes",
        label="Series HD",
        url=HOST + '/torrents-de-series-hd-alta-definicion.html',
        type="item",
        group=True,
        content_type='episodes'
    ))
    
    return itemlist
def newest_movies(item):
    logger.trace()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Extract the news blocks (bloque_noticia)
    patron = "<td><div align='justify'><center>(.*?)</td>"
    for noticia in scrapertools.find_multiple_matches(data,patron):
        # Get the URLs and posters contained in each block
        patron = '<a href="([^"]+)"><img src="([^"]+)'
        for url, poster in scrapertools.find_multiple_matches(noticia, patron):
            # Use the URL to pull each movie's title and quality
            patron = '%s">([^<]+)</a> <b>([^<]+)</b>' % url
            title, quality = scrapertools.find_single_match(noticia, patron)
            title = title.replace(quality, '').replace('(4K-HDR)','').replace('(FullBluRay)','').strip()
            quality = quality.replace('(','').replace(')','').strip()
            itemlist.append(item.clone(
                title=title,
                label=title,
                url= HOST + url,
                type="movie",
                content_type="servers",
                poster= HOST + poster,
                action="findvideos",
                quality=QLT.get(quality)
                ))
    # Pagination
    next_url = scrapertools.find_single_match(data, "</span> <a href='(.*?)'")
    if next_url:
        next_url = HOST + next_url
        itemlist.append(item.clone(
            action="newest_movies",
            url=next_url,
            type='next'
        ))
    return itemlist
def newest_episodes(item):
    logger.trace()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    # Extract the news blocks (bloque_noticia)
    patron = "<td><div align='justify'><center>(.*?)</td>"
    for noticia in scrapertools.find_multiple_matches(data,patron):
        # Get the URLs and posters contained in each block
        patron = '<a href="([^"]+)"><img src="([^"]+)'
        for url, poster in scrapertools.find_multiple_matches(noticia, patron):
            # Use the URL to pull the title_season string and the episode
            patron = '%s">([^<]+)</a>\s*<b>([^<]+)</b>' % url
            title_season, episode = scrapertools.find_single_match(noticia, patron)
            tvshowtitle, season = scrapertools.find_single_match(title_season, "(?i)(.*?) - ([0-9,M]+)")
            quality = scrapertools.find_single_match(title_season, "\[([^\]]+)")
            # an 'M' season means miniseries; treat it as season 1
            season = int(season.lower().replace('m', '1'))
            if 'episodio' in url:
                action='findvideos'
                content_type='servers'
                extra='extra'  # flag so findvideos offers 'Go to the series'
            else:
                action='episodes'
                content_type='episodes'
                extra=''  # already listing the series; no 'Go to the series' link needed
        
            new_item = item.clone(
                tvshowtitle=tvshowtitle.strip(),
                label=tvshowtitle.strip(),
                url=HOST + url,
                action=action,
                thumb=HOST + poster,
                season=season,
                type='episode',
                content_type=content_type,
                extra=extra,
                quality=QLT.get(quality) if quality else item.quality
                )
        
            # multi-episode packs keep only the first number for now (pending; see notes below)
            num_episode = scrapertools.find_multiple_matches(episode, r"(\d+)")
            num_episode = [int(n) for n in num_episode]
            new_item.episode = num_episode[0] if num_episode else 1  # assume 1 if no number found
            itemlist.append(new_item)
    # Pagination
    next_url = scrapertools.find_single_match(data, "</span> <a href='(.*?)'")
    if next_url:
        next_url = HOST + next_url
        itemlist.append(item.clone(
            action="newest_episodes",
            url=next_url,
            type='next'
        ))
    return itemlist
def episodes(item):
    logger.trace()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    bloque = scrapertools.find_single_match(data, 'Listado(.*?)<center')
    
    for url, title in scrapertools.find_multiple_matches(bloque, "<a href='(.*?)'>(.*?)<"):
        season, episode = scrapertools.find_single_match(title, r'(\d+).*?(\d+)')
            
        new_item = item.clone(
            title=title,
            url=HOST + url,
            season=int(season),
            episode=int(episode),
            type='episode',
            content_type='servers',
            action="findvideos",
            extra=''  # cleared so 'Go to the series' is not repeated once we are inside the series
            )
    
        itemlist.append(new_item)
    return itemlist
def findvideos(item):
    logger.trace()
    itemlist = list()
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data)
          
    data_torrent = scrapertools.find_single_match(data, "Torrent:.*?<a href='(.*?)'")
    url2 = HOST+"/"+data_torrent
    # download the intermediate page that holds the torrent link
    data_enlace = httptools.downloadpage(url2).data
    data_enlace = re.sub(r"\n|\r|\t|&nbsp;|<br>", "", data_enlace)
    
    enlace = "Pincha <a href='(.*?)'>"
    url = scrapertools.find_single_match(data_enlace, enlace)
    itemlist.append(item.clone(
        label=item.label,
        action="play",
        url=HOST+url,
        type='server',
        server='torrent'
        ))
    # go to the series
    serie = scrapertools.find_single_match(data,"<td valign='top'>.*?<a href='(.*?)'")
    if 'extra' in item.extra:
        url = HOST + serie
        itemlist.append(item.clone(
            label="Ir a la Serie",
            action="episodes",
            type='item',
            url=url,
            content_type='episodes'
        ))
    itemlist = servertools.get_servers_from_id(itemlist)
    
    return itemlist
- Line 43: I add quality='sd' to the Series entry in mainlist; I don't think that category comes in any other quality, and this way newest_episodes has a quality to show and nothing breaks.
- Line 124: the extra='extra' in newest_episodes, to tell the two kinds of link apart.
- Line 219: in findvideos I add an item to go to the series, driven by the 'extra' flag from line 124.
- Multi-episodes: I've been looking into it and with range() it could probably be done; I'll check when I have more time. A rough sketch of the idea below.
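This is roughly what I have in mind with range(), assuming multi-episode titles come as 'SxEE al SxEE' (e.g. '3x01 al 3x03'); the helper expand_multiepisode, the separator word and the four-number layout are assumptions I haven't checked against the real listings yet.

Code:
# -*- coding: utf-8 -*-
import re

def expand_multiepisode(episode_text):
    # Pull every number out of the episode string:
    # '3x01 al 3x03' -> [3, 1, 3, 3]; '3x22' -> [3, 22]
    nums = [int(n) for n in re.findall(r'\d+', episode_text)]
    # Assumption: four numbers with matching seasons mean a 'SxEE al SxEE' pack,
    # so range() expands the span into one entry per episode
    if len(nums) == 4 and nums[0] == nums[2]:
        return list(range(nums[1], nums[3] + 1))
    # Single episode: keep the second number ('3x22' -> [22])
    if len(nums) >= 2:
        return [nums[1]]
    return nums  # whatever single number is there, or empty

In newest_episodes this would replace the num_episode[0] assignment: call expand_multiepisode(episode) and clone new_item once per returned number, so each episode of a pack gets its own entry in itemlist.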