# -*- coding: utf-8 -*-

# feedparser is a package from PyPI that supports both RSS and Atom feeds.
# It also has built-in HTTP support, which we will not use here
# since it relies on blocking I/O.
import feedparser
from twisted.internet import reactor,defer
from twisted.python import log
from twisted.web.client import getPage
import datetime
# The FeedManager owns our callbacks, which are invoked by the reactor.
class FeedManager:
    def __init__(self):
        self.feeds = list()
        self.sched = None
        self.scheduleUpdate(2)

    def addFeed(self,url):
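        """Subscribe to the feed at url and schedule an update shortly."""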
        self.feeds.append(Feed(url))
        self.scheduleUpdate(1)
    

    def getImages(self):
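        """Yield the href of every image link found in the feeds that are ready."""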
        for feed in self.feeds:
            if not feed.ready:
                continue
            for entry in feed.entries:
                for link in entry.links:
                    # link.type may be missing; skip profile images (e.g. Twitter avatars)
                    if "image" in link.get("type", "") and "profile_images" not in link.href:
                        yield link.href
                    

    def scheduleUpdate(self,time=360):
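        """Cancel any pending update and schedule a new one `time` seconds from now."""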
        if self.sched is not None and not self.sched.called :
            self.sched.cancel()
        self.sched = reactor.callLater(time,self.updater)

    def updater(self):
        """Download every subscribed feed and reschedule the next update."""
        defs = list()
        for feed in self.feeds:
            # Bind feed as a default argument so each closure reports the feed
            # it was created for, not whichever feed the loop ended on.
            def onError(error, feed=feed):
                print("could not update feed {}.\nError: {}".format(feed, error))
            d = getPage(feed.url).addCallbacks(callback=feed.onDownloaded,
                                               errback=onError).addErrback(log.err)
            defs.append(d)
        self.scheduleUpdate()
        return defer.DeferredList(defs)


class Entry:
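    """Thin wrapper around a single feedparser entry."""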
    def __init__(self,entry):
        self.title = entry.title
        self.summary = entry.summary
        self.content = entry.get("content")
        self.published = entry.get("published_parsed")
        self.link = entry.get("link")
        self.links = entry.get("links") #all the links
        if self.title == self.content:
            self.content =""


class Feed:
    """Represents a feed we have subscribed to and abstracts away updates etc."""
    def __init__(self,url):
        self.url = url
        self.lastUpdate = None
        self._ready = False
        
    @property
    def ready(self):
        """True once the feed has been downloaded and parsed at least once."""
        return hasattr(self,"title") and self._ready
    
    def __repr__(self):
        return self.title if hasattr(self,"title") else self.url

    def onDownloaded(self,document):
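        """Parse the downloaded feed document and refresh cached metadata and entries."""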
        data = feedparser.parse(document)
        upd = data.feed.get("updated_parsed")
        
        # If we get the same update date, skip it. The reason we check for
        # equality rather than ordering is that a feed owner might revert
        # their feed to an older version.
        if self.lastUpdate is not None and self.lastUpdate == upd:
            return
        self.lastUpdate = upd

        self._ready = False

        self.title = data.feed.get("title")
        self.subtitle = data.feed.get("subtitle")
        self.info = data.feed.get("info")
        self.rights = data.feed.get("rights")
        self.link = data.feed.get("link")
        special = []
        if "twitter.com" in self.url:
            special.append("twitter")

        if "image" in data.feed:
            self.image = data.feed.image.href
        else:
            self.image = None
        self.entries = []
        for entry in data.entries:
            # create a new wrapper
            self.entries.append(Entry(entry))

        self.special = " ".join(special)
        self._ready = True
    

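
# Example usage (a minimal sketch, not part of the original module): build a
# FeedManager, subscribe to one feed, and hand control to the Twisted reactor
# so the scheduled updates can run. The URL below is purely illustrative.
if __name__ == "__main__":
    manager = FeedManager()
    manager.addFeed("https://example.com/feed.atom")
    reactor.run()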