First version on subversion
author: Jean-Michel Nirgal Vourgère <jmv@nirgal.com>
Fri, 25 May 2012 21:30:55 +0000 (21:30 +0000)
committer: Jean-Michel Nirgal Vourgère <jmv@nirgal.com>
Fri, 25 May 2012 21:30:55 +0000 (21:30 +0000)
html_parser.py [new file with mode: 0755]
htmlentities.py [new file with mode: 0755]
members.py [new file with mode: 0755]
run.sh [new file with mode: 0755]

diff --git a/html_parser.py b/html_parser.py
new file mode 100755 (executable)
index 0000000..ab33204
--- /dev/null
@@ -0,0 +1,351 @@
+#!/usr/bin/env python
+# -*- encoding: utf-8 -*-
+
+import sys, htmlentities
+from optparse import OptionParser
+
+VERBOSE_PARSER = False
+
+TI_EMPTY    = 1 # there's not content in these tags, ie assume <tagname ... / >
+taginfo = {
+    'meta': TI_EMPTY,
+    'link': TI_EMPTY,
+    'br':  TI_EMPTY,
+    'img':  TI_EMPTY,
+    'hr':  TI_EMPTY,
+}
+
+class Node:
+    class Flags:
+        ROOT    = 1 # this is the root node. There can be only one root
+        CLOSING = 2 # this is a closing tag such as </b>. This tags from the lexer are discarded by the parser
+        CLOSED  = 4 # this is closed. Uncleaned output will only have closing tag if that flag is present.
+
+    def __init__(self):
+        self.father = None
+        self.children = []
+        self.flags = 0
+
+class Tag(Node):
+    def __init__(self):
+        Node.__init__(self)
+        self.name = ''
+        self.attributes = {}
+
+    def get_tag_info(self):
+        """
+        Returns TI_ flags base on the name of the name
+        """
+        return taginfo.get(self.name, 0)
+
+    def __repr__(self):
+        #assert self.name != u''
+        result = '<'
+        if self.flags & Node.Flags.CLOSING:
+            result += '/'
+        result += self.name
+        for k,v in self.attributes.items():
+            #result += u' (('+k+u'))'
+            result += ' '+k
+            if v:
+                result += '="'+v.replace('\\', '\\\\').replace('"', '\\"')+'"'
+        result += '>'
+        return result
+
+    #def __repr__(self):
+    #    return 'Tag'+unicode(self).encode('utf8')
+
+class Leaf(Node):
+    # TODO: rename this to CDATA or whatever
+    def __init__(self, text):
+        Node.__init__(self)
+        self.text = htmlentities.resolve(text)
+    def __repr__(self):
+        return self.text # FIXME escape ?
+    #def __repr__(self):
+    #    return 'Leaf<'+repr(self.text.encode('utf8'))+'>'
+
+
+def html_lexer(page):
+    """
+    That iterator yields Nodes with father/children unset
+    """
+    buf = page # buffer
+    pos = 0 # everything before that position has already been parsed
+    l = len(buf) # constant length
+    state = 0
+
+    def buffind(token):
+        r = buf.find(token, pos)
+        if r==-1:
+            return None
+        return r
+
+    def get_next_tag():
+        state = 'INIT'
+        state_white_skiping = False
+        p = pos # will start with skipping '<'
+        tag = Tag()
+        while True:
+            p += 1
+            if p>=l: # EOS
+                return None, p # what about last?
+            c = buf[p]
+           
+            if state_white_skiping:
+                if ord(c)<=32:
+                    continue
+                else:
+                    state_white_skiping = False
+                
+            if state == 'INIT':
+                if c == '/':
+                    tag.flags += Node.Flags.CLOSING
+                    continue
+                elif c == '>':
+                    return tag, p+1
+                else:
+                    state = 'NAME'
+                    tag.name += c.lower()
+                    continue
+            elif state == 'NAME':
+                if ord(c)<=32 or c=='/':
+                    state = 'ATT_NAME'
+                    att_name = ''
+                    state_white_skiping = True
+                    continue
+                elif c == '>':
+                    return tag, p+1
+                else:
+                    tag.name += c.lower()
+                    continue
+            elif state == 'ATT_NAME':
+                if ord(c)<=32:
+                    state = 'ATT_EQUALS'
+                    state_white_skiping = True
+                    continue
+                elif c == '=':
+                    state = 'ATT_VALUE'
+                    state_white_skiping = True
+                    att_value = ''
+                    continue
+                elif c == '>':
+                    if att_name != '':
+                        tag.attributes[att_name] = ''
+                    return tag, p+1
+                else:   
+                    att_name += c.lower()
+                    continue
+            elif state == 'ATT_EQUALS':
+                if ord(c)<=32:
+                    continue
+                elif c == '=':
+                    state = 'ATT_VALUE'
+                    state_white_skiping = True
+                    att_value = ''
+                    continue
+                elif c == '>':
+                    if att_name != '':
+                        tag.attributes[att_name] = ''
+                    return tag, p+1
+                else:
+                    if att_name != '':
+                        tag.attributes[att_name] = ''
+                    state = 'ATT_NAME'
+                    att_name = c.lower()
+                    state_white_skiping = True
+                    continue
+            elif state == 'ATT_VALUE':
+                if att_value == '': # first char
+                    if c == '"' or c == "'":
+                        att_value_escape = c
+                        state = 'ATT_VALUE_QUOTED'
+                        continue
+                if ord(c)<32:
+                    tag.attributes[att_name] = att_value
+                    state = 'ATT_NAME'
+                    state_white_skiping = True
+                    att_name = ''
+                    continue
+                elif c == '>':
+                    tag.attributes[att_name] = att_value
+                    return tag, p+1
+                else:
+                    att_value += c
+                    continue
+            elif state == 'ATT_VALUE_QUOTED':
+                if c == att_value_escape:
+                    tag.attributes[att_name] = att_value
+                    state = 'ATT_NAME'
+                    state_white_skiping = True
+                    att_name = ''
+                    continue
+                else:
+                    att_value += c
+                    continue
+
+    while True:
+        # get next tag position
+        # TODO: check it's a real tag and not a fragment that should added to that leafnode
+        pt1 = buffind('<')
+        if pt1 != pos:
+            yield Leaf(buf[pos:pt1])
+            if pt1 is None:
+                return
+        pos = pt1
+        
+        tag, pos = get_next_tag()
+        yield tag
+
+
+def html_parse(page):
+    """
+    This function fetches the nodes from the lexer and assemble them in a node tree
+    """
+    root = Tag()
+    root.flags = Node.Flags.ROOT
+    father = root
+    for node in html_lexer(page):
+        if isinstance(node, Leaf):
+            node.father = father
+            father.children.append(node)
+        elif node.flags & Node.Flags.CLOSING:
+            # change current father
+            newfather = father
+            while True:
+                # TODO: optimize with Node.Flags.ROOT
+                if newfather is None:
+                    #TODO: log.debug()
+                    if VERBOSE_PARSER:
+                        print('Closing tag', node, 'does not match any opening tag. Discarding.', file=sys.stderr)
+                    break
+                if newfather.name == node.name:
+                    newfather.flags |= Node.Flags.CLOSED
+                    if VERBOSE_PARSER:
+                        if newfather != father:
+                            print('Closing tag', node, 'has auto-closed other nodes', end=' ', file=sys.stderr)
+                            deb = father
+                            while deb != newfather:
+                                print(deb, end=' ', file=sys.stderr)
+                                deb = deb.father
+                            print(file=sys.stderr)
+                    father = newfather.father
+                    break
+                newfather = newfather.father
+        else:
+            node.father = father
+            father.children.append(node)
+            #print 'node=',node,'info=',node.get_tag_info()
+            if not node.get_tag_info() & TI_EMPTY:
+                father = node
+        #print 'node=',node,'father=',father
+    return root
+
+
+def print_idented_tree(node, identation_level=-1):
+    if not node.flags & Node.Flags.ROOT:
+        print('   '*identation_level+repr(node))
+    for c in node.children:
+        print_idented_tree(c, identation_level+1)
+    if isinstance(node, Tag) and (node.flags&Node.Flags.CLOSED):
+        print('   '*identation_level+'</'+node.name+'>')
+
+def print_lexer_tree(p):
+    identing = 0
+    for item in html_lexer(p):
+        if isinstance(item, Tag) and item.flags & Node.Flags.CLOSING:
+            identing -= 1
+        print('   '*identing, end=' ')
+        if isinstance(item, Tag) and not item.flags & Node.Flags.CLOSING:
+            identing += 1
+        print(repr(item))
+
+
+def get_elem(root, tagname):
+    """
+    Returns all the elements whose name matches
+    But not from the children of thoses
+    """
+    if isinstance(root, Leaf):
+        return []
+    if root.name == tagname:
+        return [ root ]
+    results = []
+    for node in root.children:
+        match = get_elem(node, tagname)
+        if match:
+            results += match
+    return results
+        
+
+def split_table(table):
+    """
+    Returns table content as a list (rows) of list (columns)
+    """
+    ctr = []
+    for tr in get_elem(table, 'tr'):
+        ctd = []
+        for td in get_elem(tr, 'td'):
+            ctd += [ td ]
+        ctr.append(ctd)
+    return ctr
+
+def split_table_r_to_leaf(root):
+    """
+    Recursivly split tables as descibed in split_table
+    Only returns leaf text or list for sub tables
+    """
+    result = []
+    tables = get_elem(root, 'table')
+    if len(tables)==0:
+        return get_merged_leaf_content(root)
+    for table in tables:
+        rrow = []
+        for row in split_table(table):
+            rcol = []
+            for col in row:
+                subr = split_table_r_to_leaf(col)
+                rcol.append(subr)
+            rrow.append(rcol)
+        result.append(rrow)
+    return result
+        
+
+def get_merged_leaf_content(root):
+    """
+    Returns all the leaf content agregated in a string
+    """
+    if isinstance(root, Leaf):
+        return root.text
+
+    result = ''
+    for node in root.children:
+        result += get_merged_leaf_content(node)
+    return result
+
+
+if __name__ == "__main__":
+    parser = OptionParser()
+    parser.add_option("--dump-lexer", help="Debug: Dump idented lexer output", action='store_true', dest='lexer_dump', default=False)
+    parser.add_option("--dump-parser", help="Debug: Dump idented parser output", action='store_true', dest='parser_dump', default=False)
+    parser.add_option("--verbose-parser", help="Debug: Verbose parser errors", action='store_true', dest='verbose_parser', default=False)
+    (options, args) = parser.parse_args()
+
+    try:
+        filename = args[0]
+    except IndexError:
+        print('Need a filename', file=sys.stderr)
+        sys.exit(-1)
+
+    VERBOSE_PARSER = options.verbose_parser
+    p = file(filename, encoding='utf-8').read()
+   
+    if options.lexer_dump:
+        print_lexer_tree(p)
+        sys.exit(0)
+
+    if options.parser_dump:
+        root = html_parse(p)
+        print_idented_tree(root)
+        sys.exit(0)
+
diff --git a/htmlentities.py b/htmlentities.py
new file mode 100755 (executable)
index 0000000..42afcf0
--- /dev/null
@@ -0,0 +1,198 @@
+#!/usr/bin/env python3
+# -*- encoding: utf-8 -*-
+
+__all__ = ['resolve', 'expand', 'cleanCDATA']
+
+from html.entities import name2codepoint as entities
+
+entities_autocomplete = {}
+longestEntityLen = 0
+for key,value in entities.items():
+    if value<=255:
+        entities_autocomplete[key] = value
+    l = len(key)
+    if l>longestEntityLen:
+        longestEntityLen = l
+
+# Characters in range 127-159 are illegals, but they are sometimes wrongly used in web pages
+# Internet Explorer assumes it is taken from Microsoft extension to Latin 1 page 8859-1 aka CP1512
+# However, to be clean, we must remap them to their real unicode values
+# Unknown codes are translated into a space
+iso88591_remap = [
+       32,             # 127: ???
+       8364,   # 128: Euro symbol
+       32,             # 129: ???
+       8218,   # 130: Single Low-9 Quotation Mark
+       402,    # 131: Latin Small Letter F With Hook
+       8222,   # 132: Double Low-9 Quotation Mark
+       8230,   # 133: Horizontal Ellipsis
+       8224,   # 134: Dagger
+       8225,   # 135: Double Dagger
+       710,    # 136: Modifier Letter Circumflex Accent
+       8240,   # 137: Per Mille Sign
+       352,    # 138: Latin Capital Letter S With Caron
+       8249,   # 139: Single Left-Pointing Angle Quotation Mark
+       338,    # 140: Latin Capital Ligature OE
+       32,             # 141: ???
+       381,    # 142: Latin Capital Letter Z With Caron
+       32,             # 143: ???
+       32,             # 144: ???
+       8216,   # 145: Left Single Quotation Mark
+       8217,   # 146: Right Single Quotation Mark
+       8220,   # 147: Left Double Quotation Mark
+       8221,   # 148: Right Double Quotation Mark
+       8226,   # 149: Bullet
+       8211,   # 150: En Dash
+       8212,   # 151: Em Dash
+       732,    # 152: Small Tilde
+       8482,   # 153: Trade Mark Sign
+       353,    # 154: Latin Small Letter S With Caron
+       8250,   # 155: Single Right-Pointing Angle Quotation Mark
+       339,    # 156: Latin Small Ligature OE
+       32,             # 157: ???
+       382,    # 158: Latin Small Letter Z With Caron
+       376             # 159: Latin Capital Letter Y With Diaeresis
+]
+
+
+def checkForUnicodeReservedChar(value):
+    if value >= 0xfffe:
+        return ord('?')
+    if value < 127 or value > 159:
+        return value
+    return iso88591_remap[value-127]
+
+def expand(text):
+    result = ''
+    for c in text:
+        oc = ord(c)
+        oc = checkForUnicodeReservedChar(oc)
+        if oc<32 or c=='&' or c=='<' or c=='>' or c=='"' or oc>127:
+            result += '&#'+str(oc)+';'
+        else:
+            result += c
+    return result
+
+def resolve(text):
+    pos = 0
+    result = ''
+    l = len(text)
+    while True:
+        prevpos = pos
+        pos = text.find('&', prevpos)
+        if pos == -1:
+            ## print "No more &"
+            break
+
+        if pos >= l-2:
+            ## print "Too shoort"
+            break
+               # here we are sure the next two chars exist
+        
+        result += text[prevpos:pos]
+        c = text[pos+1]
+        if c == '#':
+            ## print "numeric entity"
+                       # This looks like an char whose unicode if given raw
+            c = text[pos+2]
+            if c == 'x' or c == 'X' and pos < l-3:
+                tmppos = text.find(';', pos+3)
+                if tmppos != -1:
+                    s = text[pos+3: tmppos]
+                    try:
+                        value = int(s, 16)
+                        value = checkForUnicodeReservedChar(value) # remap unicode char if in range 127-159
+                        result += chr(value)
+                        pos = tmppos + 1
+                        continue # ok, we did it
+                    except ValueError:
+                                           # there pos is not updated so that the original escape-like sequence is kept unchanged
+                        pass
+            else:
+                               # the given unicode value is decimal
+                               # IE behavior: parse until non digital char, no conversion if this is not
+                sb = ''
+                tmppos = pos+2
+                while True:
+                    if tmppos >= l:
+                        break # out of range
+                    c = text[tmppos]
+                    if c == ';':
+                        tmppos += 1
+                        break
+                    if c<'0' or c>'9':
+                        break
+                    sb += c
+                    tmppos += 1
+                try:
+                    value = int(sb)
+                    value = checkForUnicodeReservedChar(value); # remap unicode char if in range 127-159
+                    result += chr(value)
+                    pos = tmppos
+                    continue # ok, we did it
+                except ValueError:
+                    # there pos is not updated so that the original escape-like sequence is kept unchanged
+                    pass
+        else:
+            # here the first character is not a '#'
+            # let's try the known html entities
+
+            sb = ''
+            tmppos = pos + 1
+            while True:
+                if tmppos >= l or tmppos-pos > longestEntityLen + 1: # 1 more for ';'
+                    c2 = entities_autocomplete.get(sb, 0)
+                    break
+                c = text[tmppos]
+                if c == ';':
+                    tmppos += 1
+                    c2 = entities.get(sb, 0)
+                    break
+                c2 = entities_autocomplete.get(sb, 0)
+                if c2:
+                    break
+                sb += c
+                tmppos += 1
+            if c2:
+                result += chr(c2)
+                pos = tmppos
+                continue # ok, we did it
+                        
+        result += '&' # something went wrong, just skip is '&'
+        pos += 1
+
+    result += text[prevpos:] 
+    return result
+
+def cleanCDATA(text):
+    """
+    resolve entities
+    removes useless whites, \r, \n and \t with whites
+    expand back entities
+    """
+    tmp = resolve(text)
+    result = ''
+    isLastWhite = False # so that first white is not removed
+    for c in tmp:
+        if c in ' \r\n\t':
+            if not isLastWhite:
+                result += ' '
+                isLastWhite = True
+        else:
+            result += c
+            isLastWhite = False
+
+    return expand(result)
+
+if __name__ == '__main__':
+    import sys
+    if len(sys.argv)<2:
+        print("Missing required parameter. Try '&amp;test'", file=sys.stderr)
+        sys.exit(1)
+    input = ' '.join(sys.argv[1:])
+    #print 'input:', input
+    #raw = resolve(input)
+    #print 'resolved:', raw
+    #print 'expanded:', expand(raw)
+    print('cleanCDATA:', cleanCDATA(input))
+
diff --git a/members.py b/members.py
new file mode 100755 (executable)
index 0000000..0cb7ea3
--- /dev/null
@@ -0,0 +1,255 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
import calendar
import logging
import os
import re
import sys
import time
import urllib.request
from datetime import datetime
from http.cookiejar import CookieJar

from html_parser import *
import htmlentities
+
+#SERVER
+#LOGIN
+#PASSWORD
+#BASE_URL
+HOME = os.environ['HOME']
+ARCHIVE = HOME + '/fourmizzz/archive.py'
+RESULT = HOME + '/fourmizzz/results.log'
+
+def read_config():
+    '''
+    That function will read config.py in fourmizzz directory
+    and set up globals SERVER, LOGIN, PASSWORD, and BASE_URL.
+    '''
+    global SERVER, LOGIN, PASSWORD
+    global BASE_URL
+    CONFIG_TEMPLATE='''# Veuillez modifier les lignes ci dessous avec vos paramètres:
+SERVER = 's1.fourmizzz.fr'
+LOGIN = 'MonIdentifiant'
+PASSWORD = 'MonMotDePasse'
+    '''
+
+    sys.path.append(HOME+'/fourmizzz')
+    try:
+        import config
+    except ImportError:
+        CONFIG = HOME+'/fourmizzz/config.py'
+        logging.fatal("No configuration file. Creating %s", CONFIG)
+        f = open(CONFIG, mode='w+', encoding='utf-8')
+        f.write(CONFIG_TEMPLATE)
+        f.close()
+        logging.fatal("Please update this file with your details.")
+        sys.exit(1)
+    SERVER = config.SERVER
+    LOGIN = config.LOGIN
+    PASSWORD = config.PASSWORD
+    BASE_URL = 'http://%s' % SERVER
+
+def hide_password_from_log(txt):
+    return re.sub('mot_passe=.*', 'mot_passe=********', txt)
+
+__opener__ = None
+def httpopen(url, post_data=None):
+    if post_data:
+        log_post_data = hide_password_from_log(post_data)
+        post_data = post_data.encode('utf-8') # str->bytes
+        logging.debug('HTTP POST %s %s', url, log_post_data)
+    else:
+        logging.debug('HTTP GET %s', url)
+    global __opener__
+    if __opener__ is None:
+        cookiejar = CookieJar()
+        __opener__ = urllib.request.build_opener()
+        __opener__.add_handler(urllib.request.HTTPCookieProcessor(cookiejar))
+    http_response = __opener__.open(url, post_data)
+    return http_response
+
+def sleep(seconds):
+    logging.debug('Waiting %s seconds', seconds)
+    time.sleep(seconds)
+
+def timestamp():
+    return calendar.timegm(datetime.now().timetuple())
+
+def number_format(i):
+    result = ''
+    while i:
+        if result:
+            result = ' ' + result
+        d3 = i % 1000
+        i = i//1000
+        if i:
+            result = ('%03d' % d3) + result
+        else:
+            result = ('%d' % d3) + result
+    return result
+
+
+def tdc_get(alliance_tag=None):
+    #httpresponse = httpopen(BASE_URL + '/')
+    #html = httpresponse.read().decode('utf-8')
+    #print(html)
+   
+    httpresponse = httpopen(BASE_URL + '/index.php?connexion=1', 'serveur=%s&pseudo=%s&mot_passe=%s' % (SERVER, LOGIN, PASSWORD))
+    #httpresponse = httpopen(BASE_URL + '/index.php?connexion=1', 'serveur=%s&pseudo=%s&mot_passe=%s&connexion=Connexion' % (SERVER, LOGIN, PASSWORD))
+    html = httpresponse.read().decode('utf-8')
+    if "redirectReine" not in html:
+        logging.fatal("Impossible de s'authentifier. Vérifiez vos paramètres dans config.py")
+        sys.exit(1)
+    #print('-'*80)
+    #print(html)
+    
+    #httpresponse = httpopen(BASE_URL + '/alliance.php?Membres')
+    #html = httpresponse.read().decode('utf-8')
+    #print('-'*80)
+    #print(html)
+
+    if not alliance_tag:
+        httpresponse = httpopen(BASE_URL + '/alliance.php?Membres',
+            'xajax=membre&xajaxr='+str(timestamp()))
+        html = httpresponse.read().decode('utf-8')
+        #print('-'*80)
+        #print(html)
+
+        root = html_parse(html)
+        table = get_elem(root, 'table')[0]
+        td = get_elem(table, 'td')[3]
+        table = get_elem(td, 'table')[0]
+        #print('-'*80)
+        #print_idented_tree(table)
+    else:
+        httpresponse = httpopen(BASE_URL + '/classementAlliance.php?alliance=%s' % alliance_tag)
+        html = httpresponse.read().decode('utf-8')
+        
+        root = html_parse(html)
+        table = get_elem(root, 'table')[2]
+        #print('-'*80)
+        #print_idented_tree(table)
+
+    summary = {}
+    for tr in get_elem(table, 'tr'):
+        row = get_elem(tr, 'td')
+        if not alliance_tag and len(row) != 10:
+            continue
+        if alliance_tag and len(row) != 6:
+            continue
+        #print('-'*10)
+        #print_idented_tree(tr)
+        if not alliance_tag:
+            nick = get_merged_leaf_content(row[3])
+            tdc = get_merged_leaf_content(row[4]).replace(' ', '')
+        else:
+            nick = get_merged_leaf_content(row[2])
+            tdc = get_merged_leaf_content(row[3]).replace(' ', '')
+
+        tdc = int(tdc)
+        #print(nick, tdc)
+        summary[nick] = tdc
+    logging.debug('%s members - total tdc = %s cm²', len(summary), number_format(sum(summary.values())))
+    return summary
+
+
+def tdc_compare(oldtdc, newtdc):
+    changes = {}
+    txtchanges = []
+    for nick in newtdc.keys():
+        told = oldtdc.get(nick, 0)
+        tnew = newtdc[nick]
+        if told == tnew:
+            continue
+        changes[nick] = { 'old': told, 'new': tnew, 'delta': tnew-told }
+    
+    for nick in oldtdc.keys():
+        if nick not in newtdc:
+            told = oldtdc[nick]
+            tnew = 0
+            changes[nick] = { 'old': told, 'new': 0, 'delta': -told }
+    #for nick, change in changes.items():
+    #    print(nick, ' - ', change['old'], ' - ', change['new'], ' - ', change['delta'])
+    #print
+
+    if not len(changes):
+        logging.info('No changes')
+        return
+    for nick in changes.keys():
+        delta = changes[nick]['delta']
+        if delta <= 0:
+            continue # 0 is already process, <0 will be processed when nick swaps with nick2
+        for nick2 in changes.keys():
+            if changes[nick2]['delta'] == 0:
+                continue # already done
+            if changes[nick2]['delta'] != -delta:
+                continue # not the good one
+            percent = float(oldtdc[nick2] - newtdc[nick2]) / oldtdc[nick2] * 100.
+            txtchanges.append('%s a pris %s cm² à %s (%.2f%%)' % (nick, number_format(delta), nick2, percent))
+            changes[nick]['delta'] = 0
+            changes[nick2]['delta'] = 0
+
+                
+    for nick, change in changes.items():
+        delta = change['delta']
+        if delta < 0:
+            if nick in newtdc:
+                percent = float(-delta) / oldtdc[nick] * 100.
+                txtchanges.append('%s a perdu %s cm² (%.2f%%)' % (nick, number_format(-delta), percent))
+            else:
+                txtchanges.append("%s a quité l'alliance avec %s cm²" % (nick, number_format(-delta)))
+        elif delta > 0:
+            if nick in oldtdc:
+                txtchanges.append('%s a gagné %s cm²' % (nick, number_format(delta)))
+            else:
+                txtchanges.append("%s a rejoint l'alliance avec %s cm²" % (nick, number_format(delta)))
+
+    for txtchange in txtchanges:
+        logging.info(txtchange)
+
+
+if __name__ == '__main__':
+    from optparse import OptionParser
+    parser = OptionParser()
+    parser.add_option('-d', '--debug',
+        action='store_true', dest='debug', default=False,
+        help="debug mode")
+    parser.add_option('-a', '--alliance',
+        dest='alliance',
+        help="alliance tag. default is to process player own alliance.")
+    parser.add_option('--dry-run',
+        action='store_true', dest='dryrun', default=False,
+        help="don't store result in archives.")
+    options, args = parser.parse_args()
+
+    if options.debug:
+        loglevel = logging.DEBUG
+    else:
+        loglevel = logging.INFO
+    logging.basicConfig(filename=RESULT, level=loglevel, format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S %Z')
+
+    read_config()
+
+    oldtdc = None
+    try:
+        f = open(ARCHIVE, mode='r+', encoding='utf-8')
+    except IOError as err:
+        if err.errno == 2: # No such file or directory
+            logging.warning("No archive file, creating one.")
+            f = open(ARCHIVE, mode='w+', encoding='utf-8')
+        else:
+            raise
+    else:
+        oldtdc = eval(f.read())
+    
+    newtdc = tdc_get(options.alliance)
+    if oldtdc is not None:
+        tdc_compare(oldtdc, newtdc)
+    
+    # Save archive only after processing, just in case it crashes
+    if not options.dryrun:
+        f.seek(0)
+        f.write(repr(newtdc))
+        f.truncate()
diff --git a/run.sh b/run.sh
new file mode 100755 (executable)
index 0000000..bdcd0d4
--- /dev/null
+++ b/run.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+cp ~/fourmizzz/archive.py ~/fourmizzz/archive.py.0
+~/kod/fourmizzz/members.py
+#tail ~/fourmizzz/results
+