path: root/parsers/people.py
# -*- coding: utf-8 -*-

import troggle.settings as settings
import troggle.expo.models as models
import csv
import re
import datetime
import os

#   Julian: the below code was causing errors and it seems like a duplication of the above. Hope I haven't broken anything by commenting it. -Aaron
#
#        if name in expoers2008:
#            print "2008:", name
#            expomissing.discard(name) # I got an error which I think was caused by this -- python complained that a set changed size during iteration.
#            yo = models.Expedition.objects.filter(year = "2008")[0]
#            pyo = models.PersonExpedition(person = pObject, expedition = yo, is_guest=is_guest)
#            pyo.save()

def parseMugShotAndBlurb(firstname, lastname, person, header, pObject):
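    """Attach a mugshot Photo, and where available a blurb, to the given Person.

    The "Mugshot" column of folk.csv either names a .jpg directly or points at
    a personal .htm page in the folk/ area; in the latter case the blurb text
    and the images referenced from that page are pulled out and attached.
    """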
    #create mugshot Photo instance
    mugShotPath = os.path.join(settings.EXPOWEB, "folk", person[header["Mugshot"]])
    if mugShotPath[-3:]=='jpg': #if person just has an image, add it
        mugShotObj = models.Photo(
            caption="Mugshot for "+firstname+" "+lastname,
            is_mugshot=True,
            file=mugShotPath,
            )
        mugShotObj.save()
        mugShotObj.contains_person.add(pObject)
        mugShotObj.save()
    elif mugShotPath[-3:]=='htm': #if the person has an html page, add each image it references and put the page text in the "blurb" field of their model instance
        personPageOld = open(mugShotPath, 'r').read()
        pObject.blurb = re.search('<body>.*<hr', personPageOld, re.DOTALL).group() #this needs to be refined: strip the HTML and make sure it doesn't match beyond the blurb
        for photoFilename in re.findall('i/.*?jpg', personPageOld, re.DOTALL):
            # create a Photo for every image referenced from the page, not just the last one found
            mugShotPath = os.path.join(settings.EXPOWEB, "folk", photoFilename)
            mugShotObj = models.Photo(
                caption="Mugshot for "+firstname+" "+lastname,
                is_mugshot=True,
                file=mugShotPath,
                )
            mugShotObj.save()
            mugShotObj.contains_person.add(pObject)
            mugShotObj.save()
    pObject.save()



def LoadPersonsExpos():
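    """Rebuild the Person, Expedition and PersonExpedition tables from folk.csv.

    Any existing rows are deleted first.  One Expedition is created per year
    column of noinfo/folk.csv (plus 2008, which has no column yet), then each
    csv row becomes a Person with a PersonExpedition for every year attended.
    """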
    
    persontab = open(os.path.join(settings.EXPOWEB, "noinfo", "folk.csv"))
    personreader = csv.reader(persontab)
    headers = personreader.next()
    header = dict(zip(headers, range(len(headers))))
    
    models.Expedition.objects.all().delete()
    years = headers[5:]
    years.append("2008")
    for year in years:
        y = models.Expedition(year = year, name = "CUCC expo%s" % year)
        y.save()
    print "lll", years 

    
    models.Person.objects.all().delete()
    models.PersonExpedition.objects.all().delete()
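    # people on the 2008 expo; folk.csv does not yet have a 2008 column, so they are handled separately below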
    expoers2008 = """Edvin Deadman,Kathryn Hopkins,Djuke Veldhuis,Becka Lawson,Julian Todd,Natalie Uomini,Aaron Curtis,Tony Rooke,Ollie Stevens,Frank Tully,Martin Jahnke,Mark Shinwell,Jess Stirrups,Nial Peters,Serena Povia,Olly Madge,Steve Jones,Pete Harley,Eeva Makiranta,Keith Curtis""".split(",")
    expomissing = set(expoers2008)

    for person in personreader:
        name = person[header["Name"]]
        name = re.sub("<.*?>", "", name)
        # name is "First Last (Nickname)"; the last name and nickname are optional, and the last name may carry a "van " or "ten " prefix
        mname = re.match(r"(\w+)(?:\s((?:van |ten )?\w+))?(?:\s\(([^)]*)\))?", name)

        if mname.group(3):
            nickname = mname.group(3)
        else:
            nickname = ""

        firstname, lastname = mname.group(1), mname.group(2) or ""

        print firstname, lastname, "NNN", nickname
        #assert lastname == person[header[""]], person

        href = firstname.lower()
        if lastname:
            href += "_" + lastname.lower()
        pObject = models.Person(first_name = firstname,
                                last_name = lastname, href=href, 
                                is_vfho = person[header["VfHO member"]],
                )

        is_guest = person[header["Guest"]] == "1"  # this is really a per-expo category, not a permanent state
        pObject.save()
        parseMugShotAndBlurb(firstname, lastname, person, header, pObject)
    
        # columns from index 5 onwards hold one expedition year each; "1" or "-1" in a year column marks attendance
        for year, attended in zip(headers, person)[5:]:
            yo = models.Expedition.objects.filter(year = year)[0]
            if attended == "1" or attended == "-1":
                pyo = models.PersonExpedition(person = pObject, expedition = yo, nickname=nickname, is_guest=is_guest)
                pyo.save()

            # work around a known data error: Mike Richardson's 2001 attendance is not marked in folk.csv, so add his PersonExpedition anyway
            elif (firstname, lastname) == ("Mike", "Richardson") and year == "2001":
                print "Mike Richardson(2001) error"
                pyo = models.PersonExpedition(person = pObject, expedition = yo, nickname=nickname, is_guest=is_guest)
                pyo.save()


    # this fills in those people for whom 2008 was their first expo
    for name in expomissing:
        firstname, lastname = name.split()
        is_guest = name in ["Eeva Makiranta", "Keith Curtis"]
        print "2008:", name
        pObject = models.Person(first_name = firstname,
                                last_name = lastname,
                                is_vfho = False,
                                mug_shot = "")
        pObject.save()
        yo = models.Expedition.objects.filter(year = "2008")[0]
        pyo = models.PersonExpedition(person = pObject, expedition = yo, nickname="", is_guest=is_guest)
        pyo.save()

# expedition name lookup cached for speed (it's a very big list)
Gpersonexpeditionnamelookup = { }
def GetPersonExpeditionNameLookup(expedition):
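    """Map the likely written forms of each attendee's name to their PersonExpedition.

    The forms tried are "first last", "first l", "firstl", "f last", "first"
    and any nickname; a form that matches more than one person is dropped as
    ambiguous.  Results are cached per expedition name in
    Gpersonexpeditionnamelookup.
    """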
    global Gpersonexpeditionnamelookup
    res = Gpersonexpeditionnamelookup.get(expedition.name)
    if res:
        return res
    
    res = {}
    duplicates = set()
    
    personexpeditions = models.PersonExpedition.objects.filter(expedition=expedition)
    for personexpedition in personexpeditions:
        possnames = [ ]
        f = personexpedition.person.first_name.lower()
        l = personexpedition.person.last_name.lower()
        if l:
            possnames.append(f + " " + l)
            possnames.append(f + " " + l[0])
            possnames.append(f + l[0])
            possnames.append(f[0] + " " + l)
        possnames.append(f)
        if personexpedition.nickname:
            possnames.append(personexpedition.nickname.lower())
        
        for possname in possnames:
            if possname in res:
                duplicates.add(possname)
            else:
                res[possname] = personexpedition
        
    for possname in duplicates:
        del res[possname]
    
    Gpersonexpeditionnamelookup[expedition.name] = res
    return res