Diffstat (limited to 'parsers')
-rw-r--r--  parsers/QMs.py       10
-rw-r--r--  parsers/caves.py      9
-rw-r--r--  parsers/cavetab.py    2
-rw-r--r--  parsers/logbooks.py  39
-rw-r--r--  parsers/people.py    10
-rw-r--r--  parsers/subcaves.py   2
-rw-r--r--  parsers/survex.py    64
-rw-r--r--  parsers/surveys.py   28
 8 files changed, 83 insertions(+), 81 deletions(-)
diff --git a/parsers/QMs.py b/parsers/QMs.py
index 47e6bc7..a5ecd2f 100644
--- a/parsers/QMs.py
+++ b/parsers/QMs.py
@@ -38,7 +38,7 @@ def parseCaveQMs(cave,inputFile):
dialect=csv.Sniffer().sniff(qmCSVContents.read())
qmCSVContents.seek(0,0)
qmReader = csv.reader(qmCSVContents,dialect=dialect)
- qmReader.next() # Skip header row
+ next(qmReader) # Skip header row
for line in qmReader:
try:
year=int(line[0][1:5])
@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
- print(cave + " placeholder logbook entry for " + str(year) + " added to database")
+ print((cave + " placeholder logbook entry for " + str(year) + " added to database"))
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder
@@ -71,9 +71,9 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
- print("overwriting " + str(preexistingQM) +"\r")
+ print(("overwriting " + str(preexistingQM) +"\r"))
else: # otherwise, print that it was ignored
- print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
+ print(("preserving " + str(preexistingQM) + ", which was edited in admin \r"))
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()
@@ -82,7 +82,7 @@ def parseCaveQMs(cave,inputFile):
except KeyError: #check on this one
continue
except IndexError:
- print("Index error in " + str(line))
+ print(("Index error in " + str(line)))
continue
def parse_KH_QMs(kh, inputFile):
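
Note: the doubled parentheses introduced throughout this commit are a mechanical 2to3 artifact. Where the Python 2 source already called print with parentheses, 2to3 wraps the argument in a second pair to preserve the old behaviour. With a single string argument the extra pair is harmless, just noisy, and a later cleanup could drop it:

    # 2to3 output: redundant inner parentheses around one string argument
    print(("overwriting " + str(preexistingQM) + "\r"))

    # equivalent, idiomatic Python 3
    print("overwriting " + str(preexistingQM) + "\r")
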
diff --git a/parsers/caves.py b/parsers/caves.py
index 4f65675..745b119 100644
--- a/parsers/caves.py
+++ b/parsers/caves.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
-import troggle.core.models as models
-from django.conf import settings
import os
import re
+from django.conf import settings
+
+import troggle.core.models as models
def readcaves():
@@ -14,11 +15,11 @@ def readcaves():
area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
print(" - Reading Entrances")
#print "list of <Slug> <Filename>"
- for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
+ for filename in next(os.walk(settings.ENTRANCEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readentrance(filename)
print (" - Reading Caves")
- for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
+ for filename in next(os.walk(settings.CAVEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readcave(filename)
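
Note: the .next() to next() change is the standard iterator-protocol fix: Python 3 generators expose only __next__(), so the method call breaks, while the next() builtin works on both versions. For example:

    import os

    walker = os.walk("/tmp")                     # os.walk returns a generator
    dirpath, dirnames, filenames = next(walker)  # works on Python 2.6+ and 3
    # walker.next() would raise AttributeError on Python 3
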
diff --git a/parsers/cavetab.py b/parsers/cavetab.py
index 99202d5..8d257a7 100644
--- a/parsers/cavetab.py
+++ b/parsers/cavetab.py
@@ -58,7 +58,7 @@ def LoadCaveTab():
cavetab = open(os.path.join(settings.EXPOWEB, "noinfo", "CAVETAB2.CSV"),'rU')
caveReader = csv.reader(cavetab)
- caveReader.next() # Strip out column headers
+ next(caveReader) # Strip out column headers
logging.info("Beginning to import caves from "+str(cavetab)+"\n"+"-"*60+"\n")
diff --git a/parsers/logbooks.py b/parsers/logbooks.py
index 01800a1..ded90e4 100644
--- a/parsers/logbooks.py
+++ b/parsers/logbooks.py
@@ -40,7 +40,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear:
- print(" - No name match for: '%s'" % tripperson)
+ print((" - No name match for: '%s'" % tripperson))
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
res.append((personyear, logtime_underground))
@@ -72,11 +72,11 @@ def GetTripCave(place): #need to be fuzzier about matching here. Already a very
return tripCaveRes
elif len(tripCaveRes)>1:
- print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes))
- correctIndex=input("type list index of correct cave")
+ print(("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)))
+ correctIndex=eval(input("type list index of correct cave"))
return tripCaveRes[correctIndex]
else:
- print("No cave found for place " , place)
+ print(("No cave found for place " , place))
return
logentries = [] # the entire logbook is a single object: a list of entries
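
Note: input() to eval(input()) is another literal 2to3 translation (Python 2's input() evaluated the text it read), but eval() on interactive input is a code-execution hazard and unneeded here, where only an integer list index is expected. A safer hand-written form, sketched with a hypothetical fallback of 0:

    try:
        correctIndex = int(input("type list index of correct cave: "))
    except ValueError:
        correctIndex = 0        # assumption: default to the first candidate
    return tripCaveRes[correctIndex]
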
@@ -92,7 +92,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author:
- print(" - Skipping logentry: " + title + " - no author for entry")
+ print((" - Skipping logentry: " + title + " - no author for entry"))
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
return
@@ -175,7 +175,7 @@ def Parseloghtmltxt(year, expedition, txt):
''', trippara)
if not s:
if not re.search(r"Rigging Guide", trippara):
- print("can't parse: ", trippara) # this is 2007 which needs editing
+ print(("can't parse: ", trippara)) # this is 2007 which needs editing
#assert s, trippara
continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
@@ -203,7 +203,7 @@ def Parseloghtmltxt(year, expedition, txt):
def Parseloghtml01(year, expedition, txt):
tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
- s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
+ s = re.match("(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
assert s, trippara[:300]
tripheader, triptext = s.group(1), s.group(2)
mtripid = re.search(r'<a id="(.*?)"', tripheader)
@@ -251,7 +251,7 @@ def Parseloghtml01(year, expedition, txt):
def Parseloghtml03(year, expedition, txt):
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
- s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara)
+ s = re.match("(?s)\s*<p>(.*?)</p>(.*)$", trippara)
assert s, trippara
tripheader, triptext = s.group(1), s.group(2)
tripheader = re.sub(r"&nbsp;", " ", tripheader)
@@ -261,7 +261,7 @@ def Parseloghtml03(year, expedition, txt):
if re.match("T/U|Time underwater", sheader[-1]):
tu = sheader.pop()
if len(sheader) != 3:
- print("header not three pieces", sheader)
+ print(("header not three pieces", sheader))
tripdate, triptitle, trippeople = sheader
ldate = ParseDate(tripdate.strip(), year)
triptitles = triptitle.split(" , ")
@@ -325,35 +325,36 @@ def LoadLogbookForExpedition(expedition):
#print " - Cache is more than 30 days old."
bad_cache= True
if bad_cache:
- print " - Cache is either stale or more than 30 days old. Deleting it."
+ print(" - Cache is either stale or more than 30 days old. Deleting it.")
os.remove(cache_filename)
logentries=[]
raise
- print(" - Reading cache: " + cache_filename )
+ print((" - Reading cache: " + cache_filename ))
try:
with open(cache_filename, "rb") as f:
logentries = pickle.load(f)
- print " - Loaded ", len(logentries), " objects"
+ print(" - Loaded ", len(logentries), " objects")
logbook_cached = True
except:
- print " - Failed to load corrupt cache. Deleting it.\n"
+ print(" - Failed to load corrupt cache. Deleting it.\n")
os.remove(cache_filename)
logentries=[]
+ raise
except:
print(" - Opening logbook: ")
- file_in = open(os.path.join(expowebbase, year_settings[0]))
+ file_in = open(os.path.join(expowebbase, year_settings[0]),'rb')
txt = file_in.read().decode("latin1")
file_in.close()
parsefunc = year_settings[1]
logbook_parseable = True
- print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
+ print((" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1]))
if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
# and this has also stored all the objects in logentries[]
- print " - Storing " , len(logentries), " log entries"
+ print(" - Storing " , len(logentries), " log entries")
cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
with open(cache_filename, "wb") as f:
pickle.dump(logentries, f, 2)
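
Note: two related changes in this hunk: the logbook file is now opened in binary ('rb') and decoded explicitly as Latin-1, since a text-mode read() on Python 3 would apply a locale-dependent default codec, and the parsed entries are cached with pickle protocol 2, which Python 2 can still read during the migration. A self-contained sketch of the cache round trip (names are stand-ins):

    import pickle

    logentries = ["entry1", "entry2"]      # stand-in for parsed LogbookEntry data
    cache_filename = "logbook.html.cache"  # mirrors os.path.join(expowebbase, ...) + ".cache"

    with open(cache_filename, "wb") as f:
        pickle.dump(logentries, f, 2)      # protocol 2: readable from Python 2 too

    with open(cache_filename, "rb") as f:  # binary mode, matching the reads above
        logentries = pickle.load(f)
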
@@ -370,7 +371,7 @@ def LoadLogbookForExpedition(expedition):
i +=1
else:
try:
- file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
+ file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE),'rb')
txt = file_in.read().decode("latin1")
file_in.close()
logbook_parseable = True
@@ -378,7 +379,7 @@ def LoadLogbookForExpedition(expedition):
parsefunc = settings.DEFAULT_LOGBOOK_PARSER
except (IOError):
logbook_parseable = False
- print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)
+ print(("Couldn't open default logbook file and nothing in settings for expo " + expedition.year))
#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
@@ -391,7 +392,7 @@ def LoadLogbooks():
# Fetch all expos
expos = models.Expedition.objects.all()
for expo in expos:
- print("\nLoading Logbook for: " + expo.year)
+ print(("\nLoading Logbook for: " + expo.year))
# Load logbook for expo
LoadLogbookForExpedition(expo)
diff --git a/parsers/people.py b/parsers/people.py
index 265dacc..e05c5a3 100644
--- a/parsers/people.py
+++ b/parsers/people.py
@@ -4,7 +4,7 @@ from django.conf import settings
import troggle.core.models as models
import csv, re, datetime, os, shutil
from utils import save_carefully
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
from unidecode import unidecode
# def saveMugShot(mugShotPath, mugShotFilename, person):
@@ -45,7 +45,7 @@ def parseMugShotAndBlurb(personline, header, person):
#Only finds the first image, not all of them
person.blurb=re.search('<body>.*<hr',personPageOld,re.DOTALL).group()
else:
- print "ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename
+ print("ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename)
#for mugShotFilename in re.findall('i/.*?jpg',personPageOld,re.DOTALL):
# mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
# saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
@@ -55,8 +55,8 @@ def LoadPersonsExpos():
persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
personreader = csv.reader(persontab)
- headers = personreader.next()
- header = dict(zip(headers, range(len(headers))))
+ headers = next(personreader)
+ header = dict(list(zip(headers, list(range(len(headers))))))
# make expeditions
print(" - Loading expeditions")
@@ -100,7 +100,7 @@ def LoadPersonsExpos():
parseMugShotAndBlurb(personline=personline, header=header, person=person)
# make person expedition from table
- for year, attended in zip(headers, personline)[5:]:
+ for year, attended in list(zip(headers, personline))[5:]:
expedition = models.Expedition.objects.get(year=year)
if attended == "1" or attended == "-1":
lookupAttribs = {'person':person, 'expedition':expedition}
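
Note: two separate Python 3 issues appear in this file: HTMLParser moved to html.parser, and zip()/range() became lazy iterators. Strictly, only the sliced zip needs a list() wrapper; dict() consumes a lazy zip directly, so the inner list() calls added above are redundant but harmless:

    from html.parser import HTMLParser  # Python 2: from HTMLParser import HTMLParser

    headers = ["Name", "Lastname", "Guest", "Mugshot", "Blurb", "1999", "2000"]
    personline = ["Jo", "Smith", "", "", "", "1", "-1"]   # hypothetical folk.csv row

    header = dict(zip(headers, range(len(headers))))      # dict() accepts a lazy zip
    for year, attended in list(zip(headers, personline))[5:]:  # slicing needs list()
        print(year, attended)
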
diff --git a/parsers/subcaves.py b/parsers/subcaves.py
index 739af44..5889a91 100644
--- a/parsers/subcaves.py
+++ b/parsers/subcaves.py
@@ -34,7 +34,7 @@ def importSubcaves(cave):
nonLookupAttribs={'description':description}
newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)
- logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
+ logging.info("Added " + str(newSubcave) + " to " + str(cave))
except IOError:
logging.info("Subcave import couldn't open "+subcaveFilePath)
diff --git a/parsers/survex.py b/parsers/survex.py
index 38cae62..907b183 100644
--- a/parsers/survex.py
+++ b/parsers/survex.py
@@ -33,27 +33,27 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try:
survexleg.tape = float(ls[stardata["tape"]])
except ValueError:
- print("! Tape misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Tape misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
survexleg.tape = 1000
try:
lclino = ls[stardata["clino"]]
except:
- print("! Clino misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Clino misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
lclino = error
try:
lcompass = ls[stardata["compass"]]
except:
- print("! Compass misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Compass misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Compass misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
lcompass = error
@@ -67,9 +67,9 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try:
survexleg.compass = float(lcompass)
except ValueError:
- print("! Compass misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Compass misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
survexleg.compass = 1000
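
Note: unlike the single-argument cases, these print calls pass several comma-separated values, so the parentheses 2to3 adds are not cosmetic: Python 3 prints the tuple's repr, quotes and all. Dropping the inner pair restores the old space-separated output:

    path = "caves-1623/204/204.svx"       # hypothetical survex file path

    print(("! Tape misread in", path))    # ('! Tape misread in', 'caves-1623/204/204.svx')
    print("! Tape misread in", path)      # ! Tape misread in caves-1623/204/204.svx
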
@@ -143,7 +143,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
global insp
# uncomment to print out all files during parsing
- print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path)
+ print((insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path))
stamp = datetime.now()
lineno = 0
@@ -196,7 +196,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet ; ref - %s - found in survexscansfolders' % refscan)
else:
message = ' ! Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
# This whole section should be moved if we can have *QM become a proper survex command
@@ -268,7 +268,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet *REF - %s - found in survexscansfolders' % refscan)
else:
message = ' ! Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
continue
@@ -293,7 +293,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
cmd = cmd.lower()
if re.match("include$(?i)", cmd):
includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
- print(insp+' - Include path found including - ' + includepath)
+ print((insp+' - Include path found including - ' + includepath))
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
if path_match:
@@ -303,7 +303,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave:
survexfile.cave = cave
else:
- print(insp+' - No match in DB (i) for %s, so loading..' % includepath)
+ print((insp+' - No match in DB (i) for %s, so loading..' % includepath))
includesurvexfile = models.SurvexFile(path=includepath)
includesurvexfile.save()
includesurvexfile.SetDirectory()
@@ -326,10 +326,10 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave:
survexfile.cave = cave
else:
- print(insp+' - No match (b) for %s' % newsvxpath)
+ print((insp+' - No match (b) for %s' % newsvxpath))
name = line.lower()
- print(insp+' - Begin found for: ' + name)
+ print((insp+' - Begin found for: ' + name))
# print(insp+'Block cave: ' + str(survexfile.cave))
survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
survexblockdown.save()
@@ -420,7 +420,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
"calibrate", "set", "infer", "alias", "cs", "declination", "case"]:
message = "! Bad svx command in line:%s %s %s %s" % (cmd, line, survexblock, survexblock.survexfile.path)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
endstamp = datetime.now()
@@ -482,7 +482,7 @@ def LoadPos():
If we don't have it in the database, print an error message and discard it.
"""
topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME
- print(' - Generating a list of Pos from %s.svx and then loading...' % (topdata))
+ print((' - Generating a list of Pos from %s.svx and then loading...' % (topdata)))
# Be careful with the cache file.
# If LoadPos has been run before,
@@ -498,39 +498,39 @@ def LoadPos():
updtsvx = os.path.getmtime(topdata + ".svx")
updtcache = os.path.getmtime(cachefile)
age = updtcache - updtsvx
- print(' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) )))
+ print((' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) ))))
now = time.time()
if now - updtcache > 3*24*60*60:
- print " cache is more than 3 days old. Deleting."
+ print(" cache is more than 3 days old. Deleting.")
os.remove(cachefile)
elif age < 0 :
- print " cache is stale. Deleting."
+ print(" cache is stale. Deleting.")
os.remove(cachefile)
else:
- print " cache is fresh. Reading..."
+ print(" cache is fresh. Reading...")
try:
with open(cachefile, "r") as f:
for line in f:
l = line.rstrip()
if l in notfoundbefore:
notfoundbefore[l] +=1 # should not be duplicates
- print " DUPLICATE ", line, notfoundbefore[l]
+ print(" DUPLICATE ", line, notfoundbefore[l])
else:
notfoundbefore[l] =1
except:
- print " FAILURE READ opening cache file %s" % (cachefile)
+ print(" FAILURE READ opening cache file %s" % (cachefile))
raise
notfoundnow =[]
found = 0
skip = {}
- print "\n" # extra line because cavern overwrites the text buffer somehow
+ print("\n") # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA)
- print " - This next bit takes a while. Matching ~32,000 survey positions. Be patient..."
+ print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")
posfile = open("%s.pos" % (topdata))
posfile.readline() #Drop header
@@ -550,7 +550,7 @@ def LoadPos():
found += 1
except:
notfoundnow.append(name)
- print " - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip))
+ print(" - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)))
if found > 10: # i.e. a previous cave import has been done
try:
@@ -560,8 +560,8 @@ def LoadPos():
f.write("%s\n" % i)
for j in skip:
f.write("%s\n" % j) # NB skip not notfoundbefore
- print(' Not-found cache file written: %s entries' % c)
+ print((' Not-found cache file written: %s entries' % c))
except:
- print " FAILURE WRITE opening cache file %s" % (cachefile)
+ print(" FAILURE WRITE opening cache file %s" % (cachefile))
raise
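
Note: the freshness logic around the not-found cache in LoadPos compares file mtimes: the cache is discarded if it is more than three days old or older than the .svx source. A condensed sketch of that decision, keeping the variable names used above:

    import os
    import time

    def cache_is_fresh(topdata, cachefile, max_age=3 * 24 * 60 * 60):
        updtsvx = os.path.getmtime(topdata + ".svx")  # source .svx timestamp
        updtcache = os.path.getmtime(cachefile)       # not-found cache timestamp
        if time.time() - updtcache > max_age:
            return False    # more than 3 days old: delete and rebuild
        if updtcache < updtsvx:
            return False    # source newer than cache: stale
        return True         # fresh: safe to read
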
diff --git a/parsers/surveys.py b/parsers/surveys.py
index 942c0a5..2f0ff90 100644
--- a/parsers/surveys.py
+++ b/parsers/surveys.py
@@ -10,7 +10,7 @@ import csv
import re
import datetime
-#from PIL import Image
+from PIL import Image
from utils import save_carefully
from functools import reduce
@@ -82,14 +82,14 @@ def get_or_create_placeholder(year):
# logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
# dead
-# def listdir(*directories):
- # try:
- # return os.listdir(os.path.join(settings.SURVEYS, *directories))
- # except:
- # import urllib.request, urllib.parse, urllib.error
- # url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
- # folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
- # return [folder.rstrip(r"/") for folder in folders]
+def listdir(*directories):
+ try:
+ return os.listdir(os.path.join(settings.SURVEYS, *directories))
+ except:
+ import urllib.request, urllib.parse, urllib.error
+ url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
+ folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
+ return [folder.rstrip(r"/") for folder in folders]
# add survey scans
# def parseSurveyScans(expedition, logfile=None):
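
Note: re-enabling listdir() revives a latent Python 3 bug in its fallback branch: urllib.request.urlopen(...).readlines() yields bytes, so folder.rstrip(r"/") raises TypeError (str argument to bytes.rstrip). The lines need decoding first; a sketch under the same settings.SURVEYS scheme, with a narrower except:

    import os
    import urllib.request
    from functools import reduce

    from django.conf import settings       # settings.SURVEYS as used in this module

    def listdir(*directories):
        try:
            return os.listdir(os.path.join(settings.SURVEYS, *directories))
        except OSError:                     # narrower than the bare except above
            url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y,
                                            ["listdir"] + list(directories))
            folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
            # readlines() returns bytes on Python 3: decode before rstrip
            return [folder.decode().rstrip("/\n") for folder in folders]
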
@@ -252,7 +252,7 @@ def LoadListScans():
def FindTunnelScan(tunnelfile, path):
scansfolder, scansfile = None, None
- mscansdir = re.search(r"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
+ mscansdir = re.search(rb"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
if mscansdir:
scansfolderl = SurvexScansFolder.objects.filter(walletname=mscansdir.group(1))
if len(scansfolderl):
@@ -273,7 +273,7 @@ def FindTunnelScan(tunnelfile, path):
if scansfile:
tunnelfile.survexscans.add(scansfile)
- elif path and not re.search(r"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
+ elif path and not re.search(rb"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
name = os.path.split(path)[1]
#print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name)
rtunnelfilel = TunnelFile.objects.filter(tunnelname=name)
@@ -295,16 +295,16 @@ def SetTunnelfileInfo(tunnelfile):
if tunnelfile.filesize <= 0:
print("DEBUG - zero length xml file", ff)
return
- mtype = re.search(r"<(fontcolours|sketch)", ttext)
+ mtype = re.search(rb"<(fontcolours|sketch)", ttext)
assert mtype, ff
tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
- tunnelfile.npaths = len(re.findall(r"<skpath", ttext))
+ tunnelfile.npaths = len(re.findall(rb"<skpath", ttext))
tunnelfile.save()
# <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
# <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
- for path, style in re.findall(r'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
+ for path, style in re.findall(rb'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
FindTunnelScan(tunnelfile, path)
# should also scan and look for survex blocks that might have been included
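
Note: with ttext read as bytes, the rb regex prefixes above are correct, and the groups they capture (including the sfsketch paths handed to FindTunnelScan, hence the rb patterns there too) are bytes. But the surviving context line mtype.group(1)=="fontcolours" now compares bytes to str, which is always False on Python 3, so bfontcolours is silently never set. Compare against bytes or decode first:

    import re

    ttext = b'<sketch tunnelversion="version2009-06-21">'  # file read in binary mode
    mtype = re.search(rb"<(fontcolours|sketch)", ttext)

    mtype.group(1) == "fontcolours"            # always False: bytes never equal str
    mtype.group(1) == b"fontcolours"           # True/False as intended
    mtype.group(1).decode() == "fontcolours"   # or decode, then compare strings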