Diffstat
-rw-r--r--  core/fileAbstraction.py                    |    9
-rw-r--r--  core/forms.py                              |   10
-rw-r--r--  core/models.py                             |  101
-rw-r--r--  core/models_survex.py                      |   16
-rw-r--r--  core/templatetags/link.py                  |    2
-rw-r--r--  core/templatetags/wiki_markup.py           |    6
-rw-r--r--  core/view_surveys.py                       |   40
-rw-r--r--  core/views.py                              |   10
-rw-r--r--  core/views_caves.py                        |    4
-rw-r--r--  core/views_logbooks.py                     |   12
-rw-r--r--  core/views_other.py                        |    4
-rw-r--r--  core/views_survex.py                       |    3
-rw-r--r--  databaseReset.py                           |   10
-rw-r--r-- [l---------]  docker/requirements.txt       |   10
-rw-r--r--  export/toqms.py                            |    4
-rw-r--r--  flatpages/tests.py                         |    2
-rw-r--r--  flatpages/views.py                         |   35
-rw-r--r--  imagekit/management/commands/ikflush.py    |    4
-rw-r--r--  imagekit/models.py                         |    9
-rw-r--r--  imagekit/options.py                        |    2
-rw-r--r--  imagekit/specs.py                          |    2
-rw-r--r--  imagekit/tests.py                          |    2
-rw-r--r--  logbooksdump.py                            |   12
-rw-r--r--  middleware.py                              |    2
-rw-r--r--  modelviz.py                                |   12
-rw-r--r--  parsers/QMs.py                             |   10
-rw-r--r--  parsers/caves.py                           |    9
-rw-r--r--  parsers/cavetab.py                         |    2
-rw-r--r--  parsers/logbooks.py                        |   39
-rw-r--r--  parsers/people.py                          |   10
-rw-r--r--  parsers/subcaves.py                        |    2
-rw-r--r--  parsers/survex.py                          |   64
-rw-r--r--  parsers/surveys.py                         |   28
-rw-r--r--  pathreport.py                              |    8
-rw-r--r--  profiles/views.py                          |    6
-rw-r--r--  settings.py                                |   32
-rw-r--r--  urls.py                                    |   10
-rw-r--r--  utils.py                                   |    6
38 files changed, 288 insertions, 261 deletions
diff --git a/core/fileAbstraction.py b/core/fileAbstraction.py
index 86191b7..0ebd6eb 100644
--- a/core/fileAbstraction.py
+++ b/core/fileAbstraction.py
@@ -1,6 +1,7 @@
import troggle.settings as settings
import os
-import urllib
+import urllib.request, urllib.parse, urllib.error
+from functools import reduce
def urljoin(x, y): return x + "/" + y
@@ -26,8 +27,8 @@ def listdir(*path):
else:
c = ""
c = c.replace("#", "%23")
- print("FILE: ", settings.FILES + "listdir/" + c)
- return urllib.urlopen(settings.FILES + "listdir/" + c).read()
+ print(("FILE: ", settings.FILES + "listdir/" + c))
+ return urllib.request.urlopen(settings.FILES + "listdir/" + c).read()
def dirsAsList(*path):
return [d for d in listdir(*path).split("\n") if len(d) > 0 and d[-1] == "/"]
@@ -39,5 +40,5 @@ def readFile(*path):
try:
f = open(os.path.join(settings.FILES, *path))
except:
- f = urllib.urlopen(settings.FILES + "download/" + reduce(urljoin, path))
+ f = urllib.request.urlopen(settings.FILES + "download/" + reduce(urljoin, path))
return f.read() \ No newline at end of file
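The hunk above is the standard Python 3 split of urllib into urllib.request / urllib.parse / urllib.error, plus functools.reduce replacing the removed builtin. A minimal sketch of the same idiom, not part of the commit; the base URL and wallet path are placeholders, not troggle's real FILES setting:

from functools import reduce            # reduce() is no longer a builtin in Python 3
import urllib.request                   # urlopen() now lives here

def urljoin(x, y):
    return x + "/" + y

path = ("surveyscans", "2018#07", "notes.txt")                  # hypothetical wallet path
url = "https://example.org/expofiles/download/" + reduce(urljoin, path)
url = url.replace("#", "%23")           # same manual escaping as listdir() above
# urllib.request.urlopen(url).read() would return bytes under Python 3
print(url)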
diff --git a/core/forms.py b/core/forms.py
index 8265178..b8d2e07 100644
--- a/core/forms.py
+++ b/core/forms.py
@@ -1,5 +1,5 @@
from django.forms import ModelForm
-from models import Cave, Person, PersonExpedition, LogbookEntry, QM, Expedition, Entrance, CaveAndEntrance
+from .models import Cave, Person, PersonExpedition, LogbookEntry, QM, Expedition, Entrance, CaveAndEntrance
import django.forms as forms
from django.forms.models import modelformset_factory
from django.contrib.admin.widgets import AdminDateWidget
@@ -114,8 +114,7 @@ def getTripForm(expedition):
class TripForm(forms.Form):
date = forms.DateField()
title = forms.CharField(max_length=200)
- caves = [cave.reference() for cave in Cave.objects.all()]
- caves.sort()
+ caves = sorted([cave.reference() for cave in Cave.objects.all()])
caves = ["-----"] + caves
cave = forms.ChoiceField([(c, c) for c in caves], required=False)
location = forms.CharField(max_length=200, required=False)
@@ -123,7 +122,7 @@ def getTripForm(expedition):
html = forms.CharField(widget=TinyMCE(attrs={'cols': 80, 'rows': 30}))
def clean(self):
- print(dir(self))
+ print((dir(self)))
if self.cleaned_data.get("caveOrLocation") == "cave" and not self.cleaned_data.get("cave"):
self._errors["cave"] = self.error_class(["This field is required"])
if self.cleaned_data.get("caveOrLocation") == "location" and not self.cleaned_data.get("location"):
@@ -131,8 +130,7 @@ def getTripForm(expedition):
return self.cleaned_data
class PersonTripForm(forms.Form):
- names = [get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)]
- names.sort()
+ names = sorted([get_name(pe) for pe in PersonExpedition.objects.filter(expedition = expedition)])
names = ["-----"] + names
name = forms.ChoiceField([(n, n) for n in names])
TU = forms.FloatField(required=False)
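The sorted() rewrite above is the usual replacement for building a list and calling .sort() in place; sorted() accepts any iterable and returns a new list, so the QuerySet comprehension can be ordered in one expression. Illustrative sketch with made-up cave references:

caves = ["1623-204", "1623-161", "1626-359"]    # hypothetical cave.reference() values
choices = ["-----"] + sorted(caves)
print(choices)                                  # ['-----', '1623-161', '1623-204', '1626-359']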
diff --git a/core/models.py b/core/models.py
index 5b9f48c..dc51024 100644
--- a/core/models.py
+++ b/core/models.py
@@ -1,5 +1,18 @@
-import urllib, urlparse, string, os, datetime, logging, re
+import string
+import os
+import datetime
+import logging
+import re
import subprocess
+
+from urllib.request import *
+from urllib.parse import *
+from urllib.error import *
+from decimal import Decimal, getcontext
+getcontext().prec=2 #use 2 significant figures for decimal calculations
+
+import settings
+
from django.forms import ModelForm
from django.db import models
from django.contrib import admin
@@ -8,12 +21,8 @@ from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.db.models import Min, Max
from django.conf import settings
-from decimal import Decimal, getcontext
from django.core.urlresolvers import reverse
-from imagekit.models import ImageModel
from django.template import Context, loader
-import settings
-getcontext().prec=2 #use 2 significant figures for decimal calculations
from troggle.core.models_survex import *
@@ -30,7 +39,7 @@ def get_related_by_wikilinks(wiki_text):
number = qmdict['number'])
res.append(qm)
except QM.DoesNotExist:
- print('fail on '+str(wikilink))
+ print(('fail on '+str(wikilink)))
return res
@@ -52,7 +61,7 @@ class TroggleModel(models.Model):
return self._meta.object_name
def get_admin_url(self):
- return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
+ return urllib.parse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
abstract = True
@@ -64,7 +73,7 @@ class TroggleImageModel(models.Model):
return self._meta.object_name
def get_admin_url(self):
- return urlparse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
+ return urllib.parse.urljoin(settings.URL_ROOT, "/admin/core/" + self.object_name().lower() + "/" + str(self.pk))
class Meta:
@@ -85,7 +94,7 @@ class Expedition(TroggleModel):
get_latest_by = 'year'
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('expedition', args=[self.year]))
# construction function. should be moved out
def get_expedition_day(self, date):
@@ -117,10 +126,9 @@ class ExpeditionDay(TroggleModel):
personexpeditions = self.persontrip_set.filter(expeditionday=self)
return personexpeditions and personexpeditions[0] or None
-#
-# single Person, can go on many years
-#
class Person(TroggleModel):
+ """single Person, can go on many years
+ """
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
fullname = models.CharField(max_length=200)
@@ -132,7 +140,7 @@ class Person(TroggleModel):
orderref = models.CharField(max_length=200) # for alphabetic
user = models.OneToOneField(User, null=True, blank=True)
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
+ return urllib.parse.urljoin(settings.URL_ROOT,reverse('person',kwargs={'first_name':self.first_name,'last_name':self.last_name}))
class Meta:
verbose_name_plural = "People"
@@ -153,7 +161,7 @@ class Person(TroggleModel):
for personexpedition in self.personexpedition_set.all():
if not personexpedition.is_guest:
- print(personexpedition.expedition.year)
+ print((personexpedition.expedition.year))
notability += Decimal(1) / (max_expo_val - int(personexpedition.expedition.year))
return notability
@@ -178,10 +186,9 @@ class Person(TroggleModel):
#self.notability = 0.0 # set temporarily
-#
-# Person's attenance to one Expo
-#
class PersonExpedition(TroggleModel):
+ """Person's attendance to one Expo
+ """
expedition = models.ForeignKey(Expedition)
person = models.ForeignKey(Person)
slugfield = models.SlugField(max_length=50,blank=True,null=True)
@@ -213,7 +220,6 @@ class PersonExpedition(TroggleModel):
def __unicode__(self):
return "%s: (%s)" % (self.person, self.expedition)
-
#why is the below a function in personexpedition, rather than in person? - AC 14 Feb 09
def name(self):
if self.nickname:
@@ -223,7 +229,7 @@ class PersonExpedition(TroggleModel):
return self.person.first_name
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('personexpedition',kwargs={'first_name':self.person.first_name,'last_name':self.person.last_name,'year':self.expedition.year}))
def surveyedleglength(self):
survexblocks = [personrole.survexblock for personrole in self.personrole_set.all() ]
@@ -238,11 +244,9 @@ class PersonExpedition(TroggleModel):
res = self.persontrip_set.all().aggregate(day_max=Max("expeditionday__date"))
return res["day_max"]
-#
-# Single parsed entry from Logbook
-#
class LogbookEntry(TroggleModel):
-
+ """Single parsed entry from Logbook
+ """
LOGBOOK_ENTRY_TYPES = (
("wiki", "Wiki style logbook"),
("html", "Html style logbook")
@@ -265,22 +269,27 @@ class LogbookEntry(TroggleModel):
ordering = ('-date',)
def __getattribute__(self, item):
- if item == "cave": #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
- return CaveSlug.objects.get(slug = self.cave_slug).cave
- return super(LogbookEntry, self).__getattribute__(item)
+ if item == "cave":
+ #Allow a logbookentries cave to be directly accessed despite not having a proper foreignkey
+ return CaveSlug.objects.get(slug = self.cave_slug).cave
+ # parse error in python3.8
+ # https://stackoverflow.com/questions/41343263/provide-classcell-example-for-python-3-6-metaclass
+ return super(LogbookEntry, self).__getattribute__(item)
def __init__(self, *args, **kwargs):
- if "cave" in kwargs.keys():
+ if "cave" in list(kwargs.keys()):
if kwargs["cave"] is not None:
kwargs["cave_slug"] = CaveSlug.objects.get(cave=kwargs["cave"], primary=True).slug
kwargs.pop("cave")
+ # parse error in python3.8
+ # https://stackoverflow.com/questions/41343263/provide-classcell-example-for-python-3-6-metaclass
return super(LogbookEntry, self).__init__(*args, **kwargs)
def isLogbookEntry(self): # Function used in templates
return True
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('logbookentry',kwargs={'date':self.date,'slug':self.slug}))
def __unicode__(self):
return "%s: (%s)" % (self.date, self.title)
@@ -357,9 +366,9 @@ class Area(TroggleModel):
parent = models.ForeignKey('Area', blank=True, null=True)
def __unicode__(self):
if self.parent:
- return unicode(self.parent) + u" - " + unicode(self.short_name)
+ return str(self.parent) + " - " + str(self.short_name)
else:
- return unicode(self.short_name)
+ return str(self.short_name)
def kat_area(self):
if self.short_name in ["1623", "1626"]:
return self.short_name
@@ -371,7 +380,7 @@ class CaveAndEntrance(models.Model):
entrance = models.ForeignKey('Entrance')
entrance_letter = models.CharField(max_length=20,blank=True,null=True)
def __unicode__(self):
- return unicode(self.cave) + unicode(self.entrance_letter)
+ return str(self.cave) + str(self.entrance_letter)
class CaveSlug(models.Model):
cave = models.ForeignKey('Cave')
@@ -454,10 +463,10 @@ class Cave(TroggleModel):
else:
href = self.official_name.lower()
#return settings.URL_ROOT + '/cave/' + href + '/'
- return urlparse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('cave',kwargs={'cave_id':href,}))
- def __unicode__(self, sep = u": "):
- return unicode("slug:"+self.slug())
+ def __unicode__(self, sep = ": "):
+ return str("slug:"+self.slug())
def get_QMs(self):
return QM.objects.filter(found_by__cave_slug=self.caveslug_set.all())
@@ -539,7 +548,7 @@ def getCaveByReference(reference):
area = Area.objects.get(short_name = areaname)
#print(area)
foundCaves = list(Cave.objects.filter(area = area, kataster_number = code).all()) + list(Cave.objects.filter(area = area, unofficial_number = code).all())
- print(list(foundCaves))
+ print((list(foundCaves)))
if len(foundCaves) == 1:
return foundCaves[0]
else:
@@ -549,7 +558,7 @@ class OtherCaveName(TroggleModel):
name = models.CharField(max_length=160)
cave = models.ForeignKey(Cave)
def __unicode__(self):
- return unicode(self.name)
+ return str(self.name)
class EntranceSlug(models.Model):
entrance = models.ForeignKey('Entrance')
@@ -597,7 +606,7 @@ class Entrance(TroggleModel):
cached_primary_slug = models.CharField(max_length=200,blank=True,null=True)
def __unicode__(self):
- return unicode(self.slug())
+ return str(self.slug())
def exact_location(self):
return SurvexStation.objects.lookup(self.exact_station)
@@ -714,12 +723,12 @@ class CaveDescription(TroggleModel):
def __unicode__(self):
if self.long_name:
- return unicode(self.long_name)
+ return str(self.long_name)
else:
- return unicode(self.short_name)
+ return str(self.short_name)
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('cavedescription', args=(self.short_name,)))
def save(self):
"""
@@ -734,7 +743,7 @@ class CaveDescription(TroggleModel):
class NewSubCave(TroggleModel):
name = models.CharField(max_length=200, unique = True)
def __unicode__(self):
- return unicode(self.name)
+ return str(self.name)
class QM(TroggleModel):
#based on qm.csv in trunk/expoweb/1623/204 which has the fields:
@@ -762,14 +771,14 @@ class QM(TroggleModel):
comment=models.TextField(blank=True,null=True)
def __unicode__(self):
- return u"%s %s" % (self.code(), self.grade)
+ return "%s %s" % (self.code(), self.grade)
def code(self):
- return u"%s-%s-%s" % (unicode(self.found_by.cave)[6:], self.found_by.date.year, self.number)
+ return "%s-%s-%s" % (str(self.found_by.cave)[6:], self.found_by.date.year, self.number)
def get_absolute_url(self):
#return settings.URL_ROOT + '/cave/' + self.found_by.cave.kataster_number + '/' + str(self.found_by.date.year) + '-' + '%02d' %self.number
- return urlparse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('qm',kwargs={'cave_id':self.found_by.cave.kataster_number,'year':self.found_by.date.year,'qm_id':self.number,'grade':self.grade}))
def get_next_by_id(self):
return QM.objects.get(id=self.id+1)
@@ -778,7 +787,7 @@ class QM(TroggleModel):
return QM.objects.get(id=self.id-1)
def wiki_link(self):
- return u"%s%s%s" % ('[[QM:',self.code(),']]')
+ return "%s%s%s" % ('[[QM:',self.code(),']]')
#photoFileStorage = FileSystemStorage(location=settings.PHOTOS_ROOT, base_url=settings.PHOTOS_URL)
#class DPhoto(TroggleImageModel):
@@ -880,4 +889,4 @@ class DataIssue(TroggleModel):
ordering = ['date']
def __unicode__(self):
- return u"%s - %s" % (self.parser, self.message)
+ return "%s - %s" % (self.parser, self.message)
diff --git a/core/models_survex.py b/core/models_survex.py
index 448cea9..6ab6f8a 100644
--- a/core/models_survex.py
+++ b/core/models_survex.py
@@ -1,7 +1,7 @@
from django.db import models
from django.conf import settings
import os
-import urlparse
+import urllib.parse
import re
from django.core.urlresolvers import reverse
@@ -128,8 +128,8 @@ class SurvexBlock(models.Model):
def isSurvexBlock(self): # Function used in templates
return True
- def __unicode__(self):
- return self.name and unicode(self.name) or 'no name'
+ def __str__(self):
+ return self.name and str(self.name) or 'no name'
def GetPersonroles(self):
res = [ ]
@@ -185,7 +185,7 @@ class SurvexPersonRole(models.Model):
expeditionday = models.ForeignKey("ExpeditionDay", null=True)
def __unicode__(self):
- return unicode(self.person) + " - " + unicode(self.survexblock) + " - " + unicode(self.nrole)
+ return str(self.person) + " - " + str(self.survexblock) + " - " + str(self.nrole)
class SurvexScansFolder(models.Model):
@@ -196,10 +196,10 @@ class SurvexScansFolder(models.Model):
ordering = ('walletname',)
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('surveyscansfolder', kwargs={"path":re.sub("#", "%23", self.walletname)}))
def __unicode__(self):
- return unicode(self.walletname) + " (Survey Scans Folder)"
+ return str(self.walletname) + " (Survey Scans Folder)"
class SurvexScanSingle(models.Model):
ffile = models.CharField(max_length=200)
@@ -210,10 +210,10 @@ class SurvexScanSingle(models.Model):
ordering = ('name',)
def get_absolute_url(self):
- return urlparse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
+ return urllib.parse.urljoin(settings.URL_ROOT, reverse('surveyscansingle', kwargs={"path":re.sub("#", "%23", self.survexscansfolder.walletname), "file":self.name}))
def __unicode__(self):
- return "Survey Scan Image: " + unicode(self.name) + " in " + unicode(self.survexscansfolder)
+ return "Survey Scan Image: " + str(self.name) + " in " + str(self.survexscansfolder)
class TunnelFile(models.Model):
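SurvexBlock above gets the Python 3 spelling: __str__ instead of __unicode__ (Django calls __str__ on Python 3), with the removed unicode() builtin replaced by str(). A minimal sketch of that convention on a plain class; the class and block name are stand-ins, not the real model:

class SurvexBlockLike:                      # stand-in, not the Django model
    def __init__(self, name=None):
        self.name = name

    def __str__(self):                      # Python 3: __str__ replaces __unicode__
        return self.name and str(self.name) or "no name"

print(SurvexBlockLike("161.hypothetical"))  # hypothetical block name
print(SurvexBlockLike())                    # no name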
diff --git a/core/templatetags/link.py b/core/templatetags/link.py
index cb861ad..a17dd4b 100644
--- a/core/templatetags/link.py
+++ b/core/templatetags/link.py
@@ -5,5 +5,5 @@ register = template.Library()
@register.filter()
def link(value):
- return mark_safe("<a href=\'%s\'>"%value.get_absolute_url()+unicode(value)+"</a>")
+ return mark_safe("<a href=\'%s\'>"%value.get_absolute_url()+str(value)+"</a>")
diff --git a/core/templatetags/wiki_markup.py b/core/templatetags/wiki_markup.py
index 25c7103..024b9b7 100644
--- a/core/templatetags/wiki_markup.py
+++ b/core/templatetags/wiki_markup.py
@@ -4,7 +4,7 @@ from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
from django.conf import settings
from troggle.core.models import QM, LogbookEntry, Cave
-import re, urlparse
+import re, urllib.parse
register = template.Library()
@@ -94,7 +94,7 @@ def wiki_to_html_short(value, autoescape=None):
qm=QM.objects.get(found_by__cave__kataster_number = qmdict['cave'],
found_by__date__year = qmdict['year'],
number = qmdict['number'])
- return r'<a href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, unicode(qm))
+ return r'<a href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
except QM.DoesNotExist: #bother aaron to make him clean up the below code - AC
try:
placeholder=LogbookEntry.objects.get(date__year=qmdict['year'],cave__kataster_number=qmdict['cave'], title__icontains='placeholder')
@@ -105,7 +105,7 @@ def wiki_to_html_short(value, autoescape=None):
title='placeholder'
)
qm=QM(found_by = placeholder, number = qmdict['number'])
- return r'<a class="redtext" href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, unicode(qm))
+ return r'<a class="redtext" href="%s" id="q%s">%s</a>' % (qm.get_absolute_url(), qm.code, str(qm))
value = re.sub(qmMatchPattern,qmrepl, value, re.DOTALL)
diff --git a/core/view_surveys.py b/core/view_surveys.py
index aad48c3..65a4f30 100644
--- a/core/view_surveys.py
+++ b/core/view_surveys.py
@@ -1,12 +1,12 @@
from django.conf import settings
-import fileAbstraction
+from . import fileAbstraction
from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404
import os, stat
import re
from troggle.core.models import SurvexScansFolder, SurvexScanSingle, SurvexBlock, TunnelFile
import parsers.surveys
-import urllib
+import urllib.request, urllib.parse, urllib.error
# inline fileabstraction into here if it's not going to be useful anywhere else
# keep things simple and ignore exceptions everywhere for now
@@ -113,19 +113,19 @@ def UniqueFile(fname):
# join it all up and then split them off for the directories that don't exist
# anyway, this mkdir doesn't work
def SaveImageInDir(name, imgdir, project, fdata, bbinary):
- print ("hihihihi", fdata, settings.SURVEYS)
+ print(("hihihihi", fdata, settings.SURVEYS))
fimgdir = os.path.join(settings.SURVEYS, imgdir)
if not os.path.isdir(fimgdir):
- print "*** Making directory", fimgdir
+ print("*** Making directory", fimgdir)
os.path.mkdir(fimgdir)
fprojdir = os.path.join(fimgdir, project)
if not os.path.isdir(fprojdir):
- print "*** Making directory", fprojdir
+ print("*** Making directory", fprojdir)
os.path.mkdir(fprojdir)
- print "hhh"
+ print("hhh")
fname = os.path.join(fprojdir, name)
- print fname, "fff"
+ print(fname, "fff")
fname = UniqueFile(fname)
p2, p1 = os.path.split(fname)
@@ -133,7 +133,7 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
p4, p3 = os.path.split(p3)
res = os.path.join(p3, p2, p1)
- print "saving file", fname
+ print("saving file", fname)
fout = open(fname, (bbinary and "wb" or "w"))
fout.write(fdata.read())
fout.close()
@@ -145,33 +145,33 @@ def SaveImageInDir(name, imgdir, project, fdata, bbinary):
def jgtuploadfile(request):
filesuploaded = [ ]
project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
- print (project, user, tunnelversion)
- for uploadedfile in request.FILES.values():
+ print((project, user, tunnelversion))
+ for uploadedfile in list(request.FILES.values()):
if uploadedfile.field_name in ["tileimage", "backgroundimage"] and \
uploadedfile.content_type in ["image/png", "image/jpeg"]:
fname = user + "_" + re.sub("[\\\\/]", "-", uploadedfile.name) # very escaped \
- print fname
+ print(fname)
fileuploaded = SaveImageInDir(fname, uploadedfile.field_name, project, uploadedfile, True)
filesuploaded.append(settings.URL_ROOT + "/jgtfile/" + fileuploaded)
if uploadedfile.field_name in ["sketch"] and \
uploadedfile.content_type in ["text/plain"]:
fname = user + "_" + re.sub("[\\\\/]", "-", uploadedfile.name) # very escaped \
- print fname
+ print(fname)
fileuploaded = SaveImageInDir(fname, uploadedfile.field_name, project, uploadedfile, False)
filesuploaded.append(settings.URL_ROOT + "/jgtfile/" + fileuploaded)
#print "FF", request.FILES
#print ("FFF", request.FILES.values())
message = ""
- print "gothere"
+ print("gothere")
return render_to_response('fileupload.html', {'message':message, 'filesuploaded':filesuploaded, 'settings': settings})
def surveyscansfolder(request, path):
#print [ s.walletname for s in SurvexScansFolder.objects.all() ]
- survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
+ survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.parse.unquote(path))
return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
def surveyscansingle(request, path, file):
- survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.unquote(path))
+ survexscansfolder = SurvexScansFolder.objects.get(walletname=urllib.parse.unquote(path))
survexscansingle = SurvexScanSingle.objects.get(survexscansfolder=survexscansfolder, name=file)
return HttpResponse(content=open(survexscansingle.ffile), content_type=getMimeType(path.split(".")[-1]))
#return render_to_response('survexscansfolder.html', { 'survexscansfolder':survexscansfolder, 'settings': settings })
@@ -187,21 +187,21 @@ def tunneldata(request):
def tunnelfile(request, path):
- tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
+ tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.parse.unquote(path))
tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
return HttpResponse(content=open(tfile), content_type="text/plain")
def tunnelfileupload(request, path):
- tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.unquote(path))
+ tunnelfile = TunnelFile.objects.get(tunnelpath=urllib.parse.unquote(path))
tfile = os.path.join(settings.TUNNEL_DATA, tunnelfile.tunnelpath)
project, user, password, tunnelversion = request.POST["tunnelproject"], request.POST["tunneluser"], request.POST["tunnelpassword"], request.POST["tunnelversion"]
- print (project, user, tunnelversion)
+ print((project, user, tunnelversion))
- assert len(request.FILES.values()) == 1, "only one file to upload"
+ assert len(list(request.FILES.values())) == 1, "only one file to upload"
- uploadedfile = request.FILES.values()[0]
+ uploadedfile = list(request.FILES.values())[0]
if uploadedfile.field_name != "sketch":
return HttpResponse(content="Error: non-sketch file uploaded", content_type="text/plain")
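Besides the print function, the recurring change in this file is that dict.values() returns a lazy view in Python 3, so request.FILES.values() has to be wrapped in list() before it can be indexed. Quick illustration with a plain dict standing in for request.FILES; the key and value are made up:

files = {"sketch": "plan.xml"}        # stand-in for request.FILES
values = files.values()               # dict_values view, not a list
# values[0]                           # would raise: 'dict_values' object is not subscriptable
uploaded = list(values)[0]            # materialise the view before indexing
assert len(list(values)) == 1, "only one file to upload"
print(uploaded)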
diff --git a/core/views.py b/core/views.py
index 26c2e52..d81e03c 100644
--- a/core/views.py
+++ b/core/views.py
@@ -1,8 +1,8 @@
# primary namespace
-import view_surveys
-import views_caves
-import views_survex
-import views_logbooks
-import views_other
+from . import view_surveys
+from . import views_caves
+from . import views_survex
+from . import views_logbooks
+from . import views_other
diff --git a/core/views_caves.py b/core/views_caves.py
index 1f9b91e..1a7d077 100644
--- a/core/views_caves.py
+++ b/core/views_caves.py
@@ -14,9 +14,7 @@ from django import forms
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
-import re
-import urlparse
-#import urllib.parse
+import re, urllib.parse
from django.shortcuts import get_object_or_404, render
import settings
diff --git a/core/views_logbooks.py b/core/views_logbooks.py
index 65453fa..b29b72e 100644
--- a/core/views_logbooks.py
+++ b/core/views_logbooks.py
@@ -62,8 +62,7 @@ def expedition(request, expeditionname):
expeditions = Expedition.objects.all()
personexpeditiondays = [ ]
dateditems = list(this_expedition.logbookentry_set.all()) + list(this_expedition.survexblock_set.all())
- dates = list(set([item.date for item in dateditems]))
- dates.sort()
+ dates = sorted(set([item.date for item in dateditems]))
for personexpedition in this_expedition.personexpedition_set.all():
prow = [ ]
for date in dates:
@@ -115,8 +114,7 @@ def GetPersonChronology(personexpedition):
a.setdefault("personroles", [ ]).append(personrole.survexblock)
# build up the tables
- rdates = res.keys()
- rdates.sort()
+ rdates = sorted(list(res.keys()))
res2 = [ ]
@@ -206,8 +204,8 @@ def pathsreport(request):
ncodes = len(pathsdict)
- bycodeslist = sorted(pathsdict.iteritems())
- bypathslist = sorted(pathsdict.iteritems(), key=lambda x: x[1])
+ bycodeslist = sorted(pathsdict.items())
+ bypathslist = sorted(iter(pathsdict.items()), key=lambda x: x[1])
return render(request, 'pathsreport.html', {
"pathsdict":pathsdict,
@@ -264,7 +262,7 @@ def newLogbookEntry(request, expeditionyear, pdate = None, pslug = None):
'expeditionyear': expeditionyear})
f.write(template.render(context))
f.close()
- print(logbookparsers.parseAutoLogBookEntry(filename))
+ print((logbookparsers.parseAutoLogBookEntry(filename)))
return HttpResponseRedirect(reverse('expedition', args=[expedition.year])) # Redirect after POST
else:
if pslug and pdate:
diff --git a/core/views_other.py b/core/views_other.py
index 6adca9c..47a071a 100644
--- a/core/views_other.py
+++ b/core/views_other.py
@@ -60,7 +60,7 @@ def controlPanel(request):
databaseReset.dirsredirect()
for item in importlist:
if item in request.POST:
- print("running"+ " databaseReset."+item+"()")
+ print(("running"+ " databaseReset."+item+"()"))
exec("databaseReset."+item+"()")
jobs_completed.append(item)
else:
@@ -180,7 +180,7 @@ def logbook_entry_suggestions(request):
try:
lbo=LogbookEntry.objects.get(date__year=qm['year'],title__icontains="placeholder for QMs in")
except:
- print("failed to get placeholder for year "+str(qm['year']))
+ print(("failed to get placeholder for year "+str(qm['year'])))
temp_QM=QM(found_by=lbo,number=qm['number'],grade=qm['grade'])
temp_QM.grade=qm['grade']
diff --git a/core/views_survex.py b/core/views_survex.py
index 1e6c1bf..7595ea9 100644
--- a/core/views_survex.py
+++ b/core/views_survex.py
@@ -266,8 +266,7 @@ def survexcaveslist(request):
subdircaves = [ ]
# first sort the file list
- fnumlist = [ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ]
- fnumlist.sort()
+ fnumlist = sorted([ (-int(re.match(r"\d*", f).group(0) or "0"), f) for f in os.listdir(cavesdir) ])
print(fnumlist)
diff --git a/databaseReset.py b/databaseReset.py
index f08e3a6..09b3214 100644
--- a/databaseReset.py
+++ b/databaseReset.py
@@ -100,7 +100,7 @@ def import_surveyimgs():
for future re-working to manage progress against notes, plans and elevs.
"""
#import troggle.parsers.surveys
- print("NOT Importing survey images")
+ #print("Importing survey images")
#troggle.parsers.surveys.parseSurveys(logfile=settings.LOGFILE)
def import_surveyscans():
@@ -258,7 +258,7 @@ class JobQueue():
print("-- ", settings.DATABASES['default']['NAME'], settings.DATABASES['default']['ENGINE'])
- #print("-- DATABASES.default", settings.DATABASES['default'])
+ print("-- DATABASES.default", settings.DATABASES['default'])
# but because the user may be expecting to add this to a db with lots of tables already there,
# the jobqueue may not start from scratch so we need to initialise the db properly first
@@ -334,11 +334,7 @@ class JobQueue():
print(" this", end=' ')
else:
# prints one place to the left of where you expect
- if r[len(r)-1]:
- s = r[i]-r[len(r)-1]
- else:
- s = 0
- days = (s)/(24*60*60)
+ days = (r[i]-r[len(r)-1])/(24*60*60)
print('%8.2f' % days, end=' ')
elif r[i]:
print('%8.1f' % r[i], end=' ')
diff --git a/docker/requirements.txt b/docker/requirements.txt
index d561bd8..7a197bd 120000..100644
--- a/docker/requirements.txt
+++ b/docker/requirements.txt
@@ -1 +1,9 @@
-requirements.txt.dj-1.7.11 \ No newline at end of file
+Django==1.7.11
+django-registration==2.1.2
+mysql
+#imagekit
+django-imagekit
+Image
+django-tinymce==2.7.0
+smartencoding
+unidecode
diff --git a/export/toqms.py b/export/toqms.py
index 2564094..69e565c 100644
--- a/export/toqms.py
+++ b/export/toqms.py
@@ -22,10 +22,10 @@ def qmRow(qm):
}
qmRow=['' for x in range(len(headers))]
- for column, modelField in columnsToModelFields.items():
+ for column, modelField in list(columnsToModelFields.items()):
if modelField:
# Very sorry about the atrocious replace below. I will fix this soon if noone beats me to it. - AC
- qmRow[headers.index(column)]=modelField.replace(u'\xd7','x').replace(u'\u201c','').replace(u'\u2013','').replace(u'\xbd','')
+ qmRow[headers.index(column)]=modelField.replace('\xd7','x').replace('\u201c','').replace('\u2013','').replace('\xbd','')
return qmRow
def writeQmTable(outfile,cave):
diff --git a/flatpages/tests.py b/flatpages/tests.py
index 2247054..c7c4668 100644
--- a/flatpages/tests.py
+++ b/flatpages/tests.py
@@ -12,7 +12,7 @@ class SimpleTest(TestCase):
"""
Tests that 1 + 1 always equals 2.
"""
- self.failUnlessEqual(1 + 1, 2)
+ self.assertEqual(1 + 1, 2)
__test__ = {"doctest": """
Another way to test that 1 + 1 is equal to 2.
diff --git a/flatpages/views.py b/flatpages/views.py
index d265c75..27f0cd0 100644
--- a/flatpages/views.py
+++ b/flatpages/views.py
@@ -1,18 +1,19 @@
-import troggle.settings as settings
-from troggle.helper import login_required_if_public
-from django.shortcuts import render
+import os
+import re
+from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
-from django.core.urlresolvers import reverse
+from django.core.urlresolvers import reverse
from django.template import Context, loader
import django.forms as forms
+
from tinymce.widgets import TinyMCE
+
+from troggle.helper import login_required_if_public
from troggle.flatpages.models import Redirect, EntranceRedirect
from troggle.core.models import Cave
import troggle.core.views_caves
-
-import os
-import re
+import troggle.settings as settings
def flatpage(request, path):
try:
@@ -35,7 +36,7 @@ def flatpage(request, path):
if path.startswith("noinfo") and settings.PUBLIC_SITE and not request.user.is_authenticated():
- print("flat path noinfo", path)
+ print(("flat path noinfo", path))
return HttpResponseRedirect(reverse("auth_login") + '?next=%s' % request.path)
if path.endswith("/") or path == "":
@@ -57,32 +58,32 @@ def flatpage(request, path):
if path.endswith(".htm") or path.endswith(".html"):
html = o.read()
- m = re.search(r"(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)", html, re.DOTALL + re.IGNORECASE)
+ m = re.search(rb'(.*)<\s*head([^>]*)>(.*)<\s*/head\s*>(.*)<\s*body([^>]*)>(.*)<\s*/body\s*>(.*)', html, re.DOTALL + re.IGNORECASE)
if m:
preheader, headerattrs, head, postheader, bodyattrs, body, postbody = m.groups()
else:
return HttpResponse(html + "Page could not be split into header and body")
- m = re.search(r"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
+ m = re.search(rb"<title>(.*)</title>", head, re.DOTALL + re.IGNORECASE)
if m:
title, = m.groups()
else:
title = ""
- m = re.search(r"<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
+ m = re.search(rb"<meta([^>]*)noedit", head, re.DOTALL + re.IGNORECASE)
if m:
editable = False
else:
editable = True
has_menu = False
- menumatch = re.match('(.*)<div id="menu">', body, re.DOTALL + re.IGNORECASE)
+ menumatch = re.match(rb'(.*)<div id="menu">', body, re.DOTALL + re.IGNORECASE)
if menumatch:
has_menu = True
- menumatch = re.match('(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
+ menumatch = re.match(rb'(.*)<ul id="links">', body, re.DOTALL + re.IGNORECASE)
if menumatch:
has_menu = True
#body, = menumatch.groups()
- if re.search(r"iso-8859-1", html):
- body = unicode(body, "iso-8859-1")
+ if re.search(rb"iso-8859-1", html):
+ body = str(body, "iso-8859-1")
body.strip
return render(request, 'flatpage.html', {'editable': editable, 'path': path, 'title': title, 'body': body, 'homepage': (path == "index.htm"), 'has_menu': has_menu})
else:
@@ -129,7 +130,7 @@ def editflatpage(request, path):
if linksmatch:
body, links = linksmatch.groups()
if re.search(r"iso-8859-1", html):
- body = unicode(body, "iso-8859-1")
+ body = str(body, "iso-8859-1")
else:
return HttpResponse("Page could not be split into header and body")
except IOError:
@@ -154,7 +155,7 @@ def editflatpage(request, path):
postbody = "</html>"
body = flatpageForm.cleaned_data["html"]
body = body.replace("\r", "")
- result = u"%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
+ result = "%s<head%s>%s</head>%s<body%s>\n%s</body>%s" % (preheader, headerargs, head, postheader, bodyargs, body, postbody)
f = open(filepath, "w")
f.write(result)
f.close()
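The rb"..." patterns above reflect that under Python 3 the page content arrives as bytes, so the regular expressions must be bytes patterns and the iso-8859-1 decode becomes an explicit str(body, "iso-8859-1"). A self-contained sketch of that pattern, not part of the commit; the HTML snippet is invented:

import re

html = b"<html><head><title>Kaninchenh\xf6hle</title></head><body>...</body></html>"

m = re.search(rb"<title>(.*)</title>", html, re.DOTALL + re.IGNORECASE)
title = str(m.group(1), "iso-8859-1") if m else ""   # bytes -> text, as in flatpage()
print(title)                                          # Kaninchenhöhle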
diff --git a/imagekit/management/commands/ikflush.py b/imagekit/management/commands/ikflush.py
index c03440f..0af60c4 100644
--- a/imagekit/management/commands/ikflush.py
+++ b/imagekit/management/commands/ikflush.py
@@ -20,9 +20,9 @@ def flush_cache(apps, options):
"""
apps = [a.strip(',') for a in apps]
if apps:
- print 'Flushing cache for %s...' % ', '.join(apps)
+ print('Flushing cache for %s...' % ', '.join(apps))
else:
- print 'Flushing caches...'
+ print('Flushing caches...')
for app_label in apps:
app = cache.get_app(app_label)
diff --git a/imagekit/models.py b/imagekit/models.py
index 140715e..0eba0ab 100644
--- a/imagekit/models.py
+++ b/imagekit/models.py
@@ -47,7 +47,7 @@ class ImageModelBase(ModelBase):
except ImportError:
raise ImportError('Unable to load imagekit config module: %s' % \
opts.spec_module)
- for spec in [spec for spec in module.__dict__.values() \
+ for spec in [spec for spec in list(module.__dict__.values()) \
if isinstance(spec, type) \
and issubclass(spec, specs.ImageSpec) \
and spec != specs.ImageSpec]:
@@ -56,7 +56,7 @@ class ImageModelBase(ModelBase):
setattr(cls, '_ik', opts)
-class ImageModel(models.Model):
+class ImageModel(models.Model, metaclass=ImageModelBase):
""" Abstract base class implementing all core ImageKit functionality
Subclasses of ImageModel are augmented with accessors for each defined
@@ -64,7 +64,6 @@ class ImageModel(models.Model):
storage locations and other options.
"""
- __metaclass__ = ImageModelBase
class Meta:
abstract = True
@@ -81,10 +80,10 @@ class ImageModel(models.Model):
self._ik.admin_thumbnail_spec
else:
if hasattr(self, 'get_absolute_url'):
- return u'<a href="%s"><img src="%s"></a>' % \
+ return '<a href="%s"><img src="%s"></a>' % \
(self.get_absolute_url(), prop.url)
else:
- return u'<a href="%s"><img src="%s"></a>' % \
+ return '<a href="%s"><img src="%s"></a>' % \
(self._imgfield.url, prop.url)
admin_thumbnail_view.short_description = _('Thumbnail')
admin_thumbnail_view.allow_tags = True
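The ImageModel change above is the Python 3 metaclass syntax: a __metaclass__ class attribute is silently ignored, so the metaclass moves into the class header. Minimal sketch with a toy metaclass; the names are illustrative, not ImageModelBase:

class Registering(type):                      # toy metaclass
    def __new__(mcs, name, bases, attrs):
        attrs.setdefault("registered", True)
        return super().__new__(mcs, name, bases, attrs)

class WithMeta(metaclass=Registering):        # Python 3 spelling
    pass

# Python 2 spelling -- has no effect on Python 3:
#     class WithMeta(object):
#         __metaclass__ = Registering

print(WithMeta.registered)                    # True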
diff --git a/imagekit/options.py b/imagekit/options.py
index 022cc9e..71883e0 100644
--- a/imagekit/options.py
+++ b/imagekit/options.py
@@ -18,6 +18,6 @@ class Options(object):
spec_module = 'imagekit.defaults'
def __init__(self, opts):
- for key, value in opts.__dict__.iteritems():
+ for key, value in opts.__dict__.items():
setattr(self, key, value)
self.specs = [] \ No newline at end of file
diff --git a/imagekit/specs.py b/imagekit/specs.py
index a6832ba..4b991dd 100644
--- a/imagekit/specs.py
+++ b/imagekit/specs.py
@@ -6,7 +6,7 @@ spec found.
"""
import os
-from StringIO import StringIO
+from io import StringIO
from imagekit.lib import *
from imagekit.utils import img_to_fobj
from django.core.files.base import ContentFile
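The old StringIO module is gone in Python 3; text buffers come from io.StringIO and binary buffers from io.BytesIO. Short illustration, not part of the commit:

from io import BytesIO, StringIO

text_buf = StringIO()
text_buf.write("temporary text")
print(text_buf.getvalue())

image_buf = BytesIO(b"\x89PNG\r\n")    # raw image bytes need BytesIO, not StringIO
print(image_buf.read(4))               # b'\x89PNG'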
diff --git a/imagekit/tests.py b/imagekit/tests.py
index 8c2eb5e..c44f66d 100644
--- a/imagekit/tests.py
+++ b/imagekit/tests.py
@@ -83,4 +83,4 @@ class IKTest(TestCase):
# make sure image file is deleted
path = self.p.image.path
self.p.delete()
- self.failIf(os.path.isfile(path))
+ self.assertFalse(os.path.isfile(path))
diff --git a/logbooksdump.py b/logbooksdump.py
index 4fa4d16..29a0248 100644
--- a/logbooksdump.py
+++ b/logbooksdump.py
@@ -1,21 +1,25 @@
import os
import time
import timeit
+
import settings
os.environ['PYTHONPATH'] = settings.PYTHON_PATH
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
+
from django.core import management
from django.db import connection, close_old_connections
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.core.urlresolvers import reverse
+
from troggle.core.models import Cave, Entrance
import troggle.flatpages.models
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def import_auto_logbooks():
- import parsers.logbooks
import os
+ import troggle.parsers.logbooks
+
for pt in troggle.core.models.PersonTrip.objects.all():
pt.delete()
for lbe in troggle.core.models.LogbookEntry.objects.all():
@@ -27,7 +31,7 @@ def import_auto_logbooks():
"autologbook")
for root, dirs, filenames in os.walk(directory):
for filename in filenames:
- print(os.path.join(root, filename))
+ print((os.path.join(root, filename)))
parsers.logbooks.parseAutoLogBookEntry(os.path.join(root, filename))
#Temporary function until definitive source of data transfered.
@@ -50,7 +54,7 @@ def dumplogbooks():
filename = os.path.join(directory,
dateStr + "." + slugify(lbe.title)[:50] + ".html")
if lbe.cave:
- print(lbe.cave.reference())
+ print((lbe.cave.reference()))
trip = {"title": lbe.title, "html":lbe.text, "cave": lbe.cave.reference(), "caveOrLocation": "cave"}
else:
trip = {"title": lbe.title, "html":lbe.text, "location":lbe.place, "caveOrLocation": "location"}
@@ -63,6 +67,6 @@ def dumplogbooks():
'date': dateStr,
'expeditionyear': lbe.expedition.year})
output = template.render(context)
- f.write(unicode(output).encode( "utf-8" ))
+ f.write(str(output).encode( "utf-8" ))
f.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
diff --git a/middleware.py b/middleware.py
index 7c27500..7699c6a 100644
--- a/middleware.py
+++ b/middleware.py
@@ -27,7 +27,7 @@ class SmartAppendSlashMiddleware(object):
if settings.SMART_APPEND_SLASH and (not old_url[1].endswith('/')) and not _resolves(old_url[1]) and _resolves(old_url[1] + '/'):
new_url[1] = new_url[1] + '/'
if settings.DEBUG and request.method == 'POST':
- raise RuntimeError, "You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to %s%s (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings." % (new_url[0], new_url[1])
+ raise RuntimeError("You called this URL via POST, but the URL doesn't end in a slash and you have SMART_APPEND_SLASH set. Django can't redirect to the slash URL while maintaining POST data. Change your form to point to %s%s (note the trailing slash), or set SMART_APPEND_SLASH=False in your Django settings." % (new_url[0], new_url[1]))
if new_url != old_url:
# Redirect
if new_url[0]:
diff --git a/modelviz.py b/modelviz.py
index e3ada4b..2603fca 100644
--- a/modelviz.py
+++ b/modelviz.py
@@ -33,7 +33,7 @@ import getopt, sys
from django.core.management import setup_environ
try:
- import settings
+ from . import settings
except ImportError:
pass
else:
@@ -180,22 +180,22 @@ def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hd",
["help", "disable_fields"])
- except getopt.GetoptError, error:
- print __doc__
+ except getopt.GetoptError as error:
+ print(__doc__)
sys.exit(error)
else:
if not args:
- print __doc__
+ print(__doc__)
sys.exit()
kwargs = {}
for opt, arg in opts:
if opt in ("-h", "--help"):
- print __doc__
+ print(__doc__)
sys.exit()
if opt in ("-d", "--disable_fields"):
kwargs['disable_fields'] = True
- print generate_dot(args, **kwargs)
+ print(generate_dot(args, **kwargs))
if __name__ == "__main__":
main()
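The modelviz changes are the two most mechanical parts of the port: "except ExcType, name" becomes "except ExcType as name", and print becomes a function. Standalone sketch of the same option handling; the argument list is made up:

import getopt
import sys

try:
    opts, args = getopt.getopt(["-h"], "hd", ["help", "disable_fields"])
except getopt.GetoptError as error:     # was: except getopt.GetoptError, error
    print(__doc__)
    sys.exit(error)
print(opts, args)                       # [('-h', '')] []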
diff --git a/parsers/QMs.py b/parsers/QMs.py
index 47e6bc7..a5ecd2f 100644
--- a/parsers/QMs.py
+++ b/parsers/QMs.py
@@ -38,7 +38,7 @@ def parseCaveQMs(cave,inputFile):
dialect=csv.Sniffer().sniff(qmCSVContents.read())
qmCSVContents.seek(0,0)
qmReader = csv.reader(qmCSVContents,dialect=dialect)
- qmReader.next() # Skip header row
+ next(qmReader) # Skip header row
for line in qmReader:
try:
year=int(line[0][1:5])
@@ -48,7 +48,7 @@ def parseCaveQMs(cave,inputFile):
elif cave=='hauch':
placeholder, hadToCreate = LogbookEntry.objects.get_or_create(date__year=year, title="placeholder for QMs in 234", text="QMs temporarily attached to this should be re-attached to their actual trips", defaults={"date": date(year, 1, 1),"cave":hauchHl})
if hadToCreate:
- print(cave + " placeholder logbook entry for " + str(year) + " added to database")
+ print((cave + " placeholder logbook entry for " + str(year) + " added to database"))
QMnum=re.match(r".*?-\d*?-X?(?P<numb>\d*)",line[0]).group("numb")
newQM = QM()
newQM.found_by=placeholder
@@ -71,9 +71,9 @@ def parseCaveQMs(cave,inputFile):
if preexistingQM.new_since_parsing==False: #if the pre-existing QM has not been modified, overwrite it
preexistingQM.delete()
newQM.save()
- print("overwriting " + str(preexistingQM) +"\r")
+ print(("overwriting " + str(preexistingQM) +"\r"))
else: # otherwise, print that it was ignored
- print("preserving " + str(preexistingQM) + ", which was edited in admin \r")
+ print(("preserving " + str(preexistingQM) + ", which was edited in admin \r"))
except QM.DoesNotExist: #if there is no pre-existing QM, save the new one
newQM.save()
@@ -82,7 +82,7 @@ def parseCaveQMs(cave,inputFile):
except KeyError: #check on this one
continue
except IndexError:
- print("Index error in " + str(line))
+ print(("Index error in " + str(line)))
continue
def parse_KH_QMs(kh, inputFile):
diff --git a/parsers/caves.py b/parsers/caves.py
index 4f65675..745b119 100644
--- a/parsers/caves.py
+++ b/parsers/caves.py
@@ -1,9 +1,10 @@
# -*- coding: utf-8 -*-
-import troggle.core.models as models
-from django.conf import settings
import os
import re
+from django.conf import settings
+
+import troggle.core.models as models
def readcaves():
@@ -14,11 +15,11 @@ def readcaves():
area_1626 = models.Area.objects.update_or_create(short_name = "1626", parent = None)
print(" - Reading Entrances")
#print "list of <Slug> <Filename>"
- for filename in os.walk(settings.ENTRANCEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
+ for filename in next(os.walk(settings.ENTRANCEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readentrance(filename)
print (" - Reading Caves")
- for filename in os.walk(settings.CAVEDESCRIPTIONS).next()[2]: #Should be a better way of getting a list of files
+ for filename in next(os.walk(settings.CAVEDESCRIPTIONS))[2]: #Should be a better way of getting a list of files
if filename.endswith('.html'):
readcave(filename)
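Iterators lost their .next() method in Python 3; the builtin next() is the replacement, used above to take just the first (top-level) triple from os.walk(). Sketch against the current directory rather than settings.ENTRANCEDESCRIPTIONS or CAVEDESCRIPTIONS:

import os

root, dirs, files = next(os.walk("."))            # first directory level only
html_files = [f for f in files if f.endswith(".html")]
print(root, html_files)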
diff --git a/parsers/cavetab.py b/parsers/cavetab.py
index 99202d5..8d257a7 100644
--- a/parsers/cavetab.py
+++ b/parsers/cavetab.py
@@ -58,7 +58,7 @@ def LoadCaveTab():
cavetab = open(os.path.join(settings.EXPOWEB, "noinfo", "CAVETAB2.CSV"),'rU')
caveReader = csv.reader(cavetab)
- caveReader.next() # Strip out column headers
+ next(caveReader) # Strip out column headers
logging.info("Beginning to import caves from "+str(cavetab)+"\n"+"-"*60+"\n")
diff --git a/parsers/logbooks.py b/parsers/logbooks.py
index 01800a1..ded90e4 100644
--- a/parsers/logbooks.py
+++ b/parsers/logbooks.py
@@ -40,7 +40,7 @@ def GetTripPersons(trippeople, expedition, logtime_underground):
tripperson = re.sub(round_bracket_regex, "", tripperson).strip()
personyear = GetPersonExpeditionNameLookup(expedition).get(tripperson.lower())
if not personyear:
- print(" - No name match for: '%s'" % tripperson)
+ print((" - No name match for: '%s'" % tripperson))
message = "No name match for: '%s' in year '%s'" % (tripperson, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
res.append((personyear, logtime_underground))
@@ -72,11 +72,11 @@ def GetTripCave(place): #need to be fuzzier about matching here. Already a very
return tripCaveRes
elif len(tripCaveRes)>1:
- print("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes))
- correctIndex=input("type list index of correct cave")
+ print(("Ambiguous place " + str(place) + " entered. Choose from " + str(tripCaveRes)))
+ correctIndex=eval(input("type list index of correct cave"))
return tripCaveRes[correctIndex]
else:
- print("No cave found for place " , place)
+ print(("No cave found for place " , place))
return
logentries = [] # the entire logbook is a single object: a list of entries
@@ -92,7 +92,7 @@ def EnterLogIntoDbase(date, place, title, text, trippeople, expedition, logtime_
trippersons, author = GetTripPersons(trippeople, expedition, logtime_underground)
if not author:
- print(" - Skipping logentry: " + title + " - no author for entry")
+ print((" - Skipping logentry: " + title + " - no author for entry"))
message = "Skipping logentry: %s - no author for entry in year '%s'" % (title, expedition.year)
models.DataIssue.objects.create(parser='logbooks', message=message)
return
@@ -175,7 +175,7 @@ def Parseloghtmltxt(year, expedition, txt):
''', trippara)
if not s:
if not re.search(r"Rigging Guide", trippara):
- print("can't parse: ", trippara) # this is 2007 which needs editing
+ print(("can't parse: ", trippara)) # this is 2007 which needs editing
#assert s, trippara
continue
tripid, tripid1, tripdate, trippeople, triptitle, triptext, tu = s.groups()
@@ -203,7 +203,7 @@ def Parseloghtmltxt(year, expedition, txt):
def Parseloghtml01(year, expedition, txt):
tripparas = re.findall(r"<hr[\s/]*>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
- s = re.match(u"(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
+ s = re.match("(?s)\s*(?:<p>)?(.*?)</?p>(.*)$(?i)", trippara)
assert s, trippara[:300]
tripheader, triptext = s.group(1), s.group(2)
mtripid = re.search(r'<a id="(.*?)"', tripheader)
@@ -251,7 +251,7 @@ def Parseloghtml01(year, expedition, txt):
def Parseloghtml03(year, expedition, txt):
tripparas = re.findall(r"<hr\s*/>([\s\S]*?)(?=<hr)", txt)
for trippara in tripparas:
- s = re.match(u"(?s)\s*<p>(.*?)</p>(.*)$", trippara)
+ s = re.match("(?s)\s*<p>(.*?)</p>(.*)$", trippara)
assert s, trippara
tripheader, triptext = s.group(1), s.group(2)
tripheader = re.sub(r"&nbsp;", " ", tripheader)
@@ -261,7 +261,7 @@ def Parseloghtml03(year, expedition, txt):
if re.match("T/U|Time underwater", sheader[-1]):
tu = sheader.pop()
if len(sheader) != 3:
- print("header not three pieces", sheader)
+ print(("header not three pieces", sheader))
tripdate, triptitle, trippeople = sheader
ldate = ParseDate(tripdate.strip(), year)
triptitles = triptitle.split(" , ")
@@ -325,35 +325,36 @@ def LoadLogbookForExpedition(expedition):
#print " - Cache is more than 30 days old."
bad_cache= True
if bad_cache:
- print " - Cache is either stale or more than 30 days old. Deleting it."
+ print(" - Cache is either stale or more than 30 days old. Deleting it.")
os.remove(cache_filename)
logentries=[]
raise
- print(" - Reading cache: " + cache_filename )
+ print((" - Reading cache: " + cache_filename ))
try:
with open(cache_filename, "rb") as f:
logentries = pickle.load(f)
- print " - Loaded ", len(logentries), " objects"
+ print(" - Loaded ", len(logentries), " objects")
logbook_cached = True
except:
- print " - Failed to load corrupt cache. Deleting it.\n"
+ print(" - Failed to load corrupt cache. Deleting it.\n")
os.remove(cache_filename)
logentries=[]
+ raise
except:
print(" - Opening logbook: ")
- file_in = open(os.path.join(expowebbase, year_settings[0]))
+ file_in = open(os.path.join(expowebbase, year_settings[0]),'rb')
txt = file_in.read().decode("latin1")
file_in.close()
parsefunc = year_settings[1]
logbook_parseable = True
- print(" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1])
+ print((" - Parsing logbook: " + year_settings[0] + "\n - Using parser: " + year_settings[1]))
if logbook_parseable:
parser = globals()[parsefunc]
parser(expedition.year, expedition, txt)
SetDatesFromLogbookEntries(expedition)
# and this has also stored all the objects in logentries[]
- print " - Storing " , len(logentries), " log entries"
+ print(" - Storing " , len(logentries), " log entries")
cache_filename = os.path.join(expowebbase, year_settings[0])+".cache"
with open(cache_filename, "wb") as f:
pickle.dump(logentries, f, 2)
@@ -370,7 +371,7 @@ def LoadLogbookForExpedition(expedition):
i +=1
else:
try:
- file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE))
+ file_in = open(os.path.join(expowebbase, expedition.year, settings.DEFAULT_LOGBOOK_FILE),'rb')
txt = file_in.read().decode("latin1")
file_in.close()
logbook_parseable = True
@@ -378,7 +379,7 @@ def LoadLogbookForExpedition(expedition):
parsefunc = settings.DEFAULT_LOGBOOK_PARSER
except (IOError):
logbook_parseable = False
- print("Couldn't open default logbook file and nothing in settings for expo " + expedition.year)
+ print(("Couldn't open default logbook file and nothing in settings for expo " + expedition.year))
#return "TOLOAD: " + year + " " + str(expedition.personexpedition_set.all()[1].logbookentry_set.count()) + " " + str(models.PersonTrip.objects.filter(personexpedition__expedition=expedition).count())
@@ -391,7 +392,7 @@ def LoadLogbooks():
# Fetch all expos
expos = models.Expedition.objects.all()
for expo in expos:
- print("\nLoading Logbook for: " + expo.year)
+ print(("\nLoading Logbook for: " + expo.year))
# Load logbook for expo
LoadLogbookForExpedition(expo)
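Two Python 3 points recur in this parser: logbook files are opened in binary mode and decoded explicitly with .decode("latin1"), and the pickle cache is written and read through binary file objects. A self-contained round-trip, not part of the commit; the entries and cache file name are throwaway placeholders:

import pickle

logentries = [("2018-07-15", "Hypothetical trip title")]    # stand-in parsed entries

cache_filename = "logbook.html.cache"       # placeholder path
with open(cache_filename, "wb") as f:       # binary mode for pickle
    pickle.dump(logentries, f, 2)

with open(cache_filename, "rb") as f:
    cached = pickle.load(f)

assert cached == logentries
# raw logbooks are read the same way:  open(path, 'rb').read().decode("latin1")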
diff --git a/parsers/people.py b/parsers/people.py
index 265dacc..e05c5a3 100644
--- a/parsers/people.py
+++ b/parsers/people.py
@@ -4,7 +4,7 @@ from django.conf import settings
import troggle.core.models as models
import csv, re, datetime, os, shutil
from utils import save_carefully
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
from unidecode import unidecode
# def saveMugShot(mugShotPath, mugShotFilename, person):
@@ -45,7 +45,7 @@ def parseMugShotAndBlurb(personline, header, person):
#Only finds the first image, not all of them
person.blurb=re.search('<body>.*<hr',personPageOld,re.DOTALL).group()
else:
- print "ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename
+ print("ERROR: --------------- Broken link or Blurb parse error in ", mugShotFilename)
#for mugShotFilename in re.findall('i/.*?jpg',personPageOld,re.DOTALL):
# mugShotPath = os.path.join(settings.EXPOWEB, "folk", mugShotFilename)
# saveMugShot(mugShotPath=mugShotPath, mugShotFilename=mugShotFilename, person=person)
@@ -55,8 +55,8 @@ def LoadPersonsExpos():
persontab = open(os.path.join(settings.EXPOWEB, "folk", "folk.csv"))
personreader = csv.reader(persontab)
- headers = personreader.next()
- header = dict(zip(headers, range(len(headers))))
+ headers = next(personreader)
+ header = dict(list(zip(headers, list(range(len(headers))))))
# make expeditions
print(" - Loading expeditions")
@@ -100,7 +100,7 @@ def LoadPersonsExpos():
parseMugShotAndBlurb(personline=personline, header=header, person=person)
# make person expedition from table
- for year, attended in zip(headers, personline)[5:]:
+ for year, attended in list(zip(headers, personline))[5:]:
expedition = models.Expedition.objects.get(year=year)
if attended == "1" or attended == "-1":
lookupAttribs = {'person':person, 'expedition':expedition}
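zip() and range() return lazy iterators in Python 3, so the folk.csv handling above wraps them in list() before building the header index and before slicing the per-year columns. Toy example, not part of the commit; the columns and person are invented:

headers = ["Name", "Guest", "Mugshot", "1999", "2000"]      # made-up folk.csv header
personline = ["Example Caver", "0", "i/example.jpg", "1", "-1"]

header = dict(zip(headers, range(len(headers))))            # column name -> index
attendance = list(zip(headers, personline))[3:]             # slicing needs a real list
print(header["1999"], attendance)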
diff --git a/parsers/subcaves.py b/parsers/subcaves.py
index 739af44..5889a91 100644
--- a/parsers/subcaves.py
+++ b/parsers/subcaves.py
@@ -34,7 +34,7 @@ def importSubcaves(cave):
nonLookupAttribs={'description':description}
newSubcave=save_carefully(Subcave,lookupAttribs=lookupAttribs,nonLookupAttribs=nonLookupAttribs)
- logging.info("Added " + unicode(newSubcave) + " to " + unicode(cave))
+ logging.info("Added " + str(newSubcave) + " to " + str(cave))
except IOError:
logging.info("Subcave import couldn't open "+subcaveFilePath)
diff --git a/parsers/survex.py b/parsers/survex.py
index 38cae62..907b183 100644
--- a/parsers/survex.py
+++ b/parsers/survex.py
@@ -33,27 +33,27 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try:
survexleg.tape = float(ls[stardata["tape"]])
except ValueError:
- print("! Tape misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Tape misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Tape misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
survexleg.tape = 1000
try:
lclino = ls[stardata["clino"]]
except:
- print("! Clino misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Clino misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Clino misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
lclino = error
try:
lcompass = ls[stardata["compass"]]
except:
- print("! Compass misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Compass misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: Compass misread in line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
lcompass = error
@@ -67,9 +67,9 @@ def LoadSurvexLineLeg(survexblock, stardata, sline, comment, cave):
try:
survexleg.compass = float(lcompass)
except ValueError:
- print("! Compass misread in", survexblock.survexfile.path)
- print(" Stardata:", stardata)
- print(" Line:", ls)
+ print(("! Compass misread in", survexblock.survexfile.path))
+ print((" Stardata:", stardata))
+ print((" Line:", ls))
message = ' ! Value Error: line %s in %s' % (ls, survexblock.survexfile.path)
models.DataIssue.objects.create(parser='survex', message=message)
survexleg.compass = 1000
@@ -143,7 +143,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
global insp
# uncomment to print out all files during parsing
- print(insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path)
+ print((insp+" - Reading file: " + survexblock.survexfile.path + " <> " + survexfile.path))
stamp = datetime.now()
lineno = 0
@@ -196,7 +196,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet ; ref - %s - found in survexscansfolders' % refscan)
else:
message = ' ! Wallet ; ref - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
# This whole section should be moved if we can have *QM become a proper survex command
@@ -268,7 +268,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
# print(insp+' - Wallet *REF - %s - found in survexscansfolders' % refscan)
else:
message = ' ! Wallet *REF - %s - NOT found in survexscansfolders %s-%s-%s' % (refscan,yr,letterx,wallet)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
continue
@@ -293,7 +293,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
cmd = cmd.lower()
if re.match("include$(?i)", cmd):
includepath = os.path.join(os.path.split(survexfile.path)[0], re.sub(r"\.svx$", "", line))
- print(insp+' - Include path found including - ' + includepath)
+ print((insp+' - Include path found including - ' + includepath))
# Try to find the cave in the DB if not use the string as before
path_match = re.search(r"caves-(\d\d\d\d)/(\d+|\d\d\d\d-?\w+-\d+)/", includepath)
if path_match:
@@ -303,7 +303,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave:
survexfile.cave = cave
else:
- print(insp+' - No match in DB (i) for %s, so loading..' % includepath)
+ print((insp+' - No match in DB (i) for %s, so loading..' % includepath))
includesurvexfile = models.SurvexFile(path=includepath)
includesurvexfile.save()
includesurvexfile.SetDirectory()
@@ -326,10 +326,10 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cave:
survexfile.cave = cave
else:
- print(insp+' - No match (b) for %s' % newsvxpath)
+ print((insp+' - No match (b) for %s' % newsvxpath))
name = line.lower()
- print(insp+' - Begin found for: ' + name)
+ print((insp+' - Begin found for: ' + name))
# print(insp+'Block cave: ' + str(survexfile.cave))
survexblockdown = models.SurvexBlock(name=name, begin_char=fin.tell(), parent=survexblock, survexpath=survexblock.survexpath+"."+name, cave=survexfile.cave, survexfile=survexfile, totalleglength=0.0)
survexblockdown.save()
@@ -420,7 +420,7 @@ def RecursiveLoad(survexblock, survexfile, fin, textlines):
if cmd not in ["sd", "include", "units", "entrance", "data", "flags", "title", "export", "instrument",
"calibrate", "set", "infer", "alias", "cs", "declination", "case"]:
message = "! Bad svx command in line:%s %s %s %s" % (cmd, line, survexblock, survexblock.survexfile.path)
- print(insp+message)
+ print((insp+message))
models.DataIssue.objects.create(parser='survex', message=message)
endstamp = datetime.now()
@@ -482,7 +482,7 @@ def LoadPos():
If we don't have it in the database, print an error message and discard it.
"""
topdata = settings.SURVEX_DATA + settings.SURVEX_TOPNAME
- print(' - Generating a list of Pos from %s.svx and then loading...' % (topdata))
+ print((' - Generating a list of Pos from %s.svx and then loading...' % (topdata)))
# Be careful with the cache file.
# If LoadPos has been run before,
@@ -498,39 +498,39 @@ def LoadPos():
updtsvx = os.path.getmtime(topdata + ".svx")
updtcache = os.path.getmtime(cachefile)
age = updtcache - updtsvx
- print(' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) )))
+ print((' svx: %s cache: %s not-found cache is fresher by: %s' % (updtsvx, updtcache, str(timedelta(seconds=age) ))))
now = time.time()
if now - updtcache > 3*24*60*60:
- print " cache is more than 3 days old. Deleting."
+ print(" cache is more than 3 days old. Deleting.")
os.remove(cachefile)
elif age < 0 :
- print " cache is stale. Deleting."
+ print(" cache is stale. Deleting.")
os.remove(cachefile)
else:
- print " cache is fresh. Reading..."
+ print(" cache is fresh. Reading...")
try:
with open(cachefile, "r") as f:
for line in f:
l = line.rstrip()
if l in notfoundbefore:
notfoundbefore[l] +=1 # should not be duplicates
- print " DUPLICATE ", line, notfoundbefore[l]
+ print(" DUPLICATE ", line, notfoundbefore[l])
else:
notfoundbefore[l] =1
except:
- print " FAILURE READ opening cache file %s" % (cachefile)
+ print(" FAILURE READ opening cache file %s" % (cachefile))
raise
notfoundnow =[]
found = 0
skip = {}
- print "\n" # extra line because cavern overwrites the text buffer somehow
+ print("\n") # extra line because cavern overwrites the text buffer somehow
# cavern defaults to using same cwd as supplied input file
call([settings.CAVERN, "--output=%s.3d" % (topdata), "%s.svx" % (topdata)])
call([settings.THREEDTOPOS, '%s.3d' % (topdata)], cwd = settings.SURVEX_DATA)
- print " - This next bit takes a while. Matching ~32,000 survey positions. Be patient..."
+ print(" - This next bit takes a while. Matching ~32,000 survey positions. Be patient...")
posfile = open("%s.pos" % (topdata))
posfile.readline() #Drop header
@@ -550,7 +550,7 @@ def LoadPos():
found += 1
except:
notfoundnow.append(name)
- print " - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip))
+ print(" - %s stations not found in lookup of SurvexStation.objects. %s found. %s skipped." % (len(notfoundnow),found, len(skip)))
if found > 10: # i.e. a previous cave import has been done
try:
@@ -560,8 +560,8 @@ def LoadPos():
f.write("%s\n" % i)
for j in skip:
f.write("%s\n" % j) # NB skip not notfoundbefore
- print(' Not-found cache file written: %s entries' % c)
+ print((' Not-found cache file written: %s entries' % c))
except:
- print " FAILURE WRITE opening cache file %s" % (cachefile)
+ print(" FAILURE WRITE opening cache file %s" % (cachefile))
raise
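Most of the survex.py changes are mechanical print conversions, but the two styles in this file behave differently. The double-parenthesis form (the usual result of converting a Python 2 print(a, b) call that lacked the print_function import) prints a single tuple, while the plain print function prints space-separated fields; dropping the inner parentheses would give the output these messages appear to intend. A small illustration with made-up values:

path = "caves-1623/204/trunk.svx"                 # illustrative only
stardata = {"tape": 1, "compass": 2, "clino": 3}

print(("! Tape misread in", path))    # ('! Tape misread in', 'caves-1623/204/trunk.svx')
print("! Tape misread in", path)      # ! Tape misread in caves-1623/204/trunk.svx
print((" Stardata:", stardata))       # same tuple-repr pattern for the diagnostic lines

The bare Python 2 print statements further down, in the LoadPos cache handling, were converted to single-argument print() calls, so their output is unchanged.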
diff --git a/parsers/surveys.py b/parsers/surveys.py
index 942c0a5..2f0ff90 100644
--- a/parsers/surveys.py
+++ b/parsers/surveys.py
@@ -10,7 +10,7 @@ import csv
import re
import datetime
-#from PIL import Image
+from PIL import Image
from utils import save_carefully
from functools import reduce
@@ -82,14 +82,14 @@ def get_or_create_placeholder(year):
# logging.info("added survey " + survey[header['Year']] + "#" + surveyobj.wallet_number + "\r")
# dead
-# def listdir(*directories):
- # try:
- # return os.listdir(os.path.join(settings.SURVEYS, *directories))
- # except:
- # import urllib.request, urllib.parse, urllib.error
- # url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
- # folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
- # return [folder.rstrip(r"/") for folder in folders]
+def listdir(*directories):
+ try:
+ return os.listdir(os.path.join(settings.SURVEYS, *directories))
+ except:
+ import urllib.request, urllib.parse, urllib.error
+ url = settings.SURVEYS + reduce(lambda x, y: x + "/" + y, ["listdir"] + list(directories))
+ folders = urllib.request.urlopen(url.replace("#", "%23")).readlines()
+ return [folder.rstrip(r"/") for folder in folders]
# add survey scans
# def parseSurveyScans(expedition, logfile=None):
@@ -252,7 +252,7 @@ def LoadListScans():
def FindTunnelScan(tunnelfile, path):
scansfolder, scansfile = None, None
- mscansdir = re.search(r"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
+ mscansdir = re.search(rb"(\d\d\d\d#X?\d+\w?|1995-96kh|92-94Surveybookkh|1991surveybook|smkhs)/(.*?(?:png|jpg|pdf|jpeg))$", path)
if mscansdir:
scansfolderl = SurvexScansFolder.objects.filter(walletname=mscansdir.group(1))
if len(scansfolderl):
@@ -273,7 +273,7 @@ def FindTunnelScan(tunnelfile, path):
if scansfile:
tunnelfile.survexscans.add(scansfile)
- elif path and not re.search(r"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
+ elif path and not re.search(rb"\.(?:png|jpg|pdf|jpeg)$(?i)", path):
name = os.path.split(path)[1]
#print("debug-tunnelfileobjects ", tunnelfile.tunnelpath, path, name)
rtunnelfilel = TunnelFile.objects.filter(tunnelname=name)
@@ -295,16 +295,16 @@ def SetTunnelfileInfo(tunnelfile):
if tunnelfile.filesize <= 0:
print("DEBUG - zero length xml file", ff)
return
- mtype = re.search(r"<(fontcolours|sketch)", ttext)
+ mtype = re.search(rb"<(fontcolours|sketch)", ttext)
assert mtype, ff
tunnelfile.bfontcolours = (mtype.group(1)=="fontcolours")
- tunnelfile.npaths = len(re.findall(r"<skpath", ttext))
+ tunnelfile.npaths = len(re.findall(rb"<skpath", ttext))
tunnelfile.save()
# <tunnelxml tunnelversion="version2009-06-21 Matienzo" tunnelproject="ireby" tunneluser="goatchurch" tunneldate="2009-06-29 23:22:17">
# <pcarea area_signal="frame" sfscaledown="12.282584" sfrotatedeg="-90.76982" sfxtrans="11.676667377221136" sfytrans="-15.677173422877454" sfsketch="204description/scans/plan(38).png" sfstyle="" nodeconnzsetrelative="0.0">
- for path, style in re.findall(r'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
+ for path, style in re.findall(rb'<pcarea area_signal="frame".*?sfsketch="([^"]*)" sfstyle="([^"]*)"', ttext):
FindTunnelScan(tunnelfile, path)
# should also scan and look for survex blocks that might have been included
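surveys.py switches its regular expressions from str patterns (r"...") to bytes patterns (rb"..."), presumably because the tunnel XML is now read as bytes; Python 3's re module refuses to mix a str pattern with bytes data. A short sketch of the consequences, with ttext standing in for the real file contents:

import re

ttext = b'<sketch tunnelversion="version2009-06-21">'   # contents read in binary mode

mtype = re.search(rb"<(fontcolours|sketch)", ttext)     # bytes pattern against bytes data
print(mtype.group(1))                                   # b'sketch'
print(mtype.group(1) == "sketch")                       # False: bytes never compare equal to str
print(mtype.group(1).decode() == "sketch")              # True once decoded

Two follow-ups probably worth checking: the untouched context line comparing mtype.group(1) to the str "fontcolours" will always be False once the group is bytes, and the re-enabled listdir() fallback hands urlopen().readlines() bytes to rstrip(r"/"), which would need a bytes argument.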
diff --git a/pathreport.py b/pathreport.py
index 3d13d7d..698ea8f 100644
--- a/pathreport.py
+++ b/pathreport.py
@@ -1,10 +1,10 @@
#!/usr/bin/python
-from settings import *
+from .settings import *
import sys
import os
import string
import re
-import urlparse
+import urllib.parse
import django
pathsdict={
@@ -52,9 +52,9 @@ sep2="\r\t\t\t\t\t\t\t" # ugh nasty - terminal output only
bycodes = sorted(pathsdict)
for p in bycodes:
- print p, sep , pathsdict[p]
+ print(p, sep , pathsdict[p])
byvals = sorted(pathsdict, key=pathsdict.__getitem__)
for p in byvals:
- print pathsdict[p] , sep2, p
+ print(pathsdict[p] , sep2, p)
\ No newline at end of file
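pathreport.py picks up two of the standard renames: Python 2's urlparse module now lives at urllib.parse, and the print statements become function calls. One caveat, since it depends on how the script is invoked: the new explicit relative import (from .settings import *) only resolves when pathreport is imported as part of the troggle package; run directly as a script, as the shebang suggests, it fails at import time. The module rename itself is a one-liner:

import urllib.parse    # Python 3 home of the old urlparse module

print(urllib.parse.urljoin("http://expo.survex.com/", "surveyscans/2018/"))
# http://expo.survex.com/surveyscans/2018/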
diff --git a/profiles/views.py b/profiles/views.py
index b5b7143..327d466 100644
--- a/profiles/views.py
+++ b/profiles/views.py
@@ -135,7 +135,7 @@ def create_profile(request, form_class=None, success_url=None,
if extra_context is None:
extra_context = {}
context = RequestContext(request)
- for key, value in extra_context.items():
+ for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value
return render_to_response(template_name,
@@ -226,7 +226,7 @@ def edit_profile(request, form_class=None, success_url=None,
if extra_context is None:
extra_context = {}
context = RequestContext(request)
- for key, value in extra_context.items():
+ for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value
return render_to_response(template_name,
@@ -301,7 +301,7 @@ def profile_detail(request, username, public_profile_field=None,
if extra_context is None:
extra_context = {}
context = RequestContext(request)
- for key, value in extra_context.items():
+ for key, value in list(extra_context.items()):
context[key] = callable(value) and value() or value
return render_to_response(template_name,
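The profiles/views.py change wraps extra_context.items() in list(). In Python 3, .items() returns a view that iterates perfectly well; list() is only needed if the dictionary might change during the loop or the result is indexed, so this looks like a conservative conversion rather than a behaviour change. A sketch with a hypothetical extra_context:

extra_context = {"title": lambda: "Expo profiles", "year": 2019}

context = {}
for key, value in list(extra_context.items()):   # plain extra_context.items() would work too
    context[key] = callable(value) and value() or value

print(context)    # {'title': 'Expo profiles', 'year': 2019}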
diff --git a/settings.py b/settings.py
index 9a97387..58b930d 100644
--- a/settings.py
+++ b/settings.py
@@ -1,8 +1,22 @@
-from localsettings import * #inital localsettings call so that urljoins work
import os
-import urlparse
+
+import urllib.parse
+
import django
+from localsettings import *
+ #initial localsettings call so that urljoins work
+
+#Imports should be grouped in the following order:
+
+#Standard library imports.
+#Related third party imports.
+#Local application/library specific imports.
+#You should put a blank line between each group of imports.
+
+print("** importing settings.py")
+print("--**-- REPOS_ROOT_PATH: ", REPOS_ROOT_PATH)
+
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
@@ -10,7 +24,7 @@ BASE_DIR = os.path.dirname(os.path.dirname(__file__))
DEBUG = True
TEMPLATE_DEBUG = DEBUG
-ALLOWED_HOSTS = [u'expo.survex.com']
+ALLOWED_HOSTS = ['expo.survex.com']
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
@@ -48,10 +62,10 @@ ADMIN_MEDIA_PREFIX = '/troggle/media-admin/'
CAVEDESCRIPTIONS = os.path.join(EXPOWEB, "cave_data")
ENTRANCEDESCRIPTIONS = os.path.join(EXPOWEB, "entrance_data")
-MEDIA_URL = urlparse.urljoin(URL_ROOT , '/site_media/')
-SURVEYS_URL = urlparse.urljoin(URL_ROOT , '/survey_scans/')
-PHOTOS_URL = urlparse.urljoin(URL_ROOT , '/photos/')
-SVX_URL = urlparse.urljoin(URL_ROOT , '/survex/')
+MEDIA_URL = urllib.parse.urljoin(URL_ROOT , '/site_media/')
+SURVEYS_URL = urllib.parse.urljoin(URL_ROOT , '/survey_scans/')
+PHOTOS_URL = urllib.parse.urljoin(URL_ROOT , '/photos/')
+SVX_URL = urllib.parse.urljoin(URL_ROOT , '/survex/')
# top-level survex file basename (without .svx)
SURVEX_TOPNAME = "1623"
@@ -169,5 +183,5 @@ TINYMCE_COMPRESSOR = True
MAX_LOGBOOK_ENTRY_TITLE_LENGTH = 200
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
-
-from localsettings import * #localsettings needs to take precedence. Call it to override any existing vars.
+from localsettings import *
+#localsettings needs to take precedence. Call it to override any existing vars.
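settings.py makes the same urlparse to urllib.parse move. One property of urljoin worth remembering when reviewing these URLs: a second argument that starts with "/" replaces the whole path of the base, so any path component in URL_ROOT is discarded. A sketch with a made-up URL_ROOT (the real value comes from localsettings):

import urllib.parse

URL_ROOT = "http://expo.survex.com/troggle/"   # illustrative; localsettings supplies the real one

print(urllib.parse.urljoin(URL_ROOT, "/site_media/"))   # http://expo.survex.com/site_media/
print(urllib.parse.urljoin(URL_ROOT, "site_media/"))    # http://expo.survex.com/troggle/site_media/

The dropped u'' prefix on ALLOWED_HOSTS is purely cosmetic: every Python 3 str literal is already unicode, so 'expo.survex.com' == u'expo.survex.com' is True.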
diff --git a/urls.py b/urls.py
index 410c391..95b2707 100644
--- a/urls.py
+++ b/urls.py
@@ -1,11 +1,11 @@
from django.conf.urls import *
from django.conf import settings
-from core.views import * # flat import
-from core.views_other import *
-from core.views_caves import *
-from core.views_survex import *
-from core.models import *
+from .core.views import * # flat import
+from .core.views_other import *
+from .core.views_caves import *
+from .core.views_survex import *
+from .core.models import *
from django.views.generic.edit import UpdateView
from django.contrib import admin
from django.views.generic.list import ListView
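urls.py trades Python 2's implicit relative imports (from core.views import *, resolved against the sibling package) for explicit relative ones. Whether the dot form or an absolute troggle.core.views path is right depends on how ROOT_URLCONF names this module, so the sketch below only demonstrates the language rule with a throwaway package, not the troggle layout:

import importlib, os, sys, tempfile

root = tempfile.mkdtemp()                       # scratch package, nothing to do with troggle
os.makedirs(os.path.join(root, "pkg"))
open(os.path.join(root, "pkg", "__init__.py"), "w").close()
with open(os.path.join(root, "pkg", "core.py"), "w") as f:
    f.write("VALUE = 42\n")
with open(os.path.join(root, "pkg", "urls.py"), "w") as f:
    f.write("from .core import VALUE   # explicit relative import: valid in Python 3\n")
    f.write("# from core import VALUE  # implicit relative import: Python 2 only\n")

sys.path.insert(0, root)
print(importlib.import_module("pkg.urls").VALUE)   # 42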
diff --git a/utils.py b/utils.py
index d5deeda..7f9ccdd 100644
--- a/utils.py
+++ b/utils.py
@@ -46,7 +46,7 @@ def save_carefully(objectType, lookupAttribs={}, nonLookupAttribs={}):
instance, created=objectType.objects.get_or_create(defaults=nonLookupAttribs, **lookupAttribs)
if not created and not instance.new_since_parsing:
- for k, v in nonLookupAttribs.items(): #overwrite the existing attributes from the logbook text (except date and title)
+ for k, v in list(nonLookupAttribs.items()): #overwrite the existing attributes from the logbook text (except date and title)
setattr(instance, k, v)
instance.save()
@@ -111,8 +111,8 @@ re_subs = [(re.compile(r"\<b[^>]*\>(.*?)\</b\>", re.DOTALL), r"'''\1'''"),
]
def html_to_wiki(text, codec = "utf-8"):
- if type(text) == str:
- text = unicode(text, codec)
+ if isinstance(text, str):
+ text = str(text, codec)
text = re.sub("</p>", r"", text)
text = re.sub("<p>$", r"", text)
text = re.sub("<p>", r"\n\n", text)