summaryrefslogtreecommitdiffstats
path: root/parsers
diff options
context:
space:
mode:
authorPhilip Sargent <philip.sargent@gmail.com>2025-01-20 02:07:26 +0000
committerPhilip Sargent <philip.sargent@gmail.com>2025-01-20 02:07:26 +0000
commit4d49eefccbac393ca21b81bc35f7a632b4af1178 (patch)
treee372c3edc1df51449f6222ae0065be72bbe9e145 /parsers
parent79cf342d3391d4bbd127678faaeed5471f6954be (diff)
downloadtroggle-4d49eefccbac393ca21b81bc35f7a632b4af1178.tar.gz
troggle-4d49eefccbac393ca21b81bc35f7a632b4af1178.tar.bz2
troggle-4d49eefccbac393ca21b81bc35f7a632b4af1178.zip
encryption round-trip works
Diffstat (limited to 'parsers')
-rw-r--r--parsers/caves.py13
-rw-r--r--parsers/imports.py6
-rw-r--r--parsers/people.py95
3 files changed, 110 insertions, 4 deletions
diff --git a/parsers/caves.py b/parsers/caves.py
index 67330fc..25b1743 100644
--- a/parsers/caves.py
+++ b/parsers/caves.py
@@ -181,7 +181,8 @@ def do_ARGE_cave(slug, caveid, areacode, svxid):
caveid may be kataster number or it may be e.g. LA34
"""
- default_note = "This is (probably) an ARGE or VfHO cave where we only have the survex file and no other information"
+ default_note = "This is (probably) an ARGE or VfHO cave where we only have the survex file and no other information."
+ default_note += "<br />If there is a 'Messteam' or 'Zeichner' listed, then it is probably ARGE."
url = f"{areacode}/{caveid}/{caveid}.html"
urltest = Cave.objects.filter(url=url)
@@ -210,11 +211,17 @@ def do_ARGE_cave(slug, caveid, areacode, svxid):
print(f"{caveid} {rest}")
passages = "\n"
+ # ; Messteam: Uwe Kirsamer, Uli Nohlen, Aiko Schütz, Torben Schulz,Thomas Holder,Robert Winkler
+ # ; Zeichner: Aiko Schütz, Robert Winkler
for line in rest:
if line.strip().startswith("*begin"):
- passages = f"{passages}{line}"
+ passages = f"{passages}{line}<br />\n"
+ if line.strip().startswith("; Messteam:") or line.strip().startswith("; Zeichner:"):
+ passages = f"{passages}{line}<br />\n"
+
+ commentary= "ARGE or VfHO cave.<br />3 lines of the survexfile,<br /> then all the *begin lines and any '; Messteam' and '; Zeichner' lines:<br><pre>"
cave = Cave(
- underground_description="ARGE or VfHO cave.<br>3 lines of the survexfile, then all the *begin lines:<br><pre>" + line1 +line2 +line3 +passages +"</pre>",
+ underground_description=commentary + line1 +line2 +line3 +passages +"</pre>",
unofficial_number="ARGE-or-VfHO",
survex_file= f"{svxid}.svx",
url=url,
diff --git a/parsers/imports.py b/parsers/imports.py
index 743ab12..07b6755 100644
--- a/parsers/imports.py
+++ b/parsers/imports.py
@@ -26,6 +26,12 @@ def import_people():
with transaction.atomic():
troggle.parsers.people.load_people_expos()
+def import_users():
+ print("-- Importing troggle Users (users.json) to ", end="")
+ print(django.db.connections.databases["default"]["NAME"])
+ with transaction.atomic():
+ troggle.parsers.people.load_users()
+
def import_surveyscans():
print("-- Importing Survey Scans and Wallets")
with transaction.atomic():
diff --git a/parsers/people.py b/parsers/people.py
index 5037dae..7c48462 100644
--- a/parsers/people.py
+++ b/parsers/people.py
@@ -1,10 +1,16 @@
+import base64
import csv
+import json
import os
import re
+from cryptography.fernet import Fernet
from html import unescape
from pathlib import Path
from django.conf import settings
+from django.core import serializers
+from django.contrib.auth.models import User
+from django.db import models
from unidecode import unidecode
from troggle.core.models.troggle import DataIssue, Expedition, Person, PersonExpedition
@@ -15,7 +21,6 @@ The standalone script needs to be rendered defunct, and all the parsing needs to
or they should use the same code by importing a module.
"""
-
def parse_blurb(personline, header, person):
"""create mugshot Photo instance
Would be better if all this was done before the Person object was created in the db, then it would not
@@ -86,6 +91,94 @@ def troggle_slugify(longname):
return slug
+USERS_FILE = "users_e.json"
+ENCRYPTED_DIR = "encrypted"
+def load_users():
+ """These are the previously registered users of the troggle system.
+ """
+ PARSER_USERS = "_users"
+ DataIssue.objects.filter(parser=PARSER_USERS).delete()
+
+ key = settings.LONGTERM_SECRET_KEY # Django generated
+ k = base64.urlsafe_b64encode(key.encode("utf8")[:32]) # make Fernet compatible
+ f = Fernet(k)
+ print(f)
+
+
+ jsonfile = settings.EXPOWEB / ENCRYPTED_DIR / USERS_FILE
+ jsonurl = "/" + str(Path(ENCRYPTED_DIR) / USERS_FILE)
+ if not (jsonfile.is_file()):
+ message = f" ! Users json file does not exist: '{jsonfile}'"
+ DataIssue.objects.create(parser=PARSER_USERS, message=message)
+ print(message)
+ return None
+
+ with open(jsonfile, 'r', encoding='utf-8') as json_f:
+ try:
+ registered_users_dict = json.load(json_f)
+ except FileNotFoundError:
+ print("File not found!")
+ except json.JSONDecodeError:
+ print("Invalid JSON format! - JSONDecodeError")
+ except Exception as e:
+ print(f"An exception occurred: {str(e)}")
+ message = f"! Troggle USERs. Failed to load {jsonfile} JSON file"
+ print(message)
+ DataIssue.objects.update_or_create(parser=PARSER_USERS, message=message, url=jsonurl)
+ return None
+ users_list = registered_users_dict["registered_users"]
+
+ print(f" - {len(users_list)} users read from JSON")
+ for userdata in users_list:
+ if userdata["username"]:
+ if userdata["username"] == "expo":
+ continue
+ if userdata["username"] == "expoadmin":
+ continue
+ try:
+ e_email = userdata["email"]
+ email = f.decrypt(e_email).decode()
+ print(f" - user: '{userdata["username"]} <{email}>' ")
+ if existing_user := User.objects.filter(username=userdata["username"]): # WALRUS
+ # print(f" - deleting existing user '{existing_user[0]}' before importing")
+ existing_user[0].delete()
+ user = User.objects.create_user(userdata["username"], email, "secret")
+ user.set_password = "secret" # stores hash not password
+ user.is_staff = False
+ user.is_superuser = False
+ user.save()
+ except Exception as e:
+ print(f"Exception <{e}>\nusers in db: {len(User.objects.all())}\n{User.objects.all()}")
+ formatted_json = json.dumps(userdata, indent=4)
+ print(formatted_json)
+ return None
+ else:
+ print(f" - user: BAD username for {userdata} ")
+ # if userdata["date"] != "" or userdata["date"] != "None":
+ # message = f"! {str(self.walletname)} Date format not ISO {userdata['date']}. Failed to load from {jsonfile} JSON file"
+ # from troggle.core.models.troggle import DataIssue
+ # DataIssue.objects.update_or_create(parser="wallets", message=message, url=wurl)
+
+
+ ru = []
+ for u in User.objects.all():
+ if u.username == "expo":
+ continue
+ if u.username == "expoadmin":
+ continue
+ e_email = f.encrypt(u.email.encode("utf8")).decode()
+ ru.append({"username":u.username, "email": e_email, "password": u.password})
+ print(u.username, e_email)
+ original = f.decrypt(e_email).decode()
+ print(u.username, original)
+
+ jsondict = { "registered_users": ru }
+ encryptedfile = settings.EXPOWEB / ENCRYPTED_DIR / "encrypt.json"
+ with open(encryptedfile, 'w', encoding='utf-8') as json_f:
+ json.dump(jsondict, json_f, indent=1)
+ return True
+
+
def load_people_expos():
"""This is where the folk.csv file is parsed to read people's names.
Which it gets wrong for people like Lydia-Clare Leather and various 'von' and 'de' middle 'names'