TODO: the follow-up was never finished.

But for posterity, haha, here it is anyway!
2025-08-29 09:08:09 +02:00
parent d70eb7c257
commit 289bfb9e43
5 changed files with 227 additions and 108 deletions


@@ -3,7 +3,7 @@
import time
from subprocess import Popen, PIPE, STDOUT

def extract_signature_content(lines) :
def extract_signed_content(lines) :
    gpg_boundary = None
    gpg_boundary_stroke = 0
    lines_to_validate = []
@@ -21,6 +21,15 @@ def extract_signature_content(lines) :
    lines_to_validate = lines_to_validate[:-1]
    return lines_to_validate
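
# New helper: scan the raw mail for its Subject: header. The subject line
# carries the publishing order (e.g. "CREATE;TYPE=plain"), which is parsed
# further down in the main loop.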
def get_mail_subject(lines) :
    for line in lines :
        if "Subject:" in line :
            return line.replace("Subject:", "")
    return None
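
# munpack_extract shells out to munpack(1) to split the MIME mail into its
# parts inside `folder`; the output is later parsed as one
# "<filename> (<type>)" line per extracted part.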
def munpack_extract(folder, file) :
    p = Popen(["munpack", "-f", "-t", "-C", folder, file], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    out, err = p.communicate()
@@ -41,10 +50,112 @@ def validate_signature(to_validate) :
p = Popen(["gpg", "--verify", "signature.asc", "tmp"], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
out, err = p.communicate()
out = out.decode("utf-8", "strict")
rm_file("tmp")
return not("BAD" in out or "MAUVAISE" in out)
def rm_file(filepath) :
    p = Popen(["rm", filepath], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    p.communicate()
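
# list_current_pages lists the published pages; pages are plain files named
# by number (1, 2, 3, ...) in the HTML folder.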
def list_current_pages(html_folder) :
    p = Popen(["ls", html_folder], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    out, err = p.communicate()
    out = out.decode("utf-8", "strict")
    return out.split("\n")
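
# Walk the listing and keep the highest purely numeric file name; -1 means no
# numbered page exists yet.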
def get_bigger_file(html_content) :
    greater = -1
    for html_file in html_content :
        try :
            current = int(html_file)
            if current > greater :
                greater = current
        except ValueError :
            pass
    return greater
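
# Best-effort cleanup of whatever munpack dropped in the working directory.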
def hard_cleanup(temp_files, source) :
    for f in temp_files :
        print("rm {}".format(f))
        try :
            rm_file(f.split(" ")[0])
        except :
            pass
    # TODO cleanup source mail too
    # rm_file(source)
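
# Look up a "NAME=value" entry in the split subject parameters; return the
# value after the first "=", or `default` when the parameter is missing or
# malformed.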
def get_param(params, name, default=None) :
    for param in params :
        if name in param :
            try :
                return param.split("=")[1]
            except IndexError :
                return default
    return default
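
# Create a brand-new page: the highest-numbered existing page gets a "prev"
# link pointing at the page being created, and the new page is rendered from
# the requested template. Only partially implemented (see the TODO below).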
def create_new_page(extracted_files, order_type, order_template) :
    # Files without an extension are listed in order to find the next,
    # highest-numbered page.
    html_content = list_current_pages(html_base)
    print(html_content)
    # The biggest page is going to be updated, with its prev pointing to
    # the new biggest page, which will be bigger+1
    bigger_file = get_bigger_file(html_content)
    new_bigger = bigger_file + 1
    template = open(html_base+"/{}_template".format(order_template), 'r')
    template_lines = template.readlines()
    template.close()
    if bigger_file > -1 :
        # Update the previous biggest page: add it a prev link pointing to
        # the new page being created
        actual_bigger = open(html_base+"/{}".format(bigger_file), 'r')
        actual_bigger_lines = actual_bigger.readlines()
        actual_bigger.close()
        actual_bigger = open(html_base+"/{}".format(bigger_file), 'w')
        for line in actual_bigger_lines :
            actual_bigger.write(line)
            if "class='next'" in line :
                actual_bigger.write('<a href="{}" class="prev"><</a>\n'.format(
                    bigger_file+1))
        actual_bigger.close()
    # Fill in the template's next link with the current biggest page, or
    # comment the line out when no page exists yet
    update_template_lines = []
    for line in template_lines :
        if "class='next'" in line :
            if bigger_file > -1 :
                update_template_lines.append(
                    line.format(bigger_file))
            else :
                update_template_lines.append("<!--{}-->".format(
                    line.format(bigger_file)))
        else :
            update_template_lines.append(line)
    template_lines = update_template_lines
    if "pics" in order_type :
        print("the mail contains pictures, those will be added")
    if "plain" in order_type :
        print("need to create a standard page with template {}".format(order_template))
    content = []
    # TODO explore extracted files and create content
    return False
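
# Updating and deleting existing pages were never implemented (see the TODO
# at the top of this page).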
def update_page(extracted_files, order_type, order_id, order_template) :
    return False

def delete_page(order_id) :
    return False
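
# new_base is where the fetched mails live; html_base is where the generated
# site is written (the second assignment overrides the first, presumably for
# local testing).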
new_base = "mails/"
html_base= "/var/www/html"
html_base= "./html"
#p = Popen(['offlineimap'], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()
@@ -59,121 +170,67 @@ for new_mail in new_mails :
    print(new_mail)
    #
    # Read the mail and extract the pgp signature content
    #
    # TODO try catch
    file = open("{}/{}".format(new_base,new_mail), "r")
    mail_lines = file.readlines()
    lines_to_validate = extract_signature_content(mail_lines)
    mail_lines = file.readlines()
    lines_to_validate = extract_signed_content(mail_lines)
    mail_subject = get_mail_subject(mail_lines)
    # Unpack the mail with munpack
    extracted_files = munpack_extract("./", "{}/{}".format(new_base,new_mail))
    if extracted_files is None :
        continue
    validation = False
    print(extracted_files)
    for extracted in extracted_files :
        if "signature.asc" in extracted :
            validation = validate_signature(lines_to_validate)
    print("validation {}".format(validation))
    error_occurred = False
    if validation :
        print("Signature is valid, processing subject and content")
        print(mail_subject)
        subject_parameters = mail_subject.strip().split(";")
        order = subject_parameters[0]
        order_type = get_param(subject_parameters, "TYPE")
        order_id = get_param(subject_parameters, "ID")
        order_template = get_param(subject_parameters, "TEMPLATE",
                                   default="default")
        # TODO add session in case something goes wrong
        if "CREATE" in order :
            if order_type is None :
                print("Create, but no type")
                error_occurred = True
            else :
                error_occurred = not create_new_page(extracted_files,
                                                     order_type, order_template)
        elif "UPDATE" in order :
            if order_type is None :
                print("Update, but no type")
                error_occurred = True
            elif order_id is None :
                print("Update, but no id")
                error_occurred = True
            else :
                error_occurred = not update_page(extracted_files,
                                                 order_type, order_id, order_template)
        elif "DELETE" in order :
            if order_id is None :
                print("Delete, but no id")
                error_occurred = True
            else :
                error_occurred = not delete_page(order_id)
        elif "BASH" in order :
            print("Not supported yet")
            error_occurred = True
        else:
            print("Invalid order")
            error_occurred = True
    else :
        error_occurred = True
    if error_occurred :
        print("some error occurred")
    hard_cleanup(extracted_files, "{}/{}".format(new_base, new_mail))
## Files without an extension are listed in order to find the next,
## highest-numbered file.
#p = Popen(["ls",html_base], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()
#html_content = out.split("\n")
#print(html_content)
## The file names are scanned looking for the highest number
#greater = 0
#for html_file in html_content :
#    try :
#        current = int(html_file)
#        if current > greater :
#            greater = current
#    except :
#        pass
## Once the highest number has been isolated,
#print("greater {}".format(greater))
## The previous highest page gets updated
#f = open(html_base+"/{}".format(greater), 'r')
#fn= open(html_base+"/{}new".format(greater), 'w')
#prev_lines = f.readlines()
#for line in prev_lines :
#    if 'class="prev"' in line :
#        fn.write(' <a href="{}" class="prev"><</a>\n'.format(greater+1))
#    else :
#        fn.write(line)
#f.close()
#fn.close()
## move the new file to its right place
#p = Popen(["rm",html_base+"/{}".format(greater)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()
#p = Popen(["mv",html_base+"/{}new".format(greater),
# html_base+"/{}".format(greater)], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()
## now greater is + 1
#greater += 1
## move index.html to greater
#f = open(html_base+"/index.html", 'r')
#fn= open(html_base+"/{}".format(greater), 'w')
#prev_lines = f.readlines()
#for line in prev_lines :
#    if 'class="next"' in line :
#        fn.write(' <a href="{}" class="prev"><</a>\n'.format("index.html"))
#        fn.write(' <a href="{}" class="next">></a>\n'.format(greater-1))
#    else :
#        fn.write(line)
#f.close()
#fn.close()
## remove the old index.html file
#p = Popen(["rm",html_base+"/index.html"], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()
## now create the new index.html file
#f = open(html_base+"/index.html", 'w')
#f.write('<html>\n')
#f.write(' <head>\n')
#f.write(' <link rel="stylesheet" type="text/css" href="style.css">\n')
#f.write(' <meta charset="utf-8"/>\n')
#f.write(' </head>\n')
#f.write(' <body>\n')
#f.write(' <script src="script.js"></script>\n')
#f.write(' <a href="{}" class="next">></a>\n'.format(greater))
#f.write(' <div class="content">\n')
## write the top of the file
#for brut in extracted_files :
#    if not brut == "" :
#        split = brut.split(" ")
#        file_name = split[0]
#        file_type = split[1]
#        if "plain" in file_type :
#            p = Popen(["rm",new_base+"/"+file_name], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#            out, err = p.communicate()
#        elif "html" in file_type :
#            f.write(' <div class="texte">\n')
#            fh = open(new_base+"/"+file_name, 'r')
#            lines = fh.readlines()
#            for line in lines :
#                f.write(line)
#            fh.close()
#            f.write(' </div>\n')
#            p = Popen(["rm",new_base+"/"+file_name], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#            out, err = p.communicate()
#        else :
#            f.write(' <div class="images">\n')
#            f.write(' <img src="{}" alt="chaussette">\n'.format(file_name))
#            f.write(' </div>\n')
#            p = Popen(["mv",new_base+"/"+file_name,
#                html_base+"/"+file_name], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#            out, err = p.communicate()
#            try :
#                p = Popen(["chmod", "644", html_base+"/"+file_name], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#                out, err = p.communicate()
#            except :
#                pass
## write the end of the file
#f.write(' </div>\n')
#f.write(' </body>\n')
#f.write('</html>\n')
#f.close()
## remove the mail
#p = Popen(["rm", new_base+"/"+new_mail], stdout=PIPE, stdin=PIPE, stderr=STDOUT)
#out, err = p.communicate()