forked from akkana/scripts
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlosalamosmtgs.py
More file actions
executable file
·900 lines (740 loc) · 33.4 KB
/
losalamosmtgs.py
File metadata and controls
executable file
·900 lines (740 loc) · 33.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
#!/usr/bin/env python3
# Scrape the Los Alamos meetings page to be alerted to what's on
# the agenda at upcoming meetings.
# Make it available via RSS.
# Suggestion: run this script via crontab:
# Use crontab -e to add a line like:
# 45 15 * * * python3 /path/tp/htdocs/losalamosmtgs.py > /path/to/htdocs/los-alamos-meetings/LOG 2>&1
import requests
from bs4 import BeautifulSoup, NavigableString
import datetime
from urllib.parse import urljoin
import io
import string
import subprocess
import tempfile
import json
import re
import os, sys
from lxml.html.diff import htmldiff
from urllib3.exceptions import ReadTimeoutError
from math import isclose
# Try to use PyMuPDF if available.
# For some inexplicable reason the package PyMuPDF is imported as "fitz".
try:
import fitz
except:
print("No PyMuPDF installed, using pdftohtml")
########## CONFIGURATION ##############
# You can also pass in RSS_URL RSS_DIR as two optional arguments

# Where to start: the public legistar meeting list
MEETING_LIST_URL = "http://losalamos.legistar.com/Calendar.aspx"

# The place where the RSS will be hosted. Must end with a slash.
# The RSS file will be this/index.rss.
RSS_URL = "http://localhost/los-alamos-meetings/"

# Where to put the generated RSS file. Customize this for your website.
RSS_DIR = os.path.expanduser("~/web/los-alamos-meetings")
if not os.path.exists(RSS_DIR):
    os.makedirs(RSS_DIR)

######## END CONFIGURATION ############

# Needed to deal with meetings that don't list a time:
NO_TIME = "NO TIME"

Verbose = True

# Make a timezone-aware datetime for now:
now = datetime.datetime.now().astimezone()
# and save the timezone.
# (Previously localtz was computed twice, once before `now` existed and
# once from it; the first, redundant computation has been removed.)
localtz = now.tzinfo

# Format for dates in RSS:
# This has to be GMT, not %Z, because datetime.strptime just
# throws away any %Z info anyway rather than parsing it.
# Better to get an error if we see any time that's not GMT.
RSS_DATE_FORMAT = "%a, %d %b %Y %H:%M GMT"

# Where temp files will be created. pdftohtml can only write to a file.
tempdir = tempfile.mkdtemp()

# Global accumulator: parse_html_meeting_list() appends one dict per meeting.
upcoming_meetings = []
def build_upcoming_meetings_list(only_future=False):
    """Fetch the Legistar calendar page (and, near month's end, next
    month's page too) and append each meeting found, as a dict, to the
    global upcoming_meetings list, oldest first.

    If only_future is set, past meetings are skipped.
    """
    # By default, the calendar page only shows the current month,
    # even when there are meetings scheduled for next month.
    # To see anything from the upcoming month you have to set cookies
    # in the HTTP request.
    # If you do that manually, here are the cookies it sets:
    # Setting-69-ASP.meetingdetail_aspx.gridMain.SortExpression=Sequence ASC; Setting-69-Calendar Options=info|; Setting-69-Calendar Year=Next Month; Setting-69-Calendar Body=All; Setting-69-ASP.calendar_aspx.gridCalendar.SortExpression=MeetingStartDate DESC; ASP.NET_SessionId=tmk5pfksowfid2t3nqjmpvac; BIGipServerprod_insite_443=874644234.47873.0000
    # but with any luck, 'Next Month' is the only one that's actually needed.

    # This has to be done before reading the default page,
    # to match the decreasing date order of the meetings on each month's page.
    if now.day > 20:
        cookiedict = { 'Setting-69-Calendar Year': 'Next Month' }
        # Use the same timeout as the request below: without it a hung
        # server would stall the whole cron job indefinitely.
        r = requests.get(MEETING_LIST_URL, cookies=cookiedict, timeout=30)
        parse_html_meeting_list(r.text, only_future)

    # Get the meetings on the default (this month) page.
    # These will be appended to the global list upcoming_meetings.
    r = requests.get(MEETING_LIST_URL, timeout=30)
    parse_html_meeting_list(r.text, only_future)

    # The meeting list is in date/time order, latest first.
    # Better to list them in the other order, starting with
    # meetings today, then meetings tomorrow, etc.
    # That's why we couldn't just write meetings from the earlier loop.
    # Could sort by keys, 'Meeting Date' and 'Meeting Time',
    # but since it's already sorted, it's easier just to reverse.
    upcoming_meetings.reverse()
def parse_html_meeting_list(page_html, only_future=False):
    """Parse the Legistar calendar page HTML and append a dict
    (column header -> cell value) for each meeting row to the
    global upcoming_meetings list.
    If only_future is set, don't include past meetings.
    """
    soup = BeautifulSoup(page_html, 'lxml')

    # Remove a bunch of spurious tags
    for badtag in [ "font", "span", "div" ]:
        badtags = soup.find_all(badtag)
        for tag in badtags:
            tag.replace_with_children()

    caltbl = soup.find("table",
                       id="ctl00_ContentPlaceHolder1_gridCalendar_ctl00")

    # The legend is in the thead
    fieldnames = []
    for i, field in enumerate(caltbl.thead.find_all("th")):
        if field.text:
            fieldnames.append(field.text.strip())
        else:
            # Unnamed column: use its index as the key.
            fieldnames.append(str(i))

    # Loop over meetings, rows in the table:
    for row in caltbl.tbody.find_all("tr"):
        dic = {}

        # Loop over columns describing this meeting:
        for i, field in enumerate(row.find_all("td")):
            if fieldnames[i].startswith("Agenda"):
                # If there's an Agenda URL, make it absolute.
                a = field.find("a")
                href = a.get("href")
                if href:
                    dic[fieldnames[i]] = urljoin(MEETING_LIST_URL, href)
                else:
                    dic[fieldnames[i]] = None
            elif fieldnames[i] == 'Meeting Location':
                # The Location field has simple formatting
                # such as <br>, so can't just take .text, alas.
                dic[fieldnames[i]] = ' '.join([str(c).strip()
                                               for c in field.contents]) \
                                        .strip()
            # The little calendar icon somehow comes out with a name of '2'.
            # Skip it.
            elif fieldnames[i] == '2' or not fieldnames[i]:
                continue
            # Most fields are simple and won't have any formatting.
            # They are full of nbsps '\u00a0', though.
            else:
                dic[fieldnames[i]] = field.text.replace('\u00a0', ' ').strip()

        if "Meeting Date" in dic:
            mtg_datetime = meeting_datetime(dic)
            # BUG FIX: this previously compared against `utcnow`, a name
            # that is never defined anywhere in this file (NameError as
            # soon as only_future was used). Compare against the
            # module-level aware `now` instead.
            if mtg_datetime and only_future and mtg_datetime < now:
                continue

        upcoming_meetings.append(dic)
def meeting_datetime(mtg):
    """Parse the meeting's "Meeting Date" and "Meeting Time" fields and
    return an aware datetime in the local timezone.
    A meeting with no listed time is placed at 23:59 on its date.
    Returns None (after printing an error) if the date can't be parsed.
    Side effect: sets mtg["Meeting Time"] to NO_TIME if it was missing/empty.
    """
    # The parsed time is in the local time and is unaware,
    # because strptime can't create a timezone aware object
    # even if the string it's parsing includes a timezone (see above).
    if "Meeting Time" not in mtg or not mtg["Meeting Time"]:
        mtg["Meeting Time"] = NO_TIME

    try:
        if mtg["Meeting Time"] != NO_TIME:
            unaware = datetime.datetime.strptime(mtg["Meeting Date"] + " "
                                                 + mtg["Meeting Time"],
                                                 '%m/%d/%Y %I:%M %p')
        else:    # No time, so list it at 23:59
            unaware = datetime.datetime.strptime(mtg["Meeting Date"],
                                                 '%m/%d/%Y')
            # BUG FIX: datetime objects are immutable; replace() returns
            # a new object. The original discarded the result, leaving
            # the time at 00:00.
            unaware = unaware.replace(hour=23, minute=59, second=0)

        # Make it aware in localtime
        localtime = unaware.astimezone(localtz)
        return localtime

    except ValueError:
        print("ERROR: Can't parse date on meeting:", mtg)
        return None
def diffhtml(before_html, after_html, title=None):
    """Diffs the two files, and returns an html fragment that wraps
    differences in <ins> or <del> tags, which you can style as desired.
    Returns bytes, not str, because everything else works in bytes
    due to using requests.
    """
    if not title:
        title = "Changed Agenda"

    # lxml.html.htmldiff only accepts strings, not bytes, but these
    # were read in as bytes because that's what comes from requests;
    # so translate them.
    # (isinstance, not type() ==, is the idiomatic type check.)
    if isinstance(before_html, bytes):
        before_html = before_html.decode()
    if isinstance(after_html, bytes):
        after_html = after_html.decode()

    # lxml.html.htmldiff returns fragments, not full documents.
    # So add a header that includes a style for ins and del.
    diff = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>%s</title>
<style>
ins { background: #9ff; }
del { background: #fbb; }
</style>
</head>

<body>
<h1>%s</h1>
''' % (title, title)

    diff += htmldiff(before_html, after_html)

    diff += "\n</body></html>\n"

    # encode to return bytes.
    return diff.encode()
def agenda_to_html(agendaloc, meetingtime, save_pdf_filename=None):
    """Convert the agenda at agendaloc to HTML, dispatching to the
    PyMuPDF (fitz) converter when that module was importable at startup,
    otherwise to the external pdftohtml-based converter.
    """
    if 'fitz' not in sys.modules:
        print("No fitz, using pdftohtml")
        return html_agenda_pdftohtml(agendaloc, meetingtime,
                                     save_pdf_filename)
    print("Using fitz")
    return html_agenda_fitz(agendaloc, meetingtime, save_pdf_filename)
def html_agenda_fitz(agendaloc, meetingtime, save_pdf_filename=None):
    """Convert a PDF agenda to simple HTML using PyMuPDF (fitz).
    Heading levels are guessed from each text block's left indent.
    Returns bytes, not str, to match html_agenda_pdftohtml
    (callers compare the result to the bytes NO_AGENDA and write it
    to files opened in binary mode).
    save_pdf_filename is accepted for signature parity with the
    pdftohtml path; this converter reads agendaloc directly.
    """
    doc = fitz.open(agendaloc)

    def find_indent_levels(pdfdoc):
        """Collect the distinct left-edge x positions of text blocks in
        the whole document and cluster nearby values, returning a sorted
        list of representative indent levels."""
        indents = []
        for page in pdfdoc.pages():
            for block in page.get_text("blocks"):
                indent = round(block[0])
                if indent not in indents:
                    indents.append(indent)
        indents.sort()

        def group_clusters(lis, max_sep):
            """Reduce a list of numbers removing numbers that are
            close to each other.
            E.g. [1, 2, 3, 44, 46] -> [2, 45].
            lis is a sorted list of numbers.
            max_sep is the maximum separation allowed.
            """
            # clusters is a list of [ low, high ] ranges
            clusters = []

            def add_to_clusters(l):
                for c in clusters:
                    if l >= c[0] and l <= c[1]:
                        # the value is already represented by this cluster
                        return
                    # Okay, l is outside this cluster. But is it close?
                    # On the low end?
                    if l <= c[1] and l >= c[1] - max_sep:
                        c[0] = l
                        return
                    # Or on the high end?
                    if l >= c[0] and l <= c[0] + max_sep:
                        c[1] = l
                        return
                # It's outside all of the known ranges.
                # Add a new range.
                clusters.append([l, l])

            for l in lis:
                add_to_clusters(l)

            # Now clusters is a list of ranges. Take the average of each.
            return [ int((c[0] + c[1])/2.) for c in clusters ]

        return group_clusters(indents, 10)

    indent_levels = find_indent_levels(doc)
    print("Found indent levels:", indent_levels)

    html = """<html>
<body>
<h3>%s</h3>
""" % (meetingtime.strftime("%a %y-%m-%d"))

    for page in doc.pages():
        # blocks are like paragraphs in a normal PDF.
        # block is supposedly a tuple,
        # (x0, y0, x1, y1, "lines in block", block_type, block_no)
        # according to https://pymupdf.readthedocs.io/en/latest/app2.html
        # but I think the last two are reversed, it's really no, type.
        # flags=0 disables images, but might also disable other
        # desirable things, so watch out.
        # https://pymupdf.readthedocs.io/en/latest/app2.html#text-extraction-flags
        blocks = page.get_text("blocks", flags=0)
        for b in blocks:
            # Is the indent close to the minimum indent?
            # Then it's a header.
            # Decide which level of header to use based on content.
            # (Regexes are raw strings now: '\.' in a plain string is an
            # invalid escape sequence.)
            if isclose(b[0], indent_levels[0], abs_tol=10):
                print("HEADER:", b[4].replace('\n', ' '))
                if re.match(r'[0-9]+\.\n', b[4]):
                    html += "<h1>%s</h1>\n<p>\n" % b[4]
                elif re.match(r'[A-Z]+\.\n', b[4]):
                    html += "<h2>%s</h2>\n<p>\n" % b[4]
                else:
                    html += "<p>%s</p>" % b[4]
            # Guard: a document with a single indent level has no
            # indent_levels[1]; treat everything else as "other indent".
            elif len(indent_levels) > 1 and isclose(b[0], indent_levels[1],
                                                    abs_tol=10):
                print("    ", b[4].replace('\n', ' '))
                html += "<br>\n%s\n" % b[4]
            else:
                print("  OTHER INDENT:", b[4].replace('\n', ' '))
                html += "<br><blockquote>\n%s</blockquote>\n" % b[4]

    html += "</body></html>"
    # Encode: callers expect bytes (see docstring).
    return html.encode()
def html_agenda_pdftohtml(agendaloc, meetingtime, save_pdf_filename):
    """Convert a PDF agenda to text and/or HTML using pdftohtml,
    then return cleaned-up bytes (not str).
    agendaloc may be an http(s) URL (downloaded to save_pdf_filename)
    or a local PDF path.
    save_pdf_filename is for debugging: if set, save the PDF there
    and don't delete it.
    Returns bytes, not str.
    """
    if agendaloc.lower().startswith('http') and ':' in agendaloc:
        r = requests.get(agendaloc, timeout=30)
        with open(save_pdf_filename, "wb") as pdf_fp:
            pdf_fp.write(r.content)
        agendaloc = save_pdf_filename

    # BUG FIX: convert agendaloc, not save_pdf_filename. After a
    # download they are the same file, but when agendaloc was already a
    # local path, save_pdf_filename was never written (and may be None),
    # so pdftohtml was pointed at a nonexistent file.
    htmlfile = agendaloc + ".html"
    args = [ "pdftohtml", "-c", "-s", "-i", "-noframes",
             # "-enc", "utf-8",
             agendaloc, htmlfile ]
    print("Calling", ' '.join(args))
    subprocess.call(args)

    return clean_up_htmlfile(htmlfile, meetingtime)
def clean_up_htmlfile(htmlfile, meetingtime):
    """Clean up the scary HTML written by pdftohtml,
    removing the idiotic dark grey background pdftohtml has hardcoded in,
    the assortment of absolute-positioned styles,
    the new-paragraph-for-each-line, etc.
    Returns bytes, not str.
    """
    with open(htmlfile, 'rb') as htmlfp:
        # The files produced by pdftohtml contain '\240' characters,
        # which are ISO-8859-1 for nbsp.
        # Adding "-enc", "utf-8" doesn't change that.
        # If they aren't decoded, BeautifulSoup will freak out
        # and won't see anything in the file at all.
        # (Renamed from html_bytes: after .decode() this is a str.)
        html_txt = htmlfp.read().decode('ISO-8859-1')

    # Make some changes. Primarily,
    # replace the grey background that htmltotext wires in
    soup = BeautifulSoup(html_txt, "lxml")
    body = soup.body

    # Insert the meeting date at the beginning of the body
    h_tag = soup.new_tag("h3")
    soup.body.insert(0, h_tag)
    datetext = NavigableString(meetingtime.strftime("%a %b %d"))
    h_tag.append(datetext)

    # Sometimes pdftohtml mysteriously doesn't work, and gives
    # a basically empty HTML file: everything is using position:absolute
    # and that makes it invisible to BeautifulSoup.
    # This seems to be
    # https://gitlab.freedesktop.org/poppler/poppler/-/issues/417
    # Check for that.
    # If all else fails, htmltotext works to extract the text,
    # and might produce cleaner output anyway.
    bodylen = len(body.text.strip())
    if bodylen == 0:
        print("** Yikes! Empty HTML from pdftohtml", htmlfile)
        # BUG FIX: previously `return html`, an undefined name
        # (NameError). Fall back to the raw pdftohtml output,
        # re-encoded to bytes per this function's contract.
        return html_txt.encode('utf-8')
    elif bodylen < 10:
        print(f"Short! Body text is: '{body.text}'")

    del body["bgcolor"]
    del body["vlink"]
    del body["link"]

    # Remove all the fixed pixel width styles
    for tag in body.find_all('style'):
        tag.decompose()
    for tag in body.find_all('div'):
        del tag["style"]
    for tag in body.find_all('p'):
        del tag["style"]

    # Get rid of the pagination
    pagediv_pat = re.compile('page[0-9]*-div')
    divs = list(body.find_all(id=pagediv_pat))
    for div in divs:
        div.replace_with_children()

    # There are also anchors like <a name="8">\n</a>
    # but they're not really hurting anything.

    # pdftohtml renders each line as a separate paragraph,
    # so joining paragraphs helps readability.
    # Call join_consecutive_tags starting with outer tags and working inward.
    # Do this while the p tags still have classes, so paragraphs of
    # different classes don't get merged.
    join_consecutive_tags(body, 'p')
    join_consecutive_tags(body, 'i')
    join_consecutive_tags(body, 'b')

    # Now don't need the class tags any more, so delete them.
    for tag in body.find_all('p'):
        del tag["class"]

    # Try to identify major headers, to highlight them better.
    header_pat = re.compile(r'([0-9]+)\.\s*([A-Z \(\)\/,]+)$', flags=re.DOTALL)
    # find_all(text=pat) doesn't work on the real files,
    # so loop over all b tags doing the match explicitly:
    for bold in body.find_all('b'):
        m = re.match(header_pat, bold.get_text().strip())
        if not m:
            continue
        # Can't change the text, but we can change the tag name:
        bold.name = 'h2'

    pretty_html_bytes = soup.prettify(encoding='utf-8')

    # Testing: maybe the above changes removed the body contents?
    # (I think this bug is long since fixed.)
    if not body.text:
        # BUG FIX: this path referenced three undefined names
        # (save_pdf_file, save_pdf_filename, savfp). Use htmlfile for
        # the messages/paths and a consistent file-handle name.
        print("**Yikes! The changes to", htmlfile,
              "made the HTML empty. Saving original instead.")
        with open(os.path.join(RSS_DIR,
                               os.path.basename(htmlfile) + "_cleaned.html"),
                  "wb") as savefp:
            savefp.write(pretty_html_bytes)
        return html_txt.encode('utf-8')

    return pretty_html_bytes
def join_consecutive_tags(soup, tagname, add_spaces=False):
    """Join consecutive tags of name tagname if they have the same attributes.
    E.g. <p class="foo">some text</p><p class="foo">different text</p>
    would produce <p class="foo">some text different text</p>.
    If add_spaces, will add spaces between tags.
    NOTE(review): add_spaces is accepted but never used in the body —
    no spaces are actually inserted; confirm whether that's intended.
    Mutates the soup in place; returns None.
    """
    # Groups of sibling tags to merge: each entry is a list of two or
    # more tags whose children will be folded into the first one.
    to_merge = []
    tags = list(soup.find_all(tagname))
    prev = None
    # Matches a short section number like "3." or "B." at paragraph start.
    sectionnum_pat = re.compile('[0-9A-Z]{,2}\.')
    for tag in tags:
        prev = tag.find_previous_sibling()
        # If the two tags have the same parent and the same class,
        # they should be merged.
        if prev and prev.attrs == tag.attrs:
            # First merge in the list?
            if not to_merge:
                to_merge.append([prev, tag])
                continue
            else:
                # Should these be merged with the last merge?
                last_group = to_merge[-1]
                last_tag_merged = last_group[-1]
                # NOTE(review): prev_sib is computed but never read.
                prev_sib = prev.find_previous_sibling()
                # SPECIAL CASE FOR LEGISTAR:
                # Does it look like a section header?
                # Don't merge a paragraph that looks like "2."
                # with whatever was before it.
                if tag.name == 'p' and re.match(sectionnum_pat, tag.text):
                    continue
                elif (prev == last_tag_merged and
                      tag.attrs == last_tag_merged.attrs):
                    # Continue a group merge of three or more tags
                    last_group.append(tag)
                else:
                    # New pair of mergers, make a new group
                    to_merge.append([prev, tag])
        # NOTE(review): dead assignment — prev is recomputed from
        # find_previous_sibling() at the top of every iteration.
        prev = tag
    for group in to_merge:
        first = group[0]
        for tag in group[1:]:
            # Iterating directly over tag.children gets crossed up
            # when some of the children are moved to another tag,
            # so snapshot the list first.
            children = list(tag.children)
            for child in children:
                first.append(child)
            # All of tag's children have been moved to first.
            # Delete tag.
            tag.decompose()
# Characters allowed to survive in generated filenames:
# dash, underscore, dot, ASCII letters and digits.
VALID_FILENAME_CHARS = "-_." + string.ascii_letters + string.digits

def clean_filename(badstr):
    """Return badstr with every character outside VALID_FILENAME_CHARS
    (spaces, slashes, punctuation, etc.) stripped out."""
    kept = [ch for ch in badstr if ch in VALID_FILENAME_CHARS]
    return ''.join(kept)
# Sentinel agenda content, stored in the agenda .html file when a meeting
# has no agenda. bytes, because agenda files are read/written in binary.
NO_AGENDA = b"No agenda available."

def write_rss20_file(mtglist):
    """Take a list of meeting dictionaries
    and make RSS and HTML files from it.

    For each future meeting: fetch/convert its agenda, compare with any
    previously saved agenda file to classify it (new/changed/removed/
    unchanged/no), write per-meeting .html/.json (and -diff.html) files
    under RSS_DIR, and emit an <item> into index.rss plus a section in
    index.html. Finally, delete files for meetings no longer listed.
    """
    active_meetings = []

    ##############
    # Generate index HTML and RSS file headers.
    # Open both the RSS and HTML files:
    outrssfilename = os.path.join(RSS_DIR, "index.rss")
    outhtmlfilename = os.path.join(RSS_DIR, "index.html")
    with open(outrssfilename, 'w') as rssfp, \
         open(outhtmlfilename, 'w') as htmlfp:

        print("\n==== Generating RSS for", len(mtglist), "meetings")
        gendate = now.strftime(RSS_DATE_FORMAT)
        print(f"""<?xml version="1.0" encoding="iso-8859-1" ?>
<rss version="2.0"
    xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#">
<channel>
   <title>Los Alamos County Government Meetings</title>
   <link>{RSS_URL}losalamosmeetings</link>
   <description>An Unofficial, Non-Sanctioned Listing of Los Alamos Government Meetings, provided by Akkana Peck.</description>
   <language>en</language>
   <copyright>Public Domain</copyright>
   <ttl>14</ttl>
   <pubDate>{gendate}</pubDate>
   <managingEditor>akk at shallowsky dot com (Akkana Peck)</managingEditor>
   <generator>losalamosmtgs</generator>
""",
              file=rssfp)
        print(f"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html>
<head>
<title>Los Alamos County Government Meetings</title>
<meta http-equiv="content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1">
<link rel="alternate" type="application/rss+xml"
      title="Los Alamos Meetings Feed"
      href="{RSS_URL}index.rss" />
</head>

<body>

<h1>Los Alamos County Government Meetings</h1>
As of: {gendate}
 ... <a href="about.html">About Los Alamos Meetings (How to Use This Page)</a>
 ... <a href="{RSS_URL}index.rss">Los Alamos Meetings RSS2.0 Feed</a>.
""", file=htmlfp)

        for mtg in mtglist:
            # Is the meeting in the future? Don't list past meetings.
            meetingtime = meeting_datetime(mtg)
            if not meetingtime:
                print("Skipping", mtg["Name"], mtg["Meeting Date"],
                      "because of bad meeting datetime",
                      mtg["Meeting Date"], mtg["Meeting Date"])
                continue
            if meetingtime < now:
                print("Skipping", mtg["Name"], mtg["Meeting Date"],
                      "because", meetingtime, "<", now)
                continue

            # lastmod tracks when this meeting's listing last changed;
            # stays None until decided by the JSON comparison below.
            lastmod = None
            mtg['cleanname'] = mtgdic_to_cleanname(mtg)
            cleanname = mtg['cleanname']
            print("\n====", cleanname)

            if mtg["Agenda"]:    # There is an agenda URL listed
                print(cleanname, "has an agenda: fetching it")
                # XXX TEMPORARY: save the PDF filename, because sometimes
                # pdftohtml produces an HTML file with no content even
                # though there's content in the PDF.
                pdfout = os.path.join(RSS_DIR, cleanname + ".pdf")
                try:
                    agenda_html = agenda_to_html(mtg["Agenda"],
                                                 meetingtime,
                                                 save_pdf_filename=pdfout)
                except ReadTimeoutError:
                    # NOTE(review): `agendaloc` is not defined in this
                    # scope — this handler would itself raise NameError;
                    # presumably mtg["Agenda"] was meant. Confirm.
                    print("Timed out on " + agendaloc)
                    agenda_html = NO_AGENDA
                    agendastatus = "timeout"
            else:    # No previous agenda
                print(cleanname, "has no agenda to fetch")
                agenda_html = NO_AGENDA

            # Now agenda_html is guaranteed to be set, either way.

            # Might need a diff file too:
            agenda_diff = None

            # Does the agenda file need to be (re)written?
            write_agenda_file = False

            # See if there was already an agenda file left from previous runs:
            agendafile = os.path.join(RSS_DIR, cleanname + ".html")
            try:
                with open(agendafile, "rb") as oldfp:
                    oldhtml = oldfp.read()

                if NO_AGENDA in oldhtml:    # no agenda previously
                    if Verbose:
                        print("No agenda previously")
                    if agenda_html != NO_AGENDA:    # but there is now
                        write_agenda_file = True
                        agendastatus = "new"
                        print(cleanname, ": new agenda")
                    else:
                        agendastatus = "no"
                        print(cleanname, ": no agenda")
                else:    # there was a previous agenda
                    if Verbose:
                        print("There was a previous agenda: ===============")
                    if agenda_html == NO_AGENDA:
                        print(oldhtml)
                        print("========= but now, ===============")
                        print(agenda_html)
                        # NOTE(review): agendastatus may be unbound here
                        # unless the timeout handler above set it —
                        # potential NameError. Confirm intended guard.
                        if not agendastatus:
                            agendastatus = "removed"
                            print(cleanname, ": removed agenda")
                        # don't write over the old agenda file
                        write_agenda_file = False
                    elif agenda_html != oldhtml:    # changed agenda
                        write_agenda_file = True
                        # XXX TEMPORARY: save the previous file,
                        # to have them available while debugging diffs.
                        os.rename(agendafile,
                                  os.path.join(RSS_DIR,
                                               cleanname + "-old.html"))
                        # End temporary clause

                        # Since the agenda has changed, make a diff file
                        # highlighting the parts that changed.
                        agenda_diff = diffhtml(oldhtml, agenda_html,
                                               title=cleanname)
                        agenda_diff_file = os.path.join(RSS_DIR,
                                                        mtg['cleanname'] + "-diff.html")
                        with open(agenda_diff_file, 'wb') as difffp:
                            difffp.write(agenda_diff)
                        agendastatus = "changed"
                        print(cleanname, ": changed agenda")
                    else:
                        agendastatus = "unchanged"
                        print(cleanname, ": unchanged agenda")

            except FileNotFoundError:
                # No agenda file there previously; probably a new meeting
                if Verbose:
                    print("No previous agenda file")
                write_agenda_file = True
                if agenda_html == NO_AGENDA:
                    if Verbose:
                        print("... and no new agenda now")
                    agendastatus = "no"
                else:
                    if Verbose:
                        print("but there's an agenda there")
                    agendastatus = "new"

            # Now agenda_html and agendastatus should be set,
            # and maybe agenda_diff too.

            # Write the agenda file if it's changed:
            if write_agenda_file:
                with open(agendafile, 'wb') as outfp:
                    outfp.write(agenda_html)

            # Either way, this meeting is still listed:
            # note it so it won't be cleaned from the directory.
            active_meetings.append(cleanname)

            # Get the old JSON
            changestr = ""
            jsonfile = os.path.join(RSS_DIR, mtg['cleanname'] + ".json")
            try:
                with open(jsonfile) as jsonfp:
                    oldmtg = json.loads(jsonfp.read())

                # mtg doesn't have lastmod, so to make sure that
                # doesn't trigger a change, copy it:
                mtg['lastmod'] = oldmtg['lastmod']

                changed_keys = {key for key in oldmtg.keys() & mtg
                                if oldmtg[key] != mtg[key]}
                if changed_keys:
                    lastmod = now
                    changestr = "<p>Changed: " + ', '.join(changed_keys) \
                        + "</p>"
                    print("Keys changed:", changed_keys, "lastmod is", lastmod)
                elif not lastmod:
                    print("Nothing has changed, keeping lastmod")
                    lastmod = datetime.datetime.strptime(oldmtg['lastmod'],
                                                         RSS_DATE_FORMAT)
            # NOTE(review): json.JSONDecodeError is a ValueError, not
            # caught here — a corrupt JSON file would crash. Confirm.
            except (RuntimeError, OSError) as e:
                if os.path.exists(jsonfile):
                    print("Error reading jsonfile: %s" % e)
                    changestr += "Error reading jsonfile: %s\n<p>" % e
                elif Verbose:
                    print("No JSON file there previously")
                lastmod = now
                changestr += "<b>New listing.</b>\n<p>"

            # Update to the real lastmod date before writing JSON
            mtg['lastmod'] = lastmod.strftime(RSS_DATE_FORMAT)
            mtg['GUID'] = cleanname + '.' + lastmod.strftime("%Y%m%d-%H%M")

            # If the meeting is new or something has changed,
            # (re)write the JSON file..
            with open(jsonfile, 'w') as jsonfp:
                jsonfp.write(json.dumps(mtg, indent=4))

            mtgtitle = f"""{mtg['Name']} on {meetingtime.strftime("%a %b %d")}"""

            desc = f"""{mtg['Name']}: {mtg['Meeting Date']} at {mtg['Meeting Time']}<br />
"""

            # Set up the change strings for the header and body
            # NOTE(review): an agendastatus of "timeout" matches none of
            # these branches, leaving agenda_hdr unbound (NameError at
            # the <title> f-string below). Confirm intended handling.
            if agendastatus == "new":
                agenda_hdr = " (AGENDA)"
                desc += "<p><b>There is a new agenda.</b>"
            elif agendastatus == "removed":
                agenda_hdr = " (REMOVED AGENDA)"
                desc += "<p><b>The agenda has been removed.</b>"
            elif agendastatus == "changed":
                agenda_hdr = " (CHANGED AGENDA)"
                desc += "<p><b>The agenda has changed.</b>"
            elif agendastatus == "unchanged":
                agenda_hdr = ""
                desc += "<p>The agenda hasn't changed."
            elif agendastatus == "no":
                agenda_hdr = " (no agenda)"
                desc += "<p>No agenda yet."

            if mtg['Meeting Location']:
                desc += "<p>Location:" + mtg['Meeting Location']

            if changestr:
                desc += "<p>" + changestr + '\n'

            # Link to the diff when the agenda changed, else to the agenda.
            if agenda_diff:
                link = f"{RSS_URL}{cleanname}-diff.html"
            else:
                link = f"{RSS_URL}{cleanname}.html"

            if mtg["Agenda"]:
                desc = f"""{desc}<p>
<a href="{mtg["Agenda"]}">Agenda PDF</a><br>
</p>
"""
            if mtg["Agenda Packets"]:
                # The agenda packet links tend to have & in them
                # and so need to be escaped with CDATA
                if 'http' in mtg["Agenda Packets"]:
                    desc += f"""<p><a href="{mtg["Agenda Packets"]}">Agenda Packet</a></p>\n"""
                else:
                    # NOTE(review): plain `=` here discards everything
                    # accumulated in desc so far; `+=` seems intended.
                    desc = f"""<p>Agenda packet: {mtg["Agenda Packets"]}</p>\n"""

            print("GUID will be", mtg['GUID'], "lastmod is", mtg['lastmod'])

            # Add the item to the RSS
            print(f"""<item>
   <title>{mtgtitle} {agenda_hdr}</title>
   <guid isPermaLink="false">{mtg['GUID']}</guid>
   <link>{link}</link>
   <description><![CDATA[ {desc} ]]>
   </description>
   <pubDate>{mtg['lastmod']}</pubDate>
</item>""", file=rssfp)

            # And add it to the HTML
            print(f"<p><h2>{mtgtitle} {agenda_hdr}</h2>", file=htmlfp)
            if mtg["Agenda"]:
                print(f'<p><b><a href="{link}">Agenda: {mtgtitle}</a></b>',
                      file=htmlfp)
            # else:
            #     print("<p>No agenda yet", file=htmlfp)
            print(f"""<p>
{desc}
<p>(Last modified: {gendate}.)
""",
                  file=htmlfp)

        print("</channel>\n</rss>", file=rssfp)
        print("</body></html>", file=htmlfp)

    print("Wrote", outrssfilename, "and", outhtmlfilename)

    # Remove obsolete files for meetings no longer listed.
    for f in os.listdir(RSS_DIR):
        # Only clean up certain extensions:
        rmexts = [ '.json', '.rss', '.html', '.pdf' ]
        name, ext = os.path.splitext(f)
        if ext not in rmexts:
            continue
        # Protected files
        if f.startswith("index") or f.startswith("about"):
            continue

        def is_active(f):
            # True if f belongs to some meeting that's still listed.
            for act in active_meetings:
                if f.startswith(act):
                    return True
            return False

        if not is_active(f):
            print("removing", f)
            os.unlink(os.path.join(RSS_DIR, f))
def mtgdic_to_cleanname(mtgdic):
    """A standard way to turn date and committee name into something
    that can be used for filenames or URLs.
    Will be used both for the agenda file and for RSS entries.
    """
    namepart = clean_filename(mtgdic["Name"])
    mtg_dt = meeting_datetime(mtgdic)
    if not mtg_dt:
        # Unparseable date: fall back to a fixed prefix.
        return "notime-" + namepart
    return "%s-%s" % (mtg_dt.strftime("%Y-%m-%d"), namepart)
if __name__ == '__main__':
    # Optional command-line arguments:
    #   argv[1]: RSS_URL, the base URL used in generated links
    #   argv[2]: RSS_DIR, where the output files are written
    if len(sys.argv) > 1:
        RSS_URL = sys.argv[1]
        # RSS_URL is a directory and must end with a slash
        if not RSS_URL.endswith('/'):
            RSS_URL += '/'
    if len(sys.argv) > 2:
        RSS_DIR = sys.argv[2]

    # Scrape the calendar, then emit index.rss/index.html plus
    # per-meeting files.
    build_upcoming_meetings_list()
    write_rss20_file(upcoming_meetings)