Repository with sources and generator of https://larlet.fr/david/

site.py 24KB

#!/usr/bin/env python3
import json
import locale
import pickle
import time
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from html import escape
from itertools import chain, groupby
from operator import attrgetter
from pathlib import Path
from string import Template
from textwrap import dedent
from time import perf_counter

import feedparser
import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from jinja2.filters import do_striptags
from minicli import cli, run, wrap
from mistune.plugins.formatting import mark, strikethrough
from mistune.util import safe_entity
from PIL import Image
from slugify import slugify
from typography import typographie
from widont import widont

# Useful for rendering dates within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

VERSION = "2023.09.10"
HERE = Path(".")
DAVID = HERE / "david"
STATIC = HERE / ".." / "larlet-fr-static"
DOMAIN = "https://larlet.fr"
LOCAL_DOMAIN = "http://larlet.test:3579"
# Hardcoding publication at 12:00 in the Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
TODAY = datetime.today() + timedelta(hours=6)
PUBLICATION_BUFFER = TODAY - timedelta(days=0)
NB_ITEMS_IN_FEED = 30
SOURCES_PATH = DAVID / "2023" / "_sources"

all_tags = set()
pages_by_tags = defaultdict(list)
pages_by_url = {}


class TagsRenderer(mistune.HTMLRenderer):
    """Make the assumption that each line starting with a `#` is a tag."""

    def paragraph(self, text):
        if text.startswith("#"):
            tags = " ".join(
                f'<a href="/david/2023/{slugify(tag.strip())}/">#{tag.strip()}</a>'
                for tag in text.split("#")
                if tag.strip()
            )
            return f"<nav><p>{tags}</p></nav>\n"
        return super().paragraph(text)
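
# Illustrative example (editor's sketch, not part of the original file): a
# Markdown paragraph reading "#Écriture #Web" is rendered by TagsRenderer as:
#   <nav><p><a href="/david/2023/ecriture/">#Écriture</a>
#   <a href="/david/2023/web/">#Web</a></p></nav>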


class FrenchTypographyRenderer(mistune.HTMLRenderer):
    """Apply French typographic rules to text."""

    def text(self, text):
        return typographie(super().text(text), html=True)

    def block_html(self, html):
        return typographie(super().block_html(html), html=True)


class InternalLinkTitleRenderer(mistune.HTMLRenderer):
    """Automatically generate the title for internal links."""

    def link(self, text, url, title=None):
        s = '<a href="' + self.safe_url(url) + '"'
        if not title and url.startswith("/david/2023/"):
            # It will not work for internal URLs referencing the future.
            page = pages_by_url.get(url)
            if page:
                title = page.title
        if title:
            s += ' title="' + safe_entity(title) + '"'
        return s + ">" + text + "</a>"


class CustomAndBlockquoteLanguageRenderer(
    FrenchTypographyRenderer, InternalLinkTitleRenderer, TagsRenderer
):
    """Sets the English language attribute for blockquotes with `[en]` prefix."""

    def _get_language(self, text):
        if text.startswith("<p>[en] "):
            return "en", text.replace("<p>[en] ", "<p>")
        else:
            return None, text

    def block_quote(self, text):
        language, text = self._get_language(text)
        if language:
            return f'\n<blockquote lang="{language}">\n{text}</blockquote>\n'
        else:
            return f"\n<blockquote>\n{text}</blockquote>\n"
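
# Illustrative example (editor's sketch): a blockquote whose paragraph starts
# with "[en] " gets an explicit language attribute, i.e. the rendered
#   <p>[en] Quoted text.</p>
# becomes
#   <blockquote lang="en">
#   <p>Quoted text.</p></blockquote>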


class ImgsWithSizesRenderer(CustomAndBlockquoteLanguageRenderer):
    """Renders images as <figure>s and adds sizes."""

    def paragraph(self, text):
        # In case of a figure, we do not want the (non-standard) paragraph.
        if text.strip().startswith("<figure>"):
            return text
        return super().paragraph(text)

    def _generate_size(self, src, width, height):
        src_size = src.replace(".jpg", f"_{width}x{height}.jpg")
        full_path = STATIC / Path(src[1:])
        full_path_size = STATIC / Path(src_size[1:])
        if full_path_size.exists() or "/2023/" not in src:
            return src_size
        image = Image.open(full_path)
        image.thumbnail((width, height), resample=Image.LANCZOS)
        image.save(full_path_size, icc_profile=image.info.get("icc_profile"))
        return src_size

    def _generate_webp(self, src):
        src_webp = src.replace(".jpg", ".webp")
        full_path = STATIC / Path(src[1:])
        full_path_webp = STATIC / Path(src_webp[1:])
        if full_path_webp.exists() or "/2023/" not in src:
            return src_webp
        image = Image.open(full_path)
        image.save(
            full_path_webp, format="webp", icc_profile=image.info.get("icc_profile")
        )
        # command = [
        #     "cwebp",
        #     "-q",
        #     "80",
        #     full_path,
        #     "-o",
        #     full_path_webp,
        #     "-metadata",
        #     "icc",
        # ]
        # subprocess.check_output(command, stderr=subprocess.STDOUT)
        return src_webp

    def image(self, alt, url, title=None):
        SIZES = [(660, 440), (990, 660), (1320, 880)]
        full_path = STATIC / Path(url[1:])
        image = Image.open(full_path)
        width, height = image.size
        jpg_srcs = [(url, width, height)]
        # src_webp = self._generate_webp(src)
        # webp_srcs = [(src_webp, width, height)]
        for size_width, size_height in SIZES:
            src_size = self._generate_size(url, size_width, size_height)
            jpg_srcs.append((src_size, size_width, size_height))
            # src_size_webp = self._generate_webp(src_size)
            # webp_srcs.append((src_size_webp, size_width, size_height))
        jpg_srcsets = ", ".join(
            f"{jpg_src} {jpg_width}w" for jpg_src, jpg_width, jpg_height in jpg_srcs
        )
        # webp_srcsets = ", ".join(
        #     f"{webp_src} {webp_width}w"
        #     for webp_src, webp_width, webp_height in webp_srcs
        # )
        return dedent(
            f"""\
            <figure>
            <a href="{url}"
            title="Cliquer pour une version haute résolution">
            <img
            src="{url}"
            width="{width}" height="{height}"
            srcset="{jpg_srcsets}"
            sizes="min(100vw, calc(100vh * {width} / {height}))"
            loading="lazy"
            decoding="async"
            alt="{alt}">
            </a>
            <figcaption>{title}</figcaption>
            </figure>
            """
        )
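
# Illustrative example (editor's sketch): for a 2023 JPEG, `_generate_size`
# derives the resized file name from the original `src`, e.g. (hypothetical
# path) "/david/2023/photo.jpg" -> "/david/2023/photo_660x440.jpg", and
# writes the thumbnail into the static repository if it does not exist yet.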


class H2AnchorsRenderer(ImgsWithSizesRenderer):
    """Custom renderer for H2 titles with anchors."""

    def heading(self, text, level):
        if level == 2:
            slug = slugify(text)
            return (
                f'<h2 id="{slug}">'
                f"{text} "
                f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
                f"</h2>"
            )
        else:
            return super().heading(text, level)
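
# Illustrative example (editor's sketch): the Markdown heading "## Lectures"
# is rendered by H2AnchorsRenderer as:
#   <h2 id="lectures">Lectures <a href="#lectures"
#   title="Ancre vers cette partie">#</a></h2>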


# We want a custom renderer to create a hash/link for each H2 heading.
markdown_with_h2_anchors = mistune.Markdown(
    renderer=H2AnchorsRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# The second Markdown instance is used to generate articles for the feed;
# we do not need anchors in that case.
markdown_with_img_sizes = mistune.Markdown(
    renderer=ImgsWithSizesRenderer(escape=False),
    plugins=[mark, strikethrough],
)

# This is the Jinja2 configuration to locate templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def format_struct_time(value, format="%d-%m-%Y"):
    return time.strftime(format, value)


environment.filters["format_struct_time"] = format_struct_time
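
# Illustrative example (editor's sketch, hypothetical template usage): the
# filter registered above formats a `time.struct_time`, such as the
# `date_parsed` attribute set on feed entries below, e.g. in Jinja2:
#   {{ entry.date_parsed|format_struct_time }}         -> "10-09-2023"
#   {{ entry.date_parsed|format_struct_time("%Y") }}   -> "2023"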


def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = next(iterator)  # Throws StopIteration if empty.
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)
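
# Illustrative example (editor's sketch) of what `neighborhood` yields:
#   list(neighborhood([1, 2, 3]))
#   -> [(None, 1, 2), (1, 2, 3), (2, 3, None)]
#   list(neighborhood([1, 2], first=0, last=9))
#   -> [(0, 1, 2), (1, 2, 9)]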


def each_file_from(source_dir, pattern="*", exclude=None):
    """Walk across the `source_dir` and return the `pattern` file paths."""
    for path in _each_path_from(source_dir, pattern=pattern, exclude=exclude):
        if path.is_file():
            yield path


def each_folder_from(source_dir, exclude=None):
    """Walk across the `source_dir` and return the folder paths."""
    for path in _each_path_from(source_dir, exclude=exclude):
        if path.is_dir():
            yield path


def _each_path_from(source_dir, pattern="*", exclude=None):
    for path in sorted(Path(source_dir).glob(pattern)):
        if exclude is not None and path.name in exclude:
            continue
        yield path


@dataclass
class Page:
    title: str
    content: str
    tags: list
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        try:
            date_str, _ = self.file_path.split(" - ", 1)
        except ValueError:
            # Fallback for 2020 contents (search index).
            suffix = len(".md")
            prefix = len("YYYY/MM-DD") + suffix
            date_str = "2020-" + self.file_path[-prefix:-suffix]
        self.url = f"/david/{date_str.replace('-', '/')}/"
        self.date = datetime.strptime(date_str, "%Y-%m-%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        tag_template = Template(
            f'<a href="{DOMAIN}/david/2023/$tag_slug/">#$tag_name</a>'
        )
        tag_links = " ".join(
            tag_template.substitute(tag_slug=slugify(tag), tag_name=tag)
            for tag in self.tags
        )
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
            + f"<nav><p>{tag_links}</p></nav>"
            + '<hr/><p><a href="mailto:david@larlet.fr">Réagir ?</a></p>'
        )
        # Extract the first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " "),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path, only_published=True, with_h2_anchors=True):
        """Retrieve all pages sorted by date, most recent first."""
        page_list = []
        md = markdown_with_h2_anchors if with_h2_anchors else markdown_with_img_sizes
        for file_path in sorted(each_file_from(source, pattern="*.md")):
            result, state = md.read(file_path)
            result = widont(result, html=True)
            # Extract (and remove) the title from the generated page.
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            tags = {}
            if "<nav><p>" in content:
                # Extract the tags from the generated page.
                content, tags_links = content.split("<nav><p>", 1)
                nav_closing_size = len("</p></nav>\n")
                tags_links = tags_links[:-nav_closing_size]
                try:
                    tags = sorted(
                        {
                            tag.strip().split("#", 1)[1]
                            for tag in tags_links.split("</a>")
                            if tag.strip()
                        },
                        key=lambda tag: slugify(tag),
                    )
                except IndexError:
                    # It happens for old contents, parsed for the search index.
                    pass
            page = Page(title, content, tags, file_path.name)
            pages_by_url[page.url] = page
            if not page.is_draft:
                all_tags.update(tags)
                for tag in tags:
                    if page not in pages_by_tags[tag]:
                        pages_by_tags[tag].append(page)
            if only_published and page.is_draft:
                continue
            page_list.append(page)
        return sorted(page_list, reverse=True)

    @property
    def is_draft(self):
        return (
            datetime(year=self.date.year, month=self.date.month, day=self.date.day)
            > PUBLICATION_BUFFER
        )
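
# Illustrative example (editor's sketch, hypothetical file name): a source
# file named "2023-09-10 - Un titre.md" yields `url` "/david/2023/09/10/"
# and `date` 2023-09-10; the page is considered a draft as long as that
# date is later than PUBLICATION_BUFFER.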


@cli
def pages():
    """Build article pages."""
    root_path = DAVID / "2023"
    for previous, page, next_ in neighborhood(
        reversed(Page.all(source=SOURCES_PATH, only_published=False)),
        first={
            "url": "/david/2022/",
            "title": "Publications 2022",
            "is_draft": False,
        },
    ):
        template = environment.get_template("article_2020.html")
        content = template.render(page=page, prev=previous, next=next_, slugify=slugify)
        target_path = Path(page.url[1:])
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)
        if page.is_draft:
            print(f"Draft: {LOCAL_DOMAIN}{page.url} ({page.title})")

    def group_by_month_year(item):
        return item.date.strftime("%B %Y").title()

    template = environment.get_template("archives_2020.html")
    page_list = reversed(Page.all(source=SOURCES_PATH))
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(
        page_list=groupby(page_list, key=group_by_month_year), tags=tags
    )
    (root_path / "index.html").write_text(content)


@cli
def tags():
    """Build tags pages."""
    # Parse all pages to collect tags.
    Page.all(source=SOURCES_PATH, only_published=True)
    for tag in all_tags:
        template = environment.get_template("tag_2021.html")
        content = template.render(
            page_list=sorted(pages_by_tags[tag], reverse=True),
            tag_name=tag,
        )
        target_path = DAVID / "2023" / slugify(tag)
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)


@cli
def home():
    """Build the home page with last published items."""
    template = environment.get_template("profil.html")
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(page_list=page_list, tags=tags)
    (DAVID / "index.html").write_text(content)


@dataclass
class Website:
    name: str
    url: str
    feed: str

    def __post_init__(self):
        start = perf_counter()
        content = feedparser.parse(self.feed)
        elapsed = perf_counter() - start
        entries = content.get("entries")
        print(f"Fetched {self.feed} in {elapsed:.5f} seconds.")
        print(f"Content size: {len(pickle.dumps(content))}, {len(entries)} entries.")
        date_key_parsed = (
            "published_parsed" if "published_parsed" in entries[0] else "updated_parsed"
        )
        try:
            entries = sorted(entries, key=attrgetter(date_key_parsed), reverse=True)[:2]
        except AttributeError:
            print(f"No `{date_key_parsed}` in", entries)
        for entry in entries:
            entry.website = self
            entry.date_parsed = entry[date_key_parsed]
        self.entries = entries
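
# Illustrative example (editor's sketch, hypothetical feed): instantiating a
# Website fetches its feed immediately and keeps the two most recent entries:
#   website = Website(
#       name="Example", url="https://example.org/",
#       feed="https://example.org/feed.xml",
#   )
#   website.entries  # at most 2 entries, newest first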


@cli
def blogroll():
    """Build the blogroll with last published items from people."""
    template = environment.get_template("blogroll.html")
    website_list = [
        # Invalid feed, date not standard.
        # Website(
        #     name="Alex Sirac",
        #     url="https://alexsirac.com/",
        #     feed="https://alexsirac.com/feed",
        # ),
        Website(
            name="Maïtané Lenoir",
            url="https://www.maiwann.net/",
            feed="https://www.maiwann.net/feed.xml",
        ),
        Website(
            name="Fanny Cheung",
            url="https://ynote.hk/",
            feed="https://ynote.hk/feed.xml",
        ),
        Website(
            name="La Lune Mauve",
            url="https://lalunemauve.fr/",
            feed="https://lalunemauve.fr/feed/",
        ),
        Website(
            name="Eliness",
            url="https://www.hypothermia.fr/",
            feed="https://www.hypothermia.fr/rss",
        ),
        Website(
            name="Luce Carević",
            url="https://luce.carevic.eu/fr",
            feed="https://luce.carevic.eu/fr/flux",
        ),
        Website(
            name="Emma",
            url="https://emmaclit.com/",
            feed="https://emmaclit.com/feed/",
        ),
        Website(
            name="Karl Dubost",
            url="https://www.la-grange.net/",
            feed="https://www.la-grange.net/feed.atom",
        ),
        Website(
            name="Thomas Parisot",
            url="https://thom4.net/",
            feed="https://thom4.net/feed/",
        ),
        Website(
            name="Arthur Perret",
            url="https://www.arthurperret.fr/",
            feed="https://www.arthurperret.fr/feed.xml",
        ),
        Website(
            name="Antoine Fauchié",
            url="https://www.quaternum.net/",
            feed="https://www.quaternum.net/atom.xml",
        ),
        Website(
            name="Éric D.",
            url="https://n.survol.fr/",
            feed="https://n.survol.fr/feed",
        ),
        Website(
            name="Aude",
            url="https://blog.ecologie-politique.eu/",
            feed="https://blog.ecologie-politique.eu/feed/atom",
        ),
        Website(
            name="Llu",
            url="https://bribesdereel.net/",
            feed="https://bribesdereel.net/feed/rss2",
        ),
        Website(
            name="Winnie Lim (en)",
            url="https://winnielim.org/",
            feed="https://winnielim.org/feed/",
        ),
    ]
    entry_list = sorted(
        chain(*[website.entries for website in website_list]),
        key=attrgetter("date_parsed"),
        reverse=True,
    )
    content = template.render(website_list=website_list, entry_list=entry_list)
    (DAVID / "blogroll" / "index.html").write_text(content)


@cli
def toot():
    """Pre-write the Mastodon message."""
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    last_published = page_list[0]
    print(f"✍️ QUOTE? — {last_published.title}, {last_published.full_url}")
    print()
    print("#blog #larletfr #rss")
    print(" ".join([f"#{tag}" for tag in last_published.tags]))


@cli
def search():
    """Build the static search page with custom index."""
    template = environment.get_template("recherche.html")
    page_list_2023 = Page.all(
        source=SOURCES_PATH, only_published=True, with_h2_anchors=False
    )
    page_list_2022 = Page.all(
        source=DAVID / "2022" / "_sources", only_published=True, with_h2_anchors=False
    )
    page_list_2021 = Page.all(
        source=DAVID / "2021" / "sources", only_published=True, with_h2_anchors=False
    )
    page_list_2020 = Page.all(
        source=DAVID / "2020", only_published=True, with_h2_anchors=False
    )
    blog_page_list_2019 = BlogPage.all(source=DAVID / "blog" / "2019")
    blog_page_list_2018 = BlogPage.all(source=DAVID / "blog" / "2018")
    blog_page_list_2017 = BlogPage.all(source=DAVID / "blog" / "2017")
    stream_page_list_2019 = StreamPage.all(source=DAVID / "stream" / "2019")
    stream_page_list_2018 = StreamPage.all(source=DAVID / "stream" / "2018")
    page_list = (
        page_list_2023
        + page_list_2022
        + page_list_2021
        + page_list_2020
        + blog_page_list_2019
        + blog_page_list_2018
        + blog_page_list_2017
        + stream_page_list_2019
        + stream_page_list_2018
    )
    search_index = json.dumps([page.search_data for page in page_list], indent=2)
    content = template.render(search_index=search_index)
    (DAVID / "recherche" / "index.html").write_text(content)


@cli
def feed():
    """Generate a feed from last published items."""
    template = environment.get_template("feed.xml")
    page_list = Page.all(source=SOURCES_PATH, with_h2_anchors=False)
    content = template.render(
        page_list=page_list[:NB_ITEMS_IN_FEED],
        current_dt=TODAY.strftime(NORMALIZED_STRFTIME),
        BASE_URL=f"{DOMAIN}/david/",
    )
    (DAVID / "log" / "index.xml").write_text(content)


@wrap
def perf_wrapper():
    start = perf_counter()
    yield
    elapsed = perf_counter() - start
    print(f"Done in {elapsed:.5f} seconds.")


# Below are legacy blog contents, still useful for search indexation.
@dataclass
class BlogPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y-%m-%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "BlogPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by date, most recent first."""
        page_list = []
        for folder in each_folder_from(source):
            for path in each_file_from(folder, pattern="*.md"):
                metadata, content = path.read_text().split("\n\n", 1)
                if "lang:" in metadata:
                    title, slug, date_, chapo, lang = metadata.split("\n")
                else:
                    title, slug, date_, chapo = metadata.split("\n")
                title = title[len("title: ") :].strip()
                date_str = date_[len("date: ") :].strip()
                content = markdown_with_img_sizes(content)
                page = BlogPage(title, content, path.parent, date_str)
                page_list.append(page)
        return sorted(page_list, reverse=True)


@dataclass
class StreamPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y/%m/%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date.isoformat(),
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "StreamPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by date, most recent first."""
        page_list = []
        for folder in each_folder_from(source):
            for subfolder in each_folder_from(folder):
                for path in each_file_from(subfolder, pattern="*.md"):
                    metadata, content = path.read_text().split("\n\n", 1)
                    if "lang:" in metadata:
                        title, lang = metadata.split("\n")
                    else:
                        title = metadata.strip()
                    title = title[len("title: ") :].strip()
                    date_str = str(path.parent)[-len("YYYY/MM/DD") :]
                    content = markdown_with_img_sizes(content)
                    page = StreamPage(title, content, path.parent, date_str)
                    page_list.append(page)
        return sorted(page_list, reverse=True)


if __name__ == "__main__":
    run()
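
# Illustrative usage (editor's sketch): each function decorated with `@cli`
# is exposed as a minicli sub-command named after the function, e.g.:
#   python3 site.py pages
#   python3 site.py feed
#   python3 site.py blogroll
# The `@wrap` generator above prints the elapsed time around each command.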