Repository with sources and generator of https://larlet.fr/david/ https://larlet.fr/david/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

site.py 23KB

1 year ago
1 year ago
1 year ago
2 years ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
3 years ago
1 year ago
3 years ago
1 year ago
1 year ago
1 year ago
2 years ago
2 years ago
2 years ago
2 years ago
1 year ago
1 year ago
1 year ago
1 year ago
1 year ago
2 years ago
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699
  1. #!/usr/bin/env python3
  2. import json
  3. import locale
  4. from collections import defaultdict
  5. from dataclasses import dataclass
  6. from datetime import datetime, timedelta
  7. from html import escape
  8. from itertools import chain, groupby
  9. from operator import attrgetter
  10. from pathlib import Path
  11. from string import Template
  12. from textwrap import dedent
  13. from time import perf_counter
  14. import feedparser
  15. import mistune
  16. from jinja2 import Environment as Env
  17. from jinja2 import FileSystemLoader
  18. from jinja2.filters import do_striptags
  19. from minicli import cli, run, wrap
  20. from mistune.plugins.formatting import mark, strikethrough
  21. from mistune.util import safe_entity
  22. from PIL import Image
  23. from slugify import slugify
  24. from typography import typographie
  25. from widont import widont
# Useful for dates rendering within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

# Source tree layout: the generator runs from the repository root.
HERE = Path(".")
DAVID = HERE / "david"
# Sibling repository holding static assets (images).
STATIC = HERE / ".." / "larlet-fr-static"
DOMAIN = "https://larlet.fr"
LOCAL_DOMAIN = "http://larlet.test:3579"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
# Shifted by 6 hours so pages flip to "published" around publication time.
TODAY = datetime.today() + timedelta(hours=6)
PUBLICATION_BUFFER = TODAY - timedelta(days=0)
NB_ITEMS_IN_FEED = 30
SOURCES_PATH = DAVID / "2023" / "_sources"

# Module-level registries, filled as a side effect of `Page.all()`:
# every known tag, pages grouped by tag, and pages indexed by URL.
all_tags = set()
pages_by_tags = defaultdict(list)
pages_by_url = {}
  42. class TagsRenderer(mistune.HTMLRenderer):
  43. """Make the asumption each line starting with a `#` is a tag."""
  44. def paragraph(self, text):
  45. if text.startswith("#"):
  46. tags = " ".join(
  47. f'<a href="/david/2023/{slugify(tag.strip())}/">#{tag.strip()}</a>'
  48. for tag in text.split("#")
  49. if tag.strip()
  50. )
  51. return f"<nav><p>{tags}</p></nav>\n"
  52. return super().paragraph(text)
  53. class FrenchTypographyRenderer(mistune.HTMLRenderer):
  54. """Apply French typographic rules to text."""
  55. def text(self, text):
  56. return typographie(super().text(text), html=True)
  57. def block_html(self, html):
  58. return typographie(super().block_html(html), html=True)
  59. class InternalLinkTitleRenderer(mistune.HTMLRenderer):
  60. """Automatically generate the title for internal links."""
  61. def link(self, text, url, title=None):
  62. s = '<a href="' + self.safe_url(url) + '"'
  63. if not title and url.startswith("/david/2023/"):
  64. # It will not work for internal urls referencing the future.
  65. page = pages_by_url.get(url)
  66. if page:
  67. title = page.title
  68. if title:
  69. s += ' title="' + safe_entity(title) + '"'
  70. return s + ">" + text + "</a>"
  71. class CustomAndBlockquoteLanguageRenderer(
  72. FrenchTypographyRenderer, InternalLinkTitleRenderer, TagsRenderer
  73. ):
  74. """Sets the English language attribute for blockquotes with `[en]` prefix."""
  75. def _get_language(self, text):
  76. if text.startswith("<p>[en] "):
  77. return "en", text.replace("<p>[en] ", "<p>")
  78. else:
  79. return None, text
  80. def block_quote(self, text):
  81. language, text = self._get_language(text)
  82. if language:
  83. return f'\n<blockquote lang="{language}">\n{text}</blockquote>\n'
  84. else:
  85. return f"\n<blockquote>\n{text}</blockquote>\n"
  86. class ImgsWithSizesRenderer(CustomAndBlockquoteLanguageRenderer):
  87. """Renders images as <figure>s and add sizes."""
  88. def paragraph(self, text):
  89. # In case of a figure, we do not want the (non-standard) paragraph.
  90. if text.strip().startswith("<figure>"):
  91. return text
  92. return super().paragraph(text)
  93. def _generate_size(self, src, width, height):
  94. src_size = src.replace(".jpg", f"_{width}x{height}.jpg")
  95. full_path = STATIC / Path(src[1:])
  96. full_path_size = STATIC / Path(src_size[1:])
  97. if full_path_size.exists() or "/2023/" not in src:
  98. return src_size
  99. image = Image.open(full_path)
  100. image.thumbnail((width, height), resample=Image.LANCZOS)
  101. image.save(full_path_size, icc_profile=image.info.get("icc_profile"))
  102. return src_size
  103. def _generate_webp(self, src):
  104. src_webp = src.replace(".jpg", ".webp")
  105. full_path = STATIC / Path(src[1:])
  106. full_path_webp = STATIC / Path(src_webp[1:])
  107. if full_path_webp.exists() or "/2023/" not in src:
  108. return src_webp
  109. image = Image.open(full_path)
  110. image.save(
  111. full_path_webp, format="webp", icc_profile=image.info.get("icc_profile")
  112. )
  113. # command = [
  114. # "cwebp",
  115. # "-q",
  116. # "80",
  117. # full_path,
  118. # "-o",
  119. # full_path_webp,
  120. # "-metadata",
  121. # "icc",
  122. # ]
  123. # subprocess.check_output(command, stderr=subprocess.STDOUT)
  124. return src_webp
  125. def image(self, alt, url, title=None):
  126. SIZES = [(660, 440), (990, 660), (1320, 880)]
  127. full_path = STATIC / Path(url[1:])
  128. image = Image.open(full_path)
  129. width, height = image.size
  130. jpg_srcs = [(url, width, height)]
  131. # src_webp = self._generate_webp(src)
  132. # webp_srcs = [(src_webp, width, height)]
  133. for size_width, size_height in SIZES:
  134. src_size = self._generate_size(url, size_width, size_height)
  135. jpg_srcs.append((src_size, size_width, size_height))
  136. # src_size_webp = self._generate_webp(src_size)
  137. # webp_srcs.append((src_size_webp, size_width, size_height))
  138. jpg_srcsets = ", ".join(
  139. f"{jpg_src} {jpg_width}w" for jpg_src, jpg_width, jpg_height in jpg_srcs
  140. )
  141. # webp_srcsets = ", ".join(
  142. # f"{webp_src} {webp_width}w"
  143. # for webp_src, webp_width, webp_height in webp_srcs
  144. # )
  145. return dedent(
  146. f"""\
  147. <figure>
  148. <a href="{url}"
  149. title="Cliquer pour une version haute résolution">
  150. <img
  151. src="{url}"
  152. width="{width}" height="{height}"
  153. srcset="{jpg_srcsets}"
  154. sizes="min(100vw, calc(100vh * {width} / {height}))"
  155. loading="lazy"
  156. decoding="async"
  157. alt="{alt}">
  158. </a>
  159. <figcaption>{title}</figcaption>
  160. </figure>
  161. """
  162. )
  163. class H2AnchorsRenderer(ImgsWithSizesRenderer):
  164. """Custom renderer for H2 titles with anchors."""
  165. def heading(self, text, level):
  166. if level == 2:
  167. slug = slugify(text)
  168. return (
  169. f'<h2 id="{slug}">'
  170. f"{text} "
  171. f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
  172. f"</h2>"
  173. )
  174. else:
  175. return super().heading(text, level)
# We want a custom renderer to create a hash/link for each H2 heading.
markdown_with_h2_anchors = mistune.Markdown(
    renderer=H2AnchorsRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# The second markdown is pertinent to generate articles for the feed,
# we do not need anchors in that case.
markdown_with_img_sizes = mistune.Markdown(
    renderer=ImgsWithSizesRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# This is the jinja2 configuration to locate templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))
  189. def neighborhood(iterable, first=None, last=None):
  190. """
  191. Yield the (previous, current, next) items given an iterable.
  192. You can specify a `first` and/or `last` item for bounds.
  193. """
  194. iterator = iter(iterable)
  195. previous = first
  196. current = next(iterator) # Throws StopIteration if empty.
  197. for next_ in iterator:
  198. yield (previous, current, next_)
  199. previous = current
  200. current = next_
  201. yield (previous, current, last)
  202. def each_file_from(source_dir, pattern="*", exclude=None):
  203. """Walk across the `source_dir` and return the `pattern` file paths."""
  204. for path in _each_path_from(source_dir, pattern=pattern, exclude=exclude):
  205. if path.is_file():
  206. yield path
  207. def each_folder_from(source_dir, exclude=None):
  208. """Walk across the `source_dir` and return the folder paths."""
  209. for path in _each_path_from(source_dir, exclude=exclude):
  210. if path.is_dir():
  211. yield path
  212. def _each_path_from(source_dir, pattern="*", exclude=None):
  213. for path in sorted(Path(source_dir).glob(pattern)):
  214. if exclude is not None and path.name in exclude:
  215. continue
  216. yield path
@dataclass
class Page:
    # Title extracted from the first rendered `<h1>`.
    title: str
    # Rendered HTML content (title removed).
    content: str
    # Tag names collected from the trailing `<nav><p>` block.
    tags: list
    # Source file name, e.g. `2023-01-01 - Some title.md`.
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        """Derive URL, dates, feed content and search data from the fields."""
        try:
            date_str, _ = self.file_path.split(" - ", 1)
        except ValueError:
            # Fallback for 2020 contents (search index)
            suffix = len(".md")
            prefix = len("YYYY/MM-DD") + suffix
            date_str = "2020-" + self.file_path[-prefix:-suffix]
        self.url = f"/david/{date_str.replace('-', '/')}/"
        self.date = datetime.strptime(date_str, "%Y-%m-%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        tag_template = Template(
            f'<a href="{DOMAIN}/david/2023/$tag_slug/">#$tag_name</a>'
        )
        tag_links = " ".join(
            tag_template.substitute(tag_slug=slugify(tag), tag_name=tag)
            for tag in self.tags
        )
        # Feed-ready content: absolute URLs, appended tag links + reply link.
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
            + f"<nav><p>{tag_links}</p></nav>"
            + '<hr/><p><a href="mailto:david@larlet.fr">Réagir ?</a></p>'
        )
        # Extract first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " "),
        }

    def __eq__(self, other):
        # Identity is the URL; NOTE(review): no matching `__hash__`, so
        # instances are unhashable (dict/set membership relies on lists).
        return self.url == other.url

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path, only_published=True, with_h2_anchors=True):
        """Retrieve all pages sorted by desc.

        Side effects: fills the module-level `pages_by_url`, `all_tags`
        and `pages_by_tags` registries while parsing.
        """
        page_list = []
        md = markdown_with_h2_anchors if with_h2_anchors else markdown_with_img_sizes
        for file_path in sorted(each_file_from(source, pattern="*.md")):
            result, state = md.read(file_path)
            result = widont(result, html=True)
            # Extract (and remove) the title from the generated page.
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            tags = {}
            if "<nav><p>" in content:
                # Extract the tags from the generated page.
                content, tags_links = content.split("<nav><p>", 1)
                nav_closing_size = len("</p></nav>\n")
                tags_links = tags_links[:-nav_closing_size]
                try:
                    tags = sorted(
                        {
                            tag.strip().split("#", 1)[1]
                            for tag in tags_links.split("</a>")
                            if tag.strip()
                        },
                        key=lambda tag: slugify(tag),
                    )
                except IndexError:
                    # It happens for old contents, parsed for the search index.
                    pass
            page = Page(title, content, tags, file_path.name)
            # Register even drafts by URL so internal links resolve titles.
            pages_by_url[page.url] = page
            if not page.is_draft:
                all_tags.update(tags)
                for tag in tags:
                    if page not in pages_by_tags[tag]:
                        pages_by_tags[tag].append(page)
            if only_published and page.is_draft:
                continue
            page_list.append(page)
        return sorted(page_list, reverse=True)

    @property
    def is_draft(self):
        # A page stays a draft until its publication datetime passes the
        # buffer (see TODAY/PUBLICATION_BUFFER at module level).
        return (
            datetime(year=self.date.year, month=self.date.month, day=self.date.day)
            > PUBLICATION_BUFFER
        )
  315. @cli
  316. def pages():
  317. """Build article pages."""
  318. root_path = DAVID / "2023"
  319. for previous, page, next_ in neighborhood(
  320. reversed(Page.all(source=SOURCES_PATH, only_published=False)),
  321. first={
  322. "url": "/david/2022/",
  323. "title": "Publications 2022",
  324. "is_draft": False,
  325. },
  326. ):
  327. template = environment.get_template("article_2020.html")
  328. content = template.render(page=page, prev=previous, next=next_, slugify=slugify)
  329. target_path = Path(page.url[1:])
  330. target_path.mkdir(parents=True, exist_ok=True)
  331. open(target_path / "index.html", "w").write(content)
  332. if page.is_draft:
  333. print(f"Draft: {LOCAL_DOMAIN}{page.url} ({page.title})")
  334. def group_by_month_year(item):
  335. return item.date.strftime("%B %Y").title()
  336. template = environment.get_template("archives_2020.html")
  337. page_list = reversed(Page.all(source=SOURCES_PATH))
  338. tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
  339. content = template.render(
  340. page_list=groupby(page_list, key=group_by_month_year), tags=tags
  341. )
  342. open(root_path / "index.html", "w").write(content)
  343. @cli
  344. def tags():
  345. """Build tags pages."""
  346. # Parse all pages to collect tags.
  347. Page.all(source=SOURCES_PATH, only_published=True)
  348. for tag in all_tags:
  349. template = environment.get_template("tag_2021.html")
  350. content = template.render(
  351. page_list=sorted(pages_by_tags[tag], reverse=True),
  352. tag_name=tag,
  353. )
  354. target_path = DAVID / "2023" / slugify(tag)
  355. target_path.mkdir(parents=True, exist_ok=True)
  356. open(target_path / "index.html", "w").write(content)
  357. @cli
  358. def home():
  359. """Build the home page with last published items."""
  360. template = environment.get_template("profil.html")
  361. page_list = Page.all(source=SOURCES_PATH, only_published=True)
  362. tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
  363. content = template.render(page_list=page_list, tags=tags)
  364. open(DAVID / "index.html", "w").write(content)
  365. @dataclass
  366. class Website:
  367. name: str
  368. url: str
  369. feed: str
  370. def __post_init__(self):
  371. content = feedparser.parse(self.feed)
  372. entries = content.get("entries")
  373. try:
  374. entries = sorted(entries, key=attrgetter("updated_parsed"), reverse=True)[
  375. :2
  376. ]
  377. except AttributeError:
  378. print("No `updated_parsed` in", entries)
  379. for entry in entries:
  380. entry.website = self
  381. self.entries = entries
  382. @cli
  383. def blogroll():
  384. """Build the blogroll with last published items from people."""
  385. template = environment.get_template("blogroll.html")
  386. website_list = [
  387. # Invalid feed, date not standard.
  388. # Website(
  389. # name="Alex Sirac",
  390. # url="https://alexsirac.com/",
  391. # feed="https://alexsirac.com/feed",
  392. # ),
  393. Website(
  394. name="Maïtané Lenoir",
  395. url="https://www.maiwann.net/",
  396. feed="https://www.maiwann.net/feed.xml",
  397. ),
  398. Website(
  399. name="Fanny Cheung",
  400. url="https://ynote.hk/",
  401. feed="https://ynote.hk/feed.xml",
  402. ),
  403. Website(
  404. name="La Lune Mauve",
  405. url="https://lalunemauve.fr/",
  406. feed="https://lalunemauve.fr/feed/",
  407. ),
  408. Website(
  409. name="Eliness",
  410. url="https://www.hypothermia.fr/",
  411. feed="https://www.hypothermia.fr/rss",
  412. ),
  413. Website(
  414. name="Luce Carević",
  415. url="https://luce.carevic.eu/fr",
  416. feed="https://luce.carevic.eu/fr/flux",
  417. ),
  418. Website(
  419. name="Emma",
  420. url="https://emmaclit.com/",
  421. feed="https://emmaclit.com/feed/",
  422. ),
  423. Website(
  424. name="Karl Dubost",
  425. url="https://www.la-grange.net/",
  426. feed="https://www.la-grange.net/feed.atom",
  427. ),
  428. Website(
  429. name="Thomas Parisot",
  430. url="https://thom4.net/",
  431. feed="https://thom4.net/feed/",
  432. ),
  433. Website(
  434. name="Arthur Perret",
  435. url="https://www.arthurperret.fr/",
  436. feed="https://www.arthurperret.fr/feed.xml",
  437. ),
  438. Website(
  439. name="Antoine Fauchié",
  440. url="https://www.quaternum.net/",
  441. feed="https://www.quaternum.net/atom.xml",
  442. ),
  443. Website(
  444. name="Éric D.",
  445. url="https://n.survol.fr/",
  446. feed="https://n.survol.fr/feed",
  447. ),
  448. Website(
  449. name="Winnie Lim (en)",
  450. url="https://winnielim.org/",
  451. feed="https://winnielim.org/feed/",
  452. ),
  453. ]
  454. entry_list = sorted(
  455. list(chain(*[website.entries for website in website_list])),
  456. key=attrgetter("updated_parsed"),
  457. reverse=True,
  458. )
  459. content = template.render(website_list=website_list, entry_list=entry_list)
  460. open(DAVID / "blogroll" / "index.html", "w").write(content)
  461. @cli
  462. def toot():
  463. """Pre-write the Mastodon message."""
  464. page_list = Page.all(source=SOURCES_PATH, only_published=True)
  465. last_published = page_list[0]
  466. print(f"✍️ QUOTE? — {last_published.title}, {last_published.full_url}")
  467. print()
  468. print("#blog #larletfr #rss")
  469. print(" ".join([f"#{tag}" for tag in last_published.tags]))
  470. @cli
  471. def search():
  472. """Build the static search page with custom index."""
  473. template = environment.get_template("recherche.html")
  474. page_list_2023 = Page.all(
  475. source=SOURCES_PATH, only_published=True, with_h2_anchors=False
  476. )
  477. page_list_2022 = Page.all(
  478. source=DAVID / "2022" / "_sources", only_published=True, with_h2_anchors=False
  479. )
  480. page_list_2021 = Page.all(
  481. source=DAVID / "2021" / "sources", only_published=True, with_h2_anchors=False
  482. )
  483. page_list_2020 = Page.all(
  484. source=DAVID / "2020", only_published=True, with_h2_anchors=False
  485. )
  486. blog_page_list_2019 = BlogPage.all(source=DAVID / "blog" / "2019")
  487. blog_page_list_2018 = BlogPage.all(source=DAVID / "blog" / "2018")
  488. blog_page_list_2017 = BlogPage.all(source=DAVID / "blog" / "2017")
  489. stream_page_list_2019 = StreamPage.all(source=DAVID / "stream" / "2019")
  490. stream_page_list_2018 = StreamPage.all(source=DAVID / "stream" / "2018")
  491. page_list = (
  492. page_list_2023
  493. + page_list_2022
  494. + page_list_2021
  495. + page_list_2020
  496. + blog_page_list_2019
  497. + blog_page_list_2018
  498. + blog_page_list_2017
  499. + stream_page_list_2019
  500. + stream_page_list_2018
  501. )
  502. search_index = json.dumps([page.search_data for page in page_list], indent=2)
  503. content = template.render(search_index=search_index)
  504. open(DAVID / "recherche" / "index.html", "w").write(content)
  505. @cli
  506. def feed():
  507. """Generate a feed from last published items."""
  508. template = environment.get_template("feed.xml")
  509. page_list = Page.all(source=SOURCES_PATH, with_h2_anchors=False)
  510. content = template.render(
  511. page_list=page_list[:NB_ITEMS_IN_FEED],
  512. current_dt=TODAY.strftime(NORMALIZED_STRFTIME),
  513. BASE_URL=f"{DOMAIN}/david/",
  514. )
  515. open(DAVID / "log" / "index.xml", "w").write(content)
  516. @wrap
  517. def perf_wrapper():
  518. start = perf_counter()
  519. yield
  520. elapsed = perf_counter() - start
  521. print(f"Done in {elapsed:.5f} seconds.")
# Below are legacy blog contents, still useful for search indexation.
@dataclass
class BlogPage:
    title: str
    content: str
    # Folder containing the source file; doubles as the URL path.
    file_path: str
    # ISO-like date, e.g. `2019-01-01`.
    date_str: str

    def __post_init__(self):
        """Compute URL, date and the search-index payload."""
        self.date = datetime.strptime(self.date_str, "%Y-%m-%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "BlogPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by desc."""
        page_list = []
        for folder in each_folder_from(source):
            for path in each_file_from(folder, pattern="*.md"):
                metadata, content = path.read_text().split("\n\n", 1)
                # Metadata lines are positional: title, slug, date, chapo[, lang].
                if "lang:" in metadata:
                    title, slug, date_, chapo, lang = metadata.split("\n")
                else:
                    title, slug, date_, chapo = metadata.split("\n")
                title = title[len("title: ") :].strip()
                date_str = date_[len("date: ") :].strip()
                content = markdown_with_img_sizes(content)
                page = BlogPage(title, content, path.parent, date_str)
                page_list.append(page)
        return sorted(page_list, reverse=True)
@dataclass
class StreamPage:
    title: str
    content: str
    # Folder containing the source file; doubles as the URL path.
    file_path: str
    # Slash-separated date, e.g. `2019/01/01` (unlike BlogPage's dashes).
    date_str: str

    def __post_init__(self):
        """Compute URL, date and the search-index payload."""
        self.date = datetime.strptime(self.date_str, "%Y/%m/%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date.isoformat(),
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "StreamPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by desc."""
        page_list = []
        for folder in each_folder_from(source):
            for subfolder in each_folder_from(folder):
                for path in each_file_from(subfolder, pattern="*.md"):
                    metadata, content = path.read_text().split("\n\n", 1)
                    # Metadata is the title line, optionally followed by lang.
                    if "lang:" in metadata:
                        title, lang = metadata.split("\n")
                    else:
                        title = metadata.strip()
                    title = title[len("title: ") :].strip()
                    # The date is encoded in the folder structure: YYYY/MM/DD.
                    date_str = str(path.parent)[-len("YYYY/MM/DD") :]
                    content = markdown_with_img_sizes(content)
                    page = StreamPage(title, content, path.parent, date_str)
                    page_list.append(page)
        return sorted(page_list, reverse=True)
if __name__ == "__main__":
    # minicli dispatches to the @cli-decorated commands above.
    run()