Repository with sources and generator of https://larlet.fr/david/

site.py 27KB

#!/usr/bin/env python3
import hashlib
import json
import locale
import time
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from html import escape
from itertools import chain, groupby
from operator import attrgetter
from pathlib import Path
from string import Template
from textwrap import dedent
from time import perf_counter
from urllib.parse import unquote, urlparse

import feedparser
import markdown
import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from jinja2.filters import do_striptags
from minicli import cli, run, wrap
from mistune.plugins.formatting import mark, strikethrough
from mistune.util import safe_entity
from PIL import Image
from slugify import slugify
from typography import typographie
from widont import widont
# Useful for dates rendering within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

VERSION = "2024.01.01"
HERE = Path(".")
DAVID = HERE / "david"
STATIC = HERE / ".." / "larlet-fr-static"
DOMAIN = "https://larlet.fr"
LOCAL_DOMAIN = "http://larlet.test:3579"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
TODAY = datetime.today() + timedelta(hours=6)
PUBLICATION_BUFFER = TODAY - timedelta(days=0)
NB_ITEMS_IN_FEED = 30
SOURCES_PATH = DAVID / "2024" / "_sources"

all_tags = set()
pages_by_tags = defaultdict(list)
pages_by_url = {}
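
# Presumably, the six-hour shift on TODAY lets a build launched late in the
# evening (Paris time) already count as the next day; PUBLICATION_BUFFER is
# then the cutoff date used by `Page.is_draft` below.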

class TagsRenderer(mistune.HTMLRenderer):
    """Make the assumption that each line starting with a `#` is a tag."""

    def paragraph(self, text):
        if text.startswith("#"):
            tags = " ".join(
                f'<a href="/david/2024/{slugify(tag.strip())}/">#{tag.strip()}</a>'
                for tag in text.split("#")
                if tag.strip()
            )
            return f"<nav><p>{tags}</p></nav>\n"
        return super().paragraph(text)
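
# A hedged sketch of this renderer's output: a trailing paragraph such as
# "#Écriture #Idées" should render roughly as (slugs depending on `slugify`):
#   <nav><p><a href="/david/2024/ecriture/">#Écriture</a>
#   <a href="/david/2024/idees/">#Idées</a></p></nav>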

class FrenchTypographyRenderer(mistune.HTMLRenderer):
    """Apply French typographic rules to text."""

    def text(self, text):
        return typographie(super().text(text), html=True)

    def block_html(self, html):
        return typographie(super().block_html(html), html=True)

class CustomLinkAttributesRenderer(mistune.HTMLRenderer):
    """Automatically generate the title for internal links.

    Also, set the domain as a data-attribute for a remote link.
    Also, add an archive link if it exists.
    Also, split that.
    """

    def link(self, text, url, title=None):
        attrs = {}
        attrs["href"] = self.safe_url(url)
        if not title and url.startswith("/david/2024/"):
            # It will not work for internal URLs referencing the future.
            page = pages_by_url.get(url)
            if page:
                title = page.title
        else:
            hostname = urlparse(url).hostname
            if hostname is not None:
                if hostname.startswith("www."):
                    domain = hostname[len("www.") :]
                else:
                    domain = hostname
                attrs["data-link-domain"] = unquote(domain)
        if title:
            attrs["title"] = safe_entity(title)
        attributes = {f'{attr}="{value}"' for attr, value in attrs.items()}
        initial_link = f'<a {" ".join(sorted(attributes))}>{text}</a>'
        archive_link = ""
        hash_url = hashlib.md5(url.encode("utf-8")).hexdigest()
        archive_folder = (
            HERE.resolve().parent
            / "larlet-fr-david-cache"
            / "cache"
            / "2024"
            / hash_url
        ).resolve()
        if archive_folder.exists():
            archive_path_md = archive_folder / "index.md"
            parser = markdown.Markdown(extensions=["meta"])
            content = parser.convert(archive_path_md.read_text())
            metadata = parser.Meta if hasattr(parser, "Meta") else None
            archive_date = metadata.get("archive_date")[0]
            language = metadata.get("language")[0][:2]
            initial_link = (
                f'<a {" ".join(sorted(attributes))} hreflang="{language}">{text}</a>'
            )
            link = f"/david/cache/2024/{hash_url}/"
            title = f"Copie locale au {archive_date}"
            archive_link = f""" <a href="{link}" title="{title}" hreflang="{language}">
                <svg xmlns="http://www.w3.org/2000/svg"
                    width="20" height="20" viewBox="0 0 24 24"
                    fill="none" stroke="currentColor"
                    stroke-width="2" stroke-linecap="square" stroke-linejoin="round"
                ><rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect>
                <path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1">
                </path>
                </svg>
                <span class="sr-only">[archive]</span>
            </a>"""
        return f"{initial_link}{archive_link}"
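
# Archive lookups above key on the MD5 hex digest of the raw URL: the local
# copy of a given link is expected (illustrative layout) at
# ../larlet-fr-david-cache/cache/2024/<md5(url)>/index.md, with
# `archive_date` and `language` read from its markdown metadata headers.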

class CustomAndBlockquoteLanguageRenderer(FrenchTypographyRenderer, TagsRenderer):
    """Sets the English language attribute for blockquotes with `[en]` prefix."""

    def _get_language(self, text):
        if text.startswith("<p>[en] "):
            return "en", text.replace("<p>[en] ", "<p>")
        else:
            return None, text

    def block_quote(self, text):
        language, text = self._get_language(text)
        if language:
            return f'\n<blockquote lang="{language}">\n{text}</blockquote>\n'
        else:
            return f"\n<blockquote>\n{text}</blockquote>\n"
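
# A minimal sketch: a quote marked "> [en] Some quote." should come out as
#   <blockquote lang="en">
#   <p>Some quote.</p></blockquote>
# while unmarked quotes keep the page default (French).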

class ImgsWithSizesRenderer(CustomAndBlockquoteLanguageRenderer):
    """Renders images as <figure>s and adds sizes."""

    def paragraph(self, text):
        # In case of a figure, we do not want the (non-standard) paragraph.
        if text.strip().startswith("<figure>"):
            return text
        return super().paragraph(text)

    def _generate_size(self, src, width, height):
        src_size = src.replace(".jpg", f"_{width}x{height}.jpg")
        full_path = STATIC / Path(src[1:])
        full_path_size = STATIC / Path(src_size[1:])
        if full_path_size.exists() or "/2024/" not in src:
            return src_size
        image = Image.open(full_path)
        image.thumbnail((width, height), resample=Image.LANCZOS)
        image.save(full_path_size, icc_profile=image.info.get("icc_profile"))
        return src_size

    def _generate_webp(self, src):
        src_webp = src.replace(".jpg", ".webp")
        full_path = STATIC / Path(src[1:])
        full_path_webp = STATIC / Path(src_webp[1:])
        if full_path_webp.exists() or "/2024/" not in src:
            return src_webp
        image = Image.open(full_path)
        image.save(
            full_path_webp, format="webp", icc_profile=image.info.get("icc_profile")
        )
        # command = [
        #     "cwebp",
        #     "-q",
        #     "80",
        #     full_path,
        #     "-o",
        #     full_path_webp,
        #     "-metadata",
        #     "icc",
        # ]
        # subprocess.check_output(command, stderr=subprocess.STDOUT)
        return src_webp

    def image(self, alt, url, title=None):
        SIZES = [(660, 440), (990, 660), (1320, 880)]
        full_path = STATIC / Path(url[1:])
        image = Image.open(full_path)
        width, height = image.size
        jpg_srcs = [(url, width, height)]
        # src_webp = self._generate_webp(src)
        # webp_srcs = [(src_webp, width, height)]
        for size_width, size_height in SIZES:
            src_size = self._generate_size(url, size_width, size_height)
            jpg_srcs.append((src_size, size_width, size_height))
            # src_size_webp = self._generate_webp(src_size)
            # webp_srcs.append((src_size_webp, size_width, size_height))
        jpg_srcsets = ", ".join(
            f"{jpg_src} {jpg_width}w" for jpg_src, jpg_width, jpg_height in jpg_srcs
        )
        # webp_srcsets = ", ".join(
        #     f"{webp_src} {webp_width}w"
        #     for webp_src, webp_width, webp_height in webp_srcs
        # )
        return dedent(
            f"""\
            <figure>
              <a href="{url}"
                 title="Cliquer pour une version haute résolution">
                <img
                  src="{url}"
                  width="{width}" height="{height}"
                  srcset="{jpg_srcsets}"
                  sizes="min(100vw, calc(100vh * {width} / {height}))"
                  loading="lazy"
                  decoding="async"
                  alt="{alt}">
              </a>
              <figcaption>{title}</figcaption>
            </figure>
            """
        )
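
# The derived file names are positional: for an (illustrative) source image
# /static/david/2024/example.jpg, the resized variants would be
# example_660x440.jpg, example_990x660.jpg and example_1320x880.jpg, all
# listed in the srcset so the browser can pick the smallest sufficient one.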

class AnchorsRenderer(CustomLinkAttributesRenderer, ImgsWithSizesRenderer):
    """Custom renderer for H2 titles and HR with anchors."""

    # This is fragile to say the least…
    global_counter = 0

    def heading(self, text, level):
        if level == 2:
            slug = slugify(text)
            return (
                f'<h2 id="{slug}">'
                f"{text} "
                f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
                f"</h2>"
            )
        else:
            return super().heading(text, level)

    def thematic_break(self) -> str:
        self.global_counter += 1
        id = f"hr-{self.global_counter}"
        return (
            f'<a href="#{id}" title="Lien vers cette section de la page">'
            f'<hr id="{id}" />'
            "</a>\n"
        )
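
# A hedged example: "## Réseaux" should render (slug depending on `slugify`)
# as
#   <h2 id="reseaux">Réseaux <a href="#reseaux"
#     title="Ancre vers cette partie">#</a></h2>
# and each `---` becomes a self-linked <hr id="hr-N"> whose counter keeps
# increasing across every page of the build, hence the fragility note above.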

# We want a custom renderer to create a hash/link for each H2 heading.
markdown_with_anchors = mistune.Markdown(
    renderer=AnchorsRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# The second markdown instance is used to generate articles for the feed,
# where we do not need anchors.
markdown_with_img_sizes = mistune.Markdown(
    renderer=ImgsWithSizesRenderer(escape=False),
    plugins=[mark, strikethrough],
)

# This is the Jinja2 configuration to locate templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def format_struct_time(value, format="%d-%m-%Y"):
    return time.strftime(format, value)


environment.filters["format_struct_time"] = format_struct_time


def format_date(value, format="%-d %B %Y"):
    return value.strftime(format)


environment.filters["format_date"] = format_date
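
# Both functions are exposed as Jinja2 filters; a hedged example with the
# fr_FR locale set above: {{ page.date|format_date }} should render
# "1 janvier 2024" for a date of 2024-01-01.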

def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = next(iterator)  # Throws StopIteration if empty.
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)
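
# For instance:
#   >>> list(neighborhood("abc"))
#   [(None, 'a', 'b'), ('a', 'b', 'c'), ('b', 'c', None)]
# Passing `first`/`last` fills the None bounds, which is how the prev/next
# links at the edges of the publication list are provided.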

def each_file_from(source_dir, pattern="*", exclude=None):
    """Walk across the `source_dir` and return the `pattern` file paths."""
    for path in _each_path_from(source_dir, pattern=pattern, exclude=exclude):
        if path.is_file():
            yield path


def each_folder_from(source_dir, exclude=None):
    """Walk across the `source_dir` and return the folder paths."""
    for path in _each_path_from(source_dir, exclude=exclude):
        if path.is_dir():
            yield path


def _each_path_from(source_dir, pattern="*", exclude=None):
    for path in sorted(Path(source_dir).glob(pattern)):
        if exclude is not None and path.name in exclude:
            continue
        yield path
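
# A usage sketch: each_file_from(SOURCES_PATH, pattern="*.md") yields the
# markdown source paths in sorted, hence deterministic, order.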

@dataclass
class Page:
    title: str
    content: str
    tags: list
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        try:
            date_str, _ = self.file_path.split(" - ", 1)
        except ValueError:
            # Fallback for 2020 contents (search index).
            suffix = len(".md")
            prefix = len("YYYY/MM-DD") + suffix
            date_str = "2020-" + self.file_path[-prefix:-suffix]
        self.url = f"/david/{date_str.replace('-', '/')}/"
        self.date = datetime.strptime(date_str, "%Y-%m-%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        tag_template = Template(
            f'<a href="{DOMAIN}/david/2024/$tag_slug/">#$tag_name</a>'
        )
        tag_links = " ".join(
            tag_template.substitute(tag_slug=slugify(tag), tag_name=tag)
            for tag in self.tags
        )
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
            + f"<nav><p>{tag_links}</p></nav>"
            + '<hr/><p><a href="mailto:david@larlet.fr">Réagir ?</a></p>'
        )
        # Extract first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " "),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path, only_published=True, with_anchors=True):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        md = markdown_with_anchors if with_anchors else markdown_with_img_sizes
        for file_path in sorted(each_file_from(source, pattern="*.md")):
            result, state = md.read(file_path)
            result = widont(result, html=True)
            # Extract (and remove) the title from the generated page.
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            tags = {}
            if "<nav><p>" in content:
                # Extract the tags from the generated page.
                content, tags_links = content.split("<nav><p>", 1)
                nav_closing_size = len("</p></nav>\n")
                tags_links = tags_links[:-nav_closing_size]
                try:
                    tags = sorted(
                        {
                            tag.strip().split("#", 1)[1]
                            for tag in tags_links.split("</a>")
                            if tag.strip()
                        },
                        key=lambda tag: slugify(tag),
                    )
                except IndexError:
                    # It happens for old contents, parsed for the search index.
                    pass
            try:
                page = Page(title, content, tags, file_path.name)
            except ValueError:
                print(f"{title} has no date(?), see {file_path.name}")
                continue
            pages_by_url[page.url] = page
            if not page.is_draft:
                all_tags.update(tags)
                for tag in tags:
                    if page not in pages_by_tags[tag]:
                        pages_by_tags[tag].append(page)
            if only_published and page.is_draft:
                continue
            page_list.append(page)
        return sorted(page_list, reverse=True)

    @property
    def is_draft(self):
        return (
            datetime(year=self.date.year, month=self.date.month, day=self.date.day)
            > PUBLICATION_BUFFER
        )
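
# Inferred from the parsing above: source files are named with a date prefix
# such as "2024-01-01 - Some title.md"; that date drives both the URL
# (/david/2024/01/01/) and the draft cutoff against PUBLICATION_BUFFER.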

@cli
def pages():
    """Build article pages."""
    root_path = DAVID / "2024"
    for previous, page, next_ in neighborhood(
        reversed(Page.all(source=SOURCES_PATH, only_published=False)),
        first={
            "url": "/david/2023/",
            "title": "Publications 2023",
            "is_draft": False,
        },
    ):
        template = environment.get_template("article_2024.html")
        content = template.render(page=page, prev=previous, next=next_, slugify=slugify)
        target_path = Path(page.url[1:])
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)
        if page.is_draft:
            print(f"Draft: {LOCAL_DOMAIN}{page.url} ({page.title})")

    def group_by_month_year(item):
        return item.date.strftime("%B %Y").title()

    template = environment.get_template("archives_2024.html")
    page_list = reversed(Page.all(source=SOURCES_PATH))
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(
        page_list=groupby(page_list, key=group_by_month_year), tags=tags
    )
    (root_path / "index.html").write_text(content)
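
# Note that itertools.groupby only groups *consecutive* items; this works
# because Page.all() returns pages sorted by date before the month/year
# grouping is applied.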

@cli
def tags():
    """Build tags pages."""
    # Parse all pages to collect tags.
    Page.all(source=SOURCES_PATH, only_published=True)
    for tag in all_tags:
        template = environment.get_template("tag_2024.html")
        content = template.render(
            page_list=sorted(pages_by_tags[tag], reverse=True),
            tag_name=tag,
            slugify=slugify,
        )
        target_path = DAVID / "2024" / slugify(tag)
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)

@cli
def home():
    """Build the home page with last published items."""
    template = environment.get_template("profil_2024.html")
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(page_list=page_list, tags=tags)
    (DAVID / "index.html").write_text(content)

@dataclass
class Website:
    name: str
    url: str
    feed: str

    def __post_init__(self):
        start = perf_counter()
        print(f"Fetching {self.feed}…")
        content = feedparser.parse(self.feed)
        elapsed = perf_counter() - start
        entries = content.get("entries")
        print(f"Fetched {self.feed} in {elapsed:.5f} seconds.")
        print(f"{len(entries)} entries.")
        date_key_parsed = (
            "published_parsed" if "published_parsed" in entries[0] else "updated_parsed"
        )
        try:
            entries = sorted(entries, key=attrgetter(date_key_parsed), reverse=True)[:2]
        except AttributeError:
            print(f"No `{date_key_parsed}` in", entries)
        for entry in entries:
            entry.website = self
            entry.date_parsed = entry[date_key_parsed]
        self.entries = entries
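
# The `published_parsed`/`updated_parsed` values are time.struct_time objects
# provided by feedparser, so they sort chronologically and can be rendered
# with the `format_struct_time` filter defined earlier.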

@cli
def blogroll():
    """Build the blogroll with last published items from people."""
    template = environment.get_template("blogroll.html")
    website_list = [
        # Invalid feed, date not standard.
        # Website(
        #     name="Alex Sirac",
        #     url="https://alexsirac.com/",
        #     feed="https://alexsirac.com/feed",
        # ),
        Website(
            name="Maïtané Lenoir",
            url="https://www.maiwann.net/",
            feed="https://www.maiwann.net/feed.xml",
        ),
        Website(
            name="Fanny Cheung",
            url="https://ynote.hk/",
            feed="https://ynote.hk/feed.xml",
        ),
        Website(
            name="La Lune Mauve",
            url="https://lalunemauve.fr/",
            feed="https://lalunemauve.fr/feed/",
        ),
        Website(
            name="Eliness",
            url="https://www.hypothermia.fr/",
            feed="https://www.hypothermia.fr/rss",
        ),
        Website(
            name="Luce Carević",
            url="https://luce.carevic.eu/fr",
            feed="https://luce.carevic.eu/fr/flux",
        ),
        Website(
            name="Emma",
            url="https://emmaclit.com/",
            feed="https://emmaclit.com/feed/",
        ),
        Website(
            name="Karl Dubost",
            url="https://www.la-grange.net/",
            feed="https://www.la-grange.net/feed.atom",
        ),
        Website(
            name="Thomas Parisot",
            url="https://thom4.net/",
            feed="https://thom4.net/feed/",
        ),
        Website(
            name="Arthur Perret",
            url="https://www.arthurperret.fr/",
            feed="https://www.arthurperret.fr/feed.xml",
        ),
        Website(
            name="Antoine Fauchié",
            url="https://www.quaternum.net/",
            feed="https://www.quaternum.net/atom.xml",
        ),
        Website(
            name="Éric D.",
            url="https://n.survol.fr/",
            feed="https://n.survol.fr/feed",
        ),
        Website(
            name="Aude",
            url="https://blog.ecologie-politique.eu/",
            feed="https://blog.ecologie-politique.eu/feed/atom",
        ),
        # Site under maintenance.
        # Website(
        #     name="Llu",
        #     url="https://bribesdereel.net/",
        #     feed="https://bribesdereel.net/feed/rss2",
        # ),
        Website(
            name="Winnie Lim (en)",
            url="https://winnielim.org/",
            feed="https://winnielim.org/feed/",
        ),
        Website(
            name="brr (en)",
            url="https://brr.fyi/",
            feed="https://brr.fyi/feed.xml",
        ),
    ]
    entry_list = sorted(
        chain(*[website.entries for website in website_list]),
        key=attrgetter("date_parsed"),
        reverse=True,
    )
    content = template.render(website_list=website_list, entry_list=entry_list)
    (DAVID / "blogroll" / "index.html").write_text(content)

@cli
def toot():
    """Pre-write the Mastodon message."""
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    last_published = page_list[0]
    print(f"✍️ QUOTE? — {last_published.title}, {last_published.full_url}")
    print()
    print("#blog #larletfr #rss")
    print(" ".join([f"#{tag}" for tag in last_published.tags]))

@cli
def search():
    """Build the static search page with custom index."""
    template = environment.get_template("recherche_2024.html")
    page_list_2024 = Page.all(
        source=SOURCES_PATH, only_published=True, with_anchors=False
    )
    page_list_2023 = Page.all(
        # Assumed location for the 2023 sources, mirroring the 2022 layout.
        source=DAVID / "2023" / "_sources", only_published=True, with_anchors=False
    )
    page_list_2022 = Page.all(
        source=DAVID / "2022" / "_sources", only_published=True, with_anchors=False
    )
    page_list_2021 = Page.all(
        source=DAVID / "2021" / "sources", only_published=True, with_anchors=False
    )
    page_list_2020 = Page.all(
        source=DAVID / "2020", only_published=True, with_anchors=False
    )
    blog_page_list_2019 = BlogPage.all(source=DAVID / "blog" / "2019")
    blog_page_list_2018 = BlogPage.all(source=DAVID / "blog" / "2018")
    blog_page_list_2017 = BlogPage.all(source=DAVID / "blog" / "2017")
    stream_page_list_2019 = StreamPage.all(source=DAVID / "stream" / "2019")
    stream_page_list_2018 = StreamPage.all(source=DAVID / "stream" / "2018")
    page_list = (
        page_list_2024
        + page_list_2023
        + page_list_2022
        + page_list_2021
        + page_list_2020
        + blog_page_list_2019
        + blog_page_list_2018
        + blog_page_list_2017
        + stream_page_list_2019
        + stream_page_list_2018
    )
    search_index = json.dumps([page.search_data for page in page_list], indent=2)
    content = template.render(search_index=search_index)
    (DAVID / "recherche" / "index.html").write_text(content)

@cli
def feed():
    """Generate a feed from last published items."""
    template = environment.get_template("feed.xml")
    page_list = Page.all(source=SOURCES_PATH, with_anchors=False)
    content = template.render(
        page_list=page_list[:NB_ITEMS_IN_FEED],
        current_dt=TODAY.strftime(NORMALIZED_STRFTIME),
        BASE_URL=f"{DOMAIN}/david/",
    )
    (DAVID / "log" / "index.xml").write_text(content)

@wrap
def perf_wrapper():
    start = perf_counter()
    yield
    elapsed = perf_counter() - start
    print(f"Done in {elapsed:.5f} seconds.")
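
# With minicli, a @wrap generator runs around each command invocation:
# everything before the `yield` executes first, the rest afterwards, so every
# command above gets timed.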

# Below are legacy blog contents, still useful for search indexation.
@dataclass
class BlogPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y-%m-%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "BlogPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        for folder in each_folder_from(source):
            for path in each_file_from(folder, pattern="*.md"):
                metadata, content = path.read_text().split("\n\n", 1)
                if "lang:" in metadata:
                    title, slug, date_, chapo, lang = metadata.split("\n")
                else:
                    title, slug, date_, chapo = metadata.split("\n")
                title = title[len("title: ") :].strip()
                date_str = date_[len("date: ") :].strip()
                content = markdown_with_img_sizes(content)
                page = BlogPage(title, content, path.parent, date_str)
                page_list.append(page)
        return sorted(page_list, reverse=True)
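
# Inferred from the parsing above: legacy blog sources start with a header
# block like "title: …", "slug: …", "date: YYYY-MM-DD", "chapo: …" (plus an
# optional "lang:" line), separated from the body by a blank line.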

@dataclass
class StreamPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y/%m/%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date.isoformat(),
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "StreamPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        for folder in each_folder_from(source):
            for subfolder in each_folder_from(folder):
                for path in each_file_from(subfolder, pattern="*.md"):
                    metadata, content = path.read_text().split("\n\n", 1)
                    if "lang:" in metadata:
                        title, lang = metadata.split("\n")
                    else:
                        title = metadata.strip()
                    title = title[len("title: ") :].strip()
                    date_str = str(path.parent)[-len("YYYY/MM/DD") :]
                    content = markdown_with_img_sizes(content)
                    page = StreamPage(title, content, path.parent, date_str)
                    page_list.append(page)
        return sorted(page_list, reverse=True)
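
# Here the publication date is not stored in the file at all: it is recovered
# from the stream/YYYY/MM/DD/ folder layout, hence the "%Y/%m/%d" parsing
# above.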

if __name__ == "__main__":
    run()