Repository with sources and generator of https://larlet.fr/david/

site.py

#!/usr/bin/env python3
import hashlib
import json
import locale
import time
from collections import defaultdict
from dataclasses import dataclass
from datetime import datetime, timedelta
from html import escape
from itertools import chain, groupby
from operator import attrgetter
from pathlib import Path
from string import Template
from textwrap import dedent
from time import perf_counter
from urllib.parse import urlparse

import feedparser
import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from jinja2.filters import do_striptags
from minicli import cli, run, wrap
from mistune.plugins.formatting import mark, strikethrough
from mistune.util import safe_entity
from PIL import Image
from slugify import slugify
from typography import typographie
from widont import widont

# Useful for rendering dates within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

VERSION = "2024.01.01"
HERE = Path(".")
DAVID = HERE / "david"
STATIC = HERE / ".." / "larlet-fr-static"
DOMAIN = "https://larlet.fr"
LOCAL_DOMAIN = "http://larlet.test:3579"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
TODAY = datetime.today() + timedelta(hours=6)
PUBLICATION_BUFFER = TODAY - timedelta(days=0)
NB_ITEMS_IN_FEED = 30
SOURCES_PATH = DAVID / "2024" / "_sources"

all_tags = set()
pages_by_tags = defaultdict(list)
pages_by_url = {}


class TagsRenderer(mistune.HTMLRenderer):
    """Make the assumption each line starting with a `#` is a tag."""

    def paragraph(self, text):
        if text.startswith("#"):
            tags = " ".join(
                f'<a href="/david/2024/{slugify(tag.strip())}/">#{tag.strip()}</a>'
                for tag in text.split("#")
                if tag.strip()
            )
            return f"<nav><p>{tags}</p></nav>\n"
        return super().paragraph(text)
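
# Usage sketch for TagsRenderer (the standalone `md` instance below is
# illustrative only; the real instances are built further down):
#
#     md = mistune.Markdown(renderer=TagsRenderer(escape=False))
#     md("#blog #rss")
#     # → '<nav><p><a href="/david/2024/blog/">#blog</a> '
#     #   '<a href="/david/2024/rss/">#rss</a></p></nav>\n'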


class FrenchTypographyRenderer(mistune.HTMLRenderer):
    """Apply French typographic rules to text."""

    def text(self, text):
        return typographie(super().text(text), html=True)

    def block_html(self, html):
        return typographie(super().block_html(html), html=True)


class CustomLinkAttributesRenderer(mistune.HTMLRenderer):
    """Automatically generate the title for internal links.

    Also, set the domain as a data-attribute for a remote link.
    Also, add an archive link if it exists.
    Also, split that.
    """

    def link(self, text, url, title=None):
        attrs = {}
        attrs["href"] = self.safe_url(url)
        if not title and url.startswith("/david/2024/"):
            # It will not work for internal urls referencing the future.
            page = pages_by_url.get(url)
            if page:
                title = page.title
        else:
            hostname = urlparse(url).hostname
            if hostname is not None:
                if hostname.startswith("www."):
                    domain = hostname[len("www.") :]
                else:
                    domain = hostname
                attrs["data-link-domain"] = domain
        if title:
            attrs["title"] = safe_entity(title)
        attributes = {f'{attr}="{value}"' for attr, value in attrs.items()}
        initial_link = f'<a {" ".join(sorted(attributes))}>{text}</a>'
        archive_link = ""
        hash_url = hashlib.md5(url.encode("utf-8")).hexdigest()
        archive_folder = (
            HERE.resolve().parent
            / "larlet-fr-david-cache"
            / "cache"
            / "2024"
            / hash_url
        ).resolve()
        if archive_folder.exists():
            archive_path_md = archive_folder / "index.md"
            _, content = archive_path_md.read_text().split("archive_date:", 1)
            archive_date = content.split("\n", 1)[0].strip()
            link = f"/david/cache/2024/{hash_url}/"
            title = f"Copie locale au {archive_date}"
            archive_link = f' <a href="{link}" title="{title}">[archive]</a>'
        return f"{initial_link}{archive_link}"
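
# Sketch of the markup `link` produces (URLs illustrative; the archive
# suffix only appears when a cached copy exists in larlet-fr-david-cache):
#
#     [texte](https://www.example.com/page)
#     # → <a data-link-domain="example.com"
#     #      href="https://www.example.com/page">texte</a>
#     [texte](/david/2024/exemple/)
#     # → <a href="/david/2024/exemple/" title="(title of the linked page)">texte</a>
#     #   followed by ` <a href="/david/cache/2024/<md5>/" …>[archive]</a>`
#     #   when an archive exists.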


class CustomAndBlockquoteLanguageRenderer(FrenchTypographyRenderer, TagsRenderer):
    """Sets the English language attribute for blockquotes with `[en]` prefix."""

    def _get_language(self, text):
        if text.startswith("<p>[en] "):
            return "en", text.replace("<p>[en] ", "<p>")
        else:
            return None, text

    def block_quote(self, text):
        language, text = self._get_language(text)
        if language:
            return f'\n<blockquote lang="{language}">\n{text}</blockquote>\n'
        else:
            return f"\n<blockquote>\n{text}</blockquote>\n"
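
# The `[en]` convention in action, output shown approximately (the French
# typographic fixes from FrenchTypographyRenderer also apply):
#
#     > [en] Some quoted text.
#     # → <blockquote lang="en">
#     #   <p>Some quoted text.</p>
#     #   </blockquote>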


class ImgsWithSizesRenderer(CustomAndBlockquoteLanguageRenderer):
    """Renders images as <figure>s and adds sizes."""

    def paragraph(self, text):
        # In case of a figure, we do not want the (non-standard) paragraph.
        if text.strip().startswith("<figure>"):
            return text
        return super().paragraph(text)

    def _generate_size(self, src, width, height):
        src_size = src.replace(".jpg", f"_{width}x{height}.jpg")
        full_path = STATIC / Path(src[1:])
        full_path_size = STATIC / Path(src_size[1:])
        if full_path_size.exists() or "/2024/" not in src:
            return src_size
        image = Image.open(full_path)
        image.thumbnail((width, height), resample=Image.LANCZOS)
        image.save(full_path_size, icc_profile=image.info.get("icc_profile"))
        return src_size

    def _generate_webp(self, src):
        src_webp = src.replace(".jpg", ".webp")
        full_path = STATIC / Path(src[1:])
        full_path_webp = STATIC / Path(src_webp[1:])
        if full_path_webp.exists() or "/2024/" not in src:
            return src_webp
        image = Image.open(full_path)
        image.save(
            full_path_webp, format="webp", icc_profile=image.info.get("icc_profile")
        )
        # command = [
        #     "cwebp",
        #     "-q",
        #     "80",
        #     full_path,
        #     "-o",
        #     full_path_webp,
        #     "-metadata",
        #     "icc",
        # ]
        # subprocess.check_output(command, stderr=subprocess.STDOUT)
        return src_webp

    def image(self, alt, url, title=None):
        SIZES = [(660, 440), (990, 660), (1320, 880)]
        full_path = STATIC / Path(url[1:])
        image = Image.open(full_path)
        width, height = image.size
        jpg_srcs = [(url, width, height)]
        # src_webp = self._generate_webp(src)
        # webp_srcs = [(src_webp, width, height)]
        for size_width, size_height in SIZES:
            src_size = self._generate_size(url, size_width, size_height)
            jpg_srcs.append((src_size, size_width, size_height))
            # src_size_webp = self._generate_webp(src_size)
            # webp_srcs.append((src_size_webp, size_width, size_height))
        jpg_srcsets = ", ".join(
            f"{jpg_src} {jpg_width}w" for jpg_src, jpg_width, jpg_height in jpg_srcs
        )
        # webp_srcsets = ", ".join(
        #     f"{webp_src} {webp_width}w"
        #     for webp_src, webp_width, webp_height in webp_srcs
        # )
        return dedent(
            f"""\
            <figure>
              <a href="{url}"
                 title="Cliquer pour une version haute résolution">
                <img
                  src="{url}"
                  width="{width}" height="{height}"
                  srcset="{jpg_srcsets}"
                  sizes="min(100vw, calc(100vh * {width} / {height}))"
                  loading="lazy"
                  decoding="async"
                  alt="{alt}">
              </a>
              <figcaption>{title}</figcaption>
            </figure>
            """
        )


class AnchorsRenderer(CustomLinkAttributesRenderer, ImgsWithSizesRenderer):
    """Custom renderer for H2 titles and HR with anchors."""

    # This is fragile to say the least…
    global_counter = 0

    def heading(self, text, level):
        if level == 2:
            slug = slugify(text)
            return (
                f'<h2 id="{slug}">'
                f"{text} "
                f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
                f"</h2>"
            )
        else:
            return super().heading(text, level)

    def thematic_break(self) -> str:
        self.global_counter += 1
        id = f"hr-{self.global_counter}"
        return (
            f'<a href="#{id}" title="Lien vers cette section de la page">'
            f'<hr id="{id}" />'
            "</a>\n"
        )
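
# Note that `global_counter` lives on the renderer instance, so `hr` ids
# keep incrementing across every page rendered by the same instance (hence
# the "fragile" warning above); the second `---` yields, approximately:
#
#     <a href="#hr-2" title="Lien vers cette section de la page"><hr id="hr-2" /></a>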


# We want a custom renderer to create a hash/link for each H2 heading.
markdown_with_anchors = mistune.Markdown(
    renderer=AnchorsRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# The second Markdown instance is used to generate articles for the feed;
# we do not need anchors in that case.
markdown_with_img_sizes = mistune.Markdown(
    renderer=ImgsWithSizesRenderer(escape=False),
    plugins=[mark, strikethrough],
)
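
# Both instances are plain callables from Markdown to HTML, e.g.
# (output shown approximately):
#
#     markdown_with_anchors("## Résumé")
#     # → '<h2 id="resume">Résumé <a href="#resume"
#     #    title="Ancre vers cette partie">#</a></h2>'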


# This is the Jinja2 configuration to locate templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def format_struct_time(value, format="%d-%m-%Y"):
    return time.strftime(format, value)


environment.filters["format_struct_time"] = format_struct_time


def format_date(value, format="%-d %B %Y"):
    return value.strftime(format)


environment.filters["format_date"] = format_date
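
# These filters are meant for the templates, e.g. (hypothetical template
# snippets, rendered with the fr_FR locale set above; note `%-d` is a
# glibc extension):
#
#     {{ page.date|format_date }}                 → 1 janvier 2024
#     {{ entry.date_parsed|format_struct_time }}  → 01-01-2024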


def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = next(iterator)  # Throws StopIteration if empty.
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)
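
# Example, with the default bounds:
#
#     list(neighborhood([1, 2, 3]))
#     # → [(None, 1, 2), (1, 2, 3), (2, 3, None)]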


def each_file_from(source_dir, pattern="*", exclude=None):
    """Walk across the `source_dir` and return the `pattern` file paths."""
    for path in _each_path_from(source_dir, pattern=pattern, exclude=exclude):
        if path.is_file():
            yield path


def each_folder_from(source_dir, exclude=None):
    """Walk across the `source_dir` and return the folder paths."""
    for path in _each_path_from(source_dir, exclude=exclude):
        if path.is_dir():
            yield path


def _each_path_from(source_dir, pattern="*", exclude=None):
    for path in sorted(Path(source_dir).glob(pattern)):
        if exclude is not None and path.name in exclude:
            continue
        yield path
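
# Example: list the 2024 sources while skipping a (hypothetical) file name:
#
#     for path in each_file_from(SOURCES_PATH, pattern="*.md", exclude=["draft.md"]):
#         print(path.name)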


@dataclass
class Page:
    title: str
    content: str
    tags: list
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        try:
            date_str, _ = self.file_path.split(" - ", 1)
        except ValueError:
            # Fallback for 2020 contents (search index)
            suffix = len(".md")
            prefix = len("YYYY/MM-DD") + suffix
            date_str = "2020-" + self.file_path[-prefix:-suffix]
        self.url = f"/david/{date_str.replace('-', '/')}/"
        self.date = datetime.strptime(date_str, "%Y-%m-%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        tag_template = Template(
            f'<a href="{DOMAIN}/david/2024/$tag_slug/">#$tag_name</a>'
        )
        tag_links = " ".join(
            tag_template.substitute(tag_slug=slugify(tag), tag_name=tag)
            for tag in self.tags
        )
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
            + f"<nav><p>{tag_links}</p></nav>"
            + '<hr/><p><a href="mailto:david@larlet.fr">Réagir ?</a></p>'
        )
        # Extract first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " "),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path, only_published=True, with_anchors=True):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        md = markdown_with_anchors if with_anchors else markdown_with_img_sizes
        for file_path in sorted(each_file_from(source, pattern="*.md")):
            result, state = md.read(file_path)
            result = widont(result, html=True)
            # Extract (and remove) the title from the generated page.
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            tags = []
            if "<nav><p>" in content:
                # Extract the tags from the generated page.
                content, tags_links = content.split("<nav><p>", 1)
                nav_closing_size = len("</p></nav>\n")
                tags_links = tags_links[:-nav_closing_size]
                try:
                    tags = sorted(
                        {
                            tag.strip().split("#", 1)[1]
                            for tag in tags_links.split("</a>")
                            if tag.strip()
                        },
                        key=lambda tag: slugify(tag),
                    )
                except IndexError:
                    # It happens for old contents, parsed for the search index.
                    pass
            page = Page(title, content, tags, file_path.name)
            pages_by_url[page.url] = page
            if not page.is_draft:
                all_tags.update(tags)
                for tag in tags:
                    if page not in pages_by_tags[tag]:
                        pages_by_tags[tag].append(page)
            if only_published and page.is_draft:
                continue
            page_list.append(page)
        return sorted(page_list, reverse=True)

    @property
    def is_draft(self):
        return (
            datetime(year=self.date.year, month=self.date.month, day=self.date.day)
            > PUBLICATION_BUFFER
        )
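
# Draft logic sketch: PUBLICATION_BUFFER is computed once at import time
# (now + 6 hours), so a page named with a future date stays a draft, e.g.
# (hypothetical file name and content):
#
#     page = Page("Titre", "<p>Contenu.</p>", [], "2099-01-01 - titre.md")
#     page.is_draft  # → True until that day comes.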


@cli
def pages():
    """Build article pages."""
    root_path = DAVID / "2024"
    for previous, page, next_ in neighborhood(
        reversed(Page.all(source=SOURCES_PATH, only_published=False)),
        first={
            "url": "/david/2023/",
            "title": "Publications 2023",
            "is_draft": False,
        },
    ):
        template = environment.get_template("article_2024.html")
        content = template.render(page=page, prev=previous, next=next_, slugify=slugify)
        target_path = Path(page.url[1:])
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)
        if page.is_draft:
            print(f"Draft: {LOCAL_DOMAIN}{page.url} ({page.title})")

    def group_by_month_year(item):
        return item.date.strftime("%B %Y").title()

    template = environment.get_template("archives_2024.html")
    page_list = reversed(Page.all(source=SOURCES_PATH))
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(
        page_list=groupby(page_list, key=group_by_month_year), tags=tags
    )
    (root_path / "index.html").write_text(content)


@cli
def tags():
    """Build tags pages."""
    # Parse all pages to collect tags.
    Page.all(source=SOURCES_PATH, only_published=True)
    for tag in all_tags:
        template = environment.get_template("tag_2024.html")
        content = template.render(
            page_list=sorted(pages_by_tags[tag], reverse=True),
            tag_name=tag,
            slugify=slugify,
        )
        target_path = DAVID / "2024" / slugify(tag)
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)


@cli
def home():
    """Build the home page with last published items."""
    template = environment.get_template("profil_2024.html")
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(page_list=page_list, tags=tags)
    (DAVID / "index.html").write_text(content)


@dataclass
class Website:
    name: str
    url: str
    feed: str

    def __post_init__(self):
        start = perf_counter()
        content = feedparser.parse(self.feed)
        elapsed = perf_counter() - start
        entries = content.get("entries")
        print(f"Fetched {self.feed} in {elapsed:.5f} seconds.")
        print(f"{len(entries)} entries.")
        date_key_parsed = (
            "published_parsed" if "published_parsed" in entries[0] else "updated_parsed"
        )
        try:
            entries = sorted(entries, key=attrgetter(date_key_parsed), reverse=True)[:2]
        except AttributeError:
            print(f"No `{date_key_parsed}` in", entries)
        for entry in entries:
            entry.website = self
            entry.date_parsed = entry[date_key_parsed]
        self.entries = entries


@cli
def blogroll():
    """Build the blogroll with last published items from people."""
    template = environment.get_template("blogroll.html")
    website_list = [
        # Invalid feed, date not standard.
        # Website(
        #     name="Alex Sirac",
        #     url="https://alexsirac.com/",
        #     feed="https://alexsirac.com/feed",
        # ),
        Website(
            name="Maïtané Lenoir",
            url="https://www.maiwann.net/",
            feed="https://www.maiwann.net/feed.xml",
        ),
        Website(
            name="Fanny Cheung",
            url="https://ynote.hk/",
            feed="https://ynote.hk/feed.xml",
        ),
        Website(
            name="La Lune Mauve",
            url="https://lalunemauve.fr/",
            feed="https://lalunemauve.fr/feed/",
        ),
        Website(
            name="Eliness",
            url="https://www.hypothermia.fr/",
            feed="https://www.hypothermia.fr/rss",
        ),
        Website(
            name="Luce Carević",
            url="https://luce.carevic.eu/fr",
            feed="https://luce.carevic.eu/fr/flux",
        ),
        Website(
            name="Emma",
            url="https://emmaclit.com/",
            feed="https://emmaclit.com/feed/",
        ),
        Website(
            name="Karl Dubost",
            url="https://www.la-grange.net/",
            feed="https://www.la-grange.net/feed.atom",
        ),
        Website(
            name="Thomas Parisot",
            url="https://thom4.net/",
            feed="https://thom4.net/feed/",
        ),
        Website(
            name="Arthur Perret",
            url="https://www.arthurperret.fr/",
            feed="https://www.arthurperret.fr/feed.xml",
        ),
        Website(
            name="Antoine Fauchié",
            url="https://www.quaternum.net/",
            feed="https://www.quaternum.net/atom.xml",
        ),
        Website(
            name="Éric D.",
            url="https://n.survol.fr/",
            feed="https://n.survol.fr/feed",
        ),
        Website(
            name="Aude",
            url="https://blog.ecologie-politique.eu/",
            feed="https://blog.ecologie-politique.eu/feed/atom",
        ),
        # Site under maintenance.
        # Website(
        #     name="Llu",
        #     url="https://bribesdereel.net/",
        #     feed="https://bribesdereel.net/feed/rss2",
        # ),
        Website(
            name="Winnie Lim (en)",
            url="https://winnielim.org/",
            feed="https://winnielim.org/feed/",
        ),
        Website(
            name="brr (en)",
            url="https://brr.fyi/",
            feed="https://brr.fyi/feed.xml",
        ),
    ]
    entry_list = sorted(
        chain(*[website.entries for website in website_list]),
        key=attrgetter("date_parsed"),
        reverse=True,
    )
    content = template.render(website_list=website_list, entry_list=entry_list)
    (DAVID / "blogroll" / "index.html").write_text(content)


@cli
def toot():
    """Pre-write the Mastodon message."""
    page_list = Page.all(source=SOURCES_PATH, only_published=True)
    last_published = page_list[0]
    print(f"✍️ QUOTE? — {last_published.title}, {last_published.full_url}")
    print()
    print("#blog #larletfr #rss")
    print(" ".join([f"#{tag}" for tag in last_published.tags]))


@cli
def search():
    """Build the static search page with custom index."""
    template = environment.get_template("recherche_2024.html")
    page_list_2024 = Page.all(
        source=SOURCES_PATH, only_published=True, with_anchors=False
    )
    page_list_2023 = Page.all(
        source=DAVID / "2023" / "_sources", only_published=True, with_anchors=False
    )
    page_list_2022 = Page.all(
        source=DAVID / "2022" / "_sources", only_published=True, with_anchors=False
    )
    page_list_2021 = Page.all(
        source=DAVID / "2021" / "sources", only_published=True, with_anchors=False
    )
    page_list_2020 = Page.all(
        source=DAVID / "2020", only_published=True, with_anchors=False
    )
    blog_page_list_2019 = BlogPage.all(source=DAVID / "blog" / "2019")
    blog_page_list_2018 = BlogPage.all(source=DAVID / "blog" / "2018")
    blog_page_list_2017 = BlogPage.all(source=DAVID / "blog" / "2017")
    stream_page_list_2019 = StreamPage.all(source=DAVID / "stream" / "2019")
    stream_page_list_2018 = StreamPage.all(source=DAVID / "stream" / "2018")
    page_list = (
        page_list_2024
        + page_list_2023
        + page_list_2022
        + page_list_2021
        + page_list_2020
        + blog_page_list_2019
        + blog_page_list_2018
        + blog_page_list_2017
        + stream_page_list_2019
        + stream_page_list_2018
    )
    search_index = json.dumps([page.search_data for page in page_list], indent=2)
    content = template.render(search_index=search_index)
    (DAVID / "recherche" / "index.html").write_text(content)


@cli
def feed():
    """Generate a feed from last published items."""
    template = environment.get_template("feed.xml")
    page_list = Page.all(source=SOURCES_PATH, with_anchors=False)
    content = template.render(
        page_list=page_list[:NB_ITEMS_IN_FEED],
        current_dt=TODAY.strftime(NORMALIZED_STRFTIME),
        BASE_URL=f"{DOMAIN}/david/",
    )
    (DAVID / "log" / "index.xml").write_text(content)


@wrap
def perf_wrapper():
    start = perf_counter()
    yield
    elapsed = perf_counter() - start
    print(f"Done in {elapsed:.5f} seconds.")
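
# minicli runs this generator around each command (setup before `yield`,
# teardown after), so every invocation ends with its timing, e.g.
# (timing value illustrative):
#
#     $ python3 site.py home
#     Done in 0.12345 seconds.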


# Below are legacy blog contents, still useful for search indexation.
@dataclass
class BlogPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y-%m-%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "BlogPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        for folder in each_folder_from(source):
            for path in each_file_from(folder, pattern="*.md"):
                metadata, content = path.read_text().split("\n\n", 1)
                if "lang:" in metadata:
                    title, slug, date_, chapo, lang = metadata.split("\n")
                else:
                    title, slug, date_, chapo = metadata.split("\n")
                title = title[len("title: ") :].strip()
                date_str = date_[len("date: ") :].strip()
                content = markdown_with_img_sizes(content)
                page = BlogPage(title, content, path.parent, date_str)
                page_list.append(page)
        return sorted(page_list, reverse=True)


@dataclass
class StreamPage:
    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y/%m/%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date.isoformat(),
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "StreamPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        for folder in each_folder_from(source):
            for subfolder in each_folder_from(folder):
                for path in each_file_from(subfolder, pattern="*.md"):
                    metadata, content = path.read_text().split("\n\n", 1)
                    if "lang:" in metadata:
                        title, lang = metadata.split("\n")
                    else:
                        title = metadata.strip()
                    title = title[len("title: ") :].strip()
                    date_str = str(path.parent)[-len("YYYY/MM/DD") :]
                    content = markdown_with_img_sizes(content)
                    page = StreamPage(title, content, path.parent, date_str)
                    page_list.append(page)
        return sorted(page_list, reverse=True)


if __name__ == "__main__":
    run()
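
# Each function decorated with @cli above becomes a subcommand, e.g.
# (run from the repository root):
#
#     python3 site.py pages
#     python3 site.py tags
#     python3 site.py feed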