Repository with the sources and the generator of https://larlet.fr/david/
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

пре 10 месеци
пре 5 година
пре 1 година
пре 10 месеци
пре 1 година
пре 4 година
пре 1 година
пре 4 година
пре 5 година
пре 2 година
пре 5 година
пре 10 месеци
пре 4 година
пре 2 година
пре 10 месеци
пре 4 година
пре 10 месеци
пре 10 месеци
пре 1 година
пре 10 месеци
пре 10 месеци
пре 1 година
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 3 година
пре 1 година
пре 3 година
пре 10 месеци
пре 4 година
пре 4 година
пре 4 година
пре 1 година
пре 1 година
пре 4 година
пре 10 месеци
пре 4 година
пре 5 година
пре 4 година
пре 4 година
пре 3 година
пре 10 месеци
пре 4 година
пре 4 година
пре 4 година
пре 4 година
пре 4 година
пре 1 година
пре 2 година
пре 4 година
пре 4 година
пре 4 година
пре 4 година
пре 3 година
пре 4 година
пре 4 година
пре 10 месеци
пре 4 година
пре 2 година
пре 4 година
пре 10 месеци
пре 4 година
пре 4 година
пре 10 месеци
пре 4 година
пре 4 година
пре 2 година
пре 4 година
пре 2 година
пре 10 месеци
пре 10 месеци
пре 10 месеци
пре 5 година
пре 2 година
пре 1 година
пре 5 година
пре 1 година
пре 1 година
пре 1 година
пре 10 месеци
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 10 месеци
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 1 година
пре 10 месеци
пре 1 година
пре 1 година
пре 10 месеци
пре 1 година
пре 5 година
пре 2 година
пре 4 година
12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779
  1. #!/usr/bin/env python3
  2. import hashlib
  3. import json
  4. import locale
  5. import time
  6. from collections import defaultdict
  7. from dataclasses import dataclass
  8. from datetime import datetime, timedelta
  9. from html import escape
  10. from itertools import chain, groupby
  11. from operator import attrgetter
  12. from pathlib import Path
  13. from string import Template
  14. from textwrap import dedent
  15. from time import perf_counter
  16. from urllib.parse import urlparse
  17. import feedparser
  18. import mistune
  19. from jinja2 import Environment as Env
  20. from jinja2 import FileSystemLoader
  21. from jinja2.filters import do_striptags
  22. from minicli import cli, run, wrap
  23. from mistune.plugins.formatting import mark, strikethrough
  24. from mistune.util import safe_entity
  25. from PIL import Image
  26. from slugify import slugify
  27. from typography import typographie
  28. from widont import widont
# Useful for dates rendering within Jinja2 (French month/day names).
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

VERSION = "2024.01.01"
HERE = Path(".")
DAVID = HERE / "david"
# Sibling repository holding the static assets (images, etc.).
STATIC = HERE / ".." / "larlet-fr-static"
DOMAIN = "https://larlet.fr"
LOCAL_DOMAIN = "http://larlet.test:3579"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
# Shifted by 6 hours — presumably so a post dated "today" is considered
# published from early morning onward; TODO confirm the intent.
TODAY = datetime.today() + timedelta(hours=6)
# Buffer after which a page is no longer a draft (currently zero days).
PUBLICATION_BUFFER = TODAY - timedelta(days=0)
NB_ITEMS_IN_FEED = 30
SOURCES_PATH = DAVID / "2024" / "_sources"

# Module-level registries, populated as a side effect of `Page.all()`.
all_tags = set()
pages_by_tags = defaultdict(list)
pages_by_url = {}
  46. class TagsRenderer(mistune.HTMLRenderer):
  47. """Make the asumption each line starting with a `#` is a tag."""
  48. def paragraph(self, text):
  49. if text.startswith("#"):
  50. tags = " ".join(
  51. f'<a href="/david/2024/{slugify(tag.strip())}/">#{tag.strip()}</a>'
  52. for tag in text.split("#")
  53. if tag.strip()
  54. )
  55. return f"<nav><p>{tags}</p></nav>\n"
  56. return super().paragraph(text)
class FrenchTypographyRenderer(mistune.HTMLRenderer):
    """Apply French typographic rules to text."""

    def text(self, text):
        # `typographie` post-processes the rendered text; html=True tells it
        # the input may contain markup to be preserved.
        return typographie(super().text(text), html=True)

    def block_html(self, html):
        # Raw HTML blocks get the same typographic treatment.
        return typographie(super().block_html(html), html=True)
class CustomLinkAttributesRenderer(mistune.HTMLRenderer):
    """Automatically generate the title for internal links.

    Also, set the domain as a data-attribute for a remote link.
    Also, add an archive link if it exists.
    Also, split that.
    """

    def link(self, text, url, title=None):
        attrs = {}
        attrs["href"] = self.safe_url(url)
        if not title and url.startswith("/david/2024/"):
            # It will not work for internal urls referencing the future:
            # `pages_by_url` only contains pages already parsed by Page.all().
            page = pages_by_url.get(url)
            if page:
                title = page.title
        else:
            # Any other link (remote, or internal with an explicit title)
            # is tagged with its bare domain for CSS/JS hooks.
            hostname = urlparse(url).hostname
            if hostname is not None:
                if hostname.startswith("www."):
                    domain = hostname[len("www.") :]
                else:
                    domain = hostname
                attrs["data-link-domain"] = domain
        if title:
            attrs["title"] = safe_entity(title)
        # Attributes are sorted for a deterministic output.
        attributes = {f'{attr}="{value}"' for attr, value in attrs.items()}
        initial_link = f'<a {" ".join(sorted(attributes))}>{text}</a>'
        archive_link = ""
        # The local-archive cache is keyed by the MD5 of the URL; the layout
        # is assumed to be ../larlet-fr-david-cache/cache/2024/<md5>/index.md
        # with an `archive_date:` front-matter key — TODO confirm.
        hash_url = hashlib.md5(url.encode("utf-8")).hexdigest()
        archive_folder = (
            HERE.resolve().parent
            / "larlet-fr-david-cache"
            / "cache"
            / "2024"
            / hash_url
        ).resolve()
        if archive_folder.exists():
            archive_path_md = archive_folder / "index.md"
            _, content = archive_path_md.read_text().split("archive_date:", 1)
            archive_date = content.split("\n", 1)[0].strip()
            link = f"/david/cache/2024/{hash_url}/"
            title = f"Copie locale au {archive_date}"
            archive_link = f' <a href="{link}" title="{title}">[archive]</a>'
        return f"{initial_link}{archive_link}"
  106. class CustomAndBlockquoteLanguageRenderer(FrenchTypographyRenderer, TagsRenderer):
  107. """Sets the English language attribute for blockquotes with `[en]` prefix."""
  108. def _get_language(self, text):
  109. if text.startswith("<p>[en] "):
  110. return "en", text.replace("<p>[en] ", "<p>")
  111. else:
  112. return None, text
  113. def block_quote(self, text):
  114. language, text = self._get_language(text)
  115. if language:
  116. return f'\n<blockquote lang="{language}">\n{text}</blockquote>\n'
  117. else:
  118. return f"\n<blockquote>\n{text}</blockquote>\n"
class ImgsWithSizesRenderer(CustomAndBlockquoteLanguageRenderer):
    """Renders images as <figure>s and add sizes."""

    def paragraph(self, text):
        # In case of a figure, we do not want the (non-standard) paragraph.
        if text.strip().startswith("<figure>"):
            return text
        return super().paragraph(text)

    def _generate_size(self, src, width, height):
        """Return the resized-variant URL, creating the file on first use.

        Only images under /2024/ are (re)generated; the derived URL is
        returned either way.
        """
        src_size = src.replace(".jpg", f"_{width}x{height}.jpg")
        full_path = STATIC / Path(src[1:])
        full_path_size = STATIC / Path(src_size[1:])
        if full_path_size.exists() or "/2024/" not in src:
            return src_size
        image = Image.open(full_path)
        # thumbnail() preserves aspect ratio within the bounding box.
        image.thumbnail((width, height), resample=Image.LANCZOS)
        image.save(full_path_size, icc_profile=image.info.get("icc_profile"))
        return src_size

    def _generate_webp(self, src):
        """Return the .webp variant URL, creating the file on first use."""
        src_webp = src.replace(".jpg", ".webp")
        full_path = STATIC / Path(src[1:])
        full_path_webp = STATIC / Path(src_webp[1:])
        if full_path_webp.exists() or "/2024/" not in src:
            return src_webp
        image = Image.open(full_path)
        image.save(
            full_path_webp, format="webp", icc_profile=image.info.get("icc_profile")
        )
        # Previous external-tool approach, kept for reference:
        # command = [
        #     "cwebp",
        #     "-q",
        #     "80",
        #     full_path,
        #     "-o",
        #     full_path_webp,
        #     "-metadata",
        #     "icc",
        # ]
        # subprocess.check_output(command, stderr=subprocess.STDOUT)
        return src_webp

    def image(self, alt, url, title=None):
        """Render a responsive <figure> with a srcset of pre-generated sizes."""
        SIZES = [(660, 440), (990, 660), (1320, 880)]
        full_path = STATIC / Path(url[1:])
        image = Image.open(full_path)
        width, height = image.size
        jpg_srcs = [(url, width, height)]
        # src_webp = self._generate_webp(src)
        # webp_srcs = [(src_webp, width, height)]
        for size_width, size_height in SIZES:
            src_size = self._generate_size(url, size_width, size_height)
            jpg_srcs.append((src_size, size_width, size_height))
            # src_size_webp = self._generate_webp(src_size)
            # webp_srcs.append((src_size_webp, size_width, size_height))
        jpg_srcsets = ", ".join(
            f"{jpg_src} {jpg_width}w" for jpg_src, jpg_width, jpg_height in jpg_srcs
        )
        # webp_srcsets = ", ".join(
        #     f"{webp_src} {webp_width}w"
        #     for webp_src, webp_width, webp_height in webp_srcs
        # )
        return dedent(
            f"""\
            <figure>
                <a href="{url}"
                    title="Cliquer pour une version haute résolution">
                    <img
                        src="{url}"
                        width="{width}" height="{height}"
                        srcset="{jpg_srcsets}"
                        sizes="min(100vw, calc(100vh * {width} / {height}))"
                        loading="lazy"
                        decoding="async"
                        alt="{alt}">
                </a>
                <figcaption>{title}</figcaption>
            </figure>
            """
        )
  196. class H2AnchorsRenderer(CustomLinkAttributesRenderer, ImgsWithSizesRenderer):
  197. """Custom renderer for H2 titles with anchors."""
  198. def heading(self, text, level):
  199. if level == 2:
  200. slug = slugify(text)
  201. return (
  202. f'<h2 id="{slug}">'
  203. f"{text} "
  204. f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
  205. f"</h2>"
  206. )
  207. else:
  208. return super().heading(text, level)
# We want a custom renderer to create a hash/link for each H2 heading.
markdown_with_h2_anchors = mistune.Markdown(
    renderer=H2AnchorsRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# The second markdown is pertinent to generate articles for the feed,
# we do not need anchors in that case.
markdown_with_img_sizes = mistune.Markdown(
    renderer=ImgsWithSizesRenderer(escape=False),
    plugins=[mark, strikethrough],
)
# This is the jinja2 configuration to locate templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))
  222. def format_struct_time(value, format="%d-%m-%Y"):
  223. return time.strftime(format, value)
# Expose the filter to templates as `{{ value|format_struct_time }}`.
environment.filters["format_struct_time"] = format_struct_time
  225. def format_date(value, format="%-d %B %Y"):
  226. return value.strftime(format)
# Expose the filter to templates as `{{ value|format_date }}`.
environment.filters["format_date"] = format_date
  228. def neighborhood(iterable, first=None, last=None):
  229. """
  230. Yield the (previous, current, next) items given an iterable.
  231. You can specify a `first` and/or `last` item for bounds.
  232. """
  233. iterator = iter(iterable)
  234. previous = first
  235. current = next(iterator) # Throws StopIteration if empty.
  236. for next_ in iterator:
  237. yield (previous, current, next_)
  238. previous = current
  239. current = next_
  240. yield (previous, current, last)
  241. def each_file_from(source_dir, pattern="*", exclude=None):
  242. """Walk across the `source_dir` and return the `pattern` file paths."""
  243. for path in _each_path_from(source_dir, pattern=pattern, exclude=exclude):
  244. if path.is_file():
  245. yield path
  246. def each_folder_from(source_dir, exclude=None):
  247. """Walk across the `source_dir` and return the folder paths."""
  248. for path in _each_path_from(source_dir, exclude=exclude):
  249. if path.is_dir():
  250. yield path
  251. def _each_path_from(source_dir, pattern="*", exclude=None):
  252. for path in sorted(Path(source_dir).glob(pattern)):
  253. if exclude is not None and path.name in exclude:
  254. continue
  255. yield path
@dataclass
class Page:
    """An article page parsed from a markdown source file.

    NOTE(review): `tags` ends up being a sorted list in practice, but
    `Page.all()` passes `{}` when no tag nav is found — the annotation is
    loose on purpose.
    """

    title: str
    content: str
    tags: list
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        try:
            # File names look like "YYYY-MM-DD - Title.md".
            date_str, _ = self.file_path.split(" - ", 1)
        except ValueError:
            # Fallback for 2020 contents (search index): the date is encoded
            # at the end of the path as "YYYY/MM-DD.md".
            suffix = len(".md")
            prefix = len("YYYY/MM-DD") + suffix
            date_str = "2020-" + self.file_path[-prefix:-suffix]
        self.url = f"/david/{date_str.replace('-', '/')}/"
        self.date = datetime.strptime(date_str, "%Y-%m-%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        tag_template = Template(
            f'<a href="{DOMAIN}/david/2024/$tag_slug/">#$tag_name</a>'
        )
        tag_links = " ".join(
            tag_template.substitute(tag_slug=slugify(tag), tag_name=tag)
            for tag in self.tags
        )
        # Feed-ready content: relative links/anchors are made absolute,
        # then the tag nav and a reply link are appended before escaping.
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
            + f"<nav><p>{tag_links}</p></nav>"
            + '<hr/><p><a href="mailto:david@larlet.fr">Réagir ?</a></p>'
        )
        # Extract first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " "),
        }

    def __eq__(self, other):
        # Identity is the URL (i.e. the publication date).
        return self.url == other.url

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path, only_published=True, with_h2_anchors=True):
        """Retrieve all pages sorted by desc.

        Side effects: populates the module-level `pages_by_url`, `all_tags`
        and `pages_by_tags` registries while iterating.
        """
        page_list = []
        md = markdown_with_h2_anchors if with_h2_anchors else markdown_with_img_sizes
        for file_path in sorted(each_file_from(source, pattern="*.md")):
            result, state = md.read(file_path)
            result = widont(result, html=True)
            # Extract (and remove) the title from the generated page.
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            tags = {}
            if "<nav><p>" in content:
                # Extract the tags from the generated page.
                content, tags_links = content.split("<nav><p>", 1)
                nav_closing_size = len("</p></nav>\n")
                tags_links = tags_links[:-nav_closing_size]
                try:
                    tags = sorted(
                        {
                            tag.strip().split("#", 1)[1]
                            for tag in tags_links.split("</a>")
                            if tag.strip()
                        },
                        key=lambda tag: slugify(tag),
                    )
                except IndexError:
                    # It happens for old contents, parsed for the search index.
                    pass
            page = Page(title, content, tags, file_path.name)
            pages_by_url[page.url] = page
            if not page.is_draft:
                all_tags.update(tags)
                for tag in tags:
                    if page not in pages_by_tags[tag]:
                        pages_by_tags[tag].append(page)
            if only_published and page.is_draft:
                continue
            page_list.append(page)
        return sorted(page_list, reverse=True)

    @property
    def is_draft(self):
        # A page is a draft while its date is in the future w.r.t. the buffer.
        return (
            datetime(year=self.date.year, month=self.date.month, day=self.date.day)
            > PUBLICATION_BUFFER
        )
@cli
def pages():
    """Build article pages."""
    root_path = DAVID / "2024"
    # Iterate chronologically (Page.all returns desc) so prev/next links
    # point to the surrounding publications; the `first` dict stands in for
    # the page preceding the first 2024 article.
    for previous, page, next_ in neighborhood(
        reversed(Page.all(source=SOURCES_PATH, only_published=False)),
        first={
            "url": "/david/2023/",
            "title": "Publications 2023",
            "is_draft": False,
        },
    ):
        template = environment.get_template("article_2024.html")
        content = template.render(page=page, prev=previous, next=next_, slugify=slugify)
        target_path = Path(page.url[1:])
        target_path.mkdir(parents=True, exist_ok=True)
        (target_path / "index.html").write_text(content)
        if page.is_draft:
            print(f"Draft: {LOCAL_DOMAIN}{page.url} ({page.title})")

    def group_by_month_year(item):
        # Archive headings, e.g. "Janvier 2024" (locale is fr_FR).
        return item.date.strftime("%B %Y").title()

    # Yearly archive page grouping published articles by month.
    template = environment.get_template("archives_2020.html")
    page_list = reversed(Page.all(source=SOURCES_PATH))
    tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
    content = template.render(
        page_list=groupby(page_list, key=group_by_month_year), tags=tags
    )
    (root_path / "index.html").write_text(content)
  382. @cli
  383. def tags():
  384. """Build tags pages."""
  385. # Parse all pages to collect tags.
  386. Page.all(source=SOURCES_PATH, only_published=True)
  387. for tag in all_tags:
  388. template = environment.get_template("tag_2024.html")
  389. content = template.render(
  390. page_list=sorted(pages_by_tags[tag], reverse=True),
  391. tag_name=tag,
  392. slugify=slugify,
  393. )
  394. target_path = DAVID / "2024" / slugify(tag)
  395. target_path.mkdir(parents=True, exist_ok=True)
  396. (target_path / "index.html").write_text(content)
  397. @cli
  398. def home():
  399. """Build the home page with last published items."""
  400. template = environment.get_template("profil.html")
  401. page_list = Page.all(source=SOURCES_PATH, only_published=True)
  402. tags = sorted((slugify(tag), tag, len(pages_by_tags[tag])) for tag in all_tags)
  403. content = template.render(page_list=page_list, tags=tags)
  404. (DAVID / "index.html").write_text(content)
  405. @dataclass
  406. class Website:
  407. name: str
  408. url: str
  409. feed: str
  410. def __post_init__(self):
  411. start = perf_counter()
  412. content = feedparser.parse(self.feed)
  413. elapsed = perf_counter() - start
  414. entries = content.get("entries")
  415. print(f"Fetched {self.feed} in {elapsed:.5f} seconds.")
  416. print(f"{len(entries)} entries.")
  417. date_key_parsed = (
  418. "published_parsed" if "published_parsed" in entries[0] else "updated_parsed"
  419. )
  420. try:
  421. entries = sorted(entries, key=attrgetter(date_key_parsed), reverse=True)[:2]
  422. except AttributeError:
  423. print("No `{date_key_parsed}` in", entries)
  424. for entry in entries:
  425. entry.website = self
  426. entry.date_parsed = entry[date_key_parsed]
  427. self.entries = entries
@cli
def blogroll():
    """Build the blogroll with last published items from people."""
    template = environment.get_template("blogroll.html")
    # Curated list of followed websites; instantiating a Website fetches
    # its feed (network access happens here).
    website_list = [
        # Invalid feed, date not standard.
        # Website(
        #     name="Alex Sirac",
        #     url="https://alexsirac.com/",
        #     feed="https://alexsirac.com/feed",
        # ),
        Website(
            name="Maïtané Lenoir",
            url="https://www.maiwann.net/",
            feed="https://www.maiwann.net/feed.xml",
        ),
        Website(
            name="Fanny Cheung",
            url="https://ynote.hk/",
            feed="https://ynote.hk/feed.xml",
        ),
        Website(
            name="La Lune Mauve",
            url="https://lalunemauve.fr/",
            feed="https://lalunemauve.fr/feed/",
        ),
        Website(
            name="Eliness",
            url="https://www.hypothermia.fr/",
            feed="https://www.hypothermia.fr/rss",
        ),
        Website(
            name="Luce Carević",
            url="https://luce.carevic.eu/fr",
            feed="https://luce.carevic.eu/fr/flux",
        ),
        Website(
            name="Emma",
            url="https://emmaclit.com/",
            feed="https://emmaclit.com/feed/",
        ),
        Website(
            name="Karl Dubost",
            url="https://www.la-grange.net/",
            feed="https://www.la-grange.net/feed.atom",
        ),
        Website(
            name="Thomas Parisot",
            url="https://thom4.net/",
            feed="https://thom4.net/feed/",
        ),
        Website(
            name="Arthur Perret",
            url="https://www.arthurperret.fr/",
            feed="https://www.arthurperret.fr/feed.xml",
        ),
        Website(
            name="Antoine Fauchié",
            url="https://www.quaternum.net/",
            feed="https://www.quaternum.net/atom.xml",
        ),
        Website(
            name="Éric D.",
            url="https://n.survol.fr/",
            feed="https://n.survol.fr/feed",
        ),
        Website(
            name="Aude",
            url="https://blog.ecologie-politique.eu/",
            feed="https://blog.ecologie-politique.eu/feed/atom",
        ),
        # Site en maintenance.
        # Website(
        #     name="Llu",
        #     url="https://bribesdereel.net/",
        #     feed="https://bribesdereel.net/feed/rss2",
        # ),
        Website(
            name="Winnie Lim (en)",
            url="https://winnielim.org/",
            feed="https://winnielim.org/feed/",
        ),
        Website(
            name="brr (en)",
            url="https://brr.fyi/",
            feed="https://brr.fyi/feed.xml",
        ),
    ]
    # Merge every website's (already truncated) entries, newest first.
    entry_list = sorted(
        chain(*[website.entries for website in website_list]),
        key=attrgetter("date_parsed"),
        reverse=True,
    )
    content = template.render(website_list=website_list, entry_list=entry_list)
    (DAVID / "blogroll" / "index.html").write_text(content)
  523. @cli
  524. def toot():
  525. """Pre-write the Mastodon message."""
  526. page_list = Page.all(source=SOURCES_PATH, only_published=True)
  527. last_published = page_list[0]
  528. print(f"✍️ QUOTE? — {last_published.title}, {last_published.full_url}")
  529. print()
  530. print("#blog #larletfr #rss")
  531. print(" ".join([f"#{tag}" for tag in last_published.tags]))
  532. @cli
  533. def search():
  534. """Build the static search page with custom index."""
  535. template = environment.get_template("recherche.html")
  536. page_list_2024 = Page.all(
  537. source=SOURCES_PATH, only_published=True, with_h2_anchors=False
  538. )
  539. page_list_2023 = Page.all(
  540. source=SOURCES_PATH, only_published=True, with_h2_anchors=False
  541. )
  542. page_list_2022 = Page.all(
  543. source=DAVID / "2022" / "_sources", only_published=True, with_h2_anchors=False
  544. )
  545. page_list_2021 = Page.all(
  546. source=DAVID / "2021" / "sources", only_published=True, with_h2_anchors=False
  547. )
  548. page_list_2020 = Page.all(
  549. source=DAVID / "2020", only_published=True, with_h2_anchors=False
  550. )
  551. blog_page_list_2019 = BlogPage.all(source=DAVID / "blog" / "2019")
  552. blog_page_list_2018 = BlogPage.all(source=DAVID / "blog" / "2018")
  553. blog_page_list_2017 = BlogPage.all(source=DAVID / "blog" / "2017")
  554. stream_page_list_2019 = StreamPage.all(source=DAVID / "stream" / "2019")
  555. stream_page_list_2018 = StreamPage.all(source=DAVID / "stream" / "2018")
  556. page_list = (
  557. page_list_2024
  558. + page_list_2023
  559. + page_list_2022
  560. + page_list_2021
  561. + page_list_2020
  562. + blog_page_list_2019
  563. + blog_page_list_2018
  564. + blog_page_list_2017
  565. + stream_page_list_2019
  566. + stream_page_list_2018
  567. )
  568. search_index = json.dumps([page.search_data for page in page_list], indent=2)
  569. content = template.render(search_index=search_index)
  570. (DAVID / "recherche" / "index.html").write_text(content)
  571. @cli
  572. def feed():
  573. """Generate a feed from last published items."""
  574. template = environment.get_template("feed.xml")
  575. page_list = Page.all(source=SOURCES_PATH, with_h2_anchors=False)
  576. content = template.render(
  577. page_list=page_list[:NB_ITEMS_IN_FEED],
  578. current_dt=TODAY.strftime(NORMALIZED_STRFTIME),
  579. BASE_URL=f"{DOMAIN}/david/",
  580. )
  581. (DAVID / "log" / "index.xml").write_text(content)
  582. @wrap
  583. def perf_wrapper():
  584. start = perf_counter()
  585. yield
  586. elapsed = perf_counter() - start
  587. print(f"Done in {elapsed:.5f} seconds.")
# Below are legacy blog contents, still useful for search indexation.
@dataclass
class BlogPage:
    """A legacy (2017-2019) blog article, parsed only for the search index."""

    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        self.date = datetime.strptime(self.date_str, "%Y-%m-%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date_str,
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "BlogPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by desc."""
        page_list = []
        for folder in each_folder_from(source):
            for path in each_file_from(folder, pattern="*.md"):
                # Front matter and body are separated by a blank line; the
                # metadata block is assumed to be exactly title/slug/date/
                # chapo (+ optional lang) lines — TODO confirm for all years.
                metadata, content = path.read_text().split("\n\n", 1)
                if "lang:" in metadata:
                    title, slug, date_, chapo, lang = metadata.split("\n")
                else:
                    title, slug, date_, chapo = metadata.split("\n")
                title = title[len("title: ") :].strip()
                date_str = date_[len("date: ") :].strip()
                content = markdown_with_img_sizes(content)
                page = BlogPage(title, content, path.parent, date_str)
                page_list.append(page)
        return sorted(page_list, reverse=True)
@dataclass
class StreamPage:
    """A legacy (2018-2019) stream note, parsed only for the search index."""

    title: str
    content: str
    file_path: str
    date_str: str

    def __post_init__(self):
        # Stream dates come from the folder layout, hence the "/" separator.
        self.date = datetime.strptime(self.date_str, "%Y/%m/%d").date()
        self.url = f"/{self.file_path}/"
        # Create the index for the search.
        self.search_data = {
            "title": self.title,
            "url": self.url,
            "date": self.date.isoformat(),
            "content": do_striptags(self.content)
            .replace("\u00a0(cache)", " ")
            .replace("'", " ")
            .replace("<", "&lt;")
            .replace(">", "&gt;"),
        }

    def __eq__(self, other):
        return self.url == other.url

    def __lt__(self, other: "StreamPage"):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by desc."""
        page_list = []
        # Layout is source/YYYY/MM/DD/*.md (hence the nested folder walks).
        for folder in each_folder_from(source):
            for subfolder in each_folder_from(folder):
                for path in each_file_from(subfolder, pattern="*.md"):
                    metadata, content = path.read_text().split("\n\n", 1)
                    if "lang:" in metadata:
                        title, lang = metadata.split("\n")
                    else:
                        title = metadata.strip()
                    title = title[len("title: ") :].strip()
                    # The date is the last "YYYY/MM/DD" chunk of the path.
                    date_str = str(path.parent)[-len("YYYY/MM/DD") :]
                    content = markdown_with_img_sizes(content)
                    page = StreamPage(title, content, path.parent, date_str)
                    page_list.append(page)
        return sorted(page_list, reverse=True)
# Delegate to minicli: dispatches to the @cli-decorated commands above.
if __name__ == "__main__":
    run()