Repository with sources and generator of https://larlet.fr/david/

#!/usr/bin/env python3
import fnmatch
import locale
import os
from dataclasses import dataclass
from datetime import date, datetime
from html import escape
from pathlib import Path
from time import perf_counter

import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from minicli import cli, run, wrap
from mistune.directives import DirectiveInclude
from slugify import slugify

# Useful for dates rendering within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

HERE = Path(".")
DAVID = HERE / "david"
DOMAIN = "https://larlet.fr"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
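
# Illustrative sketch, not part of the original source: with the fr_FR locale
# set above, localized date formatting comes out in French, e.g.
#   date(2020, 1, 1).strftime("%A %d %B %Y")  # -> "mercredi 01 janvier 2020"
# while NORMALIZED_STRFTIME pins published dates to noon, Paris time:
#   date(2020, 1, 1).strftime(NORMALIZED_STRFTIME)  # -> "2020-01-01T12:00:00+01:00"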


class CustomHTMLRenderer(mistune.HTMLRenderer):
    def heading(self, text, level):
        # Set an anchor to h2 headings.
        if level == 2:
            slug = slugify(text)
            return (
                f'<h2 id="{slug}">'
                f"{text} "
                f'<a href="#{slug}" title="Ancre vers cette partie">#</a>'
                f"</h2>"
            )
        else:
            return super().heading(text, level)


markdown = mistune.create_markdown(
    renderer=CustomHTMLRenderer(escape=False), plugins=[DirectiveInclude()]
)
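
# Rough example of the anchor behaviour above (assumed output, shown for
# illustration only): rendering a level-2 heading such as
#   markdown("## Des fragments")
# should yield something like
#   <h2 id="des-fragments">Des fragments <a href="#des-fragments" title="Ancre vers cette partie">#</a></h2>
# while other heading levels fall back to mistune's default rendering.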
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.
    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = next(iterator)  # Throws StopIteration if empty.
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)
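
# Minimal sketch of what `neighborhood` yields (illustrative values only):
#   list(neighborhood("abc"))
#   -> [(None, "a", "b"), ("a", "b", "c"), ("b", "c", None)]
# Passing `first` and/or `last` replaces the None bounds, which is how
# `pages()` below links the oldest entry to the "Streams 2009-2019" archive.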


def each_markdown_from(source_dir, file_name="*.md"):
    """Walk across the `source_dir` and return the md file paths."""
    for filename in fnmatch.filter(os.listdir(source_dir), file_name):
        yield os.path.join(source_dir, filename)


@dataclass
class Page:
    title: str
    content: str
    file_path: str
    lang: str = "fr"

    def __post_init__(self):
        suffix = len(".md")
        prefix = len("YYYY/MM-DD") + suffix
        date_str = self.file_path[-prefix:-suffix].replace("-", "/")
        self.url = f"/david/{date_str}/"
        self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
        self.full_url = f"{DOMAIN}{self.url}"
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/')
            .replace('src="/', f'src="{DOMAIN}/')
            .replace('href="#', f'href="{self.full_url}#')
        )
        # Extract first paragraph.
        self.extract = self.content.split("</p>", 1)[0] + "</p>"

    def __lt__(self, other: "Page"):
        if not isinstance(other, Page):
            return NotImplemented
        return self.date < other.date

    @staticmethod
    def all(source: Path):
        """Retrieve all pages sorted by descending date."""
        page_list = []
        for file_path in each_markdown_from(source):
            result = markdown.read(file_path)
            title, content = result.split("</h1>", 1)
            h1_opening_size = len("<h1>")
            title = title[h1_opening_size:]
            page = Page(title, content, file_path)
            page_list.append(page)
        return sorted(page_list, reverse=True)
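
# Illustrative only: for a (hypothetical) source file "david/2020/01-01.md",
# __post_init__ derives url "/david/2020/01/01/" and date(2020, 1, 1) from the
# trailing "YYYY/MM-DD.md" part of the path, and `escaped_content` rewrites
# relative href/src attributes to absolute URLs before escaping them for the feed.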


@cli
def fragment(title: str):
    """Create a new fragment and open it in iA Writer."""
    fragment_path = DAVID / "2020" / "fragments" / f"{title}.md"
    open(fragment_path, "w+").write(f"## {title}")
    os.popen(f'open -a "iA Writer" "{fragment_path}"')


@cli
def pages():
    """Build the aggregations from fragments."""
    root_path = DAVID / "2020"
    page_list = Page.all(source=root_path)
    for previous, page, next_ in neighborhood(
        page_list, last={"url": "/david/stream/", "title": "Streams 2009-2019"}
    ):
        template = environment.get_template("article_2020.html")
        # `page_list` is sorted newest first, so the iteration's `previous`
        # item is the more recent article: it becomes the template's `next`,
        # and `next_` (the older one) becomes `prev`.
        content = template.render(page=page, next=previous, prev=next_)
        target_path = Path(page.url[1:])
        target_path.mkdir(parents=True, exist_ok=True)
        open(target_path / "index.html", "w").write(content)
    template = environment.get_template("archives_2020.html")
    content = template.render(page_list=page_list)
    open(root_path / "index.html", "w").write(content)


@cli
def home():
    """Build the home page with last published items."""
    template = environment.get_template("profil.html")
    content = template.render(page_list=Page.all(source=DAVID / "2020"))
    open(DAVID / "index.html", "w").write(content)


@cli
def feed():
    """Generate a feed from last published items."""
    template = environment.get_template("feed.xml")
    content = template.render(
        page_list=Page.all(source=DAVID / "2020"),
        current_dt=datetime.now().strftime(NORMALIZED_STRFTIME),
        BASE_URL=f"{DOMAIN}/david/",
    )
    open(DAVID / "log" / "index.xml", "w").write(content)


@wrap
def perf_wrapper():
    start = perf_counter()
    yield
    elapsed = perf_counter() - start
    print(f"Done in {elapsed:.5f} seconds.")


if __name__ == "__main__":
    run()
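
# Usage sketch (assuming this script is saved as, say, `site.py`; the actual
# file name is not shown here). minicli exposes each @cli function as a
# subcommand, so a full build would look something like:
#   python3 site.py pages
#   python3 site.py home
#   python3 site.py feed
# and `python3 site.py fragment "Un titre"` creates a new fragment and opens
# it in iA Writer.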