- #!/usr/bin/env python3
-
- import codecs
- import fnmatch
- import locale
- import os
- from collections import namedtuple
- from dataclasses import dataclass
- from datetime import date, datetime
- from html import escape
- from operator import attrgetter
- from pathlib import Path
- from time import perf_counter
-
- import markdown
- from jinja2 import Environment as Env
- from jinja2 import FileSystemLoader
- from minicli import cli, run, wrap
-
# Useful for dates rendering within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

# All paths are resolved relative to the current working directory.
HERE = Path(".")
DAVID = HERE / "david"
DOMAIN = "https://larlet.fr"
# Hardcoding publication at 12 in Paris timezone.
NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"

# Shared Jinja2 environment; templates live under david/templates.
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))
-
-
def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    An empty iterable yields nothing (the previous version raised a
    RuntimeError through PEP 479 when `next()` hit an empty iterator).
    """
    iterator = iter(iterable)
    previous = first
    try:
        current = next(iterator)
    except StopIteration:
        # Nothing to iterate over: produce no triples at all.
        return
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)
-
-
def parse_markdown(file_path):
    """Extract title, (HTML) content and metadata from a markdown file.

    :file_path: path to a markdown file using the `meta` extension
        front-matter format.

    Returns a `(title, content, metadata)` tuple. `metadata` is the dict
    built by the `meta` extension (None when the parser exposes none);
    `title` falls back to an empty string when no title is declared.
    """
    parser = markdown.Markdown(extensions=["meta"])
    # Plain `open` with an explicit encoding replaces the legacy
    # `codecs.open`, and the context manager closes the handle.
    with open(file_path, "r", encoding="utf-8") as source_file:
        source = source_file.read()
    # Avoid replacing quotes from code #PoorManParsing.
    if ":::" not in source and "`" not in source:
        source = source.replace("'", "’")
    content = parser.convert(source)
    metadata = parser.Meta if hasattr(parser, "Meta") else None
    # `Meta` can be an empty dict or lack "title": guard both cases
    # instead of raising KeyError.
    title = metadata.get("title", [""])[0] if metadata else ""
    return title, content, metadata
-
-
def each_markdown_from(source_dir, file_name="index.md"):
    """Walk across the `source_dir` and return the md file paths."""
    for current_root, _dir_names, file_names in os.walk(source_dir):
        matching = fnmatch.filter(file_names, file_name)
        for matched_name in matching:
            yield os.path.join(current_root, matched_name)
-
-
@dataclass
class Item:
    # Base class for publishable entries (notes, posts).
    # Contract: subclasses must set `self.url` and `self.date` BEFORE
    # calling `super().__post_init__()`, because the derived attributes
    # computed below read them.
    title: str
    content: str
    file_path: str

    def __post_init__(self):
        # Absolute URL on the public domain.
        self.full_url = f"{DOMAIN}{self.url}"
        # Fixed-noon timestamp (Paris offset) used for feed rendering.
        self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
        self.escaped_title = escape(self.title)
        # Make root-relative href/src links absolute, then HTML-escape
        # the whole content so it can be embedded in a feed.
        self.escaped_content = escape(
            self.content.replace('href="/', f'href="{DOMAIN}/').replace(
                'src="/', f'src="{DOMAIN}/'
            )
        )

    @property
    def is_draft(self):
        # An item dated in the future is not published yet.
        return self.date > date.today()
-
-
@dataclass
class Note(Item):
    """A stream note whose date is encoded in its directory layout."""

    lang: str = "fr"

    def __post_init__(self):
        # The path ends with YYYY/MM/DD/index.md: slice the date out.
        tail_length = len("/index.md")
        head_length = len("YYYY/MM/DD") + tail_length
        date_str = self.file_path[-head_length:-tail_length]
        self.url = f"/david/stream/{date_str}/"
        self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
        super().__post_init__()
        # Keep only the first rendered paragraph as the extract.
        self.extract = self.content.partition("</p>")[0] + "</p>"

    @staticmethod
    def all(source, only_published=True):
        """Retrieve all (published) notes sorted by date desc."""
        notes = []
        for file_path in each_markdown_from(source):
            title, content, _metadata = parse_markdown(file_path)
            notes.append(Note(title, content, file_path))
        if only_published:
            notes = [note for note in notes if not note.is_draft]
        notes.sort(key=attrgetter("date"), reverse=True)
        return notes
-
-
@dataclass
class Post(Item):
    """A blog article carrying image, chapo and language metadata."""

    date: str
    slug: str
    chapo: str
    lang: str

    def __post_init__(self):
        year = self.date.year
        self.url = f"/david/blog/{year}/{self.slug}/"
        super().__post_init__()
        self.url_image = f"/static/david/blog/{year}/{self.slug}.jpg"
        self.url_image_thumbnail = (
            f"/static/david/blog/{year}/thumbnails/{self.slug}.jpg"
        )
        self.full_img_url = f"{DOMAIN}{self.url_image}"
        self.full_img_url_thumbnail = f"{DOMAIN}{self.url_image_thumbnail}"
        # Append the (escaped) thumbnail markup to the feed content.
        thumbnail_tag = (
            f'<img src="{self.full_img_url_thumbnail}" width="500px" height="500px" />'
        )
        self.escaped_content += escape(thumbnail_tag)
        self.escaped_chapo = escape(self.chapo)

    @staticmethod
    def all(source, only_published=True):
        """Retrieve all (published) posts sorted by date desc."""
        posts = []
        for file_path in each_markdown_from(source):
            title, content, metadata = parse_markdown(file_path)
            post = Post(
                title,
                content,
                file_path,
                datetime.strptime(metadata["date"][0], "%Y-%m-%d").date(),
                metadata["slug"][0],
                metadata["chapo"][0],
                metadata.get("lang", ["fr"])[0],
            )
            if not (only_published and post.is_draft):
                posts.append(post)
        posts.sort(key=attrgetter("date"), reverse=True)
        return posts
-
-
@cli
def note(when=None):
    """Create a new note and open it in iA Writer.

    :when: Optional date in ISO format (YYYY-MM-DD)
    """
    when = datetime.strptime(when, "%Y-%m-%d") if when else date.today()
    note_path = DAVID / "stream" / str(when.year) / str(when.month) / str(when.day)
    # Intentionally fails if the directory already exists, so an
    # existing note is never silently overwritten.
    os.makedirs(note_path)
    filename = note_path / "index.md"
    # Seed the front matter; the context manager closes the handle
    # instead of leaking it.
    with open(filename, "w+") as new_note:
        new_note.write("title: ")
    # NOTE(review): the path placeholder had been lost during extraction
    # (the f-string contained no interpolation); restored as {filename}.
    os.popen(f'open -a "iA Writer" "{filename}"')
-
-
@cli
def stream():
    """Generate articles and archives for the stream."""
    template_article = environment.get_template("stream_2019_article.html")
    template_archives = environment.get_template("stream_2019_archives.html")
    # Default when you reach the last item.
    FakeNote = namedtuple("FakeNote", ["url", "title"])
    notes_2018 = FakeNote(url="/david/stream/2018/", title="Anciennes notes (2018)")
    note_base = DAVID / "stream" / "2019"
    unpublished = Note.all(source=note_base, only_published=False)
    published = [note for note in unpublished if not note.is_draft]
    for previous, note, next_ in neighborhood(unpublished, last=notes_2018):
        if note.is_draft:
            print(f"Soon: http://larlet.test:8001{note.url} ({note.title})")
        # Detect if there is code for syntax highlighting + monospaced font.
        has_code = "<code>" in note.content
        # Do not link to unpublished notes.
        previous = previous if previous and not previous.is_draft else None
        # Notes are sorted by date desc, so the item *before* in the list
        # is chronologically the *next* one (and vice versa): the swap of
        # `previous`/`next_` below is intentional.
        page_article = template_article.render(
            note=note,
            next=previous,
            prev=next_,
            has_code=has_code,
            note_list=published,
        )
        article_path = (
            note_base / f"{note.date.month:02}" / f"{note.date.day:02}" / "index.html"
        )
        # Context managers close the output files instead of leaking handles.
        with open(article_path, "w") as article_file:
            article_file.write(page_article)

    page_archive = template_archives.render(note_list=published)
    with open(note_base / "index.html", "w") as archive_file:
        archive_file.write(page_archive)
    print(f"Done: http://larlet.test:8001/{note_base}/")
-
-
@cli
def blog():
    """Generate articles and archives for the blog."""
    template_article = environment.get_template("blog_article.html")
    template_archives = environment.get_template("blog_archives.html")
    # Default when you reach the last item.
    FakePost = namedtuple("FakePost", ["url", "title"])
    posts_2012 = FakePost(
        url="/david/thoughts/", title="Pensées précédentes (en anglais)"
    )
    post_base = DAVID / "blog"
    unpublished = Post.all(source=post_base, only_published=False)
    published = [post for post in unpublished if not post.is_draft]
    published_en = [post for post in published if post.lang == "en"]
    note_list = Note.all(source=DAVID / "stream" / "2019")
    for previous, post, next_ in neighborhood(unpublished, last=posts_2012):
        if post.date.year < 2018:
            continue  # Speed up + do not overwrite old comments.
        if post.is_draft:
            print(f"Soon: http://larlet.test:8001{post.url} ({post.title})")
        # Detect if there is code for syntax highlighting + monospaced font.
        has_code = "<code>" in post.content
        # Do not link to unpublished posts.
        previous = previous if previous and not previous.is_draft else None
        # Posts are sorted by date desc, so the item *before* in the list
        # is chronologically the *next* one: the swap below is intentional.
        page_article = template_article.render(
            post=post,
            next=previous,
            prev=next_,
            has_code=has_code,
            post_list=published,
            published_posts_en=published_en,
            note_list=note_list,
        )
        # Context managers close the output files instead of leaking handles.
        with open(post_base / str(post.date.year) / post.slug / "index.html", "w") as article_file:
            article_file.write(page_article)

    page_archive = template_archives.render(posts=published, note_list=note_list)
    with open(post_base / "index.html", "w") as archive_file:
        archive_file.write(page_archive)
    print(f"Done: http://larlet.test:8001/{post_base}/")
-
-
@cli
def home():
    """Build the home page with last published items."""
    template = environment.get_template("profil.html")
    content = template.render(note_list=Note.all(source=DAVID / "stream" / "2019"))
    # Context manager closes the output file instead of leaking the handle.
    with open(DAVID / "index.html", "w") as home_file:
        home_file.write(content)
-
-
@cli
def feed():
    """Generate a feed from last published items."""
    template = environment.get_template("feed.xml")
    content = template.render(
        # Last 15 notes and 5 posts only, newest first.
        note_list=Note.all(source=DAVID / "stream" / "2019")[:15],
        post_list=Post.all(source=DAVID / "blog")[:5],
        current_dt=datetime.now().strftime(NORMALIZED_STRFTIME),
        BASE_URL=f"{DOMAIN}/david/",
    )
    # Context manager closes the output file instead of leaking the handle.
    with open(DAVID / "log" / "index.xml", "w") as feed_file:
        feed_file.write(content)
-
-
@wrap
def perf_wrapper():
    """Time every CLI command and report the wall-clock duration."""
    started_at = perf_counter()
    yield
    elapsed = perf_counter() - started_at
    print(f"Done in {elapsed:.5f} seconds.")
-
-
- if __name__ == "__main__":
- run()
|