@@ -5,6 +5,7 @@ import fnmatch
 import locale
 import os
 import socketserver
+from collections import namedtuple
 from dataclasses import dataclass
 from datetime import date, datetime
 from html import escape
@@ -64,18 +65,13 @@ def each_markdown_from(source_dir, file_name="index.md"):
 
 
 @dataclass
-class Note:
+class Item:
     title: str
     content: str
     file_path: str
 
     def __post_init__(self):
-        suffix = len("/index.md")
-        prefix = len("YYYY/MM/DD") + suffix
-        date_str = self.file_path[-prefix:-suffix]
-        self.url = f"/david/stream/{date_str}/"
         self.full_url = f"{DOMAIN}{self.url}"
-        self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
         self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
         self.escaped_title = escape(self.title)
         self.escaped_content = escape(
@@ -83,12 +79,23 @@ class Note:
             'src="/', f'src="{DOMAIN}/'
         )
     )
-        self.extract = self.content.split("</p>", 1)[0] + "</p>"
 
     @property
     def is_draft(self):
         return self.date > date.today()
 
+
+@dataclass
+class Note(Item):
+    def __post_init__(self):
+        suffix = len("/index.md")
+        prefix = len("YYYY/MM/DD") + suffix
+        date_str = self.file_path[-prefix:-suffix]
+        self.url = f"/david/stream/{date_str}/"
+        self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
+        super().__post_init__()
+        self.extract = self.content.split("</p>", 1)[0] + "</p>"
+
     @staticmethod
     def all(source, only_published=True):
         """Retrieve all (published) notes sorted by date desc."""
|
|
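The hunk above splits the former Note class into an Item base dataclass and a thin Note subclass: the subclass derives url and date from the file path, then hands off to Item.__post_init__ for the shared attributes (full_url, normalized_date, escaped fields). A minimal, self-contained sketch of that __post_init__ chaining pattern, with DOMAIN stubbed in as an assumption and the unrelated attributes left out:

    from dataclasses import dataclass
    from datetime import datetime
    from html import escape

    DOMAIN = "https://example.org"  # assumption: stand-in for the site's real DOMAIN constant


    @dataclass
    class Item:
        title: str
        content: str
        file_path: str

        def __post_init__(self):
            # Shared derivations; subclasses set self.url (and self.date) before delegating here.
            self.full_url = f"{DOMAIN}{self.url}"
            self.escaped_title = escape(self.title)


    @dataclass
    class Note(Item):
        def __post_init__(self):
            suffix = len("/index.md")
            prefix = len("YYYY/MM/DD") + suffix
            date_str = self.file_path[-prefix:-suffix]
            self.url = f"/david/stream/{date_str}/"
            self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
            super().__post_init__()  # Item can now rely on url and date being set


    note = Note("Titre", "<p>Contenu</p>", "stream/2019/06/04/index.md")
    print(note.full_url)  # https://example.org/david/stream/2019/06/04/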
@@ -96,12 +103,50 @@
         for file_path in each_markdown_from(source):
             title, content, _ = parse_markdown(file_path)
             note = Note(title, content, file_path)
-            if only_published and note.date > date.today():
+            if only_published and note.is_draft:
                 continue
             note_list.append(note)
         return sorted(note_list, key=attrgetter("date"), reverse=True)
 
 
+@dataclass
+class Post(Item):
+    date: str
+    slug: str
+    chapo: str
+    lang: str
+
+    def __post_init__(self):
+        self.url = f"/david/blog/{self.date.year}/{self.slug}/"
+        super().__post_init__()
+        self.url_image = f"/static/david/blog/{self.date.year}/{self.slug}.jpg"
+        self.url_image_thumbnail = (
+            f"/static/david/blog/{self.date.year}/thumbnails/{self.slug}.jpg"
+        )
+        self.full_img_url = f"{DOMAIN}{self.url_image}"
+        self.full_img_url_thumbnail = f"{DOMAIN}{self.url_image_thumbnail}"
+        self.escaped_content = self.escaped_content + escape(
+            f'<img src="{self.full_img_url_thumbnail}" width="500px" height="500px" />'
+        )
+        self.escaped_chapo = escape(self.chapo)
+
+    @staticmethod
+    def all(source, only_published=True):
+        """Retrieve all (published) posts sorted by date desc."""
+        post_list = []
+        for file_path in each_markdown_from(source):
+            title, content, metadata = parse_markdown(file_path)
+            date = datetime.strptime(metadata["date"][0], "%Y-%m-%d").date()
+            slug = metadata["slug"][0]
+            chapo = metadata["chapo"][0]
+            lang = metadata.get("lang", ["fr"])[0]
+            post = Post(title, content, file_path, date, slug, chapo, lang)
+            if only_published and post.is_draft:
+                continue
+            post_list.append(post)
+        return sorted(post_list, key=attrgetter("date"), reverse=True)
+
+
 @cli
 def note(when=None):
     """Create a new note and open it in iA Writer.
|
|
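Post.all() above leans on parse_markdown() returning a metadata mapping whose values are lists, hence the [0] indexing, with date, slug and chapo required and lang falling back to "fr". A hedged sketch of just that extraction step, using a hand-written dict in place of a parsed file (the real parser is not shown in this diff, so its exact output shape is an assumption):

    from datetime import datetime

    # Assumed shape of the metadata parse_markdown() would return for one post.
    metadata = {
        "date": ["2019-06-04"],
        "slug": ["exemple"],
        "chapo": ["Un court chapeau introductif."],
        # "lang" omitted on purpose to exercise the default.
    }

    date = datetime.strptime(metadata["date"][0], "%Y-%m-%d").date()
    slug = metadata["slug"][0]
    chapo = metadata["chapo"][0]
    lang = metadata.get("lang", ["fr"])[0]
    url_image = f"/static/david/blog/{date.year}/{slug}.jpg"

    print(date, slug, lang, url_image)
    # 2019-06-04 exemple fr /static/david/blog/2019/exemple.jpg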
@@ -122,14 +167,11 @@ def stream():
     template_article = environment.get_template("stream_2019_article.html")
     template_archives = environment.get_template("stream_2019_archives.html")
     # Default when you reach the last item.
-    notes_2018 = Note(
-        title="Anciennes notes (2018)",
-        content="",
-        file_path="/david/stream/2018/12/31/index.md",
-    )
+    FakeNote = namedtuple("FakeNote", ["url", "title"])
+    notes_2018 = FakeNote(url="/david/stream/2018/", title="Anciennes notes (2018)")
     note_base = DAVID / "stream" / "2019"
-    published = Note.all(source=note_base)
     unpublished = Note.all(source=note_base, only_published=False)
+    published = [note for note in unpublished if not note.is_draft]
     for previous, note, next_ in neighborhood(unpublished, last=notes_2018):
         if note.is_draft:
             print(f"Soon: http://larlet.test:8001/{note.url} ({note.title})")
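Both stream() and blog() iterate with a neighborhood() helper that is not part of this diff; from its use above it yields (previous, current, next) triples over the unpublished list, with last= supplying a sentinel once iteration runs past the final item. A plausible reconstruction of that interface, offered only as an illustration (the first= parameter and the exact behaviour are assumptions, not the project's actual helper):

    def neighborhood(iterable, first=None, last=None):
        # Yield (previous, current, following) for each element of iterable.
        iterator = iter(iterable)
        previous = first
        try:
            current = next(iterator)
        except StopIteration:
            return
        for following in iterator:
            yield previous, current, following
            previous, current = current, following
        yield previous, current, last

    for prev_, item, next_ in neighborhood(["a", "b", "c"], last="sentinel"):
        print(prev_, item, next_)
    # None a b
    # a b c
    # b c sentinel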
|
|
@@ -154,12 +196,53 @@ def stream():
     print(f"Done: http://larlet.test:8001/{note_base}/")
 
 
+@cli
+def blog():
+    """Generate articles and archives for the blog."""
+    template_article = environment.get_template("blog_article.html")
+    template_archives = environment.get_template("blog_archives.html")
+    # Default when you reach the last item.
+    FakePost = namedtuple("FakePost", ["url", "title"])
+    posts_2012 = FakePost(
+        url="/david/thoughts/", title="Pensées précédentes (en anglais)"
+    )
+    post_base = DAVID / "blog"
+    unpublished = Post.all(source=post_base, only_published=False)
+    published = [post for post in unpublished if not post.is_draft]
+    published_en = [post for post in published if post.lang == "en"]
+    note_list = Note.all(source=DAVID / "stream" / "2019")
+    for previous, post, next_ in neighborhood(unpublished, last=posts_2012):
+        if post.is_draft:
+            print(f"Soon: http://larlet.test:8001{post.url} ({post.title})")
+        # Detect if there is code for syntax highlighting + monospaced font.
+        has_code = "<code>" in post.content
+        # Do not link to unpublished posts.
+        previous = previous and not previous.is_draft and previous or None
+        page_article = template_article.render(
+            post=post,
+            next=previous,
+            prev=next_,
+            has_code=has_code,
+            post_list=published,
+            published_posts_en=published_en,
+            note_list=note_list,
+        )
+        open(post_base / str(post.date.year) / post.slug / "index.html", "w",).write(
+            page_article
+        )
+
+    page_archive = template_archives.render(posts=published)
+    open(post_base / "index.html", "w").write(page_archive)
+    print(f"Done: http://larlet.test:8001/{post_base}/")
+
+
 @cli
 def feed():
-    """Generate a feed from 15 last published Notes in stream."""
+    """Generate a feed from last published items in stream."""
     template = environment.get_template("feed.xml")
     content = template.render(
         note_list=Note.all(source=DAVID / "stream" / "2019")[:15],
+        post_list=Post.all(source=DAVID / "blog")[:5],
         current_dt=datetime.now().strftime(NORMALIZED_STRFTIME),
         BASE_URL=f"{DOMAIN}/david/",
     )