
Clean up code from past year

master
David Larlet, 4 years ago
Parent
Commit
1796d79bce
No GPG key found matching this signature
3 changed files with 21 additions and 1488 deletions
1. david/log/index.xml    + 0   - 1270
2. requirements.txt       + 0   - 1
3. site.py                + 21  - 217

+ 0
- 1270
david/log/index.xml
The file diff is not shown because it is too large.


+ 0
- 1
requirements.txt

@@ -1,5 +1,4 @@
 Jinja2==2.10.3
-Markdown==3.1.1
 minicli==0.4.4
 mistune==2.0.0a2
 python-slugify==4.0.0

+ 21
- 217
site.py

@@ -1,18 +1,14 @@
#!/usr/bin/env python3

import codecs
import fnmatch
import locale
import os
from collections import namedtuple
from dataclasses import dataclass
from datetime import date, datetime
from html import escape
from operator import attrgetter
from pathlib import Path
from time import perf_counter

import markdown
import mistune
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
@@ -45,56 +41,31 @@ class CustomHTMLRenderer(mistune.HTMLRenderer):
return super().heading(text, level)


mistune_markdown = mistune.create_markdown(
markdown = mistune.create_markdown(
renderer=CustomHTMLRenderer(escape=False), plugins=[DirectiveInclude()]
)
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def neighborhood(iterable, first=None, last=None):
"""
Yield the (previous, current, next) items given an iterable.

You can specify a `first` and/or `last` item for bounds.
"""
iterator = iter(iterable)
previous = first
current = next(iterator) # Throws StopIteration if empty.
for next_ in iterator:
yield (previous, current, next_)
previous = current
current = next_
yield (previous, current, last)
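
A minimal sketch of what neighborhood() yields for a three-item list with explicit bounds (the values here are hypothetical):

items = ["a", "b", "c"]
triplets = list(neighborhood(items, first="start", last="end"))
# triplets == [("start", "a", "b"), ("a", "b", "c"), ("b", "c", "end")]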


def parse_markdown(file_path):
"""Extract title, (HTML) content and metadata from a markdown file."""
parser = markdown.Markdown(extensions=["meta"])
with codecs.open(file_path, "r") as source:
source = source.read()
# Avoid replacing quotes from code #PoorManParsing.
if ":::" not in source and "`" not in source:
source = source.replace("'", "’")
content = parser.convert(source)
metadata = parser.Meta if hasattr(parser, "Meta") else None
title = metadata["title"][0] if metadata is not None else ""
return title, content, metadata
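
For context, parse_markdown relies on the Python-Markdown "meta" extension, which collects the leading key: value block into a dict of string lists; a minimal sketch (the sample front matter is hypothetical):

import markdown
parser = markdown.Markdown(extensions=["meta"])
html = parser.convert("title: Une note\ndate: 2019-06-04\n\nPremier paragraphe.")
# parser.Meta == {"title": ["Une note"], "date": ["2019-06-04"]}
# html == "<p>Premier paragraphe.</p>"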


def each_markdown_from(source_dir, file_name="index.md"):
def each_markdown_from(source_dir, file_name="*.md"):
"""Walk across the `source_dir` and return the md file paths."""
for root, dirnames, filenames in os.walk(source_dir):
for filename in fnmatch.filter(filenames, file_name):
yield os.path.join(root, filename)
for filename in fnmatch.filter(os.listdir(source_dir), file_name):
yield os.path.join(source_dir, filename)
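
The rewritten each_markdown_from no longer recurses with os.walk; it filters a flat directory listing instead, and the default pattern widens from "index.md" to "*.md". A small sketch of the fnmatch.filter call it relies on (file names are hypothetical):

import fnmatch
names = ["01-15.md", "notes.txt", "02-20.md"]
# fnmatch.filter(names, "*.md") == ["01-15.md", "02-20.md"]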


@dataclass
class Item:
class Page:
title: str
content: str
file_path: str
lang: str = "fr"

def __post_init__(self):
suffix = len(".md")
prefix = len("YYYY/MM-DD") + suffix
date_str = self.file_path[-prefix:-suffix].replace("-", "/")
self.url = f"/david/{date_str}/"
self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
self.full_url = f"{DOMAIN}{self.url}"
self.normalized_date = self.date.strftime(NORMALIZED_STRFTIME)
self.escaped_title = escape(self.title)
@@ -103,49 +74,6 @@ class Item:
.replace('src="/', f'src="{DOMAIN}/')
.replace('href="#', f'href="{self.full_url}#')
)
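# The replace chain above rewrites relative src/href references into absolute
# URLs, presumably so the escaped content stays self-contained inside the feed.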

@property
def is_draft(self):
return self.date > date.today()
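
The publication date is carried by the file path itself; a minimal sketch of the slicing done in Page.__post_init__, with a hypothetical path:

file_path = "david/2020/01-15.md"
suffix = len(".md")
prefix = len("YYYY/MM-DD") + suffix
date_str = file_path[-prefix:-suffix].replace("-", "/")
# date_str == "2020/01/15" -> url "/david/2020/01/15/", and a draft if that date is still in the future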


@dataclass
class Note(Item):
lang: str = "fr"

def __post_init__(self):
suffix = len("/index.md")
prefix = len("YYYY/MM/DD") + suffix
date_str = self.file_path[-prefix:-suffix]
self.url = f"/david/stream/{date_str}/"
self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
super().__post_init__()
self.extract = self.content.split("</p>", 1)[0] + "</p>"

@staticmethod
def all(source, only_published=True):
"""Retrieve all (published) notes sorted by date desc."""
note_list = []
for file_path in each_markdown_from(source):
title, content, _ = parse_markdown(file_path)
note = Note(title, content, file_path)
if only_published and note.is_draft:
continue
note_list.append(note)
return sorted(note_list, key=attrgetter("date"), reverse=True)
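
The extract attribute keeps only the first rendered paragraph; a minimal sketch (the sample HTML is hypothetical):

content = "<p>Premier paragraphe.</p>\n<p>Deuxième paragraphe.</p>"
extract = content.split("</p>", 1)[0] + "</p>"
# extract == "<p>Premier paragraphe.</p>"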


@dataclass
class Page(Item):
lang: str = "fr"

def __post_init__(self):
suffix = len(".md")
prefix = len("YYYY/MM-DD") + suffix
date_str = self.file_path[-prefix:-suffix].replace("-", "/")
self.url = f"/david/{date_str}/"
self.date = datetime.strptime(date_str, "%Y/%m/%d").date()
super().__post_init__()
# Extract first paragraph.
self.extract = self.content.split("</p>", 1)[0] + "</p>"

@@ -153,144 +81,22 @@ class Page(Item):
def all(source):
"""Retrieve all pages sorted by desc."""
page_list = []
for file_path in each_markdown_from(source, file_name="*.md"):
if "/fragments/" in file_path:
continue
result = mistune_markdown.read(file_path)
for file_path in each_markdown_from(source):
result = markdown.read(file_path)
title, content = result.split("</h1>", 1)
title = title[len("<h1>") :]
h1_opening_size = len("<h1>")
title = title[h1_opening_size:]
page = Page(title, content, file_path)
page_list.append(page)
return sorted(page_list, reverse=True)
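
In the new Page.all, each rendered page is expected to start with an <h1> title; a minimal sketch of the split (the sample HTML is hypothetical):

result = "<h1>Une page</h1>\n<p>Contenu.</p>"
title, content = result.split("</h1>", 1)
title = title[len("<h1>"):]
# title == "Une page", content == "\n<p>Contenu.</p>"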


@dataclass
class Post(Item):
date: str
slug: str
chapo: str
lang: str

def __post_init__(self):
self.url = f"/david/blog/{self.date.year}/{self.slug}/"
super().__post_init__()
self.url_image = f"/static/david/blog/{self.date.year}/{self.slug}.jpg"
self.url_image_thumbnail = (
f"/static/david/blog/{self.date.year}/thumbnails/{self.slug}.jpg"
)
self.full_img_url = f"{DOMAIN}{self.url_image}"
self.full_img_url_thumbnail = f"{DOMAIN}{self.url_image_thumbnail}"
self.escaped_content = self.escaped_content + escape(
f'<img src="{self.full_img_url_thumbnail}" width="500px" height="500px" />'
)
self.escaped_chapo = escape(self.chapo)

@staticmethod
def all(source, only_published=True):
"""Retrieve all (published) posts sorted by date desc."""
post_list = []
for file_path in each_markdown_from(source):
title, content, metadata = parse_markdown(file_path)
date = datetime.strptime(metadata["date"][0], "%Y-%m-%d").date()
slug = metadata["slug"][0]
chapo = metadata["chapo"][0]
lang = metadata.get("lang", ["fr"])[0]
post = Post(title, content, file_path, date, slug, chapo, lang)
if only_published and post.is_draft:
continue
post_list.append(post)
return sorted(post_list, key=attrgetter("date"), reverse=True)
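
Post.all maps the meta block (lists of strings, as produced by the "meta" extension) onto the Post fields; a minimal sketch with hypothetical metadata:

metadata = {"date": ["2019-06-04"], "slug": ["un-billet"], "chapo": ["Un résumé."]}
post_date = datetime.strptime(metadata["date"][0], "%Y-%m-%d").date()
post_lang = metadata.get("lang", ["fr"])[0]
# post_date == date(2019, 6, 4), post_lang == "fr" (the default when no lang key is given)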


@cli
def note(when=None):
"""Create a new note and open it in iA Writer.

:when: Optional date in ISO format (YYYY-MM-DD)
"""
when = datetime.strptime(when, "%Y-%m-%d") if when else date.today()
note_path = DAVID / "stream" / str(when.year) / str(when.month) / str(when.day)
os.makedirs(note_path)
filename = note_path / "index.md"
open(filename, "w+").write("title: ")
os.popen(f'open -a "iA Writer" "{filename}"')


@cli
def stream():
"""Generate articles and archives for the stream."""
template_article = environment.get_template("stream_2019_article.html")
template_archives = environment.get_template("stream_2019_archives.html")
# Default when you reach the last item.
FakeNote = namedtuple("FakeNote", ["url", "title"])
notes_2018 = FakeNote(url="/david/stream/2018/", title="Anciennes notes (2018)")
note_base = DAVID / "stream" / "2019"
unpublished = Note.all(source=note_base, only_published=False)
published = [note for note in unpublished if not note.is_draft]
for previous, note, next_ in neighborhood(unpublished, last=notes_2018):
if note.is_draft:
print(f"Soon: http://larlet.test:3579{note.url} ({note.title})")
# Detect if there is code for syntax highlighting + monospaced font.
has_code = "<code>" in note.content
# Do not link to unpublished notes.
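# (the and/or chain below reads: previous if (previous and not previous.is_draft) else None)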
previous = previous and not previous.is_draft and previous or None
page_article = template_article.render(
note=note,
next=previous,
prev=next_,
has_code=has_code,
note_list=published,
)
open(
note_base / f"{note.date.month:02}" / f"{note.date.day:02}" / "index.html",
"w",
).write(page_article)

page_archive = template_archives.render(note_list=published)
open(note_base / "index.html", "w").write(page_archive)
print(f"Done: http://larlet.test:3579/{note_base}/")


@cli
def blog():
"""Generate articles and archives for the blog."""
template_article = environment.get_template("blog_article.html")
template_archives = environment.get_template("blog_archives.html")
# Default when you reach the last item.
FakePost = namedtuple("FakePost", ["url", "title"])
posts_2012 = FakePost(
url="/david/thoughts/", title="Pensées précédentes (en anglais)"
)
post_base = DAVID / "blog"
unpublished = Post.all(source=post_base, only_published=False)
published = [post for post in unpublished if not post.is_draft]
published_en = [post for post in published if post.lang == "en"]
note_list = Note.all(source=DAVID / "stream" / "2019")
for previous, post, next_ in neighborhood(unpublished, last=posts_2012):
if post.date.year < 2018:
continue # Speed up + do not overwrite old comments.
if post.is_draft:
print(f"Soon: http://larlet.test:3579{post.url} ({post.title})")
# Detect if there is code for syntax highlighting + monospaced font.
has_code = "<code>" in post.content
# Do not link to unpublished posts.
previous = previous and not previous.is_draft and previous or None
page_article = template_article.render(
post=post,
next=previous,
prev=next_,
has_code=has_code,
post_list=published,
published_posts_en=published_en,
note_list=note_list,
)
open(post_base / str(post.date.year) / post.slug / "index.html", "w",).write(
page_article
)

page_archive = template_archives.render(posts=published, note_list=note_list)
open(post_base / "index.html", "w").write(page_archive)
print(f"Done: http://larlet.test:3579/{post_base}/")
def fragment(title: str):
"""Create a new fragment and open it in iA Writer."""
fragment_path = DAVID / "2020" / "fragments" / f"{title}.md"
open(fragment_path, "w+").write(f"## {title}")
os.popen(f'open -a "iA Writer" "{fragment_path}"')


@cli
@@ -313,7 +119,7 @@ def pages():
def home():
"""Build the home page with last published items."""
template = environment.get_template("profil.html")
content = template.render(note_list=Note.all(source=DAVID / "stream" / "2019"),)
content = template.render(page_list=Page.all(source=DAVID / "2020"),)
open(DAVID / "index.html", "w").write(content)


@@ -323,8 +129,6 @@ def feed():
template = environment.get_template("feed.xml")
content = template.render(
page_list=Page.all(source=DAVID / "2020"),
note_list=Note.all(source=DAVID / "stream" / "2019")[:15],
post_list=Post.all(source=DAVID / "blog")[:5],
current_dt=datetime.now().strftime(NORMALIZED_STRFTIME),
BASE_URL=f"{DOMAIN}/david/",
)
