Deal with stream generation

commit 9eb03316e7 by David Larlet, 4 years ago, on branch master
2 changed files with 92 additions and 40 deletions
1. runner.py (+92, -2)
2. utils.py (+0, -38)

runner.py (+92, -2)

@@ -1,17 +1,25 @@
#!/usr/bin/env python3

import codecs
import fnmatch
import locale
import os
import socketserver
from dataclasses import dataclass
from datetime import date, datetime
from html import escape
from http.server import SimpleHTTPRequestHandler
from operator import attrgetter
from pathlib import Path
from time import perf_counter

import markdown
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from minicli import cli, run, wrap
from utils import each_markdown_from, parse_markdown

# Useful for rendering dates within Jinja2.
locale.setlocale(locale.LC_ALL, "fr_FR.UTF-8")

HERE = Path(".")
DAVID = HERE / "david"
@@ -22,6 +30,39 @@ NORMALIZED_STRFTIME = "%Y-%m-%dT12:00:00+01:00"
environment = Env(loader=FileSystemLoader(str(DAVID / "templates")))


def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = next(iterator)  # Throws StopIteration if empty.
    for next_ in iterator:
        yield (previous, current, next_)
        previous = current
        current = next_
    yield (previous, current, last)

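As a quick illustration (not part of the commit), `neighborhood` pads the first and last triples with the given bounds:

# Hypothetical usage with a plain list:
list(neighborhood([1, 2, 3], first=0, last=4))
# -> [(0, 1, 2), (1, 2, 3), (2, 3, 4)]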

def parse_markdown(file_path):
    """Extract title, (HTML) content and metadata from a markdown file."""
    parser = markdown.Markdown(extensions=["meta"])
    with codecs.open(file_path, "r") as source:
        content = parser.convert(source.read())
    metadata = parser.Meta if hasattr(parser, "Meta") else None
    title = metadata["title"][0] if metadata is not None else ""
    return title, content, metadata

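For reference, the `meta` extension reads leading `key: value` lines, so a note file like this (invented content):

title: Une note
date: 2019-06-01

Le *contenu* de la note.

would make `parse_markdown` return ("Une note", "<p>Le <em>contenu</em> de la note.</p>", {"title": ["Une note"], "date": ["2019-06-01"]}).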

def each_markdown_from(source_dir, file_name="index.md"):
    """Walk across the `source_dir` and return the md file paths."""
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, file_name):
            yield os.path.join(root, filename)

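The walker composes with the parser, e.g. (hypothetical tree):

# Print every note path under the 2019 stream.
for file_path in each_markdown_from(DAVID / "stream" / "2019"):
    print(file_path)  # e.g. david/stream/2019/06/01/index.md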

@dataclass
class Note:
    title: str
@@ -44,6 +85,10 @@ class Note:
        )
        self.extract = self.content.split("</p>", 1)[0] + "</p>"

    @property
    def is_draft(self):
        return self.date > date.today()

    @staticmethod
    def all(source, only_published=True):
        """Retrieve all (published) notes sorted by date desc."""
@@ -58,7 +103,7 @@ class Note:


@cli
-def stream(when=None):
+def note(when=None):
    """Create a new note and open it in iA Writer.

    :when: Optional date in ISO format (YYYY-MM-DD)
@@ -71,6 +116,44 @@ def stream(when=None):
    os.popen(f'open -a "iA Writer" "{filename}"')

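If minicli maps keyword arguments to optional flags as usual, the renamed command would be invoked as (illustrative date):

python3 runner.py note --when 2019-06-01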

@cli
def stream():
    """Generate articles and archives for the stream."""
    template_article = environment.get_template("stream_2019_article.html")
    template_archives = environment.get_template("stream_2019_archives.html")
    # Default when you reach the last item.
    notes_2018 = Note(
        title="Anciennes notes (2018)",
        content="",
        file_path="/david/stream/2018/12/31/index.md",
    )
    note_base = DAVID / "stream" / "2019"
    published = Note.all(source=note_base)
    unpublished = Note.all(source=note_base, only_published=False)
    for previous, note, next_ in neighborhood(unpublished, last=notes_2018):
        if note.is_draft:
            print(f"Soon: http://larlet.test:8001/{note.url} ({note.title})")
        # Detect if there is code for syntax highlighting + monospaced font.
        has_code = "<code>" in note.content
        # Do not link to unpublished notes.
        previous = previous if previous and not previous.is_draft else None
        # Notes are sorted by date desc, so the iteration's `previous` item
        # is the chronologically *next* note, and vice versa.
        page_article = template_article.render(
            note=note,
            next=previous,
            prev=next_,
            has_code=has_code,
            note_list=published,
        )
        open(
            note_base / f"{note.date.month:02}" / f"{note.date.day:02}" / "index.html",
            "w",
        ).write(page_article)

    page_archive = template_archives.render(note_list=published)
    open(note_base / "index.html", "w").write(page_archive)
    print(f"Done: http://larlet.test:8001/{note_base}/")

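Regenerating the whole stream is then a single command (assuming the usual minicli entry point at the bottom of runner.py):

python3 runner.py stream
# … "Soon:" lines for any drafts, then:
# Done: http://larlet.test:8001/david/stream/2019/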

@cli
def feed():
    """Generate a feed from the 15 most recently published notes in the stream."""
@@ -83,6 +166,13 @@ def feed():
    open(DAVID / "log" / "index.xml", "w").write(content)


@cli
def serve():
    """Serve the generated site locally over HTTP."""
    httpd = socketserver.TCPServer(("larlet.test", 8001), SimpleHTTPRequestHandler)
    print("Serving at http://larlet.test:8001/david/")
    httpd.serve_forever()

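One caveat: binding the server to "larlet.test" only works if that host resolves locally (e.g. a `127.0.0.1 larlet.test` line in /etc/hosts); with that in place:

python3 runner.py serve
# Serving at http://larlet.test:8001/david/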

@wrap
def perf_wrapper():
    start = perf_counter()

utils.py (+0, -38)

@@ -1,38 +0,0 @@
import codecs
import fnmatch
import os

import markdown


def neighborhood(iterable, first=None, last=None):
    """
    Yield the (previous, current, next) items given an iterable.

    You can specify a `first` and/or `last` item for bounds.
    """
    iterator = iter(iterable)
    previous = first
    current = iterator.next()  # Throws StopIteration if empty.
    for next in iterator:
        yield (previous, current, next)
        previous = current
        current = next
    yield (previous, current, last)


def parse_markdown(file_path):
    """Extract title, (HTML) content and metadata from a markdown file."""
    parser = markdown.Markdown(extensions=["meta"])
    with codecs.open(file_path, "r") as source:
        content = parser.convert(source.read())
    metadata = parser.Meta if hasattr(parser, "Meta") else None
    title = metadata["title"][0] if metadata is not None else ""
    return title, content, metadata


def each_markdown_from(source_dir, file_name="index.md"):
    """Walk across the `source_dir` and return the md file paths."""
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, file_name):
            yield os.path.join(root, filename)
