A place to cache linked articles (think of it as a custom, personal Wayback Machine)

#!/usr/bin/env python3
import codecs
import fnmatch
import hashlib
import os
from dataclasses import dataclass
from datetime import date
from pathlib import Path
from time import perf_counter

import httpx
import lxml.etree  # Imported explicitly: `lxml.etree.XMLSyntaxError` is caught below.
import markdown
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from minicli import cli, run, wrap
from readability.readability import Document

HERE = Path(".")
YEAR = "2024"
CACHE_PATH = HERE / "cache" / YEAR
LOCAL_DOMAIN = "http://larlet.test:3579"
environment = Env(loader=FileSystemLoader(str(HERE / "templates")))


def parse_markdown(file_path):
    """Extract title, (HTML) content and metadata from a markdown file."""
    parser = markdown.Markdown(extensions=["meta"])
    with codecs.open(file_path, "r") as source:
        content = parser.convert(source.read())
        metadata = parser.Meta if hasattr(parser, "Meta") else None
        title = metadata["title"][0] if metadata is not None else ""
    return title, content, metadata
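
# A sketch (hypothetical values) of the Meta-style front matter the `meta`
# extension parses at the top of each cached `index.md`; `Cache.all()` below
# relies on the `url`, `hash_url` and `archive_date` keys being present:
#
#   title: Some Article Title
#   url: https://example.com/some-article
#   hash_url: 5eb63bbbe01eeed093cb22bb8f5acdc3
#   archive_date: 2024-01-15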


def each_markdown_from(source_dir, file_name="index.md"):
    """Walk across the `source_dir` and yield the md file paths."""
    for root, dirnames, filenames in os.walk(source_dir):
        for filename in fnmatch.filter(filenames, file_name):
            yield os.path.join(root, filename)


@dataclass
class Cache:
    """A cached article, rebuilt from the front matter of its `index.md`."""

    title: str
    content: str
    url: str
    hash_url: str
    archive_date: str

    @staticmethod
    def all(source_dir=CACHE_PATH):
        for file_path in each_markdown_from(source_dir):
            title, content, metadata = parse_markdown(file_path)
            url = metadata["url"][0]
            hash_url = metadata["hash_url"][0]
            archive_date = metadata["archive_date"][0]
            yield Cache(title, content, url, hash_url, archive_date)

    @staticmethod
    def one(hash_url):
        return next(Cache.all(source_dir=CACHE_PATH / hash_url))
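
# Usage sketch (the hash is hypothetical): `Cache.all()` lazily yields every
# cached article, while `Cache.one()` loads a single entry from its directory.
#
#   for cache in Cache.all():
#       print(cache.title, cache.url)
#   article = Cache.one("5eb63bbbe01eeed093cb22bb8f5acdc3")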


def extract_page(url):
    """From a URL, extract title and content using Readability.

    The title is shortened through the `short_title` native method.
    The content doesn't contain `<body>` tags, so it can be directly
    embedded in the template and rendered as is.
    """
    # Retrieve the resource and turn it into a Readability doc.
    response = httpx.get(url)
    document = Document(response.text)
    # The short title is more concise and readable.
    title = document.short_title()
    content = document.summary(html_partial=True)
    # Remove the wrapping `<div>`/`</div>` (5 and 6 chars) added by
    # `html_partial=True`, plus surrounding whitespace.
    content = content[5:-6].strip()
    return title, content


def create(hash_url):
    """Turn a new MD file into an HTML file."""
    template = environment.get_template("cache_article.html")
    cache = Cache.one(hash_url)
    page = template.render(cache=cache)
    cache_target = CACHE_PATH / hash_url
    if not os.path.exists(cache_target):
        os.makedirs(cache_target)
    open(cache_target / "index.html", "w").write(page)
    print(f"Done: {LOCAL_DOMAIN}/david/cache/{YEAR}/{hash_url}/")


@cli
def generate():
    """Generate HTML files from the cached MD files."""
    cache_list = []
    template = environment.get_template("cache_article.html")
    for cache in Cache.all():
        page = template.render(cache=cache)
        open(CACHE_PATH / cache.hash_url / "index.html", "w").write(page)
        cache_list.append(cache)
    template = environment.get_template("cache_archives.html")
    page = template.render(cache_list=cache_list)
    open(CACHE_PATH / "index.html", "w").write(page)
    print(f"Done: {LOCAL_DOMAIN}/david/cache/{YEAR}/")


@cli
def new(url):
    """Turn the given URL into MD and HTML files.

    :url: The URL of the page to put into cache.
    """
    # MD5 is used as a short, stable identifier here, not for security.
    hash_url = hashlib.md5(url.encode("utf-8")).hexdigest()
    try:
        title, content = extract_page(url)
    except (
        lxml.etree.XMLSyntaxError,
        httpx.HTTPError,
        httpx.ReadTimeout,
    ) as e:
        print(f"WARNING: {e}")
        title, content = "", ""
    cache_path = os.path.join(CACHE_PATH, hash_url)
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    archive_date = date.today()
    # Cache a markdown file.
    template = environment.get_template("cache_article.md")
    page = template.render(
        title=title,
        content=content,
        url=url,
        hash_url=hash_url,
        archive_date=archive_date,
    )
    result_path = os.path.join(cache_path, "index.md")
    open(result_path, "w").write(page)
    # Generate the HTML file.
    create(hash_url)
    md_line = f"> <cite>*[{title}]({url})*</cite>"
    print(md_line)
    # Open the generated markdown in Sublime Text (`subl`).
    os.popen(f'subl "{result_path}"')


@wrap
def perf_wrapper():
    """Report the duration of each command run."""
    start = perf_counter()
    yield
    elapsed = perf_counter() - start
    print(f"Done in {elapsed:.5f} seconds.")


if __name__ == "__main__":
    run()
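
Usage sketch, assuming the script is saved as `cache.py` (the file name is an
assumption; minicli exposes each `@cli`-decorated function as a subcommand
named after the function, with positional arguments taken from its signature):

    python3 cache.py new "https://example.com/some-article"
    python3 cache.py generate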