A place to cache linked articles (think custom and personal wayback machine)
Nelze vybrat více než 25 témat Téma musí začínat písmenem nebo číslem, může obsahovat pomlčky („-“) a může být dlouhé až 35 znaků.

cache.py 4.5KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151
#!/usr/bin/env python3
import codecs
import fnmatch
import hashlib
import os
from dataclasses import dataclass
from pathlib import Path
from time import perf_counter

import httpx
import lxml
import markdown
from jinja2 import Environment as Env
from jinja2 import FileSystemLoader
from minicli import cli, run, wrap
from readability.readability import Document

# Base directory — assumes the script is run from the repository root
# (everything below is resolved relative to the CWD); TODO confirm.
HERE = Path(".")
# Each cached article lives in cache/<md5-of-url>/ as index.md + index.html.
CACHE_PATH = HERE / "cache"
# Jinja2 environment serving the article and archive templates.
environment = Env(loader=FileSystemLoader(str(HERE / "templates")))
  19. def parse_markdown(file_path):
  20. """Extract title, (HTML) content and metadata from a markdown file."""
  21. parser = markdown.Markdown(extensions=["meta"])
  22. with codecs.open(file_path, "r") as source:
  23. content = parser.convert(source.read())
  24. metadata = parser.Meta if hasattr(parser, "Meta") else None
  25. title = metadata["title"][0] if metadata is not None else ""
  26. return title, content, metadata
  27. def each_markdown_from(source_dir, file_name="index.md"):
  28. """Walk across the `source_dir` and return the md file paths."""
  29. for root, dirnames, filenames in os.walk(source_dir):
  30. for filename in fnmatch.filter(filenames, file_name):
  31. yield os.path.join(root, filename)
  32. @dataclass
  33. class Cache:
  34. title: str
  35. content: str
  36. url: str
  37. hash_url: str
  38. @staticmethod
  39. def all(source_dir=CACHE_PATH):
  40. for file_path in each_markdown_from(source_dir):
  41. title, content, metadata = parse_markdown(file_path)
  42. url = metadata["url"][0]
  43. hash_url = metadata["hash_url"][0]
  44. yield Cache(title, content, url, hash_url)
  45. @staticmethod
  46. def one(hash_url):
  47. return next(Cache.all(source_dir=CACHE_PATH / hash_url))
  48. def extract_page(url):
  49. """From an URL, extract title and content using Readability.
  50. The title is shortened through the `short_title` native method.
  51. The content doesn't contain `<body>` tags to be directly
  52. embeddable in the template and rendered as is.
  53. """
  54. # Retrieves the resource and turns it into a Readability doc.
  55. response = httpx.get(url)
  56. document = Document(response.text)
  57. # The short title is more concise and readable.
  58. title = document.short_title()
  59. content = document.summary(html_partial=True)
  60. # Removing the added <div> and spaces.
  61. content = content[5:-6].strip()
  62. return title, content
  63. def create(hash_url):
  64. """Turn new MD file into HTML file."""
  65. template = environment.get_template("cache_article.html")
  66. cache = Cache.one(hash_url)
  67. page = template.render(cache=cache)
  68. cache_target = CACHE_PATH / hash_url
  69. if not os.path.exists(cache_target):
  70. os.makedirs(cache_target)
  71. open(cache_target / "index.html", "w").write(page)
  72. print(f"Done: http://larlet.test:8001/david/cache/{hash_url}/")
  73. @cli
  74. def generate():
  75. """Generate caches MD files into HTML files."""
  76. caches = []
  77. template = environment.get_template("cache_article.html")
  78. for cache in Cache.all():
  79. page = template.render(cache=cache)
  80. open(CACHE_PATH / cache.hash_url / "index.html", "w").write(page)
  81. caches.append(cache)
  82. template = environment.get_template("cache_archives.html")
  83. page = template.render(caches=caches)
  84. open(CACHE_PATH / "index.html", "w").write(page)
  85. print("Done: http://larlet.test:8001/david/cache/")
  86. @cli
  87. def new(url):
  88. """Turn the given URL into a MD and a HTML files.
  89. :url: The URL of the page to put into cache.
  90. """
  91. hash_url = hashlib.md5(url.encode("utf-8")).hexdigest()
  92. url_cache = f"/david/cache/{hash_url}/"
  93. link_line = f"]({url}) ([cache]({url_cache}))"
  94. print(link_line)
  95. try:
  96. title, content = extract_page(url)
  97. except (lxml.etree.XMLSyntaxError, httpx.exceptions.HTTPError,) as e:
  98. print(f"WARNING: {e}")
  99. title, content = "", ""
  100. cache_path = os.path.join(CACHE_PATH, hash_url)
  101. if not os.path.exists(cache_path):
  102. os.makedirs(cache_path)
  103. # Caching a markdown file.
  104. template = environment.get_template("cache_article.md")
  105. page = template.render(title=title, content=content, url=url, hash_url=hash_url)
  106. result_path = os.path.join(cache_path, "index.md")
  107. open(result_path, "w").write(page)
  108. # Generating the HTML file.
  109. create(hash_url)
  110. md_line = f"> <cite>*[{title}]({url})* ([cache]({url_cache}))</cite>"
  111. print(md_line)
  112. os.popen(f'subl "{result_path}"')
  113. return md_line
  114. @wrap
  115. def perf_wrapper():
  116. start = perf_counter()
  117. yield
  118. elapsed = perf_counter() - start
  119. print(f"Done in {elapsed:.5f} seconds.")
if __name__ == "__main__":
    # minicli dispatch: runs the @cli commands (`generate`, `new`)
    # through the @wrap timing wrapper.
    run()