# Building a full-text search engine in 150 lines of Python code
*[Original source](https://bart.degoe.de/building-a-full-text-search-engine-150-lines-of-code/)*

---
Full-text search is everywhere. From finding a book on Scribd, a movie on Netflix, toilet paper on Amazon, or anything else on the web through Google (like [how to do your job as a software engineer](https://localghost.dev/2019/09/everything-i-googled-in-a-week-as-a-professional-software-engineer/)), you’ve searched vast amounts of unstructured data multiple times today. What’s even more amazing is that even though you searched millions (or [billions](https://www.worldwidewebsize.com/)) of records, you got a response in milliseconds. In this post, we are going to explore the basic components of a full-text search engine, and use them to build one that can search across millions of documents and rank them according to their relevance in milliseconds, in less than 150 lines of Python code!
  71. <div id="player">
  72. <p class="listen">Listen to this article instead</p>
  73. <audio controls class="audio_controls " preload="metadata">
  74. <source src="https://bart.degoe.de/audio/2021-03-24-python-full-text-search-engine.mp3" type="audio/mp3">
  75. Your browser does not support the audio element
  76. </source></audio>
  77. </div>
  78. <h1 id="data">Data</h1>
All the code in this blog post can be found on [GitHub](https://github.com/bartdegoede/python-searchengine/). I’ll provide links with the code snippets here, so you can try running this yourself. You can run the full example by installing [the requirements](https://github.com/bartdegoede/python-searchengine/blob/master/requirements.txt) (`pip install -r requirements.txt`) and [running `python run.py`](https://github.com/bartdegoede/python-searchengine/blob/master/run.py). This will download all the data and execute the example query with and without rankings.
Before we jump into building a search engine, we first need some full-text, unstructured data to search. We are going to be searching abstracts of articles from the English Wikipedia, which is currently a gzipped XML file of about 785 MB and contains about 6.27 million abstracts. I’ve written [a simple function to download](https://github.com/bartdegoede/python-searchengine/blob/master/download.py) the gzipped XML, but you can also just manually download the file.
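If you’d rather grab the file yourself, something like this minimal sketch would do (the dump URL and target path here are my assumptions; the canonical logic lives in `download.py`):

```python
import urllib.request

# Assumed location of the English Wikipedia abstracts dump; make sure the
# `data/` directory exists before running this.
URL = 'https://dumps.wikimedia.org/enwiki/latest/enwiki-latest-abstract.xml.gz'

urllib.request.urlretrieve(URL, 'data/enwiki.latest-abstract.xml.gz')
```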
  81. <h2 id="data-preparation">Data preparation</h2>
The file is one large XML file that contains all abstracts. Each abstract is contained in a `<doc>` element, and looks roughly like this (I’ve omitted elements we’re not interested in):
  83. <div class="highlight"><pre><code class="language-xml" data-lang="xml"><span>&lt;doc&gt;</span>
  84. <span>&lt;title&gt;</span>Wikipedia: London Beer Flood<span>&lt;/title&gt;</span>
  85. <span>&lt;url&gt;</span>https://en.wikipedia.org/wiki/London_Beer_Flood<span>&lt;/url&gt;</span>
  86. <span>&lt;abstract&gt;</span>The London Beer Flood was an accident at Meux <span>&amp;</span> Co's Horse Shoe Brewery, London, on 17 October 1814. It took place when one of the wooden vats of fermenting porter burst.<span>&lt;/abstract&gt;</span>
  87. ...
  88. <span>&lt;/doc&gt;</span>
  89. </code></pre></div>
The bits we’re interested in are the `title`, the `url` and the `abstract` text itself. We’ll represent documents with a [Python dataclass](https://realpython.com/python-data-classes/) for convenient data access. We’ll add a property that concatenates the title and the contents of the abstract. You can find the code [here](https://github.com/bartdegoede/python-searchengine/blob/master/search/documents.py).
  91. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>from</span> dataclasses <span>import</span> dataclass
  92. <span>@dataclass</span>
  93. <span>class</span> <span>Abstract</span>:
  94. <span>"""Wikipedia abstract"""</span>
  95. ID: int
  96. title: str
  97. abstract: str
  98. url: str
  99. <span>@property</span>
  100. <span>def</span> <span>fulltext</span>(self):
  101. <span>return</span> <span>' '</span><span>.</span>join([self<span>.</span>title, self<span>.</span>abstract])
  102. </code></pre></div>
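As a quick illustration (with a made-up document), the `fulltext` property just glues the title and the abstract together with a space:

```python
doc = Abstract(
    ID=1,
    title='Wikipedia: London Beer Flood',
    abstract='The London Beer Flood was an accident at Meux & Co...',
    url='https://en.wikipedia.org/wiki/London_Beer_Flood',
)
print(doc.fulltext)
# Wikipedia: London Beer Flood The London Beer Flood was an accident at Meux & Co...
```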
Then, we’ll want to extract the abstract data from the XML and parse it so we can create instances of our `Abstract` object. We are going to stream through the gzipped XML without loading the entire file into memory first. We’ll assign each document an ID in order of loading (i.e. the first document will have ID=1, the second one will have ID=2, etcetera). You can find the code [here](https://github.com/bartdegoede/python-searchengine/blob/master/load.py).
  104. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>import</span> gzip
  105. <span>from</span> lxml <span>import</span> etree
  106. <span>from</span> search.documents <span>import</span> Abstract
  107. <span>def</span> <span>load_documents</span>():
  108. <span># open a filehandle to the gzipped Wikipedia dump</span>
  109. <span>with</span> gzip<span>.</span>open(<span>'data/enwiki.latest-abstract.xml.gz'</span>, <span>'rb'</span>) <span>as</span> f:
  110. doc_id <span>=</span> <span>1</span>
  111. <span># iterparse will yield the entire `doc` element once it finds the</span>
  112. <span># closing `&lt;/doc&gt;` tag</span>
  113. <span>for</span> _, element <span>in</span> etree<span>.</span>iterparse(f, events<span>=</span>(<span>'end'</span>,), tag<span>=</span><span>'doc'</span>):
  114. title <span>=</span> element<span>.</span>findtext(<span>'./title'</span>)
  115. url <span>=</span> element<span>.</span>findtext(<span>'./url'</span>)
  116. abstract <span>=</span> element<span>.</span>findtext(<span>'./abstract'</span>)
  117. <span>yield</span> Abstract(ID<span>=</span>doc_id, title<span>=</span>title, url<span>=</span>url, abstract<span>=</span>abstract)
  118. doc_id <span>+=</span> <span>1</span>
  119. <span># the `element.clear()` call will explicitly free up the memory</span>
  120. <span># used to store the element</span>
  121. element<span>.</span>clear()
  122. </code></pre></div>
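To sanity-check the loader without parsing all 6.27 million abstracts, you can peek at the first few documents; a quick sketch:

```python
from itertools import islice

# print the ID and title of the first three abstracts in the dump
for doc in islice(load_documents(), 3):
    print(doc.ID, doc.title)
```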
  123. <h1 id="indexing">Indexing</h1>
We are going to store this in a data structure known as an [“inverted index” or a “postings list”](https://en.wikipedia.org/wiki/Inverted_index). Think of it as the index in the back of a book that has an alphabetized list of relevant words and concepts, and on what page number a reader can find them.
![Back of the book index](https://bart.degoe.de/img/2021-03-24-building-a-full-text-search-engine-150-lines-of-code/book-index-1080x675.png)
Practically, what this means is that we’re going to create a dictionary where we map all the words in our corpus to the IDs of the documents they occur in. That will look something like this:
  131. <div class="highlight"><pre><code class="language-json" data-lang="json">{
  132. <span>...</span>
  133. <span>"london"</span>: [<span>5245250</span>, <span>2623812</span>, <span>133455</span>, <span>3672401</span>, <span>...</span>],
  134. <span>"beer"</span>: [<span>1921376</span>, <span>4411744</span>, <span>684389</span>, <span>2019685</span>, <span>...</span>],
  135. <span>"flood"</span>: [<span>3772355</span>, <span>2895814</span>, <span>3461065</span>, <span>5132238</span>, <span>...</span>],
  136. <span>...</span>
  137. }
  138. </code></pre></div>
Note that in the example above the words in the dictionary are lowercased; before building the index we are going to break down or `analyze` the raw text into a list of words or `tokens`. The idea is that we first break up or `tokenize` the text into words, and then apply zero or more `filters` (such as lowercasing or stemming) on each token to improve the odds of matching queries to text.
![Tokenization](https://bart.degoe.de/img/2021-03-24-building-a-full-text-search-engine-150-lines-of-code/tokenization.png)
  145. <h2 id="analysis">Analysis</h2>
We are going to apply very simple tokenization, by just splitting the text on whitespace. Then, we are going to apply a couple of filters on each of the tokens: we are going to lowercase each token, remove any punctuation, remove the 25 most common words in the English language (and the word “wikipedia”, because it occurs in the title of every abstract) and apply [stemming](https://en.wikipedia.org/wiki/Stemming) to every word (ensuring that different forms of a word map to the same stem, like *brewery* and *breweries*).
The tokenization and lowercase filter are very simple:
  148. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>import</span> Stemmer
  149. STEMMER <span>=</span> Stemmer<span>.</span>Stemmer(<span>'english'</span>)
  150. <span>def</span> <span>tokenize</span>(text):
  151. <span>return</span> text<span>.</span>split()
  152. <span>def</span> <span>lowercase_filter</span>(tokens):
  153. <span>return</span> [token<span>.</span>lower() <span>for</span> token <span>in</span> tokens]
  154. <span>def</span> <span>stem_filter</span>(tokens):
  155. <span>return</span> STEMMER<span>.</span>stemWords(tokens)
  156. </code></pre></div>
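For instance, the stemmer is what makes *brewery* and *breweries* land on the same token (output from the Snowball English stemmer):

```python
>>> stem_filter(['brewery', 'breweries'])
['breweri', 'breweri']
```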
Removing punctuation is nothing more than applying a regular expression built from the set of punctuation characters:
  158. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>import</span> re
  159. <span>import</span> string
  160. PUNCTUATION <span>=</span> re<span>.</span>compile(<span>'[</span><span>%s</span><span>]'</span> <span>%</span> re<span>.</span>escape(string<span>.</span>punctuation))
  161. <span>def</span> <span>punctuation_filter</span>(tokens):
  162. <span>return</span> [PUNCTUATION<span>.</span>sub(<span>''</span>, token) <span>for</span> token <span>in</span> tokens]
  163. </code></pre></div>
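Note that the filter deletes punctuation characters rather than splitting on them, which has small side effects worth knowing about; for example (my illustration, not from the original post):

```python
>>> punctuation_filter(["co's", 'porter,', 'burst.'])
['cos', 'porter', 'burst']
```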
Stopwords are words that are very common and that we would expect to occur in (almost) every document in the corpus. As such, they won’t contribute much when we search for them (i.e. (almost) every document will match when we search for those terms) and will just take up space, so we will filter them out at index time. The Wikipedia abstract corpus includes the word “Wikipedia” in every title, so we’ll add that word to the stopword list as well. We drop the 25 most common words in English.
  165. <div class="highlight"><pre><code class="language-python" data-lang="python"><span># top 25 most common words in English and "wikipedia":</span>
  166. <span># https://en.wikipedia.org/wiki/Most_common_words_in_English</span>
  167. STOPWORDS <span>=</span> set([<span>'the'</span>, <span>'be'</span>, <span>'to'</span>, <span>'of'</span>, <span>'and'</span>, <span>'a'</span>, <span>'in'</span>, <span>'that'</span>, <span>'have'</span>,
  168. <span>'I'</span>, <span>'it'</span>, <span>'for'</span>, <span>'not'</span>, <span>'on'</span>, <span>'with'</span>, <span>'he'</span>, <span>'as'</span>, <span>'you'</span>,
  169. <span>'do'</span>, <span>'at'</span>, <span>'this'</span>, <span>'but'</span>, <span>'his'</span>, <span>'by'</span>, <span>'from'</span>, <span>'wikipedia'</span>])
  170. <span>def</span> <span>stopword_filter</span>(tokens):
  171. <span>return</span> [token <span>for</span> token <span>in</span> tokens <span>if</span> token <span>not</span> <span>in</span> STOPWORDS]
  172. </code></pre></div>
Bringing all these filters together, we’ll [construct an `analyze` function](https://github.com/bartdegoede/python-searchengine/blob/master/search/analysis.py#L28-L35) that will operate on the `text` in each abstract; it will tokenize the text into individual words (or rather, *tokens*), and then apply each filter in succession to the list of tokens. The order is important, because we use a non-stemmed list of stopwords, so we should apply the `stopword_filter` before the `stem_filter`.
  174. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>def</span> <span>analyze</span>(text):
  175. tokens <span>=</span> tokenize(text)
  176. tokens <span>=</span> lowercase_filter(tokens)
  177. tokens <span>=</span> punctuation_filter(tokens)
  178. tokens <span>=</span> stopword_filter(tokens)
  179. tokens <span>=</span> stem_filter(tokens)
  180. <span>return</span> [token <span>for</span> token <span>in</span> tokens <span>if</span> token]
  181. </code></pre></div>
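A quick check in a REPL (these particular words happen to survive stemming unchanged, so the output is predictable):

```python
>>> analyze('The London Beer Flood')
['london', 'beer', 'flood']
```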
  182. <h2 id="indexing-the-corpus">Indexing the corpus</h2>
We’ll create an `Index` class that will store the `index` and the `documents`. The `documents` dictionary stores the dataclasses by ID, and the `index` keys will be the tokens, with the values being the document IDs the token occurs in:
  184. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>class</span> <span>Index</span>:
  185. <span>def</span> __init__(self):
  186. self<span>.</span>index <span>=</span> {}
  187. self<span>.</span>documents <span>=</span> {}
  188. <span>def</span> <span>index_document</span>(self, document):
  189. <span>if</span> document<span>.</span>ID <span>not</span> <span>in</span> self<span>.</span>documents:
  190. self<span>.</span>documents[document<span>.</span>ID] <span>=</span> document
  191. <span>for</span> token <span>in</span> analyze(document<span>.</span>fulltext):
  192. <span>if</span> token <span>not</span> <span>in</span> self<span>.</span>index:
  193. self<span>.</span>index[token] <span>=</span> set()
  194. self<span>.</span>index[token]<span>.</span>add(document<span>.</span>ID)
  195. </code></pre></div>
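Wiring the loader and the index together is then a one-liner per document; roughly what `run.py` does (a sketch, not the verbatim script):

```python
index = Index()
# index all 6.27 million abstracts; this takes a while on a laptop
for document in load_documents():
    index.index_document(document)
```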
  196. <h1 id="searching">Searching</h1>
Now that we have all tokens indexed, searching for a query becomes a matter of analyzing the query text with the same analyzer we applied to the documents; this way we’ll end up with tokens that should match the tokens we have in the index. For each token, we’ll do a lookup in the dictionary, finding the document IDs that the token occurs in. We do this for every token, and then find the IDs of the documents that appear in all these sets (i.e. for a document to match the query, it needs to contain all the tokens in the query). We will then take the resulting list of document IDs, and fetch the actual data from our `documents` store.
  198. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>def</span> <span>_results</span>(self, analyzed_query):
  199. <span>return</span> [self<span>.</span>index<span>.</span>get(token, set()) <span>for</span> token <span>in</span> analyzed_query]
  200. <span>def</span> <span>search</span>(self, query):
  201. <span>"""
  202. </span><span> Boolean search; this will return documents that contain all words from the
  203. </span><span> query, but not rank them (sets are fast, but unordered).
  204. </span><span> """</span>
  205. analyzed_query <span>=</span> analyze(query)
  206. results <span>=</span> self<span>.</span>_results(analyzed_query)
  207. documents <span>=</span> [self<span>.</span>documents[doc_id] <span>for</span> doc_id <span>in</span> set<span>.</span>intersection(<span>*</span>results)]
  208. <span>return</span> documents
  209. In [<span>1</span>]: index<span>.</span>search(<span>'London Beer Flood'</span>)
  210. search took <span>0.16307830810546875</span> milliseconds
  211. Out[<span>1</span>]:
  212. [Abstract(ID<span>=</span><span>1501027</span>, title<span>=</span><span>'Wikipedia: Horse Shoe Brewery'</span>, abstract<span>=</span><span>'The Horse Shoe Brewery was an English brewery in the City of Westminster that was established in 1764 and became a major producer of porter, from 1809 as Henry Meux &amp; Co. It was the site of the London Beer Flood in 1814, which killed eight people after a porter vat burst.'</span>, url<span>=</span><span>'https://en.wikipedia.org/wiki/Horse_Shoe_Brewery'</span>),
  213. Abstract(ID<span>=</span><span>1828015</span>, title<span>=</span><span>'Wikipedia: London Beer Flood'</span>, abstract<span>=</span><span>"The London Beer Flood was an accident at Meux &amp; Co's Horse Shoe Brewery, London, on 17 October 1814. It took place when one of the wooden vats of fermenting porter burst."</span>, url<span>=</span><span>'https://en.wikipedia.org/wiki/London_Beer_Flood'</span>)]
  214. </code></pre></div>
Now, this will make our queries very precise, especially for long query strings (the more tokens our query contains, the less likely it is that there will be a document that has all of these tokens). We could optimize our search function for [recall rather than precision](https://en.wikipedia.org/wiki/Precision_and_recall) by allowing users to specify that a single matching token is enough to match our query:
  216. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>def</span> <span>search</span>(self, query, search_type<span>=</span><span>'AND'</span>):
  217. <span>"""
  218. </span><span> Still boolean search; this will return documents that contain either all words
  219. </span><span> from the query or just one of them, depending on the search_type specified.
  220. </span><span>
  221. </span><span> We are still not ranking the results (sets are fast, but unordered).
  222. </span><span> """</span>
  223. <span>if</span> search_type <span>not</span> <span>in</span> (<span>'AND'</span>, <span>'OR'</span>):
  224. <span>return</span> []
  225. analyzed_query <span>=</span> analyze(query)
  226. results <span>=</span> self<span>.</span>_results(analyzed_query)
  227. <span>if</span> search_type <span>==</span> <span>'AND'</span>:
  228. <span># all tokens must be in the document</span>
  229. documents <span>=</span> [self<span>.</span>documents[doc_id] <span>for</span> doc_id <span>in</span> set<span>.</span>intersection(<span>*</span>results)]
  230. <span>if</span> search_type <span>==</span> <span>'OR'</span>:
  231. <span># only one token has to be in the document</span>
  232. documents <span>=</span> [self<span>.</span>documents[doc_id] <span>for</span> doc_id <span>in</span> set<span>.</span>union(<span>*</span>results)]
  233. <span>return</span> documents
  234. In [<span>2</span>]: index<span>.</span>search(<span>'London Beer Flood'</span>, search_type<span>=</span><span>'OR'</span>)
  235. search took <span>0.02816295623779297</span> seconds
  236. Out[<span>2</span>]:
  237. [Abstract(ID<span>=</span><span>5505026</span>, title<span>=</span><span>'Wikipedia: Addie Pryor'</span>, abstract<span>=</span><span>'| birth_place = London, England'</span>, url<span>=</span><span>'https://en.wikipedia.org/wiki/Addie_Pryor'</span>),
  238. Abstract(ID<span>=</span><span>1572868</span>, title<span>=</span><span>'Wikipedia: Tim Steward'</span>, abstract<span>=</span><span>'|birth_place = London, United Kingdom'</span>, url<span>=</span><span>'https://en.wikipedia.org/wiki/Tim_Steward'</span>),
  239. Abstract(ID<span>=</span><span>5111814</span>, title<span>=</span><span>'Wikipedia: 1877 Birthday Honours'</span>, abstract<span>=</span><span>'The 1877 Birthday Honours were appointments by Queen Victoria to various orders and honours to reward and highlight good works by citizens of the British Empire. The appointments were made to celebrate the official birthday of the Queen, and were published in The London Gazette on 30 May and 2 June 1877.'</span>, url<span>=</span><span>'https://en.wikipedia.org/wiki/1877_Birthday_Honours'</span>),
  240. <span>...</span>
  241. In [<span>3</span>]: len(index<span>.</span>search(<span>'London Beer Flood'</span>, search_type<span>=</span><span>'OR'</span>))
  242. search took <span>0.029065370559692383</span> seconds
  243. Out[<span>3</span>]: <span>49627</span>
  244. </code></pre></div>
  245. <h1 id="relevancy">Relevancy</h1>
We have implemented a pretty quick search engine with just some basic Python, but there’s one aspect that’s obviously missing from our little engine, and that’s the [idea of **relevance**](https://livebook.manning.com/book/relevant-search/chapter-1/13). Right now we just return an unordered list of documents, and we leave it up to the user to figure out which of those they are actually interested in. Especially for large result sets, that is painful or just impossible (in our `OR` example, there are almost 50,000 results).
This is where the idea of relevancy comes in: what if we could assign each document a score that indicates how well it matches the query, and just order by that score? A naive and simple way of assigning a score to a document for a given query is to just count how often that document mentions that particular word. After all, the more often a document mentions a term, the more likely it is to be about our query!
  248. <h2 id="term-frequency">Term frequency</h2>
Let’s expand our `Abstract` dataclass to compute and store its term frequencies when we index it. That way, we’ll have easy access to those numbers when we want to rank our unordered list of documents:
  250. <div class="highlight"><pre><code class="language-python" data-lang="python"><span># in documents.py</span>
  251. <span>from</span> collections <span>import</span> Counter
  252. <span>from</span> .analysis <span>import</span> analyze
  253. <span>@dataclass</span>
  254. <span>class</span> <span>Abstract</span>:
  255. <span># snip</span>
  256. <span>def</span> <span>analyze</span>(self):
  257. <span># Counter will create a dictionary counting the unique values in an array:</span>
  258. <span># {'london': 12, 'beer': 3, ...}</span>
  259. self<span>.</span>term_frequencies <span>=</span> Counter(analyze(self<span>.</span>fulltext))
  260. <span>def</span> <span>term_frequency</span>(self, term):
  261. <span>return</span> self<span>.</span>term_frequencies<span>.</span>get(term, <span>0</span>)
  262. </code></pre></div>
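To illustrate with a made-up document (note that `term_frequency` expects an analyzed token, so lowercased and stemmed):

```python
>>> doc = Abstract(ID=1, title='Wikipedia: London Beer Flood',
...                abstract='The London Beer Flood was an accident...',
...                url='https://en.wikipedia.org/wiki/London_Beer_Flood')
>>> doc.analyze()
>>> doc.term_frequency('london')  # once in the title, once in the abstract
2
```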
We need to make sure to generate these frequency counts when we index our data:
  264. <div class="highlight"><pre><code class="language-python" data-lang="python"><span># in index.py we add `document.analyze()</span>
  265. <span>def</span> <span>index_document</span>(self, document):
  266. <span>if</span> document<span>.</span>ID <span>not</span> <span>in</span> self<span>.</span>documents:
  267. self<span>.</span>documents[document<span>.</span>ID] <span>=</span> document
  268. document<span>.</span>analyze()
  269. </code></pre></div>
We’ll modify our search function so we can apply a ranking to the documents in our result set. We’ll fetch the documents using the same Boolean query from the index and document store, and then, for every document in that result set, we’ll simply sum up how often each query term occurs in that document.
  271. <div class="highlight"><pre><code class="language-python" data-lang="python"><span>def</span> <span>search</span>(self, query, search_type<span>=</span><span>'AND'</span>, rank<span>=</span>True):
  272. <span># snip</span>
  273. <span>if</span> rank:
  274. <span>return</span> self<span>.</span>rank(analyzed_query, documents)
  275. <span>return</span> documents
  276. <span>def</span> <span>rank</span>(self, analyzed_query, documents):
  277. results <span>=</span> []
  278. <span>if</span> <span>not</span> documents:
  279. <span>return</span> results
  280. <span>for</span> document <span>in</span> documents:
  281. score <span>=</span> sum([document<span>.</span>term_frequency(token) <span>for</span> token <span>in</span> analyzed_query])
  282. results<span>.</span>append((document, score))
  283. <span>return</span> sorted(results, key<span>=</span><span>lambda</span> doc: doc[<span>1</span>], reverse<span>=</span>True)
  284. </code></pre></div>
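For reference, here is a sketch of the full `search` method with the snipped parts filled back in from the earlier version (my reconstruction, not verbatim from the repo):

```python
def search(self, query, search_type='AND', rank=True):
    if search_type not in ('AND', 'OR'):
        return []

    analyzed_query = analyze(query)
    results = self._results(analyzed_query)

    if search_type == 'AND':
        documents = [self.documents[doc_id] for doc_id in set.intersection(*results)]
    if search_type == 'OR':
        documents = [self.documents[doc_id] for doc_id in set.union(*results)]

    if rank:
        return self.rank(analyzed_query, documents)
    return documents
```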
  285. <h2 id="inverse-document-frequency">Inverse Document Frequency</h2>
That’s already a lot better, but there are some obvious shortcomings. We’re considering all query terms to be of equivalent value when assessing the relevancy for the query. However, it’s likely that certain terms have very little to no discriminating power when determining relevancy; for example, a collection with lots of documents about beer would be expected to have the term “beer” appear often in almost every document (in fact, we’re already trying to address that by dropping the 25 most common English words from the index). Searching for the word “beer” in such a case would essentially do another random sort.
In order to address that, we’ll add another component to our scoring algorithm that will reduce the contribution of terms that occur very often in the index to the final score. We could use the *collection frequency* of a term (i.e. how often does this term occur across *all* documents), but [in practice](https://nlp.stanford.edu/IR-book/html/htmledition/inverse-document-frequency-1.html) the *document frequency* is used instead (i.e. how many *documents* in the index contain this term). We’re trying to rank documents, after all, so it makes sense to have a document-level statistic.
We’ll compute the *inverse document frequency* for a term by dividing the number of documents (*N*) in the index by the number of documents that contain the term, and taking the logarithm of that quotient.
![IDF, taken from https://moz.com/blog/inverse-document-frequency-and-the-importance-of-uniqueness](https://bart.degoe.de/img/2021-03-24-building-a-full-text-search-engine-150-lines-of-code/idf.jpg)
We’ll then simply multiply the term frequency by the inverse document frequency during our ranking, so that matches on terms that are rare in the corpus contribute more to the relevancy score. We can easily compute the inverse document frequency from the data available in our index:
  295. <div class="highlight"><pre><code class="language-python" data-lang="python"><span># index.py</span>
  296. <span>import</span> math
  297. <span>def</span> <span>document_frequency</span>(self, token):
  298. <span>return</span> len(self<span>.</span>index<span>.</span>get(token, set()))
  299. <span>def</span> <span>inverse_document_frequency</span>(self, token):
  300. <span># Manning, Hinrich and Schütze use log10, so we do too, even though it</span>
  301. <span># doesn't really matter which log we use anyway</span>
  302. <span># https://nlp.stanford.edu/IR-book/html/htmledition/inverse-document-frequency-1.html</span>
  303. <span>return</span> math<span>.</span>log10(len(self<span>.</span>documents) <span>/</span> self<span>.</span>document_frequency(token))
  304. <span>def</span> <span>rank</span>(self, analyzed_query, documents):
  305. results <span>=</span> []
  306. <span>if</span> <span>not</span> documents:
  307. <span>return</span> results
  308. <span>for</span> document <span>in</span> documents:
  309. score <span>=</span> <span>0.0</span>
  310. <span>for</span> token <span>in</span> analyzed_query:
  311. tf <span>=</span> document<span>.</span>term_frequency(token)
  312. idf <span>=</span> self<span>.</span>inverse_document_frequency(token)
  313. score <span>+=</span> tf <span>*</span> idf
  314. results<span>.</span>append((document, score))
  315. <span>return</span> sorted(results, key<span>=</span><span>lambda</span> doc: doc[<span>1</span>], reverse<span>=</span>True)
  316. </code></pre></div>
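To get a feel for the numbers (the document frequencies here are made up for illustration): a term that appears in 33,000 of the 6.27 million abstracts gets an IDF of log10(6,270,000 / 33,000) ≈ 2.28, while a near-ubiquitous term appearing in 2 million abstracts only gets log10(6,270,000 / 2,000,000) ≈ 0.5, so a match on the rarer term is worth roughly four and a half times as much.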
  317. <h1 id="future-work">Future Work™</h1>
And that’s a basic search engine in just a few lines of Python code! You can find all the code on [GitHub](https://github.com/bartdegoede/python-searchengine), and I’ve provided a utility function that will download the Wikipedia abstracts and build an index. Install the requirements, run it in your Python console of choice and have fun messing with the data structures and searching.
Now, obviously this is a project to illustrate the concepts of search and how it can be so fast (even with ranking, I can search and rank 6.27 million documents on my laptop with a “slow” language like Python), not production-grade software. It runs entirely in memory on my laptop, whereas libraries like Lucene utilize hyper-efficient data structures and even optimize disk seeks, and software like Elasticsearch and Solr scale Lucene to hundreds if not thousands of machines.
That doesn’t mean that we can’t think about fun expansions of this basic functionality, though; for example, we assume that every field in the document has the same contribution to relevancy, whereas a query term match in the title should probably be weighted more strongly than a match in the description (a sketch of that idea follows below). Another fun project could be to expand the query parsing; there’s no reason why either all or just one term need to match. Why not exclude certain terms, or do `AND` and `OR` between individual terms? Can we persist the index to disk and make it scale beyond the confines of my laptop’s RAM?
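As a taste of that first idea, field-weighted scoring is a small change to `rank`; in this sketch the `title_boost` factor of 2.0 is an arbitrary assumption, not something tuned or taken from the repo:

```python
from collections import Counter

def rank(self, analyzed_query, documents, title_boost=2.0):
    # Hypothetical variant: count matches in the title more heavily than
    # matches in the abstract, scaled by the same IDF as before.
    results = []
    for document in documents:
        title_tfs = Counter(analyze(document.title))
        abstract_tfs = Counter(analyze(document.abstract))
        score = 0.0
        for token in analyzed_query:
            idf = self.inverse_document_frequency(token)
            score += (title_boost * title_tfs[token] + abstract_tfs[token]) * idf
        results.append((document, score))
    return sorted(results, key=lambda doc: doc[1], reverse=True)
```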