A place to cache linked articles (think custom and personal wayback machine)

<!doctype html><!-- This is a valid HTML5 document. -->
<!-- Screen readers, SEO, extensions and so on. -->
<html lang="fr">
<!-- Has to be within the first 1024 bytes, hence before the `title` element
See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
<meta charset="utf-8">
<!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
<!-- The viewport meta is quite crowded and we are responsible for that.
See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
<meta name="viewport" content="width=device-width,initial-scale=1">
<!-- Required to make a valid HTML5 document. -->
<title>Block the Bots that Feed “AI” Models by Scraping Your Website (archive) — David Larlet</title>
<meta name="description" content="Publication mise en cache pour en conserver une trace.">
<!-- That good ol' feed, subscribe :). -->
<link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
<!-- Generated from https://realfavicongenerator.net/ such a mess. -->
<link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
<link rel="manifest" href="/static/david/icons2/site.webmanifest">
<link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
<link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
<meta name="msapplication-TileColor" content="#f7f7f7">
<meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
<meta name="theme-color" content="#f7f7f7" media="(prefers-color-scheme: light)">
<meta name="theme-color" content="#272727" media="(prefers-color-scheme: dark)">
<!-- Is that even respected? Retrospectively? What a shAItshow…
https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/ -->
<meta name="robots" content="noai, noimageai">
<!-- Documented, feel free to shoot an email. -->
<link rel="stylesheet" href="/static/david/css/style_2021-01-20.css">
<!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<script>
function toggleTheme(themeName) {
document.documentElement.classList.toggle(
'forced-dark',
themeName === 'dark'
)
document.documentElement.classList.toggle(
'forced-light',
themeName === 'light'
)
}
const selectedTheme = localStorage.getItem('theme')
if (selectedTheme && selectedTheme !== 'undefined') {
toggleTheme(selectedTheme)
}
</script>
<meta name="robots" content="noindex, nofollow">
<meta content="origin-when-cross-origin" name="referrer">
<!-- Canonical URL for SEO purposes -->
<link rel="canonical" href="https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/">
<body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick" data-instant-intensity="viewport-all">
<article>
<header>
<h1>Block the Bots that Feed “AI” Models by Scraping Your Website</h1>
</header>
<nav>
<p class="center">
<a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
</svg> Accueil</a> •
<a href="https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/" title="Lien vers le contenu original">Source originale</a>
</p>
</nav>
<hr>
<p>“AI” companies think that we should have to opt-out of data-scraping bots that take our work to train their products. There isn’t even a required no-scraping period between the announcement and when they start. Too late? Tough. Once they have your data, they don’t provide you with a way to have it deleted, even before they’ve processed it for training.</p>
<p>These companies should be prevented from using data that they haven’t been given explicit consent for. Opt-out is problematic as it counts on concerned parties hearing about new or modified bots BEFORE their sites are targeted by them. That is simply not practical.</p>
<p>It should be strictly <strong>opt-in</strong>. No one should be required to provide their work for free to any person or organization. The online community is under no responsibility to help them create their products. Some will declare that I am “Anti-AI” for saying such things, but that would be a misrepresentation. I am not declaring that these systems should be torn down, simply that their developers aren’t entitled to our work. They can still build those systems with purchased or donated data.</p>
<p>There are ongoing court cases and debates in political circles around the world. Decisions and policies will move more slowly than either side on this issue would like, but in the meantime, SOME of the bots involved in scraping data for training have been identified and can be blocked. (Others may still be secret or operate without respect for the wishes of a website’s owner.) Here’s how:</p>
<p><strong><span>(If you are not technically inclined, please talk to your webmaster, whatever support options are at your disposal, or a tech-savvy friend.)</span></strong></p>
<h2>robots.txt</h2>
<p>This is a file placed in the home directory of your website that is used to tell web crawlers and bots which portions of your website they are allowed to visit. Well-behaved bots honor these directives. (Not all scraping bots are well-behaved and there are no consequences, short of negative public opinion, for ignoring them. At this point, there have been no claims that bots being named in this post have ignored these directives.)</p>
<p>This is what our robots.txt looks like:</p>
<pre>User-agent: CCBot
Disallow: /
User-agent: ChatGPT-User
Disallow: /
User-agent: GPTBot
Disallow: /
User-agent: Google-Extended
Disallow: /
User-agent: Omgilibot
Disallow: /
User-agent: Omgili
Disallow: /
User-agent: FacebookBot
Disallow: /
</pre>
<p>The first line identifies <a href="https://commoncrawl.org/big-picture/frequently-asked-questions/">CCBot</a>, the bot used by the Common Crawl. This data has been used by ChatGPT, Bard, and others for training a number of models. The second line states that this user-agent is not allowed to access data from our entire website. Some image scraping bots also use Common Crawl data to find images.</p>
<p>The next two user-agents identify ChatGPT-specific bots.</p>
<p><a href="https://platform.openai.com/docs/plugins/bot">ChatGPT-User</a> is the bot used when a ChatGPT user instructs it to reference your website. It’s not automatically going to your site on its own, but it is still accessing and using data from your site.</p>
<p><a href="https://platform.openai.com/docs/gptbot">GPTBot</a> is a bot that OpenAI specifically uses to collect bulk training data from your website for ChatGPT.</p>
<p><a href="https://developers.google.com/search/docs/crawling-indexing/overview-google-crawlers">Google-Extended</a> is the recently announced product token that allows you to block Google from scraping your site for Bard and VertexAI. This will not have an impact on Google Search indexing. <strong>The only way this works is if it is in your robots.txt.</strong> According to their documentation: “Google-Extended doesn’t have a separate HTTP request user agent string. Crawling is done with existing Google user agent strings; the robots.txt user-agent token is used in a control capacity.”</p>
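<p>In practice, if you already have a robots.txt and only want to add the Google-Extended opt-out, the entry to append is the same two-line pattern used in the full example above:</p>
<pre>User-agent: Google-Extended
Disallow: /
</pre>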
<p><a href="https://webz.io/blog/web-data/what-is-the-omgili-bot-and-why-is-it-crawling-your-website/">Omgilibot</a> and Omgili are from webz.io. I noticed The New York Times was blocking them and discovered that they sell <a href="https://webz.io/blog/machine-learning/large-language-models-what-your-data-must-include/">data for training LLMs</a>.</p>
<p><a href="https://developers.facebook.com/docs/sharing/bot">FacebookBot</a> is Meta’s bot that crawls public web pages to improve language models for their speech recognition technology. This is not what Facebook uses to get the image and snippet for when you post a link there.</p>
<p>ChatGPT has been previously reported to use another unnamed bot that had been referencing Reddit posts to find “quality data.” That bot’s user agent has never been officially identified and its current status is unknown.</p>
<h2>Updating or Installing robots.txt</h2>
<p>You can check if your website has a robots.txt by going to yourwebsite.com/robots.txt. If your browser can’t find that page, then you don’t have one.</p>
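<p>If you are comfortable with a terminal, you can also check with curl (replace yourwebsite.com with your own domain):</p>
<pre>curl -I https://yourwebsite.com/robots.txt</pre>
<p>A 200 status in the response means the file is there; a 404 means you don’t have one yet.</p>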
<p>If your site is hosted by Squarespace, or another simple website-building site, you could have a problem. At present, many of those companies don’t allow you to update or add your own robots.txt. They may not even have the ability to do it for you. I recommend contacting support so you can get specific information regarding their current abilities and plans to offer such functionality. Remind them that once your work has been slurped up, you have no ability to remove it from their hold, so this is an urgent priority. (It also demonstrates once again why “opt-out” is a bad model.)</p>
<p>If you are using Wix, they provide directions for modifying your robots.txt <a href="https://support.wix.com/en/article/editing-your-sites-robotstxt-file">here</a>.</p>
<p>If you are using WordPress, there are a few plugins that allow you to modify your robots.txt. Many SEO (Search Engine Optimization) plugins include robots.txt editing features. (Use those instead of making your own.) Here are a few we’ve run into:</p>
<ul>
<li>Yoast: <a href="https://yoast.com/help/how-to-edit-robots-txt-through-yoast-seo/">directions</a></li>
<li>AIOSEO: <a href="https://aioseo.com/docs/using-the-robots-txt-tool-in-all-in-one-seo/">directions</a> (there’s a report in the comments that user agent blocking may not be working at the moment)</li>
<li>SEOPress: <a href="https://www.seopress.org/support/guides/edit-robots-txt-file/">directions</a></li>
</ul>
<p>If your WordPress site doesn’t have a robots.txt or something else that modifies robots.txt, these two plugins can block <a href="https://wordpress.org/plugins/block-chat-gpt-via-robots-txt/">GPTBot</a> and <a href="https://wordpress.org/plugins/block-common-crawl-via-robots-txt/">CCBot</a> for you. (Disclaimer: I don’t use these plugins, but know people who do.)</p>
<p><strong>For more experienced users:</strong> If you don’t have a robots.txt, you can create a text file by that name and upload it via FTP to your website’s home directory. If you have one, it can be downloaded, altered and reuploaded. If your hosting company provides you with cPanel or some other control panel, you can use its file manager to view, modify, or create the file as well.</p>
<p><strong>If your site already has a robots.txt, it’s important to know where it came from as something else may be updating it. You don’t want to accidentally break something, so talk to whoever set up your website or your hosting provider’s support team.</strong></p>
<h2>Firewalls and CDNs (less common, but better option)</h2>
<p>Your website may have a firewall or CDN in front of your actual server. Many of these products have the ability to block bots and specific user agents. Blocking the six user agents (<strong>CCBot</strong>, <strong>GPTBot</strong>, <strong>ChatGPT-User</strong>, <strong>Omgilibot</strong>, <strong>Omgili</strong>, and <strong>FacebookBot</strong>) there is even more effective than using a robots.txt directive. (As I mentioned, directives can be ignored. Blocks at the firewall level prevent them from accessing your site at all.) Some of these products include <a href="https://docs.sucuri.net/website-firewall/whitelist-and-blacklist/block-user-agents">Sucuri</a>, <a href="https://developers.cloudflare.com/waf/tools/user-agent-blocking/#cloudflare-user-agent-blocking">Cloudflare</a>, <a href="https://www.quic.cloud/docs/cdn/cdn-security-configuration/#user-agent-allowlist-and-blocklist">QUIC.cloud</a>, and <a href="https://www.wordfence.com/help/blocking/#custom-pattern">Wordfence</a>. (Happy to add more if people let me know about them. Please include a link to their user agent blocking documentation as well.) Contact their support if you need further assistance.</p>
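<p>If what sits in front of your site is simply an nginx server that you manage yourself, rather than one of the products above, you can get a similar effect by refusing those user agents directly. A minimal sketch, assuming the snippet goes inside your server (or location) block and that these are the user-agent strings you want to match; test it before relying on it:</p>
<pre># Refuse requests from known "AI"-scraper user agents
if ($http_user_agent ~* "(CCBot|GPTBot|ChatGPT-User|Omgilibot|Omgili|FacebookBot)") {
    return 403;
}</pre>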
<p><strong>NOTE:</strong> Google-Extended isn’t a bot. You need to have this in your robots.txt file if you want to prevent them from using your site content as training.</p>
<h2>.htaccess (another option)</h2>
<p>In the comments, DJ Mary pointed out that you can also block user agents with your website’s .htaccess file by adding these lines:</p>
<pre>RewriteEngine On
RewriteCond %{HTTP_USER_AGENT} (CCBot|ChatGPT|GPTBot|Omgilibot|Omgili|FacebookBot) [NC]
RewriteRule ^ - [F]</pre>
<p>I’d rate this one as something for more experienced people to do. This has a similar effect to that of the firewall and CDN blocks above.</p>
<p><strong>NOTE:</strong> Google-Extended isn’t a bot. You need to have this in your robots.txt file if you want to prevent them from using your site content as training.</p>
<h2>Additional Protection for Images</h2>
<p>There are some image-scraping tools that honor the following directive:</p>
<pre>&lt;meta name="robots" content="noai, noimageai"&gt;</pre>
<p>when placed in the header section of your webpages. Unfortunately, many image-scraping tools allow their users to ignore this directive.</p>
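<p>The same directive can also be sent as an X-Robots-Tag HTTP response header instead of (or in addition to) the meta tag, which saves editing every page. I can’t promise that any particular scraper checks the header form, so treat this as a sketch; on Apache, assuming mod_headers is enabled, it would look like this in .htaccess:</p>
<pre>&lt;IfModule mod_headers.c&gt;
Header set X-Robots-Tag "noai, noimageai"
&lt;/IfModule&gt;</pre>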
<p>Tools like <a href="https://glaze.cs.uchicago.edu/">Glaze</a> and <a href="https://mist-project.github.io/index_en.html">Mist</a> can make it more difficult for models to perform style mimicry based on altered images. (Assuming they don’t get or already have an unaltered copy from another source.)</p>
<p>There are other techniques that you can apply for further protection (blocking direct access to images, watermarking, etc.) but I’m probably not the best person to talk to for this one. If you know a good source, recommend them in the comments.</p>
<h2>Podcasts</h2>
<p>The standard lack of transparency from the “AI” industry makes it difficult to know what is being done with regards to audio. It is clear, however, that the Common Crawl has audio listed among the types of data it has acquired. Blocks to the bots mentioned should protect an RSS feed (the part of your site that shares information about episodes), but if your audio files (or RSS feed) are hosted on a third party website (like Libsyn, PodBean, Blubrry, etc.), it may be open from their end if they aren’t blocking. I am presently unaware of any that are blocking those bots, but I have started asking. The very nature of how podcasts are distributed makes it very difficult to close up the holes that would allow access. This is yet another reason why Opt-In needs to be the standard.</p>
<h2>ai.txt</h2>
<p>I just came across this one recently and I don’t know which “AI” companies are respecting Spawning’s ai.txt settings, but if anyone is, it’s worth having. They provide a tool to generate the file and an assortment of installation directions for different websites.</p>
<p><a href="https://site.spawning.ai/spawning-ai-txt">https://site.spawning.ai/spawning-ai-txt</a></p>
<h2>Closing</h2>
<p>None of these options are guarantees. They are based on an honor system and there’s no shortage of dishonorable people who want to acquire your data for the “AI” gold rush or other purposes. Sadly, the most effective means of protecting your work from scraping is to not put it online at all. Even paywall models can be compromised by someone determined to do so.</p>
<p>Writers and artists should also start advocating for “AI”-specific clauses in their contracts to restrict publishers from using, selling, donating, or licensing your work for the purposes of training these systems. Online works might be the most vulnerable to being fed to training algorithms, but print, audio, and ebook editions developed by publishers can be used too. It is not safe to assume that anyone will take the necessary efforts to protect your work from these uses, so get it in writing.</p>
<p> </p>
<p>[This post will be updated with additional information as it becomes available.]</p>
<p>9/28/2023 – Added the recently announced Google-Extended robots.txt product token. This must be in robots.txt. There are no alternatives.</p>
<p>9/28/2023 – Added Omgilibot/Omgili, bots apparently used by a company that sells data for LLM training.</p>
<p>9/29/2023 – Adam Johnson on Mastodon pointed us at FacebookBot, which is used by Meta to help improve their language models.</p>
</article>
<hr>
<footer>
<p>
<a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
</svg> Accueil</a> •
<a href="/david/log/" title="Accès au flux RSS"><svg class="icon icon-rss2">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-rss2"></use>
</svg> Suivre</a> •
<a href="http://larlet.com" title="Go to my English profile" data-instant><svg class="icon icon-user-tie">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-user-tie"></use>
</svg> Pro</a> •
<a href="mailto:david%40larlet.fr" title="Envoyer un courriel"><svg class="icon icon-mail">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-mail"></use>
</svg> Email</a> •
<abbr class="nowrap" title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340"><svg class="icon icon-hammer2">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-hammer2"></use>
</svg> Légal</abbr>
</p>
<template id="theme-selector">
<form>
<fieldset>
<legend><svg class="icon icon-brightness-contrast">
<use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-brightness-contrast"></use>
</svg> Thème</legend>
<label>
<input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
</label>
<label>
<input type="radio" value="dark" name="chosen-color-scheme"> Foncé
</label>
<label>
<input type="radio" value="light" name="chosen-color-scheme"> Clair
</label>
</fieldset>
</form>
</template>
</footer>
<script src="/static/david/js/instantpage-5.1.0.min.js" type="module"></script>
<script>
function loadThemeForm(templateName) {
const themeSelectorTemplate = document.querySelector(templateName)
const form = themeSelectorTemplate.content.firstElementChild
themeSelectorTemplate.replaceWith(form)
form.addEventListener('change', (e) => {
const chosenColorScheme = e.target.value
localStorage.setItem('theme', chosenColorScheme)
toggleTheme(chosenColorScheme)
})
const selectedTheme = localStorage.getItem('theme')
if (selectedTheme && selectedTheme !== 'undefined') {
form.querySelector(`[value="${selectedTheme}"]`).checked = true
}
}
const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
window.addEventListener('load', () => {
let hasDarkRules = false
for (const styleSheet of Array.from(document.styleSheets)) {
let mediaRules = []
for (const cssRule of styleSheet.cssRules) {
if (cssRule.type !== CSSRule.MEDIA_RULE) {
continue
}
// WARNING: Safari does not support `conditionText`.
if (cssRule.conditionText) {
if (cssRule.conditionText !== prefersColorSchemeDark) {
continue
}
} else {
if (!cssRule.cssText.includes(prefersColorSchemeDark)) {
continue
}
}
mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
}
// WARNING: do not try to insert a Rule to a styleSheet you are
// currently iterating on, otherwise the browser will be stuck
// in an infinite loop…
for (const mediaRule of mediaRules) {
styleSheet.insertRule(mediaRule.cssText)
hasDarkRules = true
}
}
if (hasDarkRules) {
loadThemeForm('#theme-selector')
}
})
</script>
</body>
</html>