A place to cache linked articles (think custom and personal wayback machine)
選択できるのは25トピックまでです。 トピックは、先頭が英数字で、英数字とダッシュ('-')を使用した35文字以内のものにしてください。

index.html 20KB

1年前
1年前
1年前
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218
  1. <!doctype html><!-- This is a valid HTML5 document. -->
  2. <!-- Screen readers, SEO, extensions and so on. -->
  3. <html lang="fr">
  4. <!-- Has to be within the first 1024 bytes, hence before the `title` element
  5. See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
  6. <meta charset="utf-8">
  7. <!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
  8. <!-- The viewport meta is quite crowded and we are responsible for that.
  9. See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
  10. <meta name="viewport" content="width=device-width,initial-scale=1">
  11. <!-- Required to make a valid HTML5 document. -->
  12. <title>The mounting human and environmental costs of generative AI (archive) — David Larlet</title>
  13. <meta name="description" content="Publication mise en cache pour en conserver une trace.">
  14. <!-- That good ol' feed, subscribe :). -->
  15. <link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
  16. <!-- Generated from https://realfavicongenerator.net/ such a mess. -->
  17. <link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
  18. <link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
  19. <link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
  20. <link rel="manifest" href="/static/david/icons2/site.webmanifest">
  21. <link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
  22. <link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
  23. <meta name="msapplication-TileColor" content="#f7f7f7">
  24. <meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
  25. <meta name="theme-color" content="#f7f7f7" media="(prefers-color-scheme: light)">
  26. <meta name="theme-color" content="#272727" media="(prefers-color-scheme: dark)">
  27. <!-- Is that even respected? Retrospectively? What a shAItshow…
  28. https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/ -->
  29. <meta name="robots" content="noai, noimageai">
  30. <!-- Documented, feel free to shoot an email. -->
  31. <link rel="stylesheet" href="/static/david/css/style_2021-01-20.css">
  32. <!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
  33. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  34. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  35. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  36. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  37. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  38. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  39. <script>
  40. function toggleTheme(themeName) {
  41. document.documentElement.classList.toggle(
  42. 'forced-dark',
  43. themeName === 'dark'
  44. )
  45. document.documentElement.classList.toggle(
  46. 'forced-light',
  47. themeName === 'light'
  48. )
  49. }
  50. const selectedTheme = localStorage.getItem('theme')
  51. if (selectedTheme !== 'undefined') {
  52. toggleTheme(selectedTheme)
  53. }
  54. </script>
  55. <meta name="robots" content="noindex, nofollow">
  56. <meta content="origin-when-cross-origin" name="referrer">
  57. <!-- Canonical URL for SEO purposes -->
  58. <link rel="canonical" href="https://arstechnica.com/gadgets/2023/04/generative-ai-is-cool-but-lets-not-forget-its-human-and-environmental-costs/">
  59. <body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick" data-instant-intensity="viewport-all">
  60. <article>
  61. <header>
  62. <h1>The mounting human and environmental costs of generative AI</h1>
  63. </header>
  64. <nav>
  65. <p class="center">
  66. <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
  67. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
  68. </svg> Accueil</a> •
  69. <a href="https://arstechnica.com/gadgets/2023/04/generative-ai-is-cool-but-lets-not-forget-its-human-and-environmental-costs/" title="Lien vers le contenu original">Source originale</a>
  70. </p>
  71. </nav>
  72. <hr>
  73. <p>Over the past few months, the field of artificial intelligence has seen rapid growth, with wave after wave of new models like Dall-E and GPT-4 emerging one after another. Every week brings the promise of new and exciting models, products, and tools. It’s easy to get swept up in the waves of hype, but these shiny capabilities come at a real cost to society and the planet.</p>
  74. <p>Downsides include the environmental toll of mining rare minerals, the human costs of the labor-intensive process of data annotation, and the escalating financial investment required to train AI models as they incorporate more parameters.</p>
  75. <p>Let’s look at the innovations that have fueled recent generations of these models—and raised their associated costs.</p>
  76. <h2>Bigger models</h2>
  77. <p>In recent years, AI models have been getting bigger, with researchers now measuring their size in the hundreds of billions of parameters. “Parameters” are the internal connections used within the models to learn patterns based on the training data.</p>
  78. <p>For large language models (LLMs) like ChatGPT, we’ve gone from around 100 million parameters in 2018 to 500 billion in 2023 with Google’s PaLM model. The theory behind this growth is that models with more parameters should have better performance, even on tasks they were not initially trained on, although this hypothesis remains unproven.
  79. Model size growth over the years.
  80. Enlarge / Model size growth over the years.</p>
  81. <p>Bigger models typically take longer to train, which means they also need more GPUs, which cost more money, so only a select few organizations are able to train them. Estimates put the training cost of GPT-3, which has 175 billion parameters, at $4.6 million—out of reach for the majority of companies and organizations. (It's worth noting that the cost of training models is dropping in some cases, such as in the case of LLaMA, the recent model trained by Meta.)</p>
  82. <p>This creates a digital divide in the AI community between those who can train the most cutting-edge LLMs (mostly Big Tech companies and rich institutions in the Global North) and those who can’t (nonprofit organizations, startups, and anyone without access to a supercomputer or millions in cloud credits). Building and deploying these behemoths requires a lot of planetary resources: rare metals for manufacturing GPUs, water to cool huge data centers, energy to keep those data centers running 24/7 on a planetary scale… all of these are often overlooked in favor of focusing on the future potential of the resulting models.</p>
  83. <h2>Planetary impacts</h2>
  84. <p>A study from Carnegie Mellon University professor Emma Strubell about the carbon footprint of training LLMs estimated that training a 2019 model called BERT, which has only 213 million parameters, emitted 280 metric tons of carbon emissions, roughly equivalent to the emissions from five cars over their lifetimes. Since then, models have grown and hardware has become more efficient, so where are we now?</p>
  85. <p>In a recent academic article I wrote to study the carbon emissions incurred by training BLOOM, a 176-billion parameter language model, we compared the power consumption and ensuing carbon emissions of several LLMs, all of which came out in the last few years. The goal of the comparison was to get an idea of the scale of emissions of different sizes of LLMs and what impacts them.
  86. Enlarge
  87. Sasha Luccioni, et al.</p>
  88. <p>Depending on the energy source used for training and its carbon intensity, training a 2022-era LLM emits at least 25 metric tons of carbon equivalents if you use renewable energy, as we did for the BLOOM model. If you use carbon-intensive energy sources like coal and natural gas, which was the case for GPT-3, this number quickly goes up to 500 metric tons of carbon emissions, roughly equivalent to over a million miles driven by an average gasoline-powered car.</p>
  89. <p>And this calculation doesn’t consider the manufacturing of the hardware used for training the models, nor the emissions incurred when LLMs are deployed in the real world. For instance, with ChatGPT, which was queried by tens of millions of users at its peak a month ago, thousands of copies of the model are running in parallel, responding to user queries in real time, all while using megawatt hours of electricity and generating metric tons of carbon emissions. It’s hard to estimate the exact quantity of emissions this results in, given the secrecy and lack of transparency around these big LLMs.</p>
  90. <h2>Closed, proprietary models</h2>
  91. <p>Let’s go back to the LLM size plot above. You may notice that neither ChatGPT nor GPT-4 are on it. Why? Because we have no idea how big they are. Although there are several reports published about them, we know almost nothing about their size and how they work. Access is provided via APIs, which means they are essentially black boxes that can be queried by users.</p>
  92. <p>These boxes may contain either a single model (with a trillion parameters?) or multiple models, or, as I told Bloomberg, “It could be three raccoons in a trench coat.” We really don’t know.</p>
  93. <p>The plot below presents a timeline of recent releases of LLMs and the type of access that each model creator provided. As you can see, the biggest models (Megatron, PaLM, Gopher, etc.) are all closed source. And if you buy into the theory that the bigger the model, the more powerful it is (I don’t), this means the most powerful AI tech is only accessible to a select few organizations, who monopolize access to it.
  94. A timeline of recent releases of LLMs and the type of access each model creator provided.
  95. Enlarge / A timeline of recent releases of LLMs and the type of access each model creator provided.
  96. Irene Solaiman</p>
  97. <p>Why is this problematic? It means it’s difficult to carry out external evaluations and audits of these models since you can’t even be sure that the underlying model is the same every time you query it. It also means that you can’t do scientific research on them, given that studies must be reproducible.</p>
  98. <p>The only people who can keep improving these models are the organizations that trained them in the first place, which is something they keep doing to improve their models and provide new features over time.</p>
  99. <h2>Human costs</h2>
  100. <p>How many humans does it take to train an AI model? You may think the answer is zero, but the amount of human labor needed to make recent generations of LLMs is steadily rising.</p>
  101. <p>When Transformer models came out a few years ago, researchers heralded them as a new era in AI because they could be trained on “raw data.” In this case, raw data means “unlabeled data”—books, encyclopedia articles, and websites that have been scraped and collected in massive quantities.</p>
  102. <p>That was the case for models like BERT and GPT-2, which required relatively little human intervention in terms of data gathering and filtering. While this was convenient for the model creators, it also meant that all sorts of undesirable content, like hate speech and pornography, were sucked up during the model training process, then often parroted back by the models themselves.</p>
  103. <p>This data collection approach changed with the advent of RLHF (reinforcement learning with human feedback), the technique used by newer generations of LLMs like ChatGPT. As its name indicates, RLHF adds additional steps to the LLM training process, and these steps require much more human intervention.</p>
  104. <p>Essentially, once a model has been trained on large quantities of unlabeled data (from the web, books, etc.), humans are then asked to interact with the model, coming up with prompts (e.g., “Write me a recipe for chocolate cake”) and provide their own answers or evaluate answers provided by the model. This data is used to continue training the model, which is then again tested by humans, ad nauseam, until the model is deemed good enough to be released into the world.</p>
  105. <p>This kind of RLHF training is what made ChatGPT feasible for wide release since it could decline to answer many classes of potentially harmful questions.
  106. An illustration of RLHF training.
  107. Enlarge / An illustration of RLHF training.</p>
  108. <p>But that success has a dirty secret behind it: To keep the costs of AI low, the people providing this “human feedback” are underpaid, overexploited workers. In January, Time wrote a report about Kenyan laborers paid less than $2 an hour to examine thousands of messages for OpenAI. This kind of work can have long-lasting psychological impacts, as we've seen in content-moderation workers.</p>
  109. <p>To make it worse, the efforts of these nameless workers aren’t recognized in the reports accompanying AI models. Their labor remains invisible.</p>
  110. <h2>What should we do about it?</h2>
  111. <p>For the creators of these models, instead of focusing on scale and size and optimizing solely for performance, it’s possible to train smaller, more efficient models and make models accessible so that they can be reused and fine-tuned (read: adapted) by members of the AI community, who won’t need to train models from scratch. Dedicating more efforts toward improving the safety and security of these models—developing features like watermarks for machine-generated content, more reliable safety filters, and the ability to cite sources when generating answers to questions—can also contribute toward making LLMs more accessible and robust.</p>
  112. <p>As users of these models (sometimes despite ourselves), it's within our power to demand transparency and push back against the deployment of AI models in high-risk scenarios, such as services that provide mental help therapy or generate forensic sketches. These models are still too new, poorly documented, and unpredictable to be deployed in circumstances that can have such major repercussions.</p>
  113. <p>And the next time someone tells you that the latest AI model will benefit humanity at large or that it displays evidence of artificial general intelligence, I hope you'll think about its hidden costs to people and the planet, some of which I’ve addressed in the sections above. And these are only a fraction of the broader societal impacts and costs of these systems (some of which you can see on the image below, crowdsourced via Twitter)—things like job impacts, the spread of disinformation and propaganda, and copyright infringement concerns.
  114. There are many hidden costs of generative AI.
  115. Enlarge / There are many hidden costs of generative AI.</p>
  116. <p>The current trend is toward creating bigger and more closed and opaque models. But there’s still time to push back, demand transparency, and get a better understanding of the costs and impacts of LLMs while limiting how they are deployed in society at large. Legislation like the Algorithmic Accountability Act in the US and legal frameworks on AI governance in the European Union and Canada are defining our AI future and putting safeguards in place to ensure safety and accountability in future generations of AI systems deployed in society. As members of that society and users of these systems, we should have our voices heard by their creators.</p>
  117. </article>
  118. <hr>
  119. <footer>
  120. <p>
  121. <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
  122. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
  123. </svg> Accueil</a> •
  124. <a href="/david/log/" title="Accès au flux RSS"><svg class="icon icon-rss2">
  125. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-rss2"></use>
  126. </svg> Suivre</a> •
  127. <a href="http://larlet.com" title="Go to my English profile" data-instant><svg class="icon icon-user-tie">
  128. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-user-tie"></use>
  129. </svg> Pro</a> •
  130. <a href="mailto:david%40larlet.fr" title="Envoyer un courriel"><svg class="icon icon-mail">
  131. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-mail"></use>
  132. </svg> Email</a> •
  133. <abbr class="nowrap" title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340"><svg class="icon icon-hammer2">
  134. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-hammer2"></use>
  135. </svg> Légal</abbr>
  136. </p>
  137. <template id="theme-selector">
  138. <form>
  139. <fieldset>
  140. <legend><svg class="icon icon-brightness-contrast">
  141. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-brightness-contrast"></use>
  142. </svg> Thème</legend>
  143. <label>
  144. <input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
  145. </label>
  146. <label>
  147. <input type="radio" value="dark" name="chosen-color-scheme"> Foncé
  148. </label>
  149. <label>
  150. <input type="radio" value="light" name="chosen-color-scheme"> Clair
  151. </label>
  152. </fieldset>
  153. </form>
  154. </template>
  155. </footer>
  156. <script src="/static/david/js/instantpage-5.1.0.min.js" type="module"></script>
  157. <script>
// Replace the inert <template> with a live theme-selection form and wire
// it to localStorage + toggleTheme (declared in the first inline script).
function loadThemeForm(templateName) {
  const themeSelectorTemplate = document.querySelector(templateName)
  const form = themeSelectorTemplate.content.firstElementChild
  themeSelectorTemplate.replaceWith(form)
  // Persist and apply every change made through the radio buttons.
  form.addEventListener('change', (e) => {
    const chosenColorScheme = e.target.value
    localStorage.setItem('theme', chosenColorScheme)
    toggleTheme(chosenColorScheme)
  })
  // Pre-check the radio matching the stored theme; the literal string
  // 'undefined' is rejected (presumably a historical bad value — confirm).
  const selectedTheme = localStorage.getItem('theme')
  if (selectedTheme && selectedTheme !== 'undefined') {
    form.querySelector(`[value="${selectedTheme}"]`).checked = true
  }
}
const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
// After all stylesheets load, copy the rules found inside
// `@media (prefers-color-scheme: dark)` blocks to the top level of their
// stylesheet, so the forced-dark/forced-light classes can override the OS
// preference. The theme form is only shown if dark rules were found.
window.addEventListener('load', () => {
  let hasDarkRules = false
  for (const styleSheet of Array.from(document.styleSheets)) {
    let mediaRules = []
    for (const cssRule of styleSheet.cssRules) {
      if (cssRule.type !== CSSRule.MEDIA_RULE) {
        continue
      }
      // WARNING: Safari does not support `conditionText`.
      if (cssRule.conditionText) {
        if (cssRule.conditionText !== prefersColorSchemeDark) {
          continue
        }
      } else {
        // NOTE(review): `cssText` starts with "@media …", so this
        // startsWith check looks like it can never match — on the
        // fallback path every media rule would fall through and be
        // copied, not only the dark ones. Confirm intended behavior.
        if (cssRule.cssText.startsWith(prefersColorSchemeDark)) {
          continue
        }
      }
      mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
    }
    // WARNING: do not try to insert a Rule to a styleSheet you are
    // currently iterating on, otherwise the browser will be stuck
    // in an infinite loop…
    for (const mediaRule of mediaRules) {
      styleSheet.insertRule(mediaRule.cssText)
      hasDarkRules = true
    }
  }
  if (hasDarkRules) {
    loadThemeForm('#theme-selector')
  }
})
  205. </script>
  206. </body>
  207. </html>