A place to cache linked articles (think custom and personal wayback machine)
Você não pode selecionar mais de 25 tópicos Os tópicos devem começar com uma letra ou um número, podem incluir traços ('-') e podem ter até 35 caracteres.

index.html 31KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533
  1. <!doctype html><!-- This is a valid HTML5 document. -->
  2. <!-- Screen readers, SEO, extensions and so on. -->
  3. <html lang="fr">
  4. <!-- Has to be within the first 1024 bytes, hence before the `title` element
  5. See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
  6. <meta charset="utf-8">
  7. <!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
  8. <!-- The viewport meta is quite crowded and we are responsible for that.
  9. See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
  10. <meta name="viewport" content="width=device-width,initial-scale=1">
  11. <!-- Required to make a valid HTML5 document. -->
  12. <title>Artificial General Intelligence and the bird brains of Silicon Valley (archive) — David Larlet</title>
  13. <meta name="description" content="Publication mise en cache pour en conserver une trace.">
  14. <!-- That good ol' feed, subscribe :). -->
  15. <link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
  16. <!-- Generated from https://realfavicongenerator.net/ such a mess. -->
  17. <link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
  18. <link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
  19. <link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
  20. <link rel="manifest" href="/static/david/icons2/site.webmanifest">
  21. <link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
  22. <link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
  23. <meta name="msapplication-TileColor" content="#f7f7f7">
  24. <meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
  25. <meta name="theme-color" content="#f7f7f7" media="(prefers-color-scheme: light)">
  26. <meta name="theme-color" content="#272727" media="(prefers-color-scheme: dark)">
  27. <!-- Is that even respected? Retrospectively? What a shAItshow…
  28. https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/ -->
  29. <meta name="robots" content="noai, noimageai">
  30. <!-- Documented, feel free to shoot an email. -->
  31. <link rel="stylesheet" href="/static/david/css/style_2021-01-20.css">
  32. <!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
  33. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  34. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  35. <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
  36. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  37. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  38. <link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
  39. <script>
  40. function toggleTheme(themeName) {
  41. document.documentElement.classList.toggle(
  42. 'forced-dark',
  43. themeName === 'dark'
  44. )
  45. document.documentElement.classList.toggle(
  46. 'forced-light',
  47. themeName === 'light'
  48. )
  49. }
  50. const selectedTheme = localStorage.getItem('theme')
  51. if (selectedTheme !== 'undefined') {
  52. toggleTheme(selectedTheme)
  53. }
  54. </script>
  55. <meta name="robots" content="noindex, nofollow">
  56. <meta content="origin-when-cross-origin" name="referrer">
  57. <!-- Canonical URL for SEO purposes -->
  58. <link rel="canonical" href="https://softwarecrisis.dev/letters/ai-bird-brains-silicon-valley/">
  59. <body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick" data-instant-intensity="viewport-all">
  60. <article>
  61. <header>
  62. <h1>Artificial General Intelligence and the bird brains of Silicon Valley</h1>
  63. </header>
  64. <nav>
  65. <p class="center">
  66. <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
  67. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
  68. </svg> Accueil</a> •
  69. <a href="https://softwarecrisis.dev/letters/ai-bird-brains-silicon-valley/" title="Lien vers le contenu original">Source originale</a>
  70. </p>
  71. </nav>
  72. <hr>
<figure>
  73. <blockquote>
  74. <p>
  75. The problem is, if one side of the communication does not have meaning,
  76. then the comprehension of the implicit meaning is an illusion arising
  77. from our singular human understanding of language (independent of the
  78. model). Contrary to how it may seem when we observe its output, an LM is
  79. a system for haphazardly stitching together sequences of linguistic
  80. forms it has observed in its vast training data, according to
  81. probabilistic information about how they combine, but without any
  82. reference to meaning: a stochastic parrot.
  83. </p>
  84. </blockquote>
  85. <figcaption>
  86. <p>Emily M. Bender, Timnit Gebru, et al., <em>On the Dangers of Stochastic
  87. Parrots: Can Language Models Be Too Big?</em>.</p>
  88. </figcaption>
  89. </figure>
  90. <p>Bird brains have a bad reputation. The diminutive size of your average
  91. bird and their brain has led people to assume that they are, well,
  92. dumb.</p>
  93. <p>But, bird brains are amazing. Birds commonly outperform mammals with
  94. larger brains at a variety of general reasoning and problem-solving
  95. tasks. Some by a large margin. Their small brains manage this by
  96. packing numerous neurons in a small space using structures that are
  97. unlike those you find in mammals.</p>
  98. <p>Even though birds have extremely capable minds, those minds are built in
  99. ways that are different from our own or other mammals. Similar
  100. capabilities; different structure.</p>
  101. <p>The ambition of the Silicon Valley AI industry is to create something
  102. analogous to a bird brain: a new kind of mind that is functionally
  103. similar to the human mind, possibly outperforming it, while being built
  104. using very different mechanisms. Similar capabilities; different
  105. structure.</p>
  106. <p>This effort goes back decades, to the dawn of computing, and has had
  107. limited success.</p>
  108. <p>Until recently, it seems.</p>
  109. <p>If you’re reading this, you’ve almost certainly interacted with a
  110. Generative AI, however indirectly. Maybe you’ve tried Bing Chat. Maybe
  111. you’ve subscribed to the paid tier for ChatGPT. Or, maybe you’ve used
  112. Midjourney to generate images. At the very least you’ve been forced to
  113. see the images or text posted by the overenthusiastic on social media.</p>
  114. <p>These AI models are created by pushing an enormous amount of training
  115. data through various algorithms:</p>
  116. <ul>
  117. <li>Language models like ChatGPT are trained on a good chunk of the textual material available in digital form in the world.</li>
  118. <li>Image models like Midjourney and Stable Diffusion are trained on a huge collection of images found on the internet.</li>
  119. </ul>
  120. <p>What comes out the other end is a mathematical model of the media domain
  121. in question: text or images.</p>
  122. <p>You know what Generative AI is in terms of how it presents to you as
  123. software: clever chatbots that do or say things in response to what you
  124. say: <em>your prompt</em>. Some of those responses are useful, and they give
  125. you an impression of sophisticated comprehension. The models that
  126. generate text are fluent and often quite engaging.</p>
  127. <p>This fluency is misleading. What Bender and Gebru meant when they coined
  128. the term <em>stochastic parrot</em> wasn’t to imply that these are, indeed, the
  129. new bird brains of Silicon Valley, but that they are unthinking text
  130. synthesis engines that just repeat phrases. They are the proverbial
  131. parrot who echoes without thinking, not the actual parrot who is capable
  132. of complex reasoning and problem-solving.</p>
  133. <p>A <em>zombie parrot</em>, if you will, that screams for <em>brains</em> because it has
  134. none.</p>
  135. <p>The fluency of the zombie parrot—the unerring confidence and a style of
  136. writing that some find endearing—creates a strong illusion of
  137. intelligence.</p>
  138. <p>Every other time we read text, we are engaging with the product of
  139. another mind. We are so used to the idea of text as a representation of
  140. another person’s thoughts that we have come to mistake their writing
  141. <em>for</em> their thoughts. But they aren’t. Text and media are tools that
  142. authors and artists create to let people change their own state of
  143. mind—hopefully in specific ways to form the image or effect the author
  144. was after.</p>
  145. <p>Reading is an indirect collaboration with the author, mediated through
  146. the writing. Text has no inherent reasoning or intelligence. Agatha
  147. Christie’s ghost does not inhabit the words of <em>Murder on the Orient Express</em>.
  148. Stephen King isn’t hovering over you when you read <em>Carrie</em>. The ghost
  149. you feel while reading is an illusion you’ve made out of your own
  150. experience, knowledge, and imagination. Every word you read causes your
  151. mind to reconstruct its meaning using your memories and creativity. The
  152. idea that there is intelligence somehow inherent in writing is an
  153. illusion. The intelligence is <em>all</em> yours, all the time: thoughts you
  154. make yourself in order to make sense of another person’s words. This can
  155. prompt us to greatness, broaden our minds, inspire new thoughts, and
  156. introduce us to new concepts. A book can contain worlds, but we’re the
  157. ones that bring them into being as we read. What we see is uniquely our
  158. own. The thoughts are not transported from the author’s mind and
  159. injected into ours.</p>
  160. <p>The words themselves are just line forms on a background with no
  161. inherent meaning or intelligence. The word “horse” doesn’t come with the
  162. Platonic ideal of a horse attached to it. The word “anger” isn’t full of
  163. seething emotion or the restrained urge towards violence. Even words
  164. that are arguably onomatopoeic, like the word “brabra” we use in
  165. Icelandic for the sound a duck makes, are still incredibly specific to
  166. the cultures and context they come from. We are the ones doing the heavy
  167. lifting in terms of reconstructing a picture of an intelligence behind
  168. the text. When there is no actual intelligence, such as with ChatGPT, we
  169. are the ones who end up filling in the gaps with our memories,
  170. experience and imagination.</p>
  171. <p>When ChatGPT demonstrates intelligence, that comes from us. Some of
  172. it we construct ourselves. Some of it comes from our inherent
  173. biases.</p>
  174. <p>There is no ‘there’ there. We are alone in the room, reconstructing an
  175. abstract representation of a mind. The reasoning you see is only in your
  176. head. You are hallucinating intelligence where there is none. You are
  177. doing the textual equivalent of seeing a face in a power outlet.</p>
  178. <p>This drive—<em>anthropomorphism</em>—seems to be innate. Our first instinct
  179. when faced with anything unfamiliar—whose drives, motivations, and
  180. mechanisms we don’t understand—is to assume that they think much like a
  181. human would. When that unfamiliar agent uses language like a human
  182. would, the urge to see them as near or fully human is impossible to
  183. resist—a recurring issue in the history of AI research that dates all
  184. the way back to 1966.</p>
  185. <p>These tools solve problems and return fluent, if untruthful, answers,
  186. which is what creates such a convincing illusion of intelligence.</p>
  187. <p>Text synthesis engines like ChatGPT and GPT-4 do not have any
  188. self-awareness. They are mathematical models of the various patterns to
  189. be found in the collected body of human text. How granular the model is
  190. depends on its design and the languages in question. Some of the
  191. tokens—the smallest unit of language the model works with—will be
  192. characters or punctuation marks, some of them will be words, syllables,
  193. or even phrases. Many language models are a mixture of both.</p>
  194. <p>With enough detail—a big enough collection of text—these tools will
  195. model enough of the probabilistic distribution of various words or
  196. characters to be able to perform what looks like magic:</p>
  197. <ul>
  198. <li>They generate fluent answers by calculating the most probable sequence
  199. of words, at that time, which would serve as the continuation of or
  200. response to the prompt.</li>
  201. <li>They can perform limited reasoning tasks that correlate with textual
  202. descriptions of prior reasoning tasks in the training data.</li>
  203. </ul>
  204. <p>With enough of these correlative shortcuts, the model can perform
  205. something that looks like common sense reasoning: its output is text
  206. that replicates prior representations of reasoning. This works for
  207. as long as you don’t accidentally use the wrong phrasing in your prompt
  208. and break the correlation.</p>
  209. <p>The mechanism behind these systems is entirely correlative from the
  210. ground up. What looks like reasoning is incredibly fragile and
  211. breaks as soon as you rephrase or reword your prompt. It exists
  212. only as a probabilistic model of text. A Generative AI chatbot is a
  213. language engine incapable of genuine thought.</p>
  214. <p>These language models are interactive but static snapshots of the
  215. probability distributions of a written language.</p>
  216. <p>It’s obviously interactive, that’s the whole point of a chatbot. It’s
  217. static in that it does not change when it’s used or activated. In fact,
  218. changing it requires an enormous amount of computing power over a long
  219. period of time. What the system models are the distributions and
  220. correlations of the tokens it records for the texts in its training data
  221. set—how the various words, syllables, and punctuation relate to each
  222. other over as much of the written history of a language as the company
  223. can find.</p>
  224. <p>That’s what distinguishes biological minds from these algorithmic
  225. hindsight factories: a biological mind does not reason using the
  226. probability distributions of all the prior cultural records of its
  227. ancestors. Biological minds learn primarily through trial and error.
  228. Try, fail, try again. They build their neural network, which is
  229. functionally very different from what you see in a software model,
  230. through constant feedback, experimentation, and repeated failure—driven
  231. by a chemical network that often manifests as instinct, emotion,
  232. motivation, and drive. The neural network—bounded, defined, and driven
  233. by the chemical network—is constantly changing and responding to outside
  234. stimuli. Every time an animal’s nervous system is “used”, it changes. It
  235. is always changing, until it dies.</p>
  236. <p>Biological minds <em>experience</em>. Synthesis engines parse imperfect
  237. <em>records</em> of experiences. The former are forward-looking and operate
  238. primarily in the present, sometimes to their own detriment. The latter
  239. exist exclusively as probabilistic manifestations of imperfect
  240. representations of thoughts past. They are snapshots. Generative AI are
  241. themselves cultural records.</p>
  242. <p>These models aren’t new bird brains—new alien minds that are peers to
  243. our own. They aren’t even insect brains. Insects have autonomy. They are
  244. capable of general problem-solving—some of them dealing with tasks of
  245. surprising complexity—and their abilities tolerate the kind of
  246. minor alterations in the problem environment that would break the
  247. correlative pseudo-reasoning of a language model. Large Language
  248. Models are something lesser. They are water running down pathways etched
  249. into the ground over centuries by the rivers of human culture. Their
  250. originality comes entirely from random combinations of historical
  251. thought. They do not know the ‘meaning’ of anything—they only know the
  252. records humans find meaningful enough to store. Their unreliability
  253. comes from their unpredictable behaviour in novel circumstances. When
  254. there is no riverbed to follow, they drown the surrounding landscape.</p>
  255. <p>The entirety of their documented features, capabilities, and recorded
  256. behaviour—emergent or not—is explained by this conceptual model of
  257. generative AI. There are no unexplained corner cases that don’t fit or
  258. actively disprove this theory.</p>
  259. <p>Yet people keep assuming that what ChatGPT does can only be explained as
  260. the first glimmer of genuine Artificial General Intelligence. The bird
  261. brain of Silicon Valley is born at last!</p>
  262. <p>Because text and language are the primary ways we experience other
  263. people’s reasoning, it’ll be next to impossible to dislodge the notion
  264. that these are genuine intelligences. No amount of examples, scientific
  265. research, or analysis will convince those who want to maintain a
  266. pseudo-religious belief in alien peer intelligences. After all, if you
  267. want to believe in aliens, an artificial one made out of supercomputers
  268. and wishful thinking <em>feels</em> much more plausible than little grey men
  269. from outer space. But that’s what it is: <em>a belief in aliens.</em></p>
  270. <p>It doesn’t help that so many working in AI seem to <em>want</em> this to be
  271. true. They seem to be true believers who are convinced that the spark of
  272. Artificial General Intelligence has been struck.</p>
  273. <p>They are inspired by the science fictional notion that if you make
  274. something complex enough, it will spontaneously become intelligent. This
  275. isn’t an uncommon belief. You see it in movies and novels—the notion
  276. that any network of sufficient complexity will spontaneously become
  277. sentient has embedded itself in our popular psyche. James Cameron’s
  278. skull-crushing metal skeletons have a lot to answer for.</p>
  279. <p>That notion doesn’t seem to have any basis in science. The idea that
  280. general intelligence is an emergent property of neural networks that
  281. appears once the network reaches sufficient complexity, is a view based
  282. on archaic notions of animal intelligence—that animals are soulless
  283. automata incapable of feeling or reasoning. That view was
  284. formed during a period where we didn’t realise just how common
  285. self-awareness (i.e. the mirror test) and general reasoning is in the
  286. animal kingdom. Animals are smarter than we assumed and the
  287. difference between our reasoning and theirs seems to be a matter of
  288. degree, not of presence or absence.</p>
  289. <p>General reasoning seems to be an <em>inherent</em>, not emergent, property of
  290. pretty much any biological lifeform with a notable nervous system.</p>
  291. <p>The bumblebee, despite having only a tiny fraction of the neurons of a
  292. human brain, is capable of not only solving puzzles but also of
  293. <em>teaching other bees to solve those puzzles.</em> They reason and have a
  294. culture. They have more genuine and robust general reasoning
  295. skills—that don’t collapse into incoherence at minor adjustments to the
  296. problem space—than GPT-4 or any large language model on the market.
  297. That’s with only around half a million neurons to work with.</p>
  298. <p>Conversely, GPT-3 is made up of 175 <em>billion</em> parameters—what passes for
  299. a “neuron” in a digital neural network. GPT-4 is even larger, with
  300. some estimates coming in at a <em>trillion</em> parameters. Then you have
  301. fine-tuned systems such as ChatGPT, that are built from multiple
  302. interacting models layered on top of GPT-3.5 or GPT-4, which make for an
  303. even more complex interactive system.</p>
  304. <p>ChatGPT, running on GPT-4 is, easily a <em>million</em> times more complex than
  305. the “neural network” of a bumblebee and yet, out of the two, it’s the
  306. striped invertebrate that demonstrates robust and adaptive
  307. general-purpose reasoning skills. Very simple minds, those belonging to
  308. small organisms that barely have a brain, are capable of reasoning about
  309. themselves, the world around them, and the behaviour of other
  310. animals.</p>
  311. <p>Unlike the evidence for ‘sparks’ of AGI in language models, the evidence
  312. for animal reasoning—even consciousness—is broad, compelling, and
  313. encompasses decades of work by numerous scientists.</p>
  314. <p>AI models are flawed attempts at digitally synthesising neurologies.
  315. They are built on the assumption that all the rest—metabolisms,
  316. hormones, chemicals, and senses—aren’t necessary for developing
  317. intelligence.</p>
  318. <p>Reasoning in biological minds does not seem to be a property that
  319. emerges from complexity. The capacity to reason looks more likely to be
  320. a <em>built-in</em> property of most animal minds. A reasoning mind
  321. appears to be a direct consequence of how animals are structured as a
  322. whole—chemicals, hormones, and physical body included. The animal
  323. capacity for problem-solving, social reasoning, and self-awareness seem
  324. to increase, unevenly, and fitfully with the number of neurons until it
  325. reaches the level we see in humans. Reasoning does not ‘emerge’ or
  326. appear. Some creatures are better at it than others, but it’s there in
  327. some form even in very small, very simple beings like the bumblebee. It
  328. doesn’t happen magically when you hook up a bunch of disparate objects
  329. together in a complex enough network. A reasoning mind is the <em>starting
  330. point</em> of biological thinking, not the endpoint that only “emerges” with
  331. sufficient complexity.</p>
  332. <p>The internet—a random interconnected collection of marketing offal,
  333. holiday snaps, insufferable meetings, and porn—isn’t going to become
  334. self-aware and suddenly acquire the capacity for general reasoning once
  335. it reaches a certain size, and neither will Large-Language-Models. The
  336. notion that we are making autonomous beings capable of Artificial
  337. General Intelligence just by loading a neural network up with an
  338. increasingly bigger collection of garbage from the internet is not one
  339. that has any basis in anything we understand about biology or animal
  340. reasoning.</p>
  341. <p>But, AI companies insist that they are on the verge of AGI. Their
  342. rhetoric around it verges on the religious as the idea of an AGI is
  343. idealised and almost worshipped. They claim to be close to making a
  344. new form of thinking life, but they refuse to release the data required
  345. to prove it. They’ve built software that performs well on the
  346. arbitrary benchmarks they’ve chosen and claim are evidence of general
  347. intelligence, but those tests prove no such thing and have no such
  348. validity. The benchmarks are theatrics that have no applicability
  349. towards demonstrating genuine general intelligence.</p>
  350. <p>AI researchers love to resurrect outdated pseudoscience such as
  351. phrenology—shipping AI software that promises to be able to tell you if
  352. somebody is likely to be a criminal based on the shape of their
  353. skull. It’s a field where researchers and vendors routinely claim
  354. that their AIs can detect whether you’re a potential criminal, gay, a
  355. good employee, liberal or conservative, or even a psychopath, based on
  356. “your face, body, gait, and tone of voice.”</p>
  357. <p><em>It’s pseudoscience</em>.</p>
  358. <p>This is the field and the industry that claims to have accomplished the
  359. first ‘spark’ of Artificial General Intelligence?</p>
  360. <p>Last time we saw a claim this grand, with this little scientific
  361. evidence, the men in the white coats were promising us room-temperature
  362. fusion, giving us free energy for life, and ending the world’s
  363. dependence on fossil fuels.</p>
  364. <p>Why give the tech industry the benefit of the doubt when they are all
  365. but claiming godhood—that they’ve created a new form of life never seen
  366. before?</p>
  367. <p>As <a href="https://en.wikipedia.org/wiki/Sagan_standard">Carl Sagan said</a>:
  368. <em>“extraordinary claims require extraordinary evidence.”</em></p>
  369. <p>He didn’t say “extraordinary claims require only vague insinuations and
  370. pinky-swear promises.”</p>
  371. <p>To claim you’ve created a completely new kind of mind that’s on par with
  372. any animal mind—or, even superior—and provides general intelligence
  373. using mechanisms that don’t resemble anything anybody has ever seen in
  374. nature, is by definition the most extraordinary of claims.</p>
  375. <p>The AI industry is backing their claims of Artificial General
  376. Intelligence with hot air, hand-waving, and cryptic references to data
  377. and software nobody outside their organisations is allowed to review or
  378. analyse.</p>
  379. <p>They are pouring an ever-increasing amount of energy and work into
  380. ever-larger models all in the hope of triggering the
  381. ‘<a href="https://en.wikipedia.org/wiki/Technological_singularity">singularity</a>’
  382. and creating a digital superbeing. Like a cult of monks boiling the
  383. oceans in order to hear whispers of the name of God.</p>
  384. <p>It’s a farce. All theatre; no evidence. Whether they realise it or not,
  385. they are taking us for a ride. The sooner we see that they aren’t
  386. backing their claims with science, the sooner we can focus on finding
  387. safe and productive uses—limiting its harm, at least—for the technology
  388. as it exists today.</p>
  389. <p>After everything the tech industry has done over the past decade, the
  390. financial bubbles, the gig economy, legless virtual reality avatars,
  391. crypto, the endless software failures—just think about it—do you think
  392. we should believe them when they make grand, unsubstantiated claims
  393. about miraculous discoveries? Have they earned our trust? Have they
  394. shown that their word is worth more than that of independent scientists?</p>
  395. <p>Do you think that they, with this little evidence, have really done what
  396. they claim, and discovered a literal new form of life? But are
  397. conveniently unable to prove it because of ‘safety’?</p>
  398. <p>Me neither.</p>
  399. <p>The notion that large language models are on the path towards Artificial
  400. General Intelligence is a dangerous one. It’s a myth that directly
  401. undermines any effort to think clearly or strategise about generative AI
  402. because it strongly reinforces <em>anthropomorphism</em>.</p>
  403. <p>That’s when you reason about an object or animal <em>as if it were a
  404. person</em>. It prevents you from forming an accurate mental model of the non-human thing’s behaviour. AI is especially prone to creating this reaction. Software such as chatbots trigger all three major factors that promote
  405. anthropomorphism in people:</p>
  406. <ol>
  407. <li><em>Understanding.</em> If we lack an understanding of how an object works,
  408. our minds will resort to thinking of it in terms of something that’s
  409. familiar to us: people. We understand the world as people because
  410. that’s what we are. This becomes stronger the more similar we
  411. perceive the object to be to ourselves.</li>
  412. <li><em>Motivation.</em> We are motivated to both seek out human interaction
  413. and to interact effectively with our environment. This reinforces
  414. the first factor. The more uncertain we are of how that thing works,
  415. the stronger the anthropomorphism. The less control we have over it,
  416. the stronger the anthropomorphism.</li>
  417. <li><em>Sociality</em>. We have a need for human contact and our tendency
  418. towards anthropomorphising objects in our environment increase with
  419. our isolation.</li>
  420. </ol>
  421. <p>Because we lack cohesive cognitive models for what makes these language
  422. models so fluent, feel a strong motivation to understand and use them as
  423. they are integrated into our work, and, increasingly, our socialisation
  424. in the office takes on the very same text conversation form as a chatbot
  425. does, we inevitably feel a strong drive to see these software systems as
  426. people. The myth of AGI reinforces this—supercharges the anthropomorphism—because it implies that “people”
  427. is indeed an appropriate cognitive model for how these systems behave.</p>
  428. <p>It isn’t. <strong><em>AI are not people.</em></strong> Treating them as such is a major
  429. strategic error as it will prevent you from thinking clearly about their
  430. capabilities and limitations.</p>
  431. <p>Believing the myth of Artificial General Intelligence makes you incapable of understanding what language models today are and how they work.</p>
  432. </article>
  433. <hr>
  434. <footer>
  435. <p>
  436. <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
  437. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
  438. </svg> Accueil</a> •
  439. <a href="/david/log/" title="Accès au flux RSS"><svg class="icon icon-rss2">
  440. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-rss2"></use>
  441. </svg> Suivre</a> •
  442. <a href="http://larlet.com" title="Go to my English profile" data-instant><svg class="icon icon-user-tie">
  443. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-user-tie"></use>
  444. </svg> Pro</a> •
  445. <a href="mailto:david%40larlet.fr" title="Envoyer un courriel"><svg class="icon icon-mail">
  446. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-mail"></use>
  447. </svg> Email</a> •
  448. <abbr class="nowrap" title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340"><svg class="icon icon-hammer2">
  449. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-hammer2"></use>
  450. </svg> Légal</abbr>
  451. </p>
  452. <template id="theme-selector">
  453. <form>
  454. <fieldset>
  455. <legend><svg class="icon icon-brightness-contrast">
  456. <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-brightness-contrast"></use>
  457. </svg> Thème</legend>
  458. <label>
  459. <input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
  460. </label>
  461. <label>
  462. <input type="radio" value="dark" name="chosen-color-scheme"> Foncé
  463. </label>
  464. <label>
  465. <input type="radio" value="light" name="chosen-color-scheme"> Clair
  466. </label>
  467. </fieldset>
  468. </form>
  469. </template>
  470. </footer>
  471. <script src="/static/david/js/instantpage-5.1.0.min.js" type="module"></script>
  472. <script>
  473. function loadThemeForm(templateName) {
  474. const themeSelectorTemplate = document.querySelector(templateName)
  475. const form = themeSelectorTemplate.content.firstElementChild
  476. themeSelectorTemplate.replaceWith(form)
  477. form.addEventListener('change', (e) => {
  478. const chosenColorScheme = e.target.value
  479. localStorage.setItem('theme', chosenColorScheme)
  480. toggleTheme(chosenColorScheme)
  481. })
  482. const selectedTheme = localStorage.getItem('theme')
  483. if (selectedTheme && selectedTheme !== 'undefined') {
  484. form.querySelector(`[value="${selectedTheme}"]`).checked = true
  485. }
  486. }
  487. const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
// On load, copy every rule found inside `@media (prefers-color-scheme: dark)`
// blocks to the front of its stylesheet as an unconditional rule, so the
// `.forced-dark` / `.forced-light` overrides defined there can take effect
// regardless of the OS color scheme. The theme-selector form is only shown
// when at least one dark rule was found.
  488. window.addEventListener('load', () => {
  489. let hasDarkRules = false
  490. for (const styleSheet of Array.from(document.styleSheets)) {
  491. let mediaRules = []
  492. for (const cssRule of styleSheet.cssRules) {
// Only @media rules are of interest; skip everything else.
  493. if (cssRule.type !== CSSRule.MEDIA_RULE) {
  494. continue
  495. }
  496. // WARNING: Safari does not support `conditionText`.
  497. if (cssRule.conditionText) {
// Keep only the dark-scheme media blocks.
  498. if (cssRule.conditionText !== prefersColorSchemeDark) {
  499. continue
  500. }
  501. } else {
// NOTE(review): this Safari fallback looks suspicious — `cssText` begins with
// "@media …", so `startsWith('(prefers-color-scheme: dark)')` can never match
// and nothing is filtered out here; the condition also appears inverted
// relative to the branch above. Confirm intended behaviour before changing.
  502. if (cssRule.cssText.startsWith(prefersColorSchemeDark)) {
  503. continue
  504. }
  505. }
  506. mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
  507. }
  508. // WARNING: do not try to insert a Rule to a styleSheet you are
  509. // currently iterating on, otherwise the browser will be stuck
  510. // in an infinite loop…
  511. for (const mediaRule of mediaRules) {
  512. styleSheet.insertRule(mediaRule.cssText)
  513. hasDarkRules = true
  514. }
  515. }
// Without any dark rules a manual theme switch would be a no-op, so the
// selector form stays hidden.
  516. if (hasDarkRules) {
  517. loadThemeForm('#theme-selector')
  518. }
  519. })
  520. </script>
  521. </body>
  522. </html>