- <!doctype html><!-- This is a valid HTML5 document. -->
- <!-- Screen readers, SEO, extensions and so on. -->
- <html lang="fr">
- <!-- Has to be within the first 1024 bytes, hence before the `title` element
- See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
- <meta charset="utf-8">
- <!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
- <!-- The viewport meta is quite crowded and we are responsible for that.
- See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
- <meta name="viewport" content="width=device-width,initial-scale=1">
- <!-- Required to make a valid HTML5 document. -->
- <title>Artificial General Intelligence and the bird brains of Silicon Valley (archive) — David Larlet</title>
- <meta name="description" content="Publication mise en cache pour en conserver une trace.">
- <!-- That good ol' feed, subscribe :). -->
- <link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
- <!-- Generated from https://realfavicongenerator.net/ such a mess. -->
- <link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
- <link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
- <link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
- <link rel="manifest" href="/static/david/icons2/site.webmanifest">
- <link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
- <link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
- <meta name="msapplication-TileColor" content="#f7f7f7">
- <meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
- <meta name="theme-color" content="#f7f7f7" media="(prefers-color-scheme: light)">
- <meta name="theme-color" content="#272727" media="(prefers-color-scheme: dark)">
- <!-- Is that even respected? Retrospectively? What a shAItshow…
- https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/ -->
- <meta name="robots" content="noai, noimageai">
- <!-- Documented, feel free to shoot an email. -->
- <link rel="stylesheet" href="/static/david/css/style_2021-01-20.css">
- <!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <script>
- function toggleTheme(themeName) {
- document.documentElement.classList.toggle(
- 'forced-dark',
- themeName === 'dark'
- )
- document.documentElement.classList.toggle(
- 'forced-light',
- themeName === 'light'
- )
- }
- const selectedTheme = localStorage.getItem('theme')
- if (selectedTheme && selectedTheme !== 'undefined') {
- toggleTheme(selectedTheme)
- }
- </script>
-
- <meta name="robots" content="noindex, nofollow">
- <meta content="origin-when-cross-origin" name="referrer">
- <!-- Canonical URL for SEO purposes -->
- <link rel="canonical" href="https://softwarecrisis.dev/letters/ai-bird-brains-silicon-valley/">
-
- <body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick" data-instant-intensity="viewport-all">
-
-
- <article>
- <header>
- <h1>Artificial General Intelligence and the bird brains of Silicon Valley</h1>
- </header>
- <nav>
- <p class="center">
- <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
- </svg> Accueil</a> •
- <a href="https://softwarecrisis.dev/letters/ai-bird-brains-silicon-valley/" title="Lien vers le contenu original">Source originale</a>
- </p>
- </nav>
- <hr>
- <figure>
- <blockquote>
- <p>
- The problem is, if one side of the communication does not have meaning,
- then the comprehension of the implicit meaning is an illusion arising
- from our singular human understanding of language (independent of the
- model). Contrary to how it may seem when we observe its output, an LM is
- a system for haphazardly stitching together sequences of linguistic
- forms it has observed in its vast training data, according to
- probabilistic information about how they combine, but without any
- reference to meaning: a stochastic parrot.
- </p>
- </blockquote>
- <figcaption>
- <p>Emily M. Bender, Timnit Gebru, et al., <em>On the Dangers of Stochastic
- Parrots: Can Language Models Be Too Big?</em>.</p>
- </figcaption>
- </figure>
- <p>Bird brains have a bad reputation. The diminutive size of your average
- bird and their brain has led people to assume that they are, well,
- dumb.</p>
- <p>But bird brains are amazing. Birds commonly outperform mammals with
- larger brains at a variety of general reasoning and problem-solving
- tasks. Some by a large margin. Their small brains manage this by
- packing numerous neurons in a small space using structures that are
- unlike those you find in mammals.</p>
- <p>Even though birds have extremely capable minds, those minds are built in
- ways that are different from our own or other mammals. Similar
- capabilities; different structure.</p>
- <p>The ambition of the Silicon Valley AI industry is to create something
- analogous to a bird brain: a new kind of mind that is functionally
- similar to the human mind, possibly outperforming it, while being built
- using very different mechanisms. Similar capabilities; different
- structure.</p>
- <p>This effort goes back decades, to the dawn of computing, and has had
- limited success.</p>
- <p>Until recently, it seems.</p>
- <p>If you’re reading this, you’ve almost certainly interacted with a
- Generative AI, however indirectly. Maybe you’ve tried Bing Chat. Maybe
- you’ve subscribed to the paid tier for ChatGPT. Or, maybe you’ve used
- Midjourney to generate images. At the very least you’ve been forced to
- see the images or text posted by the overenthusiastic on social media.</p>
- <p>These AI models are created by pushing an enormous amount of training
- data through various algorithms:</p>
- <ul>
- <li>Language models like ChatGPT are trained on a good chunk of the textual material available in digital form in the world.</li>
- <li>Image models like Midjourney and Stable Diffusion are trained on a huge collection of images found on the internet.</li>
- </ul>
- <p>What comes out the other end is a mathematical model of the media domain
- in question: text or images.</p>
- <p>You know what Generative AI is in terms of how it presents to you as
- software: clever chatbots that do or say things in response to what you
- say: <em>your prompt</em>. Some of those responses are useful, and they give
- you an impression of sophisticated comprehension. The models that
- generate text are fluent and often quite engaging.</p>
- <p>This fluency is misleading. What Bender and Gebru meant when they coined
- the term <em>stochastic parrot</em> wasn’t to imply that these are, indeed, the
- new bird brains of Silicon Valley, but that they are unthinking text
- synthesis engines that just repeat phrases. They are the proverbial
- parrot who echoes without thinking, not the actual parrot who is capable
- of complex reasoning and problem-solving.</p>
- <p>A <em>zombie parrot</em>, if you will, that screams for <em>brains</em> because it has
- none.</p>
- <p>The fluency of the zombie parrot—the unerring confidence and a style of
- writing that some find endearing—creates a strong illusion of
- intelligence.</p>
- <p>Every other time we read text, we are engaging with the product of
- another mind. We are so used to the idea of text as a representation of
- another person’s thoughts that we have come to mistake their writing
- <em>for</em> their thoughts. But they aren’t. Text and media are tools that
- authors and artists create to let people change their own state of
- mind—hopefully in specific ways to form the image or effect the author
- was after.</p>
- <p>Reading is an indirect collaboration with the author, mediated through
- the writing. Text has no inherent reasoning or intelligence. Agatha
- Christie’s ghost does not inhabit the words of <em>Murder on the Orient Express</em>.
- Stephen King isn’t hovering over you when you read <em>Carrie</em>. The ghost
- you feel while reading is an illusion you’ve made out of your own
- experience, knowledge, and imagination. Every word you read causes your
- mind to reconstruct its meaning using your memories and creativity. The
- idea that there is intelligence somehow inherent in writing is an
- illusion. The intelligence is <em>all</em> yours, all the time: thoughts you
- make yourself in order to make sense of another person’s words. This can
- prompt us to greatness, broaden our minds, inspire new thoughts, and
- introduce us to new concepts. A book can contain worlds, but we’re the
- ones that bring them into being as we read. What we see is uniquely our
- own. The thoughts are not transported from the author’s mind and
- injected into ours.</p>
- <p>The words themselves are just line forms on a background with no
- inherent meaning or intelligence. The word “horse” doesn’t come with the
- Platonic ideal of a horse attached to it. The word “anger” isn’t full of
- seething emotion or the restrained urge towards violence. Even words
- that are arguably onomatopoeic, like the word “brabra” we use in
- Icelandic for the sound a duck makes, are still incredibly specific to
- the cultures and context they come from. We are the ones doing the heavy
- lifting in terms of reconstructing a picture of an intelligence behind
- the text. When there is no actual intelligence, such as with ChatGPT, we
- are the ones who end up filling in the gaps with our memories,
- experience and imagination.</p>
- <p>When ChatGPT demonstrates intelligence, that comes from us. Some of
- it we construct ourselves. Some of it comes from our inherent
- biases.</p>
- <p>There is no ‘there’ there. We are alone in the room, reconstructing an
- abstract representation of a mind. The reasoning you see is only in your
- head. You are hallucinating intelligence where there is none. You are
- doing the textual equivalent of seeing a face in a power outlet.</p>
- <p>This drive—<em>anthropomorphism</em>—seems to be innate. Our first instinct
- when faced with anything unfamiliar—whose drives, motivations, and
- mechanisms we don’t understand—is to assume that they think much like a
- human would. When that unfamiliar agent uses language like a human
- would, the urge to see them as near or fully human is impossible to
- resist—a recurring issue in the history of AI research that dates all
- the way back to 1966.</p>
- <p>These tools solve problems and return fluent, if untruthful, answers,
- which is what creates such a convincing illusion of intelligence.</p>
- <p>Text synthesis engines like ChatGPT and GPT-4 do not have any
- self-awareness. They are mathematical models of the various patterns to
- be found in the collected body of human text. How granular the model is
- depends on its design and the languages in question. Some of the
- tokens—the smallest units of language the model works with—will be
- characters or punctuation marks; others will be words, syllables,
- or even phrases. Many language models use a mixture of these.</p>
- <p>With enough detail—a big enough collection of text—these tools will
- model enough of the probabilistic distribution of various words or
- characters to be able to perform what looks like magic:</p>
- <ul>
- <li>They generate fluent answers by calculating the most probable sequence
- of words, at that time, which would serve as the continuation of or
- response to the prompt.</li>
- <li>They can perform limited reasoning tasks that correlate with textual
- descriptions of prior reasoning tasks in the training data.</li>
- </ul>
- <p>With enough of these correlative shortcuts, the model can perform
- something that looks like common sense reasoning: its output is text
- that replicates prior representations of reasoning. This works for
- as long as you don’t accidentally use the wrong phrasing in your prompt
- and break the correlation.</p>
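- <p>As a purely illustrative sketch of that idea (the probability table here
- is invented for the example; real language models derive such
- distributions from billions of learned parameters, not a hand-written
- lookup), picking a continuation could be pictured like this:</p>
- <pre><code>// Toy illustration: choose the next token by sampling from a recorded
- // probability distribution for the current context.
- const nextTokenProbabilities = {
-   'the': { 'cat': 0.4, 'dog': 0.35, 'parrot': 0.25 },
-   'cat': { 'sat': 0.6, 'slept': 0.4 },
-   'dog': { 'barked': 0.7, 'sat': 0.3 }
-   // …one entry per context observed in the training data
- }
-
- function sampleNextToken(context) {
-   const distribution = nextTokenProbabilities[context] || {}
-   let r = Math.random()
-   for (const [token, probability] of Object.entries(distribution)) {
-     r -= probability
-     if (r <= 0) return token
-   }
-   return null // unknown context: no recorded continuation to follow
- }
-
- // sampleNextToken('the') returns 'cat', 'dog' or 'parrot', weighted by the
- // table above; each step is a lookup over recorded text, not a thought.</code></pre>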
- <p>The mechanism behind these systems is entirely correlative from the
- ground up. What looks like reasoning is incredibly fragile and
- breaks as soon as you rephrase or reword your prompt. It exists
- only as a probabilistic model of text. A Generative AI chatbot is a
- language engine incapable of genuine thought.</p>
- <p>These language models are interactive but static snapshots of the
- probability distributions of a written language.</p>
- <p>It’s obviously interactive; that’s the whole point of a chatbot. It’s
- static in that it does not change when it’s used or activated. In fact,
- changing it requires an enormous amount of computing power over a long
- period of time. What the system models are the distributions and
- correlations of the tokens it records for the texts in its training data
- set—how the various words, syllables, and punctuation relate to each
- other over as much of the written history of a language as the company
- can find.</p>
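- <p>As a deliberately simplified sketch of what such a snapshot records
- (assuming plain word-level tokens, where real systems use sub-word
- tokenisation and far richer statistics than adjacent-pair counts), the
- core bookkeeping could be pictured like this:</p>
- <pre><code>// Count how often each token follows another in a training text.
- // Querying the finished table never alters it: a static snapshot.
- function buildBigramCounts(trainingText) {
-   const counts = {}
-   const tokens = trainingText.toLowerCase().split(/\s+/)
-   for (let i = 0; i < tokens.length - 1; i++) {
-     const current = tokens[i]
-     const next = tokens[i + 1]
-     counts[current] = counts[current] || {}
-     counts[current][next] = (counts[current][next] || 0) + 1
-   }
-   return counts
- }
-
- // buildBigramCounts('the cat sat on the mat')
- // → { the: { cat: 1, mat: 1 }, cat: { sat: 1 }, sat: { on: 1 }, on: { the: 1 } }</code></pre>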
- <p>That’s what distinguishes biological minds from these algorithmic
- hindsight factories: a biological mind does not reason using the
- probability distributions of all the prior cultural records of its
- ancestors. Biological minds learn primarily through trial and error.
- Try, fail, try again. They build their neural network, which is
- functionally very different from what you see in a software model,
- through constant feedback, experimentation, and repeated failure—driven
- by a chemical network that often manifests as instinct, emotion,
- motivation, and drive. The neural network—bounded, defined, and driven
- by the chemical network—is constantly changing and responding to outside
- stimuli. Every time an animal’s nervous system is “used”, it changes. It
- is always changing, until it dies.</p>
- <p>Biological minds <em>experience</em>. Synthesis engines parse imperfect
- <em>records</em> of experiences. The former are forward-looking and operate
- primarily in the present, sometimes to their own detriment. The latter
- exist exclusively as probabilistic manifestations of imperfect
- representations of thoughts past. They are snapshots. Generative AI models are
- themselves cultural records.</p>
- <p>These models aren’t new bird brains—new alien minds that are peers to
- our own. They aren’t even insect brains. Insects have autonomy. They are
- capable of general problem-solving—some of them dealing with tasks of
- surprising complexity—and their abilities tolerate the kind of
- minor alterations in the problem environment that would break the
- correlative pseudo-reasoning of a language model. Large Language
- Models are something lesser. They are water running down pathways etched
- into the ground over centuries by the rivers of human culture. Their
- originality comes entirely from random combinations of historical
- thought. They do not know the ‘meaning’ of anything—they only know the
- records humans find meaningful enough to store. Their unreliability
- comes from their unpredictable behaviour in novel circumstances. When
- there is no riverbed to follow, they drown the surrounding landscape.</p>
- <p>The entirety of their documented features, capabilities, and recorded
- behaviour—emergent or not—is explained by this conceptual model of
- generative AI. There are no unexplained corner cases that don’t fit or
- actively disprove this theory.</p>
- <p>Yet people keep assuming that what ChatGPT does can only be explained as
- the first glimmer of genuine Artificial General Intelligence. The bird
- brain of Silicon Valley is born at last!</p>
- <p>Because text and language are the primary ways we experience other
- people’s reasoning, it’ll be next to impossible to dislodge the notion
- that these are genuine intelligences. No amount of examples, scientific
- research, or analysis will convince those who want to maintain a
- pseudo-religious belief in alien peer intelligences. After all, if you
- want to believe in aliens, an artificial one made out of supercomputers
- and wishful thinking <em>feels</em> much more plausible than little grey men
- from outer space. But that’s what it is: <em>a belief in aliens.</em></p>
- <p>It doesn’t help that so many working in AI seem to <em>want</em> this to be
- true. They seem to be true believers who are convinced that the spark of
- Artificial General Intelligence has been struck.</p>
- <p>They are inspired by the science fictional notion that if you make
- something complex enough, it will spontaneously become intelligent. This
- isn’t an uncommon belief. You see it in movies and novels—the notion
- that any network of sufficient complexity will spontaneously become
- sentient has embedded itself in our popular psyche. James Cameron’s
- skull-crushing metal skeletons have a lot to answer for.</p>
- <p>That notion doesn’t seem to have any basis in science. The idea that
- general intelligence is an emergent property of neural networks that
- appears once the network reaches sufficient complexity, is a view based
- on archaic notions of animal intelligence—that animals are soulless
- automata incapable of feeling or reasoning. That view was
- formed during a period when we didn’t realise just how common
- self-awareness (as measured by the mirror test) and general reasoning
- are in the animal kingdom. Animals are smarter than we assumed and the
- difference between our reasoning and theirs seems to be a matter of
- degree, not of presence or absence.</p>
- <p>General reasoning seems to be an <em>inherent</em>, not emergent, property of
- pretty much any biological lifeform with a notable nervous system.</p>
- <p>The bumblebee, despite having only a tiny fraction of the neurons of a
- human brain, is capable of not only solving puzzles but also of
- <em>teaching other bees to solve those puzzles.</em> They reason and have a
- culture. They have more genuine and robust general reasoning
- skills—that don’t collapse into incoherence at minor adjustments to the
- problem space—than GPT-4 or any large language model on the market.
- That’s with only around half a million neurons to work with.</p>
- <p>Conversely, GPT-3 is made up of 175 <em>billion</em> parameters—what passes for
- a “neuron” in a digital neural network. GPT-4 is even larger, with
- some estimates coming in at a <em>trillion</em> parameters. Then you have
- fine-tuned systems such as ChatGPT, that are built from multiple
- interacting models layered on top of GPT-3.5 or GPT-4, which make for an
- even more complex interactive system.</p>
- <p>ChatGPT, running on GPT-4, is easily a <em>million</em> times more complex than
- the “neural network” of a bumblebee and yet, out of the two, it’s the
- striped invertebrate that demonstrates robust and adaptive
- general-purpose reasoning skills. Very simple minds, those belonging to
- small organisms that barely have a brain, are capable of reasoning about
- themselves, the world around them, and the behaviour of other
- animals.</p>
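- <p>As a rough back-of-the-envelope check of that figure, and only if one
- treats a parameter as loosely comparable to a neuron (a generous
- assumption, as noted above): a trillion parameters against the bumblebee’s
- roughly half a million neurons gives 10<sup>12</sup> ÷ (5 × 10<sup>5</sup>)
- = 2 × 10<sup>6</sup>, i.e. about two million to one.</p>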
- <p>Unlike the evidence for ‘sparks’ of AGI in language models, the evidence
- for animal reasoning—even consciousness—is broad, compelling, and
- encompasses decades of work by numerous scientists.</p>
- <p>AI models are flawed attempts at digitally synthesising neurologies.
- They are built on the assumption that all the rest—metabolisms,
- hormones, chemicals, and senses—aren’t necessary for developing
- intelligence.</p>
- <p>Reasoning in biological minds does not seem to be a property that
- emerges from complexity. The capacity to reason looks more likely to be
- a <em>built-in</em> property of most animal minds. A reasoning mind
- appears to be a direct consequence of how animals are structured as a
- whole—chemicals, hormones, and physical body included. The animal
- capacity for problem-solving, social reasoning, and self-awareness seems
- to increase, unevenly and fitfully, with the number of neurons until it
- reaches the level we see in humans. Reasoning does not ‘emerge’ or
- appear. Some creatures are better at it than others, but it’s there in
- some form even in very small, very simple beings like the bumblebee. It
- doesn’t happen magically when you hook up a bunch of disparate objects
- together in a complex enough network. A reasoning mind is the <em>starting
- point</em> of biological thinking, not the endpoint that only “emerges” with
- sufficient complexity.</p>
- <p>The internet—a random interconnected collection of marketing offal,
- holiday snaps, insufferable meetings, and porn—isn’t going to become
- self-aware and suddenly acquire the capacity for general reasoning once
- it reaches a certain size, and neither will Large-Language-Models. The
- notion that we are making autonomous beings capable of Artificial
- General Intelligence just by loading a neural network up with an
- increasingly bigger collection of garbage from the internet is not one
- that has any basis in anything we understand about biology or animal
- reasoning.</p>
- <p>But AI companies insist that they are on the verge of AGI. Their
- rhetoric around it verges on the religious as the idea of an AGI is
- idealised and almost worshipped. They claim to be close to making a
- new form of thinking life, but they refuse to release the data required
- to prove it. They’ve built software that performs well on the
- arbitrary benchmarks they’ve chosen and claim are evidence of general
- intelligence, but those tests prove no such thing and have no such
- validity. The benchmarks are theatrics that have no applicability
- towards demonstrating genuine general intelligence.</p>
- <p>AI researchers love to resurrect outdated pseudoscience such as
- phrenology—shipping AI software that promises to be able to tell you if
- somebody is likely to be a criminal based on the shape of their
- skull. It’s a field where researchers and vendors routinely claim
- that their AIs can detect whether you’re a potential criminal, gay, a
- good employee, liberal or conservative, or even a psychopath, based on
- “your face, body, gait, and tone of voice.”</p>
- <p><em>It’s pseudoscience</em>.</p>
- <p>This is the field and the industry that claims to have accomplished the
- first ‘spark’ of Artificial General Intelligence?</p>
- <p>Last time we saw a claim this grand, with this little scientific
- evidence, the men in the white coats were promising us room-temperature
- fusion, giving us free energy for life, and ending the world’s
- dependence on fossil fuels.</p>
- <p>Why give the tech industry the benefit of the doubt when they are all
- but claiming godhood—that they’ve created a new form of life never seen
- before?</p>
- <p>As <a href="https://en.wikipedia.org/wiki/Sagan_standard">Carl Sagan said</a>:
- <em>“extraordinary claims require extraordinary evidence.”</em></p>
- <p>He didn’t say “extraordinary claims require only vague insinuations and
- pinky-swear promises.”</p>
- <p>To claim you’ve created a completely new kind of mind that’s on par with
- any animal mind—or, even superior—and provides general intelligence
- using mechanisms that don’t resemble anything anybody has ever seen in
- nature, is by definition the most extraordinary of claims.</p>
- <p>The AI industry is backing their claims of Artificial General
- Intelligence with hot air, hand-waving, and cryptic references to data
- and software nobody outside their organisations is allowed to review or
- analyse.</p>
- <p>They are pouring an ever-increasing amount of energy and work into
- ever-larger models all in the hope of triggering the
- ‘<a href="https://en.wikipedia.org/wiki/Technological_singularity">singularity</a>’
- and creating a digital superbeing. Like a cult of monks boiling the
- oceans in order to hear whispers of the name of God.</p>
- <p>It’s a farce. All theatre; no evidence. Whether they realise it or not,
- they are taking us for a ride. The sooner we see that they aren’t
- backing their claims with science, the sooner we can focus on finding
- safe and productive uses—limiting its harm, at least—for the technology
- as it exists today.</p>
- <p>After everything the tech industry has done over the past decade, the
- financial bubbles, the gig economy, legless virtual reality avatars,
- crypto, the endless software failures—just think about it—do you think
- we should believe them when they make grand, unsubstantiated claims
- about miraculous discoveries? Have they earned our trust? Have they
- shown that their word is worth more than that of independent scientists?</p>
- <p>Do you think that they, with this little evidence, have really done what
- they claim, and discovered a literal new form of life? But are
- conveniently unable to prove it because of ‘safety’?</p>
- <p>Me neither.</p>
- <p>The notion that large language models are on the path towards Artificial
- General Intelligence is a dangerous one. It’s a myth that directly
- undermines any effort to think clearly or strategise about generative AI
- because it strongly reinforces <em>anthropomorphism</em>.</p>
- <p>That’s when you reason about an object or animal <em>as if it were a
- person</em>. It prevents you from forming an accurate mental model of the
- non-human thing’s behaviour. AI is especially prone to creating this
- reaction. Software such as chatbots triggers all three major factors that
- promote anthropomorphism in people:</p>
- <ol>
- <li><em>Understanding.</em> If we lack an understanding of how an object works,
- our minds will resort to thinking of it in terms of something that’s
- familiar to us: people. We understand the world as people because
- that’s what we are. This becomes stronger the more similar we
- perceive the object to be to ourselves.</li>
- <li><em>Motivation.</em> We are motivated to both seek out human interaction
- and to interact effectively with our environment. This reinforces
- the first factor. The more uncertain we are of how that thing works,
- the stronger the anthropomorphism. The less control we have over it,
- the stronger the anthropomorphism.</li>
- <li><em>Sociality</em>. We have a need for human contact and our tendency
- towards anthropomorphising objects in our environment increases with
- our isolation.</li>
- </ol>
- <p>Because we lack cohesive cognitive models for what makes these language
- models so fluent, because we feel a strong motivation to understand and
- use them as they are integrated into our work, and because, increasingly,
- our socialisation in the office takes on the very same text-conversation
- form as a chatbot does, we inevitably feel a strong drive to see these
- software systems as people. The myth of AGI reinforces this—supercharges
- the anthropomorphism—because it implies that “people” is indeed an
- appropriate cognitive model for how these systems behave.</p>
- <p>It isn’t. <strong><em>AI are not people.</em></strong> Treating them as such is a major
- strategic error as it will prevent you from thinking clearly about their
- capabilities and limitations.</p>
- <p>Believing the myth of Artificial General Intelligence makes you incapable
- of understanding what language models today are and how they work.</p>
- </article>
-
-
- <hr>
-
- <footer>
- <p>
- <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
- </svg> Accueil</a> •
- <a href="/david/log/" title="Accès au flux RSS"><svg class="icon icon-rss2">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-rss2"></use>
- </svg> Suivre</a> •
- <a href="http://larlet.com" title="Go to my English profile" data-instant><svg class="icon icon-user-tie">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-user-tie"></use>
- </svg> Pro</a> •
- <a href="mailto:david%40larlet.fr" title="Envoyer un courriel"><svg class="icon icon-mail">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-mail"></use>
- </svg> Email</a> •
- <abbr class="nowrap" title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340"><svg class="icon icon-hammer2">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-hammer2"></use>
- </svg> Légal</abbr>
- </p>
- <template id="theme-selector">
- <form>
- <fieldset>
- <legend><svg class="icon icon-brightness-contrast">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-brightness-contrast"></use>
- </svg> Thème</legend>
- <label>
- <input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
- </label>
- <label>
- <input type="radio" value="dark" name="chosen-color-scheme"> Foncé
- </label>
- <label>
- <input type="radio" value="light" name="chosen-color-scheme"> Clair
- </label>
- </fieldset>
- </form>
- </template>
- </footer>
- <script src="/static/david/js/instantpage-5.1.0.min.js" type="module"></script>
- <script>
- function loadThemeForm(templateName) {
- const themeSelectorTemplate = document.querySelector(templateName)
- const form = themeSelectorTemplate.content.firstElementChild
- themeSelectorTemplate.replaceWith(form)
-
- form.addEventListener('change', (e) => {
- const chosenColorScheme = e.target.value
- localStorage.setItem('theme', chosenColorScheme)
- toggleTheme(chosenColorScheme)
- })
-
- const selectedTheme = localStorage.getItem('theme')
- if (selectedTheme && selectedTheme !== 'undefined') {
- form.querySelector(`[value="${selectedTheme}"]`).checked = true
- }
- }
-
- const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
- window.addEventListener('load', () => {
- let hasDarkRules = false
- for (const styleSheet of Array.from(document.styleSheets)) {
- let mediaRules = []
- for (const cssRule of styleSheet.cssRules) {
- if (cssRule.type !== CSSRule.MEDIA_RULE) {
- continue
- }
- // WARNING: Safari does not support `conditionText`.
- if (cssRule.conditionText) {
- if (cssRule.conditionText !== prefersColorSchemeDark) {
- continue
- }
- } else {
- if (!cssRule.cssText.startsWith('@media ' + prefersColorSchemeDark)) {
- continue
- }
- }
- mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
- }
-
- // WARNING: do not try to insert a Rule to a styleSheet you are
- // currently iterating on, otherwise the browser will be stuck
- // in an infinite loop…
- for (const mediaRule of mediaRules) {
- styleSheet.insertRule(mediaRule.cssText)
- hasDarkRules = true
- }
- }
- if (hasDarkRules) {
- loadThemeForm('#theme-selector')
- }
- })
- </script>
- </body>
- </html>