- <!doctype html><!-- This is a valid HTML5 document. -->
- <!-- Screen readers, SEO, extensions and so on. -->
- <html lang="en">
- <!-- Has to be within the first 1024 bytes, hence before the `title` element
- See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
- <meta charset="utf-8">
- <!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
- <!-- The viewport meta is quite crowded and we are responsible for that.
- See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
- <meta name="viewport" content="width=device-width,initial-scale=1">
- <!-- Required to make a valid HTML5 document. -->
- <title>Dude, you broke the future! (archive) — David Larlet</title>
- <meta name="description" content="Publication mise en cache pour en conserver une trace.">
- <!-- That good ol' feed, subscribe :). -->
- <link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
- <!-- Generated from https://realfavicongenerator.net/ such a mess. -->
- <link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
- <link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
- <link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
- <link rel="manifest" href="/static/david/icons2/site.webmanifest">
- <link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
- <link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
- <meta name="msapplication-TileColor" content="#f7f7f7">
- <meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
- <meta name="theme-color" content="#f7f7f7" media="(prefers-color-scheme: light)">
- <meta name="theme-color" content="#272727" media="(prefers-color-scheme: dark)">
- <!-- Is that even respected? Retrospectively? What a shAItshow…
- https://neil-clarke.com/block-the-bots-that-feed-ai-models-by-scraping-your-website/ -->
- <meta name="robots" content="noai, noimageai">
- <!-- Documented, feel free to shoot an email. -->
- <link rel="stylesheet" href="/static/david/css/style_2021-01-20.css">
- <!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
- <script>
- function toggleTheme(themeName) {
- document.documentElement.classList.toggle(
- 'forced-dark',
- themeName === 'dark'
- )
- document.documentElement.classList.toggle(
- 'forced-light',
- themeName === 'light'
- )
- }
- const selectedTheme = localStorage.getItem('theme')
- if (selectedTheme && selectedTheme !== 'undefined') {
- toggleTheme(selectedTheme)
- }
- </script>
-
- <meta name="robots" content="noindex, nofollow">
- <meta content="origin-when-cross-origin" name="referrer">
- <!-- Canonical URL for SEO purposes -->
- <link rel="canonical" href="https://www.antipope.org/charlie/blog-static/2018/01/dude-you-broke-the-future.html">
-
- <body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick" data-instant-intensity="viewport-all">
-
-
- <article>
- <header>
- <h1>Dude, you broke the future!</h1>
- </header>
- <nav>
- <p class="center">
- <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
- </svg> Accueil</a> •
- <a href="https://www.antipope.org/charlie/blog-static/2018/01/dude-you-broke-the-future.html" title="Lien vers le contenu original">Source originale</a>
- <br>
- Mis en cache le 2024-02-03
- </p>
- </nav>
- <hr>
- <p><em>Abstract: We're living in yesterday's future, and it's nothing like the speculations of our authors and film/TV producers. As a working science fiction novelist, I take a professional interest in how we get predictions about the future wrong, and why, so that I can avoid repeating the same mistakes. Science fiction is written by people embedded within a society with expectations and political assumptions that bias us towards looking at the shiny surface of new technologies rather than asking how human beings will use them, and to taking narratives of progress at face value rather than asking what hidden agenda they serve.</em></p>
-
- <p><em>In this talk, author Charles Stross will give a rambling, discursive, and angry tour of what went wrong with the 21st century, why we didn't see it coming, where we can expect it to go next, and a few suggestions for what to do about it if we don't like it. </em></p>
-
- <hr />
- <p>Good morning. I'm Charlie Stross, and it's my job to tell lies for money. Or rather, I write science fiction, much of it about our near future, which has in recent years become ridiculously hard to predict. </p>
-
- <p>Our species, <em>Homo sapiens sapiens</em>, is <a href="https://www.nytimes.com/2017/06/07/science/human-fossils-morocco.html">roughly three hundred thousand years old</a>. (Recent discoveries pushed back the date of our earliest remains that far; we may be even older.) For all but the last three centuries of that span, predicting the future was easy: natural disasters aside, everyday life in fifty years' time would resemble everyday life fifty years ago. </p>
-
- <p>Let that sink in for a moment: for 99.9% of human existence, the future was static. Then <em>something</em> happened, and the future began to change, increasingly rapidly, until we get to the present day when things are moving so fast that it's barely possible to anticipate trends from month to month.</p>
-
- <p>As an eminent computer scientist once remarked, computer science is no more about computers than astronomy is about building telescopes. The same can be said of my field of work, written science fiction. Scifi is seldom about science—and even more rarely about predicting the future. But sometimes we dabble in futurism, and lately it's gotten very difficult.</p>
-
- <h2>How to predict the near future</h2>
-
- <p>When I write a near-future work of fiction, one set, say, a decade hence, there used to be a recipe that worked eerily well. Simply put, 90% of the next decade's stuff is already here today. Buildings are designed to last many years. Automobiles have a design life of about a decade, so half the cars on the road will probably still be around in 2027. People ... there will be new faces, aged ten and under, and some older people will have died, but most adults will still be around, albeit older and grayer. This is the 90% of the near future that's already here.</p>
-
- <p>After the already-here 90%, another 9% of the future a decade hence used to be easily predictable. You look at trends dictated by physical limits, such as Moore's Law, and you look at Intel's road map, and you use a bit of creative extrapolation, and you won't go too far wrong. If I predict that in 2027 LTE cellular phones will be everywhere, 5G will be available for high bandwidth applications, and fallback to satellite data service will be available at a price, you won't laugh at me. It's not like I'm predicting that airliners will fly slower and Nazis will take over the United States, is it?</p>
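- 
- <p>As a toy illustration of that kind of extrapolation, here is a sketch in JavaScript; the numbers are illustrative placeholders, not a forecast:</p>
- 
- <pre><code>// Toy extrapolation of an exponential trend such as Moore's Law.
- // The inputs are illustrative placeholders, not a forecast.
- function extrapolate(currentValue, doublingPeriodYears, yearsAhead) {
-   return currentValue * Math.pow(2, yearsAhead / doublingPeriodYears)
- }
- 
- // e.g. a chip with 10 billion transistors, doubling every 2.5 years:
- // a decade out you would pencil in 10e9 * 2^(10 / 2.5), roughly 160 billion.
- extrapolate(10e9, 2.5, 10)
- </code></pre>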
-
- <p>And therein lies the problem: it's the 1% of unknown unknowns that throws off all calculations. As it happens, airliners today <em>are</em> slower than they were in the 1970s, and don't get me started about Nazis. Nobody in 2007 was expecting a Nazi revival in 2017, right? (Only this time round Germans get to be the good guys.)</p>
-
- <p>My recipe for fiction set ten years in the future used to be 90% already-here, 9% not-here-yet but predictable, and 1% who-ordered-<em>that</em>. But unfortunately the ratios have changed. I think we're now down to maybe 80% already-here—climate change takes a huge toll on infrastructure—then 15% not-here-yet but predictable, and a whopping 5% of utterly unpredictable deep craziness.</p>
-
- <p><strong>Ruling out the singularity</strong></p>
-
- <p>Some of you might assume that, as the author of books like "Singularity Sky" and "Accelerando", I attribute this to an impending technological singularity, to our development of self-improving artificial intelligence and mind uploading and the whole wish-list of transhumanist aspirations promoted by the likes of Ray Kurzweil. Unfortunately this isn't the case. I think transhumanism is a warmed-over Christian heresy. While its adherents tend to be vehement atheists, they can't quite escape from the history that gave rise to our current western civilization. Many of you are familiar with design patterns, an approach to software engineering that focusses on abstraction and simplification in order to promote reusable code. When you look at the AI singularity as a narrative, and identify the numerous places in the story where the phrase "... and then a miracle happens" occurs, it becomes apparent pretty quickly that they've reinvented Christianity. </p>
-
- <p>Indeed, the wellsprings of today's transhumanists draw on a long, rich history of <a href="https://en.wikipedia.org/wiki/Russian_cosmism">Russian Cosmist philosophy</a> exemplified by the Russian Orthodox theologian <a href="https://en.wikipedia.org/wiki/Nikolai_Fyodorovich_Fyodorov">Nikolai Fyodorovich Fyodorov</a>, by way of his disciple <a href="https://en.wikipedia.org/wiki/Konstantin_Tsiolkovsky">Konstantin Tsiolkovsky</a>, whose derivation of the rocket equation makes him essentially the father of modern spaceflight. And once you start probing the nether regions of transhumanist thought and run into concepts like <a href="https://rationalwiki.org/wiki/Roko%27s_basilisk">Roko's Basilisk</a>—by the way, any of you who didn't know about the Basilisk before are now doomed to an eternity in AI hell—you realize they've mangled it to match some of the nastiest ideas in Presbyterian Protestantism.</p>
-
- <p>If it walks like a duck and quacks like a duck, it's probably a duck. And if it looks like a religion it's probably a religion. I don't see much evidence for human-like, self-directed artificial intelligences coming along any time now, and a fair bit of evidence that nobody except some freaks in university cognitive science departments even wants it. What we're getting, instead, is self-optimizing tools that defy human comprehension but are not, in fact, any more like our kind of intelligence than a Boeing 737 is like a seagull. So I'm going to wash my hands of the singularity as an explanatory model without further ado—I'm one of those vehement atheists too—and try and come up with a better model for what's happening to us.</p>
-
- <h2>Towards a better model for the future</h2>
-
- <p>As my fellow SF author Ken MacLeod likes to say, the secret weapon of science fiction is history. History, loosely speaking, is the written record of what and how people did things in past times—times that have slipped out of our personal memories. We science fiction writers tend to treat history as a giant toy chest to raid whenever we feel like telling a story. With a little bit of history it's really easy to whip up an entertaining yarn about a galactic empire that mirrors the development and decline of the Hapsburg Empire, or to re-spin the October Revolution as a tale of how Mars got its independence. </p>
-
- <p>But history is useful for so much more than that.</p>
-
- <p>It turns out that our personal memories don't span very much time at all. I'm 53, and I barely remember the 1960s. I only remember the 1970s with the eyes of a 6-16 year old. My father, who died last year aged 93, just about remembered the 1930s. Only those of my father's generation are able to directly remember the Great Depression and compare it to the 2007/08 global financial crisis. But westerners tend to pay little attention to cautionary tales told by ninety-somethings. We modern, change-obsessed humans tend to repeat our biggest social mistakes when they slip out of living memory, which means they recur on a time scale of seventy to a hundred years. </p>
-
- <p>So if our personal memories are useless, it's time for us to look for a better cognitive toolkit.</p>
-
- <p>History gives us the perspective to see what went wrong in the past, and to look for patterns, and check whether those patterns apply to the present and near future. And looking in particular at the history of the past 200-400 years—the age of increasingly rapid change—one glaringly obvious deviation from the norm of the preceding three thousand centuries is the development of Artificial Intelligence, which happened <a href="https://en.wikipedia.org/wiki/Corporation#History">no earlier than 1553 and no later than 1844</a>. </p>
-
- <p>I'm talking about the very old, very slow AIs we call corporations, of course. What lessons from the history of the company can we draw that tell us about the likely behaviour of the type of artificial intelligence we are all interested in today?</p>
-
- <p><strong>Old, slow AI</strong></p>
-
- <p>Let me crib from Wikipedia for a moment:</p>
-
- <p>In the late 18th century, <a href="https://en.wikipedia.org/wiki/Stewart_Kyd">Stewart Kyd</a>, the author of the first treatise on corporate law in English, defined a corporation as:</p>
-
- <blockquote>
- <p>a collection of many individuals united into one body, under a special denomination, having perpetual succession under an artificial form, and vested, by policy of the law, with the capacity of acting, in several respects, as an individual, particularly of taking and granting property, of contracting obligations, and of suing and being sued, of enjoying privileges and immunities in common, and of exercising a variety of political rights, more or less extensive, according to the design of its institution, or the powers conferred upon it, either at the time of its creation, or at any subsequent period of its existence.</p>
- </blockquote>
-
- <p>—A Treatise on the Law of Corporations, Stewart Kyd (1793-1794)</p>
-
- <p>In 1844, the British government passed the Joint Stock Companies Act, which created a register of companies and allowed any legal person, for a fee, to register a company, which existed as a separate legal person. Subsequently, the law was extended to limit the liability of individual shareholders in event of business failure, and both Germany and the United States added their own unique extensions to what we see today as the doctrine of corporate personhood.</p>
-
- <p>(Of course, there were plenty of other things happening between the sixteenth and twenty-first centuries that changed the shape of the world we live in. I've skipped changes in agricultural productivity due to energy economics, which finally broke the Malthusian trap our predecessors lived in. This in turn broke the long term cap on economic growth of around 0.1% per year in the absence of famine, plagues, and wars depopulating territories and making way for colonial invaders. I've skipped the germ theory of diseases, and the development of trade empires in the age of sail and gunpowder that were made possible by advances in accurate time-measurement. I've skipped the rise and—hopefully—decline of the pernicious theory of scientific racism that underpinned western colonialism and the slave trade. I've skipped the rise of feminism, the ideological position that women are human beings rather than property, and the decline of patriarchy. I've skipped the whole of the Enlightenment and the age of revolutions! But this is a technocentric congress, so I want to frame this talk in terms of AI, which we all like to think we understand.)</p>
-
- <p>Here's the thing about corporations: they're clearly artificial, but legally they're people. They have goals, and operate in pursuit of these goals. And they have a natural life cycle. In the 1950s, a typical US corporation on the S&P 500 index had a lifespan of 60 years, but today it's down to <a href="https://uk.finance.yahoo.com/news/technology-killing-off-corporate-america-173100680.html">less than 20 years</a>.</p>
-
- <p>Corporations are cannibals; they consume one another. They are also hive superorganisms, like bees or ants. For their first century and a half they relied entirely on human employees for their internal operation, although they are automating their business processes increasingly rapidly this century. Each human is only retained so long as they can perform their assigned tasks, and can be replaced with another human, much as the cells in our own bodies are functionally interchangeable (and a group of cells can, in extremis, often be replaced by a prosthesis). To some extent corporations can be trained to service the personal desires of their chief executives, but even CEOs can be dispensed with if their activities damage the corporation, as Harvey Weinstein found out a couple of months ago.</p>
-
- <p>Finally, our legal environment today has been tailored for the convenience of corporate persons, rather than human persons, to the point where our governments now mimic corporations in many of their internal structures.</p>
-
- <p><strong>What do AIs want?</strong></p>
-
- <p>What do our current, actually-existing AI overlords <em>want</em>?</p>
-
- <p>Elon Musk—who I believe you have all heard of—has an obsessive fear of one particular hazard of artificial intelligence—which he conceives of as being <a href="https://www.thenational.ae/arts-culture/mark-zuckerberg-and-elon-musk-s-debate-over-artificial-intelligence-will-robots-go-rogue-1.616530">a piece of software that functions like a brain-in-a-box</a>—namely, the <a href="https://wiki.lesswrong.com/wiki/Paperclip_maximizer">paperclip maximizer</a>. A paperclip maximizer is a term of art for a goal-seeking AI that has a single priority, for example maximizing the number of paperclips in the universe. The paperclip maximizer is able to improve itself in pursuit of that goal but has no ability to vary its goal, so it will ultimately attempt to convert all the metallic elements in the solar system into paperclips, even if this is obviously detrimental to the wellbeing of the humans who designed it.</p>
-
- <p>Unfortunately, Musk isn't paying enough attention. Consider his own companies. <a href="https://en.wikipedia.org/wiki/Gigafactory_1">Tesla is a battery maximizer</a>—an electric car is a battery with wheels and seats. SpaceX is an orbital payload maximizer, driving down the cost of space launches in order to encourage more sales for the service it provides. SolarCity is a photovoltaic panel maximizer. And so on. All three of Musk's very own slow AIs are based on an architecture that is designed to maximize return on shareholder investment, even if by doing so they cook the planet the shareholders have to live on. (But if you're Elon Musk, that's okay: you plan to retire on Mars.)</p>
-
- <p>The problem with corporations is that despite their overt goals—whether they make electric vehicles or beer or sell life insurance policies—they are all subject to <a href="https://wiki.lesswrong.com/wiki/Paperclip_maximizer">instrumental convergence</a> insofar as they all have a common implicit paperclip-maximizer goal: to generate revenue. If they don't make money, they are eaten by a bigger predator or they go bust. Making money is an <em>instrumental goal</em>—it's as vital to them as breathing is for us mammals, and without pursuing it they will fail to achieve their final goal, whatever it may be. Corporations generally pursue their instrumental goals—notably maximizing revenue—as a side-effect of the pursuit of their overt goal. But sometimes they try instead to manipulate the regulatory environment they operate in, to ensure that money flows towards them regardless.</p>
-
- <p>Human tool-making culture has become increasingly complicated over time. New technologies always come with an implicit political agenda that seeks to extend their use, governments react by legislating to control the technologies, and sometimes we end up with industries indulging in legal duels. </p>
-
- <p>For example, consider the automobile. You can't have mass automobile transport without gas stations and fuel distribution pipelines. These in turn require access to whoever owns the land the oil is extracted from—and before you know it, you end up with a permanent occupation force in Iraq and a client dictatorship in Saudi Arabia. Closer to home, automobiles imply jaywalking laws and drink-driving laws. They affect town planning regulations and encourage suburban sprawl, the construction of human infrastructure on the scale required by automobiles, not pedestrians. This in turn is bad for competing transport technologies like buses or trams (which work best in cities with a high population density). </p>
-
- <p>To get these laws in place, providing an environment conducive to doing business, corporations spend money on political lobbyists—and, when they can get away with it, on bribes. Bribery need not be blatant, of course. For example, the reforms of the British railway network in the 1960s <a href="https://en.wikipedia.org/wiki/Beeching_cuts">dismembered many branch services</a> and coincided with a surge in road building and automobile sales. These reforms were orchestrated by <a href="https://en.wikipedia.org/wiki/Ernest_Marples">Transport Minister Ernest Marples</a>, who was purely a politician. However, Marples accumulated a considerable personal fortune during this time by owning shares in <a href="https://en.wikipedia.org/wiki/Marples_Ridgway">a motorway construction corporation</a>. (So, no conflict of interest there!) </p>
-
- <p>The automobile industry in isolation isn't a pure paperclip maximizer. But if you look at it in conjunction with the fossil fuel industries, the road-construction industry, the accident insurance industry, and so on, you begin to see the outline of a paperclip maximizing <em>ecosystem</em> that invades far-flung lands and grinds up and <a href="http://www.who.int/gho/road_safety/mortality/en/">kills around one and a quarter million people per year</a>—that's the global death toll from automobile accidents according to the World Health Organization: it rivals the First World War on an ongoing basis—as side-effects of its drive to sell you a new car.</p>
-
- <p>Automobiles are not, of course, a total liability. Today's cars are <a href="https://en.wikipedia.org/wiki/Automobile_safety">regulated stringently for safety</a> and, in theory, to reduce toxic emissions: they're fast, efficient, and comfortable. We can thank legally mandated regulations for this, of course. Go back to the 1970s and cars didn't have crumple zones. Go back to the 1950s and cars didn't come with seat belts as standard. In the 1930s, indicators—turn signals—and brakes on all four wheels were optional, and your best hope of surviving a 50km/h crash was to be thrown clear of the car and land somewhere without breaking your neck. Regulatory agencies are our current political systems' tool of choice for preventing paperclip maximizers from running amok. But unfortunately they don't always work.</p>
-
- <p>One failure mode that you should be aware of is <a href="https://en.wikipedia.org/wiki/Regulatory_capture">regulatory capture</a>, where regulatory bodies are captured by the industries they control. <a href="https://en.wikipedia.org/wiki/Ajit_Pai">Ajit Pai</a>, head of the American Federal Communications Commission who just voted to eliminate net neutrality rules, has worked as Associate General Counsel for Verizon Communications Inc, the largest current descendant of the Bell telephone system monopoly. Why should someone with a transparent interest in a technology corporation end up in charge of a regulator for the industry that corporation operates within? Well, if you're going to regulate a highly complex technology, you need to recruit your regulators from among those people who understand it. And unfortunately most of those people are industry insiders. Ajit Pai is clearly very much aware of how Verizon is regulated, and <a href="https://www.washingtonpost.com/news/made-by-history/wp/2017/11/27/what-fcc-chair-ajit-pai-gets-wrong-about-net-neutrality/">wants to do something about it</a>—just not necessarily in the public interest. When regulators end up staffed by people drawn from the industries they are supposed to control, they frequently end up working with their former officemates to make it easier to turn a profit, either by raising barriers to keep new insurgent companies out, or by dismantling safeguards that protect the public.</p>
-
- <p>Another failure mode is regulatory lag, when a technology advances so rapidly that regulations are laughably obsolete by the time they're issued. Consider the <a href="https://en.wikipedia.org/wiki/Privacy_and_Electronic_Communications_Directive_2002">EU directive requiring cookie notices on websites</a>, to caution users that their activities were tracked and their privacy might be violated. This would have been a good idea, had it shown up in 1993 or 1996, but unfortunately it didn't show up until 2011, by which time the web was vastly more complex. Fingerprinting and tracking mechanisms that had nothing to do with cookies were already widespread by then. Tim Berners-Lee observed in 1995 that five years' worth of change was happening on the web for every twelve months of real-world time; by that yardstick, the cookie law came out nearly a century too late to do any good. </p>
-
- <p>Again, look at Uber. This month the European Court of Justice ruled that <a href="https://www.theguardian.com/technology/2017/dec/20/uber-european-court-of-justice-ruling-barcelona-taxi-drivers-ecj-eu">Uber is a taxi service</a>, not just a web app. This is arguably correct; the problem is, Uber has spread globally since it was founded eight years ago, subsidizing its drivers to put competing private hire firms out of business. Whether this is a net good for society is arguable; the problem is, a taxi driver can get awfully hungry if she has to wait eight years for a court ruling against a predator intent on disrupting her life.</p>
-
- <p>So, to recap: firstly, we already have paperclip maximizers (and Musk's AI alarmism is curiously mirror-blind). Secondly, we have mechanisms for keeping them in check, but they don't work well against <a href="https://en.wikipedia.org/wiki/Greyball">AIs that deploy the dark arts</a>—especially corruption and bribery—and they're even worse against true AIs that evolve too fast for human-mediated mechanisms like the Law to keep up with. Finally, unlike the naive vision of a paperclip maximizer, existing AIs have multiple agendas—their overt goal, but also profit-seeking, and expansion into new areas, and to accommodate the desires of whoever is currently in the driver's seat.</p>
-
- <p><strong>How it all went wrong</strong></p>
-
- <p>It seems to me that our current political upheavals are best understood as arising from the capture of post-1917 democratic institutions by large-scale AIs. Everywhere I look I see voters protesting angrily against an entrenched establishment that seems determined to ignore the wants and needs of their human voters in favour of the machines. The Brexit upset was largely the result of a protest vote against the British political establishment; the election of Donald Trump likewise, with a side-order of racism on top. Our major political parties are led by people who are compatible with the system as it exists—a system that has been shaped over decades by corporations distorting our government and regulatory environments. We humans are living in a world shaped by the desires and needs of AIs, forced to live on their terms, and we are taught that we are valuable only insofar as we contribute to the rule of the machines.</p>
-
- <p>Now, this is CCC, and we're all more interested in computers and communications technology than this historical crap. But as I said earlier, history is a secret weapon if you know how to use it. What history is good for is enabling us to spot recurring patterns in human behaviour that repeat across time scales outside our personal experience—decades or centuries apart. If we look at our historical very slow AIs, what lessons can we learn from them about modern AI—the flash flood of unprecedented deep learning and big data technologies that have overtaken us in the past decade? </p>
-
- <p>We made a fundamentally flawed, terrible design decision back in 1995, that has damaged democratic political processes, crippled our ability to truly understand the world around us, and led to the angry upheavals of the present decade. That mistake was to fund the build-out of the public world wide web—as opposed to the earlier, government-funded corporate and academic internet—by monetizing eyeballs via advertising revenue. </p>
-
- <p>(<strong>Note</strong>: Cory Doctorow <a href="https://boingboing.net/2017/12/29/llcs-are-slow-ais.html">has a contrarian thesis</a>: <em>The dotcom boom was also an economic bubble because the dotcoms came of age at a tipping point in financial deregulation, the point at which the Reagan-Clinton-Bush reforms that took the Depression-era brakes off financialization were really picking up steam. That meant that the tech industry's heady pace of development was the first testbed for treating corporate growth as the greatest virtue, built on the lie of the fiduciary duty to increase profit above all other considerations.</em> I think he's entirely right about this, <em>but</em> it's a bit of a chicken-and-egg argument: we wouldn't have had a commercial web in the first place without a permissive, deregulated financial environment. My memory of working in the dot-com 1.0 bubble is that, outside of a couple of specific environments (the Silicon Valley area and the Boston-Cambridge corridor) venture capital was hard to find until late 1998 or thereabouts: the bubble's initial inflation was demand-driven rather than capital-driven, as the non-tech investment sector was late to the party. Caveat: I didn't win the lottery, so what do <em>I</em> know?)</p>
-
- <p>The ad-supported web that we live with today wasn't inevitable. If you recall the web as it was in 1994, there were very few ads at all, and not much in the way of commerce. (What ads there were were mostly spam, on usenet and via email.) 1995 was the year the world wide web really came to public attention in the anglophone world and consumer-facing websites began to appear. Nobody really knew how this thing was going to be paid for (the original dot com bubble was <s>all</s> largely about working out how to monetize the web for the first time, and a lot of people lost their shirts in the process). And the naive initial assumption was that the transaction cost of setting up a TCP/IP connection over modem was too high to be supported by per-use microbilling, so we would bill customers indirectly, by shoving advertising banners in front of their eyes and hoping they'd click through and buy something.</p>
-
- <p>Unfortunately, advertising is an industry. Which is to say, it's the product of one of those old-fashioned very slow AIs I've been talking about. Advertising tries to maximize its hold on the attention of the minds behind each human eyeball: the coupling of advertising with web search was an inevitable outgrowth. (How better to attract the attention of reluctant subjects than to find out what they're <em>really</em> interested in seeing, and sell ads that relate to those interests?) </p>
-
- <p>The problem with applying the paperclip maximizer approach to monopolizing eyeballs, however, is that eyeballs are a scarce resource. There are only 168 hours in every week in which I can gaze at banner ads. Moreover, most ads are irrelevant to my interests and it doesn't matter how often you flash an ad for dog biscuits at me, I'm never going to buy any. (I'm a cat person.) To make best revenue-generating use of our eyeballs, it is necessary for the ad industry to learn who we are and what interests us, and to target us increasingly minutely in hope of hooking us with stuff we're attracted to.</p>
-
- <p>At this point in a talk I'd usually go into an impassioned rant about the hideous corruption and evil of Facebook, but I'm guessing you've heard it all before so I won't bother. The too-long-didn't-read summary is, Facebook is as much a search engine as Google or Amazon. Facebook searches are optimized for Faces, that is, for human beings. If you want to find someone you fell out of touch with thirty years ago, Facebook probably knows where they live, what their favourite colour is, what size shoes they wear, and what they said about you to your friends all those years ago that made you cut them off. </p>
-
- <p><a href="https://www.theguardian.com/technology/2015/apr/10/facebook-admits-it-tracks-non-users-but-denies-claims-it-breaches-eu-privacy-law">Even if you don't have a Facebook account, Facebook has a You account</a>—a hole in their social graph with a bunch of connections pointing into it and your name tagged on your friends' photographs. They know a lot about you, and they sell access to their social graph to advertisers who then target you, even if you don't think you use Facebook. Indeed, there's barely any point in <em>not</em> using Facebook these days: they're the social media Borg, resistance is futile. </p>
-
- <p>However, Facebook is trying to get eyeballs on ads, as is Twitter, as is Google. To do this, they fine-tune the content they show you to make it more attractive to your eyes—and by 'attractive' I do not mean pleasant. We humans have an evolved automatic reflex to pay attention to threats and horrors as well as pleasurable stimuli: consider the way highway traffic always slows to a crawl as it is funnelled past an accident site. The algorithms that determine what to show us when we look at Facebook or Twitter take this bias into account. You might react more strongly to a public hanging in Iran than to a couple kissing: the algorithm knows, and will show you whatever makes you pay attention.</p>
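- 
- <p>A minimal sketch of that kind of engagement-weighted ranking is below; the feature names and weights are invented for illustration, and real feed rankers are vastly more elaborate:</p>
- 
- <pre><code>// Hypothetical sketch: rank feed items by predicted attention, not pleasantness.
- // The feature names and weights are invented for illustration only.
- function engagementScore(item) {
-   // Negative emotions hold the eye at least as well as positive ones,
-   // so the model weights them identically.
-   return 0.4 * item.predictedClicks +
-          0.3 * item.predictedShares +
-          0.3 * Math.max(item.predictedOutrage, item.predictedDelight)
- }
- 
- function rankFeed(items) {
-   // Show whatever maximizes attention, regardless of how it makes you feel.
-   return items.slice().sort((a, b) => engagementScore(b) - engagementScore(a))
- }
- </code></pre>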
-
- <p>This brings me to another interesting point about computerized AI, as opposed to corporatized AI: AI algorithms tend to embody the prejudices and beliefs of the programmers. A couple of years ago I ran across an account of a webcam developed by mostly-pale-skinned Silicon Valley engineers that <a href="http://edition.cnn.com/2009/TECH/12/22/hp.webcams/index.html">had difficulty focusing or achieving correct colour balance when pointing at dark-skinned faces</a>. That's an example of human-programmer-induced bias. But with today's deep learning, bias can creep in via the data sets the neural networks are trained on. Microsoft's first foray into a conversational chatbot driven by machine learning, Tay, was yanked offline within days when 4chan- and Reddit-based trolls discovered they could <a href="https://www.theguardian.com/technology/2016/mar/24/tay-microsofts-ai-chatbot-gets-a-crash-course-in-racism-from-twitter">train it towards racism and sexism</a> for shits and giggles. </p>
-
- <p>Humans may be biased, but at least we're accountable and if someone gives you racist or sexist abuse to your face you can complain (or punch them). But it's impossible to punch a corporation, and it may not even be possible to identify the source of unfair bias when you're dealing with a machine learning system.</p>
-
- <p>AI-based systems that concretize existing prejudices and social outlooks make it harder for activists like us to achieve social change. Traditional advertising works by playing on the target customer's insecurity and fear as much as on their aspirations, which in turn play on the target's relationship with their surrounding cultural matrix. Fear of loss of social status and privilege is a powerful stimulus, and fear and xenophobia are useful tools for attracting eyeballs.</p>
-
- <p>What happens when we get pervasive social networks with learned biases against, say, feminism or Islam or melanin? Or deep learning systems trained on data sets contaminated by racist dipshits? Deep learning systems like the ones inside Facebook that <a href="http://www.slate.com/articles/technology/cover_story/2016/01/how_facebook_s_news_feed_algorithm_works.html">determine which stories to show you</a> to get you to pay as much attention as possible to the adverts?</p>
-
- <p>I think you already know the answer to that.</p>
-
- <p><strong>Look to the future (it's bleak!)</strong></p>
-
- <p>Now, if this is sounding a bit bleak and unpleasant, you'd be right. I write sci-fi, you read or watch or play sci-fi; we're acculturated to think of science and technology as good things, that make our lives better. </p>
-
- <p>But plenty of technologies have, historically, been heavily regulated or even criminalized for good reason, and once you get past the reflexive indignation at any criticism of technology and progress, you might agree that it is reasonable to ban individuals from owning nuclear weapons or nerve gas. Less obviously: they may not be weapons, but we've banned chlorofluorocarbon refrigerants because they were building up in the high stratosphere and destroying the ozone layer that protects us from UV-B radiation. And we banned <a href="https://en.wikipedia.org/wiki/Tetraethyllead">tetraethyl lead additive in gasoline</a>, because it poisoned people and <a href="https://en.wikipedia.org/wiki/Lead-crime_hypothesis">led to a crime wave</a>.</p>
-
- <p>Nerve gas and leaded gasoline were 1930s technologies, promoted by 1930s corporations. Halogenated refrigerants and nuclear weapons are totally 1940s, and intercontinental ballistic missiles date to the 1950s. I submit that the 21st century is throwing up dangerous new technologies—just as our existing strategies for regulating very slow AIs have broken down.</p>
-
- <p>Let me give you four examples—of new types of AI applications—that are going to warp our societies even worse than the old slow AIs of yore have done. This isn't an exhaustive list: these are just examples. We need to work out a general strategy for getting on top of this sort of AI before they get on top of us.</p>
-
- <p>(Note that I do <strong>not</strong> have a solution to the regulatory problems I highlighted earlier, in the context of AI. This essay is polemical, intended to highlight the existence of a problem and spark a discussion, rather than to offer a canned solution. After all, if the problem was easy to solve it wouldn't be a problem, would it?)</p>
-
- <p>Firstly, political hacking tools: social graph-directed propaganda</p>
-
- <p>Topping my list of dangerous technologies that need to be regulated, this is low-hanging fruit after the electoral surprises of 2016. <a href="https://www.theguardian.com/politics/2017/mar/04/nigel-oakes-cambridge-analytica-what-role-brexit-trump">Cambridge Analytica</a> pioneered the use of deep learning by scanning the Facebook and Twitter social graphs to identify voters' political affiliations. They identified individuals vulnerable to persuasion who lived in electorally sensitive districts, and canvassed them with propaganda that targeted their personal hot-button issues. The tools developed by web advertisers to sell products have now been weaponized for political purposes, and the amount of personal information about our affiliations that we expose on social media makes us vulnerable. Aside from the last US presidential election, there's mounting evidence that the British referendum on leaving the EU was subject to foreign cyberwar attack via weaponized social media, as was <a href="http://www.france24.com/en/20170426-france-macron-cyber-security-russia-presidential-campaign">the most recent French presidential election</a>. </p>
-
- <p>I'm biting my tongue and trying not to take sides here: I have my own political affiliation, after all. But if social media companies don't work out how to identify and flag micro-targeted propaganda then democratic elections will be replaced by victories for whoever can buy the most trolls. And this won't simply be billionaires like the Koch brothers and Robert Mercer in the United States throwing elections to whoever will hand them the biggest tax cuts. <a href="https://www.nytimes.com/2016/08/29/world/europe/russia-sweden-disinformation.html">Russian military cyberwar doctrine</a> calls for the use of social media to confuse and disable perceived enemies, in addition to the increasingly familiar use of zero-day exploits for espionage via spear phishing and distributed denial of service attacks on infrastructure (which are practiced by western agencies as well). Sooner or later, the use of propaganda bot armies in cyberwar will go global, and at that point, our social discourse will be irreparably poisoned.</p>
-
- <p>(By the way, I <em>really hate</em> the cyber- prefix; it usually indicates that the user has no idea what they're talking about. Unfortunately the term 'cyberwar' seems to have stuck. But I digress.)</p>
-
- <p>Secondly, an adjunct to deep learning targeted propaganda is the use of neural network generated false video media.</p>
-
- <p>We're used to Photoshopped images these days, but faking video and audio is still labour-intensive, right? Unfortunately, that's a nope: we're seeing first generation <a href="https://motherboard.vice.com/en_us/article/gydydm/gal-gadot-fake-ai-porn">AI-assisted video porn</a>, in which the faces of film stars are mapped onto those of other people in a video clip using software rather than a laborious human process. (Yes, <em>of course</em> porn is the first application: Rule 34 of the Internet applies.) Meanwhile, we have <a href="https://deepmind.com/blog/wavenet-generative-model-raw-audio/">WaveNet</a>, a system for generating realistic-sounding speech in the voice of a human speaker the neural network has been trained to mimic. This stuff is still geek-intensive and requires relatively expensive GPUs. But in less than a decade it'll be out in the wild, and just about anyone will be able to fake up a realistic-looking video of someone they don't like doing something horrible. </p>
-
- <p>We're already seeing alarm over <a href="https://medium.com/@jamesbridle/something-is-wrong-on-the-internet-c39c471271d2">bizarre YouTube channels that attempt to monetize children's TV brands</a> by scraping the video content off legitimate channels and adding their own advertising and keywords. Many of these channels are shaped by paperclip-maximizer advertising AIs that are simply trying to maximize their search ranking on YouTube. Add neural network driven tools for inserting Character A into Video B to click-maximizing bots and <a href="https://theoutline.com/post/1239/youtube-has-a-fake-peppa-pig-problem">things are going to get very weird</a> (and <em>nasty</em>). And they're only going to get weirder when these tools are deployed for political gain.</p>
-
- <p>We tend to evaluate the inputs from our eyes and ears much less critically than what random strangers on the internet tell us—and we're already too vulnerable to fake news as it is. Soon they'll come for us, armed with believable video evidence. The smart money says that by 2027 you won't be able to believe anything you see in video unless there are cryptographic signatures on it, linking it back to the device that shot the raw feed—and you know how good most people are at using encryption? The dumb money is on total chaos.</p>
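- 
- <p>For a sense of what the cryptographic part might look like, here is a minimal Node.js sketch using the built-in crypto module. It only shows the signing core; a real provenance scheme would need hardware-backed device keys, certificates and trusted timestamps, and the function names here are mine:</p>
- 
- <pre><code>// Minimal sketch of signing a video file with a per-device key (Node.js).
- // Real provenance schemes need hardware-backed keys, certificates and
- // timestamps; this only illustrates the cryptographic core.
- const crypto = require('crypto')
- const fs = require('fs')
- 
- const { publicKey, privateKey } = crypto.generateKeyPairSync('ed25519')
- 
- function signVideo(path) {
-   const digest = crypto.createHash('sha256').update(fs.readFileSync(path)).digest()
-   return crypto.sign(null, digest, privateKey) // ties the file to this device's key
- }
- 
- function verifyVideo(path, signature) {
-   const digest = crypto.createHash('sha256').update(fs.readFileSync(path)).digest()
-   return crypto.verify(null, digest, publicKey, signature)
- }
- </code></pre>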
-
- <p>Paperclip maximizers that focus on eyeballs are so 20th century. Advertising as an industry can only exist because of a quirk of our nervous system—that we are susceptible to addiction. Be it tobacco, gambling, or heroin, we recognize addictive behaviour when we see it. Or do we? It turns out that the human brain's reward feedback loops are relatively easy to game. Large corporations such as Zynga (Farmville) exist solely because of it; free-to-use social media platforms like Facebook and Twitter are dominant precisely because they are structured to reward frequent interaction and to generate emotional responses (not necessarily positive emotions—anger and hatred are just as good when it comes to directing eyeballs towards advertisers). "Smartphone addiction" is a side-effect of advertising as a revenue model: frequent short bursts of interaction keep us coming back for more.</p>
-
- <p>Thanks to deep learning, neuroscientists have mechanised the process of making apps more addictive. <a href="https://techcrunch.com/2017/02/13/dopamine-labs-slings-tools-to-boost-and-reduce-app-addiction/">Dopamine Labs</a>
- is one startup that provides tools to app developers to make any app more addictive, as well as to reduce the desire to continue a behaviour if it's undesirable. It goes a bit beyond automated A/B testing; A/B testing allows developers to plot a binary tree path between options, but true deep learning driven addictiveness maximizers can optimize for multiple attractors simultaneously. Now, Dopamine Labs seem, going by their public face, to have ethical qualms about the misuse of addiction maximizers in software. But neuroscience isn't a secret, and sooner or later some really unscrupulous people will try to see how far they can push it.</p>
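- 
- <p>Roughly, the difference is between comparing two fixed variants and continuously searching a whole space of knobs for whatever keeps people hooked. A sketch under invented names and numbers (the epsilon-greedy chooser stands in for a much fancier learned model):</p>
- 
- <pre><code>// All names and numbers here are invented for illustration.
- 
- // A/B testing: compare two fixed variants and keep the better one.
- function abWinner(a, b) {
-   return a.conversions >= b.conversions ? a : b
- }
- 
- // A learning optimizer: many variants, many knobs, continuously exploited
- // and occasionally re-explored.
- function pickVariant(variants, epsilon = 0.1) {
-   if (epsilon > Math.random()) {
-     return variants[Math.floor(Math.random() * variants.length)] // explore
-   }
-   return variants.reduce((best, v) =>
-     v.observedEngagement > best.observedEngagement ? v : best)   // exploit
- }
- </code></pre>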
-
- <p>Let me give you a more specific scenario.</p>
-
- <p>Apple have put a lot of effort into making <a href="https://www.macworld.com/article/3225406/iphone-ipad/face-id-iphone-x-faq.html">realtime face recognition</a> work with the iPhone X. You can't fool an iPhone X with a photo or even a simple mask: it does depth mapping to ensure your eyes are in the right place (and can tell whether they're open or closed) and recognize your face from underlying bone structure through makeup and bruises. It's running continuously, checking pretty much as often as every time you'd hit the home button on a more traditional smartphone UI, and it can see where your eyeballs are pointing. The purpose of this is to make it difficult for a phone thief to get anywhere if they steal your device, but it means your phone can monitor your facial expressions and correlate them with app usage. Your phone will be aware of precisely what you like to look at on its screen. With addiction-seeking deep learning and neural-network generated images, it is in principle possible to feed you an endlessly escalating payload of arousal-maximizing inputs. It might be Facebook or Twitter messages optimized to produce outrage, or it could be porn generated by AI to appeal to kinks you aren't even consciously aware of. But either way, the app now owns your central nervous system—and you <strong>will</strong> be monetized.</p>
-
- <p>Finally, I'd like to raise a really hair-raising spectre that goes well beyond the use of deep learning and targeted propaganda in cyberwar.</p>
-
- <p>Back in 2011, an obscure Russian software house launched an iPhone app for pickup artists called <a href="http://www.antipope.org/charlie/blog-static/2012/03/not-an-april-fool-1.html">Girls around Me</a>. (Spoiler: Apple pulled it like a hot potato when word got out.) The app worked out where the user was via GPS, then queried FourSquare and Facebook for people matching a simple relational search—for single females (per Facebook) who had checked in (or been checked in by their friends) in your vicinity (via FourSquare). The app then displayed their locations on a map, along with links to their social media profiles. </p>
-
- <p>If they were doing it today the interface would be gamified, showing strike rates and a leaderboard and flagging targets who succumbed to harassment as easy lays. But these days the cool kids and single adults are all using dating apps with a missing vowel in the name: only a creeper would want something like "Girls around Me", right?</p>
-
- <p>Unfortunately there are even nastier uses than scraping social media to find potential victims for serial rapists. Does your social media profile indicate your political or religious affiliation? Nope? Don't worry, Cambridge Analytica can work them out with 99.9% precision just by scanning the tweets and Facebook comments you liked. Add a service that can identify people's affiliations and locations, and you have the beginning of a flash mob app: one that will show you people like Us and people like Them on a hyper-local map. </p>
-
- <p>Imagine you're young, female, and a supermarket has figured out you're pregnant by analysing the pattern of your recent purchases, like <a href="http://www.nytimes.com/2012/02/19/magazine/shopping-habits.html?_r=1&hp=&pagewanted=all">Target back in 2012</a>. </p>
-
- <p>Now imagine that all the anti-abortion campaigners in your town have an app called "babies at risk" on their phones. Someone has paid for the analytics feed from the supermarket and the result is that every time you go near a family planning clinic a group of unfriendly anti-abortion protesters engulfs you. </p>
-
- <p>Or imagine you're male and gay, and the "God Hates Fags" crowd has invented a 100% reliable Gaydar app (based on your Grindr profile) and is getting their fellow travellers to queer-bash gay men <em>only when they're alone or out-numbered 10:1</em>. (That's the special horror of precise geolocation.) Or imagine you're in Pakistan and Christian/Muslim tensions are mounting, or you're in rural Alabama, or ... the possibilities are endless.</p>
-
- <p>Someone out there is working on it: a geolocation-aware social media scraping deep learning application, that uses a gamified, competitive interface to reward its "players" for joining in acts of mob violence against whoever the app developer hates. Probably it has an innocuous-seeming but highly addictive training mode to get the users accustomed to working in teams and obeying the app's instructions—think Ingress or Pokemon Go. Then, at some pre-planned zero hour, it switches mode and starts rewarding players for violence—players who have been primed to think of their targets as vermin, by a steady drip-feed of micro-targeted dehumanizing propaganda delivered over a period of months.</p>
-
- <p>And the worst bit of this picture? </p>
-
- <p>Is that the app developer isn't a nation-state trying to disrupt its enemies, or an extremist political group trying to murder gays, Jews, or Muslims; <strong>it's just a paperclip maximizer doing what it does—and you are the paper</strong>.</p>
- </article>
-
-
- <hr>
-
- <footer>
- <p>
- <a href="/david/" title="Aller à l’accueil"><svg class="icon icon-home">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-home"></use>
- </svg> Accueil</a> •
- <a href="/david/log/" title="Accès au flux RSS"><svg class="icon icon-rss2">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-rss2"></use>
- </svg> Suivre</a> •
- <a href="http://larlet.com" title="Go to my English profile" data-instant><svg class="icon icon-user-tie">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-user-tie"></use>
- </svg> Pro</a> •
- <a href="mailto:david%40larlet.fr" title="Envoyer un courriel"><svg class="icon icon-mail">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-mail"></use>
- </svg> Email</a> •
- <abbr class="nowrap" title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340"><svg class="icon icon-hammer2">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-hammer2"></use>
- </svg> Légal</abbr>
- </p>
- <template id="theme-selector">
- <form>
- <fieldset>
- <legend><svg class="icon icon-brightness-contrast">
- <use xlink:href="/static/david/icons2/symbol-defs-2021-12.svg#icon-brightness-contrast"></use>
- </svg> Theme</legend>
- <label>
- <input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
- </label>
- <label>
- <input type="radio" value="dark" name="chosen-color-scheme"> Foncé
- </label>
- <label>
- <input type="radio" value="light" name="chosen-color-scheme"> Clair
- </label>
- </fieldset>
- </form>
- </template>
- </footer>
- <script src="/static/david/js/instantpage-5.1.0.min.js" type="module"></script>
- <script>
- function loadThemeForm(templateName) {
- const themeSelectorTemplate = document.querySelector(templateName)
- const form = themeSelectorTemplate.content.firstElementChild
- themeSelectorTemplate.replaceWith(form)
-
- form.addEventListener('change', (e) => {
- const chosenColorScheme = e.target.value
- localStorage.setItem('theme', chosenColorScheme)
- toggleTheme(chosenColorScheme)
- })
-
- const selectedTheme = localStorage.getItem('theme')
- if (selectedTheme && selectedTheme !== 'undefined') {
- form.querySelector(`[value="${selectedTheme}"]`).checked = true
- }
- }
-
- const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
- window.addEventListener('load', () => {
- let hasDarkRules = false
- for (const styleSheet of Array.from(document.styleSheets)) {
- let mediaRules = []
- for (const cssRule of styleSheet.cssRules) {
- if (cssRule.type !== CSSRule.MEDIA_RULE) {
- continue
- }
- // WARNING: Safari does not support `conditionText`.
- if (cssRule.conditionText) {
- if (cssRule.conditionText !== prefersColorSchemeDark) {
- continue
- }
- } else {
- // Fallback for Safari: inspect the serialized rule text and skip any
- // media rule that is not the dark-scheme one (mirrors the check above).
- if (!cssRule.cssText.startsWith('@media ' + prefersColorSchemeDark)) {
- continue
- }
- }
- mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
- }
-
- // WARNING: do not try to insert a rule into a styleSheet you are
- // currently iterating on, otherwise the browser will be stuck
- // in an infinite loop…
- for (const mediaRule of mediaRules) {
- styleSheet.insertRule(mediaRule.cssText)
- hasDarkRules = true
- }
- }
- if (hasDarkRules) {
- loadThemeForm('#theme-selector')
- }
- })
- </script>
- </body>
- </html>