<!doctype html><!-- This is a valid HTML5 document. -->
<!-- Screen readers, SEO, extensions and so on. -->
<html lang="en">
<!-- Has to be within the first 1024 bytes, hence before the <title>
See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
<meta charset="utf-8">
<!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
<!-- The viewport meta is quite crowded and we are responsible for that.
See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
<meta name="viewport" content="width=device-width,initial-scale=1">
<!-- Required to make a valid HTML5 document. -->
<title>Understanding ProRAW (archive) — David Larlet</title>
<meta name="description" content="Publication cached in order to keep a record of it.">
<!-- That good ol' feed, subscribe :). -->
<link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
<!-- Generated from https://realfavicongenerator.net/ such a mess. -->
<link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
<link rel="manifest" href="/static/david/icons2/site.webmanifest">
<link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
<link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
<meta name="msapplication-TileColor" content="#f0f0ea">
<meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
<meta name="theme-color" content="#f0f0ea">
<!-- Documented, feel free to shoot an email. -->
<link rel="stylesheet" href="/static/david/css/style_2020-06-19.css">
<!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<script>
function toggleTheme(themeName) {
  document.documentElement.classList.toggle(
    'forced-dark',
    themeName === 'dark'
  )
  document.documentElement.classList.toggle(
    'forced-light',
    themeName === 'light'
  )
}
// `localStorage.getItem` returns null when no theme was ever saved;
// guard against that, and against a stray 'undefined' string.
const selectedTheme = localStorage.getItem('theme')
if (selectedTheme !== null && selectedTheme !== 'undefined') {
  toggleTheme(selectedTheme)
}
</script>

<meta name="robots" content="noindex, nofollow">
<meta content="origin-when-cross-origin" name="referrer">
<!-- Canonical URL for SEO purposes -->
<link rel="canonical" href="https://blog.halide.cam/understanding-proraw-4eed556d4c54">

<body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick">

<article>
<header>
<h1>Understanding ProRAW</h1>
</header>
<nav>
<p class="center">
<a href="/david/" title="Go to the homepage">🏠</a> •
<a href="https://blog.halide.cam/understanding-proraw-4eed556d4c54" title="Link to the original content">Original source</a>
</p>
</nav>
<hr>
<section>
<p>We make the most popular RAW camera for iPhone, so when Apple revealed their new ProRAW image format, we were beyond excited.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/6000/1*tfZD-KB2jIXjAsNhFWwANA.jpeg" width="3000" height="1500">
</figure>
<p>Then they announced it’s coming to the built-in camera app.</p>
<p>Many developers in our shoes would freak out, thinking Apple wants to gobble up their customers. We were just confused.</p>
<p>Apple builds products for the broadest possible audience, and RAW is a tool for photo-nerds. These powerful files take skill to edit and come with significant tradeoffs. Why would Apple cram a complicated feature into an app meant for everyone?</p>
<p>As we dug deeper into ProRAW, we realized it wasn’t just about making RAW more powerful. It’s about making RAW approachable. ProRAW could very well change how everyone shoots and edits photos, beginners and experts alike.</p>
<p>To understand what makes it so special, the first half of this post explains how a digital camera develops a photo. Then we go on to explain the strengths and weaknesses of traditional RAWs. Finally, we dive into what’s unique about ProRAW, how it changes the game, and its few remaining drawbacks.</p>
<p>Grab a coffee, because this is a long read.</p>
<h1>A Short Tour Through a Digital Camera</h1>
<p>Imagine you’re looking at this scene through your camera.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3024/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg" width="1512" height="2016">
</figure>
<p>When you tap the camera button on your camera, light passes through a series of optics and lands on a digital sensor where it is captured.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/6000/1*h0tUHkeVGQ6DO1gKHfUU6A.png" width="3000" height="1500">
</figure>
<p>We’re going to talk through the three important steps that take place as your camera converts sensor values into a picture.</p>
<h2>Step 1: Demosaic</h2>
<p>Your digital sensor absorbs light and turns it into numbers. The more light it sees, the higher the number.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3024/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg" width="1512" height="2016">
</figure>
<p>I used black and white for a reason — digital sensors are totally colorblind. In 1976, a clever engineer at Kodak found a solution: put a grid of color filters over the sensor, so every pixel sees either red, green, or blue.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/5120/1*YpY7tmiCxOL11eOnyODz7A.png" width="2560" height="1664">
</figure>
<p>This mosaic pattern is called a Bayer pattern, named after its inventor, Bryce Bayer. With a color filter in place, our sensor now sees a grid of alternating colors. Let’s zoom in on the leaves of the tree, and see what the sensor sees.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/1200/1*UaFk3pbG95R8uYHHd8G8xw.png" width="600" height="600">
</figure>
<p>Each pixel is either red, green, or blue. We figure out the real colors by going over every pixel, looking at its neighbors, and guessing its two missing colors. This crucial step is known as “<a href="https://en.wikipedia.org/wiki/Demosaicing" target="_blank" rel="noopener">demosaicing</a>,” or “debayering.”</p>
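<p>To make this concrete, here’s a toy bilinear sketch of the idea (our illustration, not any particular camera’s algorithm), assuming an RGGB Bayer layout:</p>
<pre><code>// Recover a pixel’s missing channels by averaging its neighbors.
// `mosaic` holds raw sensor values in an RGGB Bayer layout; real
// demosaic algorithms are edge-aware and far more sophisticated.
func averageOfNeighbors(in mosaic: [[Float]], x: Int, y: Int,
                        offsets: [(Int, Int)]) -> Float {
    var sum: Float = 0
    var count: Float = 0
    for (dx, dy) in offsets {
        let nx = x + dx, ny = y + dy
        guard ny >= 0, ny < mosaic.count, nx >= 0, nx < mosaic[ny].count else { continue }
        sum += mosaic[ny][nx]
        count += 1
    }
    return count > 0 ? sum / count : 0
}

// At a red site in RGGB, green lives in the four adjacent pixels
// and blue in the four diagonal ones.
func rgbAtRedSite(in mosaic: [[Float]], x: Int, y: Int) -> (r: Float, g: Float, b: Float) {
    (r: mosaic[y][x],
     g: averageOfNeighbors(in: mosaic, x: x, y: y, offsets: [(0, -1), (0, 1), (-1, 0), (1, 0)]),
     b: averageOfNeighbors(in: mosaic, x: x, y: y, offsets: [(-1, -1), (1, -1), (-1, 1), (1, 1)]))
}
</code></pre>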
<p>It’s a tricky problem. Compare a simple algorithm to a higher-quality one, and you’ll spot problems like purple fringes.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3600/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg" width="1800" height="600">
<figcaption>Middle: A fast algorithm. Right: a higher-quality one.</figcaption>
</figure>
<p>Look at the pebbles in the shot. The fast algorithm is a little more “pixely.”</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/1920/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg" width="960" height="480">
<figcaption>Left: Fast. Right: High quality.</figcaption>
</figure>
<p>There are quite a few popular demosaic algorithms to choose from, each with strengths and weaknesses. The perfect algorithm depends on the type of sensor, your camera settings, and even the subject matter. For example, if you’re shooting the night sky, some algorithms produce better results on stars.</p>
<p>After demosaic, we end up with…</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3024/1*KsiUoBU9r4AuQT91N4v23Q.png" width="1512" height="2016">
<figcaption>(Simulated results to keep things simple)</figcaption>
</figure>
<p>That’s not great. As you can see, the color and exposure are off.</p>
<p>There’s nothing wrong with your camera. Quite the opposite: your camera captures way more information than your screen can display. We have to take those sensor readings, which measure the light from your <strong>scene</strong>, and transform them into pixel values that light up your <strong>display</strong>.</p>
<h2>Step 2: Transform from Scene to Display</h2>
<p>Let’s start with color. The following chart represents all the colors a human can see, and the triangle in the middle covers what most screens can display.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/5050/1*aj7B542Qbk9uCeaB7rCitA.jpeg" width="2525" height="1275">
</figure>
<p>When your camera captures colors outside the triangle, we have to push and pull those invalid colors to fit inside.</p>
<p>One detail makes this especially tricky: deciding which white is “true” white. If you’ve ever shopped for light bulbs, you know every white light in the real world has a slight yellow or blue tint. Our brains adjust our perception of white based on hints in our surroundings. It’s called <a href="https://en.wikipedia.org/wiki/Color_constancy" target="_blank" rel="noopener">color constancy</a>, and it’s why this yellow/blue dress optical illusion fools people.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/2400/1*FcQhGavMhNpcrHlHIk0_uA.png" width="1200" height="900">
<figcaption><a href="https://en.wikipedia.org/wiki/The_dress" target="_blank" rel="noopener">The Dress</a></figcaption>
</figure>
<p>Modern cameras are very good at figuring out the white point in most situations. So we pick our white point, run the math, and end up with a perfectly white-balanced image.</p>
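<p>The math behind that last step is smaller than you’d think. At heart, white balance is a pair of channel gains derived from the chosen neutral; a minimal sketch (real pipelines work in calibrated, camera-specific color spaces):</p>
<pre><code>// Scale red and blue so the measured neutral comes out equal in all
// three channels, i.e. gray. `neutral` is what the camera believes a
// white object measures under the scene’s light.
struct RGB { var r, g, b: Float }

func whiteBalanced(_ pixel: RGB, neutral: RGB) -> RGB {
    RGB(r: pixel.r * (neutral.g / neutral.r),
        g: pixel.g,
        b: pixel.b * (neutral.g / neutral.b))
}
</code></pre>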
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3024/1*JBl1Ib7wqrxsyukaHBA1TA.png" width="1512" height="2016">
</figure>
<p>That looks pretty close to what we saw, but the awning at the top of the frame is under-exposed.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/1126/1*WwDfyRYXScdIPBlf8ZCm5Q.png" width="563" height="563">
</figure>
<p>Let’s turn up the exposure to bring out detail…</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3024/1*99yfqbgXyF13DM8ooEJaLQ.png" width="1512" height="2016">
</figure>
<p>… but unfortunately, now the rest of the image is too bright.</p>
<p>The technical name for this problem is “dynamic range.” It’s the range of light you can capture or display at one time, measured from the brightest bright to the darkest shadow. You’ll frequently hear this range measured in “stops.”</p>
<p>Ever see terms like ‘HDR’ or ‘XDR’ being thrown around? The “DR” in “HDR” stands for dynamic range. People make quite a fuss about it. This is an especially difficult problem in technology, because the human eye is <strong>incredibly good</strong>.</p>
<p>It’s easily the most powerful camera in the world, as it can see up to 30 stops of dynamic range. Most screens can <em>display</em> 8 stops. Digital cameras capture up to 15 stops. When we try to display all this information on a screen with lower dynamic range, it’s going to look wrong, sometimes in unexpected ways.</p>
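<p>A “stop” is a doubling of light, so a device’s dynamic range in stops is just a base-2 logarithm of its contrast ratio. A quick sanity check of those numbers, using illustrative ratios rather than measurements:</p>
<pre><code>import Foundation

// Stops = log2(brightest / darkest). The ratios below are chosen to
// match the stop counts quoted above: 2^8, 2^15, and 2^30.
for (device, contrastRatio) in [("screen", 256.0), ("camera", 32_768.0), ("human eye", 1_073_741_824.0)] {
    print("\(device): \(Int(log2(contrastRatio))) stops")
}
</code></pre>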
<p>Notice the sky has a weird patch of cyan, caused by one of the color channels clipping.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/1728/1*2wHP6e5hrIxrHXyJ7Pcc1g.png" width="864" height="328">
</figure>
<p>The solution to our dynamic range problem is a little more complicated than bringing up shadows and turning down highlights everywhere. That would make the whole image feel flat.</p>
<p>Instead, you want to darken and lighten small areas of the image. Fifty years ago, photographers took hours to tweak their negatives using a process called “dodging and burning.”</p>
<p>Today it’s called “local tone mapping.”</p>
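<p>To see the difference in code, here’s the shape of a simple <em>global</em> tone curve (Reinhard’s classic operator, a generic example rather than Apple’s algorithm). A local tone mapper applies this kind of compression with varying strength per region of the image:</p>
<pre><code>// Squeeze scene-referred luminance (any positive value) into display
// range 0 to 1. Applied globally, a curve like this flattens the whole
// frame equally; applied locally, it can lift the awning while leaving
// the sky’s contrast alone.
func reinhard(_ sceneLuminance: Float) -> Float {
    sceneLuminance / (1 + sceneLuminance)
}
</code></pre>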
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/6048/1*3-FXfKG16GUFHMOLOmXMKg.jpeg" width="3024" height="2016">
<figcaption>The image on the right has some gentle tone mapping applied.</figcaption>
</figure>
<p>With that, our image looks great.</p>
<p>Let’s go ahead and email it to our friends… oh, oops, it’s 100 megabytes. The image still contains all that data we can’t see. Luckily, we don’t need all of that data now that we’re done editing.</p>
<h2>Step 3: Optimize</h2>
<p>In computer graphics, if you use more bits, your math gets more accurate. While editing, we needed 64 bits per pixel to get nice results. Once we’re done editing, we can cut it down to 32 bits — halving our file size — and nobody can tell the difference.</p>
<p>Next, we can throw out most of our color information. This cuts it in half <em>again</em>.</p>
<p>Finally, we can apply something called “lossy compression” — the kind you find in JPEGs. We end up with an image that’s only 1.6 MB to share with our friends.</p>
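<p>The arithmetic is easy to check. Assuming a hypothetical 12-megapixel image, the sizes fall straight out of the bit depth:</p>
<pre><code>// Uncompressed size at each stage of the optimize step, in megabytes.
let pixelCount = 12_000_000.0
for (stage, bitsPerPixel) in [("while editing", 64.0), ("reduced bit depth", 32.0), ("reduced color", 16.0)] {
    let megabytes = pixelCount * bitsPerPixel / 8 / 1_000_000
    print("\(stage): \(Int(megabytes)) MB")  // 96, 48, 24; lossy compression shrinks it much further
}
</code></pre>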
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/6000/1*mt0D2vrarvjazruDvYK7iw.png" width="3000" height="1500">
</figure>
<p>Phew. Take a breath, and let all that sink in. When you’re ready, we can finally answer…</p>
<h1>What is RAW? Why is it Magic?</h1>
<p>We just <strong>developed</strong> a photo. Every step in development is destructive, meaning we lose information. For example, in Step 2, once you shift colors around to fit into that triangle, you can never figure out the original, real-world colors.</p>
<p>So what happens when mistakes are made? Let’s go back to California’s recent wildfires that <a href="https://www.nytimes.com/2020/09/09/us/pictures-photos-california-fires.html" target="_blank" rel="noopener">turned the sky orange</a>. I took a photo with a color chart at the time, figuring it would come in handy someday.</p>
<p>In real life, it looked like this:</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/4096/1*2VfJISrXMInRKX85P3Szhw.jpeg" width="2048" height="1536">
</figure>
<p>The built-in camera app was confused by that orange sky, and tried to make everything neutral — because that’s what the world usually looks like. People were confused to find their cameras refused to take an image of the world as they saw it.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/4096/1*oNgdi0nr93vU6ozzkB8l2g.jpeg" width="2048" height="1536">
</figure>
<p>A lot of folks used manual settings to override their camera’s automatic white balancing, but let’s say you didn’t. I’ll take that incorrect JPEG into an image editor like Lightroom and try to match it back to the original…</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/4096/1*0IYpHEDlusoF171jVWsoMw.jpeg" width="2048" height="768">
<figcaption>The actual scene on the left, and an attempt to adjust our poorly white-balanced image on the right.</figcaption>
</figure>
<p>Hmm. Notice how it’s messed up some colors, shifting blue to purple.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/1244/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg" width="622" height="301">
</figure>
<p>Trying to un-process a processed image like a JPEG is like trying to un-bake a cake. When your camera produces JPEGs, you’d better love the choices it made, because there’s no going back.</p>
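<p>One concrete reason there’s no going back: clipping. Once development clamps a channel into display range, every brighter scene value collapses to the same number, and no inverse function can tell them apart again:</p>
<pre><code>// Development clamps scene-referred values into display range…
func develop(_ sceneValue: Float) -> Float {
    min(sceneValue, 1.0)
}

// …so distinct scene values become indistinguishable afterwards.
let sceneValues: [Float] = [0.8, 1.7, 42.0]
let developed = sceneValues.map(develop)  // [0.8, 1.0, 1.0]
</code></pre>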
<p>Now imagine if, instead of saving a JPEG, your camera saved the original sensor values. Now you can make totally different processing decisions yourself, like white balance. You get the raw data.</p>
<p>Welcome to RAW files.</p>
<p>We have written about RAW a lot on this blog. We love it. Shooting RAW <a target="_blank" rel="noopener" href="/the-power-of-raw-on-iphone-part-1-shooting-raw-ef02becb7002">gives you magical powers</a>. With a few knobs, you can rescue photos that you thought were lost to poor exposure.</p>
<p>So far, we’ve only talked about mistakes, but RAWs also give you the freedom to experiment with radically different choices for artistic effect, letting you <strong>develop</strong> the photo so it looks how you experienced it.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/2800/1*F-HXEeVByR8TXVwszChZkQ.jpeg" width="1400" height="933">
<figcaption>These are the kind of edits that a RAW file enables.</figcaption>
</figure>
<p>Remember old film cameras? Their photos also had to be developed from a negative. RAW data is usually stored in the DNG file format, which stands for <strong>D</strong>igital <strong>N</strong>e<strong>g</strong>ative. Some camera makers have their own formats, but those companies are jerks.</p>
<p>DNG is an open standard, so anyone can build software that reads and writes DNGs. Best of all, the file format continues to evolve, as we’ll see shortly.</p>
<p>First, we have some bad news.</p>
<h2>RAWs are Great Until They Aren’t</h2>
<p>We make a <a href="http://halide.cam" target="_blank" rel="noopener">RAW camera app</a>, so of course we’re fans of RAW. That also means we get plenty of support emails about it. By far, the most common question is, “Why do my RAWs look worse than the built-in camera app?”</p>
<p>iPhone cameras got a lot better over time. At first, these were leaps in hardware: better and bigger sensors and lenses allowed sharper shots. Eventually, though, the processors got faster and the cameras couldn’t get larger. The camera got smarter.</p>
<p>iPhones take many photos and combine them into one shot, picking detail in the shadows from one photo, the right exposure on your dog’s face from another, and a few other ones for extra detail. This is merged into the final result in milliseconds, without requiring any effort on your part. It’s very clever stuff, with cool names like Smart HDR and Deep Fusion.</p>
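<p>In spirit (the real pipelines are unpublished and involve alignment, denoising, and machine learning), fusing a burst is a per-pixel weighted blend that trusts whichever frame exposed each spot best. A cartoon version:</p>
<pre><code>// Blend several frames of the same scene, weighting well-exposed
// values (near mid-gray) over crushed or blown-out ones.
func fuse(frames: [[Float]]) -> [Float] {
    (0 ..< frames[0].count).map { i -> Float in
        var weightedSum: Float = 0
        var totalWeight: Float = 0
        for frame in frames {
            let value = frame[i]
            let weight = max(0.001, 1 - abs(value - 0.5) * 2)  // peaks at mid-gray
            weightedSum += value * weight
            totalWeight += weight
        }
        return weightedSum / totalWeight
    }
}
</code></pre>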
<p>On the other hand, iPhone RAW files are still just one image. Which means they look quite… different.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/8000/1*m6P-96kVDOdsqXt7tz7LDg.jpeg" width="4000" height="2400">
</figure>
<p>If you’re coming from the built-in iPhone camera app, switching over to manual processing is like switching from a car with an automatic transmission to a stick shift.</p>
<p>There’s a steep learning curve, and that’s why we built something called “Instant RAW” into our latest Mark II update, so you don’t have to spend an afternoon in an image editor tweaking your shot to get a nice result.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/12000/1*HvMYRDSzB0hSBNK7wW_F6g.png" width="6000" height="3029">
</figure>
<p>But even with Instant RAW or editing, sometimes RAWs still look different than what comes out of the built-in camera app, and it has nothing to do with your skills. RAWs lack that critical piece of the puzzle, Apple’s smarts: computational photography like Smart HDR and Deep Fusion.</p>
<p>Even if Apple handed these algorithms to third-party apps, they work on <em>bursts</em> of photos, fusing together the best parts of each image. iPhone RAWs are 12 MB each. If you want to reproduce the results of Apple’s camera using one of these algorithms, you’re looking at ten times the storage needed per photo.</p>
<p>Oh, and there’s one more problem: neither the front-facing nor the ultra-wide camera can shoot RAW.</p>
<p>ProRAW elegantly solves all of these problems and more. You can finally reproduce the results of the first-party camera, while retaining most of the editing latitude of traditional RAWs.</p>
<h1>Enter ProRAW</h1>
<p>Technically, there’s no such thing as a ProRAW file. ProRAW images are regular DNG files that take advantage of some little-known features in the specification, and introduce a few new ones.</p>
<p>Remember how DNG is an open file format? Apple worked with Adobe to introduce a few new tags. Adobe released the DNG 1.6 specification, with details about these tags, the very day ProRAW went into public beta tests.</p>
<p>This may be surprising to some: ProRAW is not a proprietary or closed format. Credit where it is due: Apple deserves kudos for bringing their improvements to the DNG standard. When you shoot with ProRAW, there’s absolutely nothing locking your photos into the Apple ecosystem.</p>
<p>Let’s dive into what makes ProRAW files different from RAWs of days past…</p>
<h2>Demosaic is Already Done</h2>
<p>ProRAWs store pixel values <em>after</em> the demosaic step. It’s like they took the output of Step 1, from earlier, and stored those values. We’ll talk about why they’re doing this in a bit.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/6000/1*XvEBPGtrYv6-bD-VvTjujg.png" width="3000" height="1500">
</figure>
<p>It’s just important to understand that these demosaiced color values still represent the <strong>scene</strong>, not your display. They contain all the original dynamic range. They contain all the out-of-range colors. They retain all the flexibility of working with a “true” RAW. They just skip step one.</p>
<p>In theory, you lose flexibility in choosing specific demosaic algorithms. In practice, most expert photographers don’t bother.</p>
<p>It’s also quite possible that iOS can do a better job demosaicing your images than any third-party RAW editor. Apple’s greatest strength is its unity of hardware and software, so they know exactly the sensor you’re using, and how it behaves at different ISO settings. In theory, they could even apply image recognition as part of the process; if iOS detects a night sky, it could automatically pick a star-friendly demosaic algorithm.</p>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3000/1*iBZPgEFTrzK6J2uYMa6CTA.jpeg" width="1500" height="1125">
</figure>
<figure>
<img alt="Image for post" src="https://miro.medium.com/max/3000/1*K6xZ3e_TqggD5SSVBVh7cA.jpeg" width="1500" height="1125">
<figcaption>Photographer <a href="https://t.co/VQGZpdaA4B?amp=1" target="_blank" rel="noopener">Austin Mann</a> shows off how ProRAW already changes the game for nighttime photography <a href="https://t.co/VQGZpdaA4B?amp=1" target="_blank" rel="noopener">in this fantastic look at ProRAW</a>.</figcaption>
</figure>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="d7c0" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This sly move also gives Apple greater control over the image sensors they use in the future. Earlier, I said <em class="kx">most</em> cameras use a Bayer pattern. Some camera manufacturers use different patterns, which require different algorithms. Fujifilm invented the <a href="https://en.wikipedia.org/wiki/Fujifilm_X-Trans_sensor" class="cl km" target="_blank" rel="noopener">X-Trans</a> sensor that creates sharper images, with more film-like grain. There’s even a <a href="https://blog.sigmaphoto.com/2011/faqs-the-sigma-camera-and-its-foveon-x3-direct-image-sensor/" class="cl km" target="_blank" rel="noopener">Foveon</a> digital sensor that stacks color filters on top of each other.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz lw">
<div class="hw s ho hx">
<div class="lx hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*geXtGrkS1PXytzJ1kr8p6w.jpeg?q=20" width="602" height="380"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="602" height="380"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1204/1*geXtGrkS1PXytzJ1kr8p6w.jpeg" width="602" height="380" srcSet="https://miro.medium.com/max/552/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 276w, https://miro.medium.com/max/1104/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 552w, https://miro.medium.com/max/1204/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 602w" sizes="602px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">It’s marketing, so take it with a grain of salt.</figcaption></figure><p id="23be" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Apple is now a company that designs its own silicon, and it’s very good at it. Cameras are a huge driving factor in phone purchases. It seems inevitable for Apple to innovate in the realm of sensors. Taking over the demosaic step would smooth such a transition. Hypothetically speaking, they could swap out their current bayer sensors with an “Apple C1,” and so long as it saves in ProRAW, it would work from day one in every pro photography process and app like Lightroom without having to wait for Adobe to write a new demosaic algorithm.</p><p id="9eb1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We got a taste for these benefits with the surprise reveal that <strong class="gk ku">ProRAW is available on all four cameras.</strong></p><p id="0358" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Previously, you could not shoot RAW with the front-facing or ultra-wide cameras. Apple is vague on the technical limitations, but indicated that even if third-party developers had access to the RAW data, they wouldn’t know what to do with it. With ProRAW, they can handle the annoying bits, and leave apps like editors to deal with what they’re better at: editing.</p><h2 id="6135" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Bridging the Algorithm Gap</h2><p id="bc2c" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">So now we have the data, but what about the local tone mapping and other computational photography goodies? Apple could open up their algorithms to third-party apps, but that isn’t as useful as you think. For starters, you’d need a save a ton of RAW files. We’d be back at that 100 megabyte file.</p><p id="85c6" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I also think there are reasonable questions about producing consistent results down the road as these algorithms evolve. It would be surprising to return to a photo a year from now to find Apple’s AI produces different results.</p><p id="d496" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Instead, ProRAW stores results of computational photography right inside the RAW. This is another reason they need to store demosaiced data, as these algorithms operate on color, not RAW data. Once you demosaic, there’s no going back. 
I mean, what would you even call that, remosaic?</p><p id="4e7e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Smart HDR does this in the least destructive way. Apple worked with Adobe to <a href="https://helpx.adobe.com/photoshop/kb/dng-specification-tags.html" class="cl km" target="_blank" rel="noopener">introduce a new type of tag</a> into the DNG standard, called a “Profile Gain Table Map.” This data gives your editor everything it needs to know to tone map your photo image and end up with results identical to the first party camera. Because it’s separate data, you can turn down its strength, or turn it off completely.</p><p id="299d" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That’s what we do in Halide when you’re looking at a ProRAW image with Instant RAW disabled.</p></div></div>
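<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Here is a minimal sketch of how an editor could honor that strength control. The gain value stands in for a sample from the file’s gain table; the real tag layout is defined by the DNG spec, so treat the shape of the data here as an assumption for illustration.</p><pre><code>// Illustrative sketch only: `gain` stands in for a value sampled from
// the DNG's Profile Gain Table Map for this pixel. A strength of 1
// reproduces the as-shot look; a strength of 0 turns the map off.
func toneMapped(value: Float, gain: Float, strength: Float) -&gt; Float {
    let effectiveGain = 1 + strength * (gain - 1)
    return value * effectiveGain
}</code></pre></div></div>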
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*y-uuWP8T138MVIvGcq_rMA.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*y-uuWP8T138MVIvGcq_rMA.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*y-uuWP8T138MVIvGcq_rMA.png 276w, https://miro.medium.com/max/1104/1*y-uuWP8T138MVIvGcq_rMA.png 552w, https://miro.medium.com/max/1280/1*y-uuWP8T138MVIvGcq_rMA.png 640w, https://miro.medium.com/max/1456/1*y-uuWP8T138MVIvGcq_rMA.png 728w, https://miro.medium.com/max/1632/1*y-uuWP8T138MVIvGcq_rMA.png 816w, https://miro.medium.com/max/1808/1*y-uuWP8T138MVIvGcq_rMA.png 904w, https://miro.medium.com/max/1984/1*y-uuWP8T138MVIvGcq_rMA.png 992w, https://miro.medium.com/max/2000/1*y-uuWP8T138MVIvGcq_rMA.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Local tone mapping lifts the shadows on the ground.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="c655" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Even if you opt-out of local tone mapping, you now have the underlying HDR data to work with in an editor, and the results are…</p><figure class="hh hi hj hk hl hg">
<div class="hw s ho">
<div class="ly hz s"></div></div></figure><p id="62a1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Deep Fusion is a different story. While it’s popularly known as “Sweater Mode,” the more technical objective is “noise reduction in low-light.” Unlike those Gain Table Maps, there’s no elegant way to separate its effects from the final image. If you don’t want Deep Fusion, your only option is to opt-out of the process at the time of capture.</p><p id="d905" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’ve read our articles for a while, you know we’re fans of natural noise. Prior to Deep Fusion, the iPhone JPEGs were notorious for their “watercolor effects.” In this image I took a few years ago, notice faces get smeared into nothing.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lz">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg?q=20" width="1858" height="929"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1858" height="929"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3716/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg" width="1858" height="929" srcSet="https://miro.medium.com/max/552/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 276w, https://miro.medium.com/max/1104/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 552w, https://miro.medium.com/max/1280/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 640w, https://miro.medium.com/max/1456/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 728w, https://miro.medium.com/max/1632/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 816w, https://miro.medium.com/max/1808/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 904w, https://miro.medium.com/max/1984/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 992w, https://miro.medium.com/max/2000/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Taken before social distancing was a thing.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="099f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Deep Fusion produces very different results. Instead of just smearing an image, it combines several results together to sort of “average out” the results. It looks way, way more natural.</p><p id="0adb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’re not a fan of Deep Fusion, there’s an API to opt-out. We expose this in Halide under Capture Settings. As I was writing this article, I realized this toggle makes it easy to run an experiment…</p><p id="fef6" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I used a spotlight to create a high-dynamic range scene in my office.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ma">
<div class="hw s ho hx">
<div class="mb hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/24/1*KyKm_rUL60bQheIwZ27KJA.jpeg?q=20" width="390" height="1006"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="390" height="1006"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/780/1*KyKm_rUL60bQheIwZ27KJA.jpeg" width="390" height="1006" srcSet="https://miro.medium.com/max/552/1*KyKm_rUL60bQheIwZ27KJA.jpeg 276w, https://miro.medium.com/max/780/1*KyKm_rUL60bQheIwZ27KJA.jpeg 390w" sizes="390px"/></noscript></div></div></div></div></figure><p id="715b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I spot metered the two grey cards and measured a difference of 8.3 stops. I then exposed the shot using the top grey card and took three photos:</p><p id="4a3f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">1) Native Halide RAW</p><p id="4077" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">2) ProRAW, with smart processing disabled</p><p id="6819" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">3) ProRAW with smart processing enabled</p><p id="58aa" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I took these into Lightroom and boosted the shadows. Let’s zoom in on the test pattern image.</p></div></div>
<div class="hg aj"><figure class="hh hi hj hk hl hg aj paragraph-image">
<div class="hw s ho hx">
<div class="ko hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*x6-_StGFa02oMfajJ3In6g.jpeg?q=20" width="1800" height="600"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1800" height="600"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3600/1*x6-_StGFa02oMfajJ3In6g.jpeg" width="1800" height="600" srcSet="https://miro.medium.com/max/552/1*x6-_StGFa02oMfajJ3In6g.jpeg 276w, https://miro.medium.com/max/1104/1*x6-_StGFa02oMfajJ3In6g.jpeg 552w, https://miro.medium.com/max/1280/1*x6-_StGFa02oMfajJ3In6g.jpeg 640w, https://miro.medium.com/max/1456/1*x6-_StGFa02oMfajJ3In6g.jpeg 728w, https://miro.medium.com/max/1632/1*x6-_StGFa02oMfajJ3In6g.jpeg 816w, https://miro.medium.com/max/1808/1*x6-_StGFa02oMfajJ3In6g.jpeg 904w, https://miro.medium.com/max/1984/1*x6-_StGFa02oMfajJ3In6g.jpeg 992w, https://miro.medium.com/max/2160/1*x6-_StGFa02oMfajJ3In6g.jpeg 1080w, https://miro.medium.com/max/2700/1*x6-_StGFa02oMfajJ3In6g.jpeg 1350w, https://miro.medium.com/max/3240/1*x6-_StGFa02oMfajJ3In6g.jpeg 1620w, https://miro.medium.com/max/3600/1*x6-_StGFa02oMfajJ3In6g.jpeg 1800w" sizes="1800px"/></noscript></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">1) Native RAW, 2) ProRAW without the algorithms 3) ProRAW with the algorithms</figcaption></figure></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="16ca" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you wonder what camera settings I used to capture this, the answer is a bit complicated. When you enable these algorithms, the iPhone ignores manual exposure settings. After all, Deep Fusion takes multiple exposures with a variety of settings. While the metadata embedded in the final image reported an ISO of 40, it’s unlikely all photos in the burst had that setting.</p><p id="9fec" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As you can see, by disabling this Smart Processing in Halide, we can still skip a lot of the noise reduction in our ProRAW files if we so choose.</p><p id="0283" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Synthetic tests like this are all fine and good, but what about the real world?</p><p id="8787" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Sebastiaan spent the last few weeks testing ProRAW in the field with the Halide beta. He found the dynamic range in ProRAW pretty mind-blowing in the day-to-day editing process:</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ky">
<div class="hw s ho hx">
<div class="le hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*oAPorA4ZnfrWnKaDY48B0g.png?q=20" width="1200" height="800"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1200" height="800"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2400/1*oAPorA4ZnfrWnKaDY48B0g.png" width="1200" height="800" srcSet="https://miro.medium.com/max/552/1*oAPorA4ZnfrWnKaDY48B0g.png 276w, https://miro.medium.com/max/1104/1*oAPorA4ZnfrWnKaDY48B0g.png 552w, https://miro.medium.com/max/1280/1*oAPorA4ZnfrWnKaDY48B0g.png 640w, https://miro.medium.com/max/1456/1*oAPorA4ZnfrWnKaDY48B0g.png 728w, https://miro.medium.com/max/1632/1*oAPorA4ZnfrWnKaDY48B0g.png 816w, https://miro.medium.com/max/1808/1*oAPorA4ZnfrWnKaDY48B0g.png 904w, https://miro.medium.com/max/1984/1*oAPorA4ZnfrWnKaDY48B0g.png 992w, https://miro.medium.com/max/2000/1*oAPorA4ZnfrWnKaDY48B0g.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><h2 id="936d" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Semantic Maps Included</h2><p id="c322" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">ProRAW has one more surprise up its sleeve. A few years ago, Apple began using neural networks to detect interesting parts of an image, such as eyes and hair.</p><p id="d9cf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Apple uses this to, say, add sharpening to <em class="kx">only</em> clouds in the sky. Sharping faces would be quite unflattering.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*qK8PHZG2JudesZzNj_XomA.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*qK8PHZG2JudesZzNj_XomA.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*qK8PHZG2JudesZzNj_XomA.png 276w, https://miro.medium.com/max/1104/1*qK8PHZG2JudesZzNj_XomA.png 552w, https://miro.medium.com/max/1280/1*qK8PHZG2JudesZzNj_XomA.png 640w, https://miro.medium.com/max/1456/1*qK8PHZG2JudesZzNj_XomA.png 728w, https://miro.medium.com/max/1632/1*qK8PHZG2JudesZzNj_XomA.png 816w, https://miro.medium.com/max/1808/1*qK8PHZG2JudesZzNj_XomA.png 904w, https://miro.medium.com/max/1984/1*qK8PHZG2JudesZzNj_XomA.png 992w, https://miro.medium.com/max/2000/1*qK8PHZG2JudesZzNj_XomA.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">An iPhone 12 Pro RAW file from the ultra-wide camera with a semantic matte for the human in the photo. Super cool.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="4644" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">ProRAW files contain these maps! Both semantic maps used on faces and Portrait Effect Mattes that power the background blur in Portrait mode.</p><h2 id="6394" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Flexibility in File Size</h2><p id="8736" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">By storing the demosaiced values, ProRAWs can also tackle unwieldy file sizes in a few interesting ways. That’s really important, because when you shoot ProRAW with the first-party camera app, each file is around 25mb — and can get even heavier than that. That’s an order of magnitude or more than a regular photo. It adds up quick.</p><p id="e2a4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">First, we can fiddle with bit-depth. By default, ProRAW uses 12-bit data, which can be overkill. JPEGs are only 8-bits per color channel, so going 10-bit means 4x the precision in editing. While the first-party camera app doesn’t present this option, it’s there in the API. we’ve added it to Halide and seen files drop as low as 8mb. In practice, you can get most of the ProRAW benefits at half the file size.</p><p id="6120" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you want to further reduce the file size, ProRAW offers lossy compression that can drop these files down to as little as 1mb, but not so fast. These compression APIs currently drop the bit depth to 8-bits. In our opinion, that too great of a tradeoff, as it leaves you with a file that’s only marginally better than a JPEG. We’re certain ProRAW compression will confuse users, so we’ve held off on compression support for the time being. Fortunately Apple’s camera team has been iterating on ProRAW very fast, so we hope 10-bit compression is on the horizon.</p><p id="8e68" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Finally, it turns out that every ProRAW file can also contain the fully-processed JPEG version. This is a fallback image for apps that don’t recognize RAWs— which is <em class="kx">most apps. </em>Even Instagram. The first-party camera doesn’t offer this, which means you cannot share ProRAW shots taken with it to apps that do not support RAW images. We’ve added the option in Halide.</p><p id="bb95" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’re planning to edit the image, it makes sense to opt-out of these 4mb images. Your RAW editing app will ignore it, and ultimately produce a new JPEG for your Instagrams.</p><h2 id="bfdc" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Batteries Included</h2><p id="ce80" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">The most underrated improvement in iOS 14.3 is that the native Photos app now supports RAW editing. This is huge, because it abstracts away all the complexity of higher-end apps. No fiddling with “black point” and “color profiles.” Casual users who only know how to edit photos in the built-in apps don’t have to do anything different. 
It just works.</p><p id="6067" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This is critical because we can expect it to take some time for third-party photo editors to adopt this new metadata. Until then, ProRAWs will not look like system JPEGs. Compare the results viewed inside Lightroom CC and Photos.app.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz mc">
<div class="hw s ho hx">
<div class="md hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/42/1*fEpwzq7b2PdTISl5LVMv_w.jpeg?q=20" width="550" height="778"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="550" height="778"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1100/1*fEpwzq7b2PdTISl5LVMv_w.jpeg" width="550" height="778" srcSet="https://miro.medium.com/max/552/1*fEpwzq7b2PdTISl5LVMv_w.jpeg 276w, https://miro.medium.com/max/1100/1*fEpwzq7b2PdTISl5LVMv_w.jpeg 550w" sizes="550px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Left: Photos.app, Right: Lightroom CC</figcaption></figure><p id="ee06" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It looks wrong because Lightroom doesn’t yet detect the local tone mapping metadata. However, given Adobe participated in the designing these tags, we can expect an update in the near future.</p><p id="965e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As for other iOS editors, Apple has updated its RAW processing frameworks to support these new tags. For the most part, it “just works,” so you’ll find ProRAWs display properly inside Halide’s photo gallery. You can toggle tone mapping on and off with the Instant RAW button.</p><p id="d3f2" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That said, not all metadata is exposed by Apple’s frameworks. They don’t even tell you whether the DNG is a native RAW or ProRAW. We’re certain this is just a matter running out of time. They launched this two weeks before Christmas, so this is clearly a feature that was coming down to the wire.</p><p id="40e9" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">To get around this, we built our own proprietary DNG parser we call Dingus.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 276w, https://miro.medium.com/max/1104/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 552w, https://miro.medium.com/max/1280/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 640w, https://miro.medium.com/max/1456/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 728w, https://miro.medium.com/max/1632/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 816w, https://miro.medium.com/max/1808/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 904w, https://miro.medium.com/max/1984/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 992w, https://miro.medium.com/max/2000/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="a85c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Dingus lets us interrogate low level tags within DNGs, which aren’t yet exposed by Apple. We’re using this to expose the bit depth and type of RAW in our metadata reviewer, but it’s also been useful for poking and prodding at ProRAW internals.</p><h1 id="8297" class="id ie do cf if ig ih gn ii ij ik gr il im in io ip iq ir is it iu iv iw ix iy el">Is ProRAW Perfect?</h1><p id="c77f" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">ProRAW is a leap forward for everyone, but it will be especially impactful for beginning photographers.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*2xM5pAf53HtUxZAtMhaV9g.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*2xM5pAf53HtUxZAtMhaV9g.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*2xM5pAf53HtUxZAtMhaV9g.jpeg 276w, https://miro.medium.com/max/668/1*2xM5pAf53HtUxZAtMhaV9g.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg 276w, https://miro.medium.com/max/668/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*FNxf_68aztIWmO3E8KNx_Q.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*FNxf_68aztIWmO3E8KNx_Q.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*FNxf_68aztIWmO3E8KNx_Q.jpeg 276w, https://miro.medium.com/max/668/1*FNxf_68aztIWmO3E8KNx_Q.jpeg 334w" sizes="334px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci mf ho mg lv">ProRAW in and around San Francisco</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="71c4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you know nothing about RAW, but want more flexibility in editing, shoot ProRAW. For true professionals, the decision is a bit more nuanced. Sometimes you’ll want to switch back to regular RAW.</p><p id="c395" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">First, ProRAW is only available on “Pro” level iPhones. Despite the usual conspiracy theories, Apple isn’t just flipping a bit in software to force you to spend more.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz mh">
<div class="hw s ho hx">
<div class="mi hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*vmkySLtfmoeVkRBE2fMUlQ.png?q=20" width="1144" height="752"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1144" height="752"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2288/1*vmkySLtfmoeVkRBE2fMUlQ.png" width="1144" height="752" srcSet="https://miro.medium.com/max/552/1*vmkySLtfmoeVkRBE2fMUlQ.png 276w, https://miro.medium.com/max/1104/1*vmkySLtfmoeVkRBE2fMUlQ.png 552w, https://miro.medium.com/max/1280/1*vmkySLtfmoeVkRBE2fMUlQ.png 640w, https://miro.medium.com/max/1400/1*vmkySLtfmoeVkRBE2fMUlQ.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="5b12" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In the past, these algorithms did their math with lower bit depths, because their output was just low bit depth JPEGs. Outputting images in a higher bit depth requires twice the memory. Apple’s Pro iPhones have way more memory than non-Pro models, and ProRAW needs all of that. It’s that simple.</p><p id="fc3c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Once you get RAW in your hands, the first thing you’ll notice is capture speed. A traditional RAW capture takes as little as 50 milliseconds. ProRAW takes between two and three <em class="kx">seconds </em>to finish processing.</p><p id="c730" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The built in iPhone camera does a great job hiding this, apparently processing each photo in the background in a queue. However, we’ve found the shutter stalls after firing three shots in quick succession.</p><p id="4b96" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">ProRAW isn’t coming to burst mode anytime soon. This makes things difficult if you’re covering sports, have small children who refuse to stand still, or you’re a portrait photographer who takes hundreds of photos in a single session. There’s a chance you might miss that perfect shot.</p><p id="7f13" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In Halide, we decided to take a conservative approach at launch, and only let you capture one photo at a time. We’re a week away from the App Store shutting down for Christmas, so this is the worst possible time to contend with memory crashes. But we expect to speed things up soon.</p><p id="b428" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The next issue we’ve found is sharpness and noise reduction. No multi-photo fusion is perfect. If you want the sharpest images with natural noise, and you’re not planning to boost your shadows through the roof, you might find “native” RAW is still the way to go.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz mj">
<div class="hw s ho hx">
<div class="mk hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg?q=20" width="2815" height="2840"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2815" height="2840"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/5630/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg" width="2815" height="2840" srcSet="https://miro.medium.com/max/552/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 276w, https://miro.medium.com/max/1104/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 552w, https://miro.medium.com/max/1280/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 640w, https://miro.medium.com/max/1456/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 728w, https://miro.medium.com/max/1632/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 816w, https://miro.medium.com/max/1808/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 904w, https://miro.medium.com/max/1984/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 992w, https://miro.medium.com/max/2000/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">The top image is Halide’s ‘Native’ RAW — which still captures more detail at times, but at the expense of a bit of noise.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="22fb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In the field, we often found that in some conditions you can still get more detail in a shot with that quick-and-noisy regular RAW file.</p><p id="4e11" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Then there’s file size. A 12-bit ProRAW is 25mb, while a 12-bit native RAW is only around 12mb. This is almost certainly why the “RAW” button in the first party camera app defaults to off, and returns to off if you leave and return to the app. A casual photographer might leave it on all the time and eat up their iCloud storage in an afternoon.</p><p id="a6a3" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Finally, there’s compatibility. Without something like Halide’s ProRAW+ setting, apps have to be updated to support DNG files. Sharing your ProRAW shot to Instagram doesn’t work:</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ml">
<div class="hw s ho hx">
<div class="mm hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*SH8eErzpFu7UHggFEtmkXQ.png?q=20" width="1390" height="1826"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1390" height="1826"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2780/1*SH8eErzpFu7UHggFEtmkXQ.png" width="1390" height="1826" srcSet="https://miro.medium.com/max/552/1*SH8eErzpFu7UHggFEtmkXQ.png 276w, https://miro.medium.com/max/1104/1*SH8eErzpFu7UHggFEtmkXQ.png 552w, https://miro.medium.com/max/1280/1*SH8eErzpFu7UHggFEtmkXQ.png 640w, https://miro.medium.com/max/1400/1*SH8eErzpFu7UHggFEtmkXQ.png 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">You can wait for a long moment — Instagram does not support DNG files, so you can’t open your ProRAW shots unless you shoot ProRAW + JPG in an app like Halide.</figcaption></figure><p id="5caf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">And while Apple has done an amazing job supporting ProRAW development within its own ecosystem — just hop over to Apple’s Photos app, tap the “Edit” button, and you can edit a ProRAW the same way you edit a JPEG file — the DNG spec was only updated a month ago, so there’s no telling how long it will take for your favorite third-party RAW editors to adopt the new tags.</p><p id="0fdf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you already know how to develop RAW files, and you aren’t shooting in a scenario where computational photography shines, you may find native RAWs give you more bang for your bytes.</p><h2 id="42ad" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Introducing ProRAW for Halide</h2><p id="bec8" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">We’re excited to announce Halide’s ProRAW support. We didn’t just add ProRAW and ticked the box. By actually extensively using ProRAW in the field and testing it we found how to make the best possible camera for it.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg 276w, https://miro.medium.com/max/668/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg 276w, https://miro.medium.com/max/668/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*3mirb6ZPPpfrlqqR3MU44A.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*3mirb6ZPPpfrlqqR3MU44A.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*3mirb6ZPPpfrlqqR3MU44A.jpeg 276w, https://miro.medium.com/max/668/1*3mirb6ZPPpfrlqqR3MU44A.jpeg 334w" sizes="334px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci mf ho mg lv">Some shots taken with Halide 2.1 with ProRAW on iPhone 12 Pro Max</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="2a50" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It starts with ProRAW+. When set to this mode, Halide will take a ProRAW photo along with a JPG file so you can quickly share and view images in apps that do not (yet) support RAW files. This makes it a lot easier to just leave ProRAW on and not run into any issues in other apps.</p><p id="c8bb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As we mentioned, ProRAW is great, but there’s tradeoffs.</p></div></div>
<div class="hg aj"><figure class="hh hi hj hk hl hg aj paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg?q=20" width="6000" height="3000"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="6000" height="3000"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/12000/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg" width="6000" height="3000" srcSet="https://miro.medium.com/max/552/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 276w, https://miro.medium.com/max/1104/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 552w, https://miro.medium.com/max/1280/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 640w, https://miro.medium.com/max/1456/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 728w, https://miro.medium.com/max/1632/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 816w, https://miro.medium.com/max/1808/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 904w, https://miro.medium.com/max/1984/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 992w, https://miro.medium.com/max/2160/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1080w, https://miro.medium.com/max/2700/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1350w, https://miro.medium.com/max/3240/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1620w, https://miro.medium.com/max/3780/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1890w, https://miro.medium.com/max/4320/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 2160w, https://miro.medium.com/max/4800/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 2400w" sizes="100vw"/></noscript></div></div></div></figure></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="fb98" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As we beta tested our ProRAW support, it became obvious we had to make it easer to fiddle with capture settings without diving into Settings. Enter the new format picker menu. Just long-press on the RAW button, and you’ll be able to choose between RAW and ProRAW, your desired bit-depth, and whether you wish to save the processed version alongside your DNG.</p><p id="cf2e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Being able to quickly change your shooting format allows in-the-moment decisions depending on your exact needs.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*ihfv06lwin5C3KQae1P-hg.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*ihfv06lwin5C3KQae1P-hg.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*ihfv06lwin5C3KQae1P-hg.png 276w, https://miro.medium.com/max/1104/1*ihfv06lwin5C3KQae1P-hg.png 552w, https://miro.medium.com/max/1280/1*ihfv06lwin5C3KQae1P-hg.png 640w, https://miro.medium.com/max/1456/1*ihfv06lwin5C3KQae1P-hg.png 728w, https://miro.medium.com/max/1632/1*ihfv06lwin5C3KQae1P-hg.png 816w, https://miro.medium.com/max/1808/1*ihfv06lwin5C3KQae1P-hg.png 904w, https://miro.medium.com/max/1984/1*ihfv06lwin5C3KQae1P-hg.png 992w, https://miro.medium.com/max/2000/1*ihfv06lwin5C3KQae1P-hg.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="5574" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As mentioned earlier, we allow you to customize your bit-depth, and disable capturing the JPEG version of your photo, inside Capture Settings. Together, you can expect to cut ProRAW file size in half without trading too much in your editing flexibility.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="mn hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*ZfXagfeuHx9es1Z-KhP6XQ.png?q=20" width="3000" height="1257"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1257"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*ZfXagfeuHx9es1Z-KhP6XQ.png" width="3000" height="1257" srcSet="https://miro.medium.com/max/552/1*ZfXagfeuHx9es1Z-KhP6XQ.png 276w, https://miro.medium.com/max/1104/1*ZfXagfeuHx9es1Z-KhP6XQ.png 552w, https://miro.medium.com/max/1280/1*ZfXagfeuHx9es1Z-KhP6XQ.png 640w, https://miro.medium.com/max/1456/1*ZfXagfeuHx9es1Z-KhP6XQ.png 728w, https://miro.medium.com/max/1632/1*ZfXagfeuHx9es1Z-KhP6XQ.png 816w, https://miro.medium.com/max/1808/1*ZfXagfeuHx9es1Z-KhP6XQ.png 904w, https://miro.medium.com/max/1984/1*ZfXagfeuHx9es1Z-KhP6XQ.png 992w, https://miro.medium.com/max/2000/1*ZfXagfeuHx9es1Z-KhP6XQ.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Fine-grained ProRAW settings for fine RAW appreciators.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="00b4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We also ensure to remember and persist your ProRAW settings. With great power comes great iCloud consumption, so please use this responsibly.</p><p id="e1f4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We’re still absorbing all of the implications of ProRAW, so we expect continue to iterate on our features over the next few months. We have a list of things we want to improve after the App Store holiday shutdown, so if you’re shooting ProRAW, 2021 is going to be an amazing year.</p><p id="34ff" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">However, native RAW is not going away.</p><p id="df31" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The vast majority of users can’t shoot ProRAW, there are circumstances where regular single-shot RAWs will be superior, and there are certain computational photography algorithms that rely on bayer-level information. We’ve got enough native RAW features planned to keep us busy for quite some time — and to keep bringing fantastic features to all iPhones that can run Halide.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*vUVodwaon3ojvmb92eAILA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*vUVodwaon3ojvmb92eAILA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*vUVodwaon3ojvmb92eAILA.jpeg 276w, https://miro.medium.com/max/1000/1*vUVodwaon3ojvmb92eAILA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg 276w, https://miro.medium.com/max/1000/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure></div>
<div class="n lp"><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg 276w, https://miro.medium.com/max/1000/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg 276w, https://miro.medium.com/max/1000/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg 500w" sizes="500px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci lt ho lu lv">Maybe the real Pro RAW was the shots we made along the way.</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="fe1e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The best part of building <a href="http://halide.cam/download" class="cl km" target="_blank" rel="noopener">Halide</a>, and writing these articles, is seeing what folks do with this stuff. If you’re proud of your ProRAW photo, be sure to tag us, because we’d love to see what you can do with it.</p><p id="1933" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If a picture is worth a thousand words, congratulations on reading over three pictures. <a href="http://halide.cam/download" class="cl km" target="_blank" rel="noopener">Now get shooting!</a></p></div></div></section>

</div></article>


<hr>

<footer>
<p>
<a href="/david/" title="Aller à l’accueil">🏠</a> •
<a href="/david/log/" title="Accès au flux RSS">🤖</a> •
<a href="http://larlet.com" title="Go to my English profile" data-instant>🇨🇦</a> •
<a href="mailto:david%40larlet.fr" title="Envoyer un courriel">📮</a> •
<abbr title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340">🧚</abbr>
</p>
<template id="theme-selector">
<form>
<fieldset>
<legend>Thème</legend>
<label>
<input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
</label>
<label>
<input type="radio" value="dark" name="chosen-color-scheme"> Foncé
</label>
<label>
<input type="radio" value="light" name="chosen-color-scheme"> Clair
</label>
</fieldset>
</form>
</template>
</footer>
<script>
function loadThemeForm(templateName) {
const themeSelectorTemplate = document.querySelector(templateName)
const form = themeSelectorTemplate.content.firstElementChild
themeSelectorTemplate.replaceWith(form)

form.addEventListener('change', (e) => {
const chosenColorScheme = e.target.value
localStorage.setItem('theme', chosenColorScheme)
toggleTheme(chosenColorScheme)
})

const selectedTheme = localStorage.getItem('theme')
if (selectedTheme && selectedTheme !== 'undefined') {
form.querySelector(`[value="${selectedTheme}"]`).checked = true
}
}

const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
window.addEventListener('load', () => {
let hasDarkRules = false
for (const styleSheet of Array.from(document.styleSheets)) {
let mediaRules = []
for (const cssRule of styleSheet.cssRules) {
if (cssRule.type !== CSSRule.MEDIA_RULE) {
continue
}
// WARNING: Safari does not have/support `conditionText`.
if (cssRule.conditionText) {
if (cssRule.conditionText !== prefersColorSchemeDark) {
continue
}
} else {
if (cssRule.cssText.startsWith(prefersColorSchemeDark)) {
continue
}
}
mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
}

// WARNING: do not try to insert a rule into a styleSheet you are
// currently iterating on, otherwise the browser will be stuck
// in an infinite loop…
for (const mediaRule of mediaRules) {
styleSheet.insertRule(mediaRule.cssText)
hasDarkRules = true
}
}
if (hasDarkRules) {
loadThemeForm('#theme-selector')
}
})
</script>
</body>
</html>

+ 361
- 0
cache/2021/0c6966a8e9543b52c361ac6de68f08e4/index.md View File

@@ -0,0 +1,361 @@
title: Understanding ProRAW
url: https://blog.halide.cam/understanding-proraw-4eed556d4c54
hash_url: 0c6966a8e9543b52c361ac6de68f08e4

<section class="dg dh di dj dk">
<p id="655a" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We make the most popular RAW camera for iPhone, so when Apple revealed their new ProRAW image format, we were beyond excited.</p>
<div class="hg aj"><figure class="hh hi hj hk hl hg aj paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*tfZD-KB2jIXjAsNhFWwANA.jpeg?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*tfZD-KB2jIXjAsNhFWwANA.jpeg" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 276w, https://miro.medium.com/max/1104/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 552w, https://miro.medium.com/max/1280/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 640w, https://miro.medium.com/max/1456/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 728w, https://miro.medium.com/max/1632/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 816w, https://miro.medium.com/max/1808/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 904w, https://miro.medium.com/max/1984/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 992w, https://miro.medium.com/max/2160/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 1080w, https://miro.medium.com/max/2700/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 1350w, https://miro.medium.com/max/3240/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 1620w, https://miro.medium.com/max/3780/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 1890w, https://miro.medium.com/max/4320/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 2160w, https://miro.medium.com/max/4800/1*tfZD-KB2jIXjAsNhFWwANA.jpeg 2400w" sizes="100vw"/></noscript></div></div></div></figure></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="0342" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Then they announced it’s coming to the built-in camera app.</p><p id="c3f8" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Many developers in our shoes would freak out, thinking Apple wants to gobble up their customers. We were just confused.</p><p id="59f9" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Apple builds products for the broadest possible audience, and RAW is a tool for photo-nerds. These powerful files take skill to edit and come with significant tradeoffs. Why would Apple cram a complicated feature into an app meant for everyone?</p><p id="e76a" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As we dug deeper into ProRAW, we realized it wasn’t just about making RAW more powerful. It’s about making RAW approachable. ProRAW could very well change how everyone shoots and edits photos, beginners and experts alike.</p><p id="d908" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">To understand what makes it so special, the first half of this post explains how a digital camera develops a photo. Then we go on to explain the strengths and weaknesses of traditional RAWs. Finally, we dive into what’s unique about ProRAW, how it changes the game, and its few remaining drawbacks.</p><p id="8c99" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Grab a coffee, because this is a long read.</p><h1 id="7b37" class="id ie do cf if ig ih gn ii ij ik gr il im in io ip iq ir is it iu iv iw ix iy el">A Short Tour Through a Digital Camera</h1><p id="d544" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">Imagine you’re looking at this scene through your camera.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz je">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg?q=20" width="1512" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1512" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3024/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg" width="1512" height="2016" srcSet="https://miro.medium.com/max/552/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg 276w, https://miro.medium.com/max/1104/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg 552w, https://miro.medium.com/max/1280/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg 640w, https://miro.medium.com/max/1400/1*w2cp8OmPzmfqfc3K4QXHMQ.jpeg 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="c734" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">When you tap the camera button on your camera, light passes through a series of optics and lands on a digital sensor where it is captured.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*h0tUHkeVGQ6DO1gKHfUU6A.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*h0tUHkeVGQ6DO1gKHfUU6A.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*h0tUHkeVGQ6DO1gKHfUU6A.png 276w, https://miro.medium.com/max/1104/1*h0tUHkeVGQ6DO1gKHfUU6A.png 552w, https://miro.medium.com/max/1280/1*h0tUHkeVGQ6DO1gKHfUU6A.png 640w, https://miro.medium.com/max/1456/1*h0tUHkeVGQ6DO1gKHfUU6A.png 728w, https://miro.medium.com/max/1632/1*h0tUHkeVGQ6DO1gKHfUU6A.png 816w, https://miro.medium.com/max/1808/1*h0tUHkeVGQ6DO1gKHfUU6A.png 904w, https://miro.medium.com/max/1984/1*h0tUHkeVGQ6DO1gKHfUU6A.png 992w, https://miro.medium.com/max/2000/1*h0tUHkeVGQ6DO1gKHfUU6A.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="8f4e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We’re going to talk through the three important steps that take place as your camera converts sensor values into a picture.</p><h2 id="4d27" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Step 1: Demosaic</h2><p id="e4fb" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">Your digital sensor absorbs light and turns it into numbers. The more light it sees, the higher the number.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz je">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg?q=20" width="1512" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1512" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3024/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg" width="1512" height="2016" srcSet="https://miro.medium.com/max/552/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg 276w, https://miro.medium.com/max/1104/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg 552w, https://miro.medium.com/max/1280/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg 640w, https://miro.medium.com/max/1400/1*4U6Q8jnQiSdoZn59hcjWfA.jpeg 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="503d" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I used black and white for a reason — digital sensors are totally colorblind. In 1976, a clever engineer at Kodak found a solution: put a grid of color filters over the sensor, so every pixel sees either red, green, or blue.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ki">
<div class="hw s ho hx">
<div class="kj hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*YpY7tmiCxOL11eOnyODz7A.png?q=20" width="2560" height="1664"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2560" height="1664"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/5120/1*YpY7tmiCxOL11eOnyODz7A.png" width="2560" height="1664" srcSet="https://miro.medium.com/max/552/1*YpY7tmiCxOL11eOnyODz7A.png 276w, https://miro.medium.com/max/1104/1*YpY7tmiCxOL11eOnyODz7A.png 552w, https://miro.medium.com/max/1280/1*YpY7tmiCxOL11eOnyODz7A.png 640w, https://miro.medium.com/max/1400/1*YpY7tmiCxOL11eOnyODz7A.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="e0ec" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This mosaic pattern is called a Bayer pattern, named after its inventor, Bruce Bayer. With a color filter in place, our sensor now sees a grid of alternating colors. Let’s zoom in on the leaves of the tree, and see what the sensor sees.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz kk">
<div class="hw s ho hx">
<div class="kl hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*UaFk3pbG95R8uYHHd8G8xw.png?q=20" width="600" height="600"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="600" height="600"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1200/1*UaFk3pbG95R8uYHHd8G8xw.png" width="600" height="600" srcSet="https://miro.medium.com/max/552/1*UaFk3pbG95R8uYHHd8G8xw.png 276w, https://miro.medium.com/max/1104/1*UaFk3pbG95R8uYHHd8G8xw.png 552w, https://miro.medium.com/max/1200/1*UaFk3pbG95R8uYHHd8G8xw.png 600w" sizes="600px"/></noscript></div></div></div></figure><p id="0b79" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Each pixel is either red, green or blue. We figure out the real colors by going over every pixel, looking at its neighbors, and guessing its two missing colors. This crucial step is known as “<a href="https://en.wikipedia.org/wiki/Demosaicing" class="cl km" target="_blank" rel="noopener">demosaicing</a>,” or “debayering.”</p><p id="f759" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It’s a tricky problem. Compare a simple algorithm to a higher quality one, and you’ll spot problems like purple fringes.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz kn">
<div class="hw s ho hx">
<div class="ko hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg?q=20" width="1800" height="600"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1800" height="600"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3600/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg" width="1800" height="600" srcSet="https://miro.medium.com/max/552/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 276w, https://miro.medium.com/max/1104/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 552w, https://miro.medium.com/max/1280/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 640w, https://miro.medium.com/max/1456/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 728w, https://miro.medium.com/max/1632/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 816w, https://miro.medium.com/max/1808/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 904w, https://miro.medium.com/max/1984/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 992w, https://miro.medium.com/max/2000/1*wPzwzg_jz0xA-TydcZa_Lg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Middle: A fast algorithm. Right: a higher quality one.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="7c7e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Look at the pebbles in the shot. The fast algorithm is a little more “pixely.”</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz kt">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg?q=20" width="960" height="480"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="960" height="480"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1920/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg" width="960" height="480" srcSet="https://miro.medium.com/max/552/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg 276w, https://miro.medium.com/max/1104/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg 552w, https://miro.medium.com/max/1280/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg 640w, https://miro.medium.com/max/1400/1*dnBVd9aUeky6f1Ckl_M2GQ.jpeg 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Left: Fast Right: High Quality</figcaption></figure><p id="9ccc" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">There are quite a few popular demosaic algorithms to choose from, each with strengths and weaknesses. The perfect algorithm depends on the type of sensor, your camera settings, and even the subject matter. For example, if you’re shooting the night sky, some algorithms produce better results on stars.</p><p id="0ba2" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">After demosaic, we end up with…</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz je">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*KsiUoBU9r4AuQT91N4v23Q.png?q=20" width="1512" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1512" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3024/1*KsiUoBU9r4AuQT91N4v23Q.png" width="1512" height="2016" srcSet="https://miro.medium.com/max/552/1*KsiUoBU9r4AuQT91N4v23Q.png 276w, https://miro.medium.com/max/1104/1*KsiUoBU9r4AuQT91N4v23Q.png 552w, https://miro.medium.com/max/1280/1*KsiUoBU9r4AuQT91N4v23Q.png 640w, https://miro.medium.com/max/1400/1*KsiUoBU9r4AuQT91N4v23Q.png 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">(Simulated results to keep things simple)</figcaption></figure><p id="9beb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That’s not great. As you can see, the color and exposure are off.</p><p id="1781" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">There’s nothing wrong with your camera. Quite the opposite: your camera captures way more information than your screen can display. We have to take those sensor readings, which measures the light from your <strong class="gk ku">scene</strong>, and transform them into pixel values that light up your <strong class="gk ku">display</strong>.</p><h2 id="77f2" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Step 2: Transform from Scene to Display</h2><p id="9532" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">Let’s start with color. The following chart represents all the colors a human can see, and the triangle in the middle covers what most screens can display.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz kv">
<div class="hw s ho hx">
<div class="kw hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*aj7B542Qbk9uCeaB7rCitA.jpeg?q=20" width="2525" height="1275"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2525" height="1275"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/5050/1*aj7B542Qbk9uCeaB7rCitA.jpeg" width="2525" height="1275" srcSet="https://miro.medium.com/max/552/1*aj7B542Qbk9uCeaB7rCitA.jpeg 276w, https://miro.medium.com/max/1104/1*aj7B542Qbk9uCeaB7rCitA.jpeg 552w, https://miro.medium.com/max/1280/1*aj7B542Qbk9uCeaB7rCitA.jpeg 640w, https://miro.medium.com/max/1456/1*aj7B542Qbk9uCeaB7rCitA.jpeg 728w, https://miro.medium.com/max/1632/1*aj7B542Qbk9uCeaB7rCitA.jpeg 816w, https://miro.medium.com/max/1808/1*aj7B542Qbk9uCeaB7rCitA.jpeg 904w, https://miro.medium.com/max/1984/1*aj7B542Qbk9uCeaB7rCitA.jpeg 992w, https://miro.medium.com/max/2000/1*aj7B542Qbk9uCeaB7rCitA.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="f67a" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">When your camera<em class="kx"> </em>captures colors outside the triangle, we have to push and pull those invalid colors to fit inside.</p><p id="9af7" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">One detail makes this especially tricky: deciding which white is “true” white. If you’ve ever shopped for light bulbs, you know every white light in the real world has a slight yellow or blue tint. Our brains adjust our perception of white, based on hints in our surroundings. It’s called <a href="https://en.wikipedia.org/wiki/Color_constancy" class="cl km" target="_blank" rel="noopener">color constancy</a>, and it’s why this yellow/blue dress optical illusion fools people.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ky">
<div class="hw s ho hx">
<div class="kz hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*FcQhGavMhNpcrHlHIk0_uA.png?q=20" width="1200" height="900"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1200" height="900"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2400/1*FcQhGavMhNpcrHlHIk0_uA.png" width="1200" height="900" srcSet="https://miro.medium.com/max/552/1*FcQhGavMhNpcrHlHIk0_uA.png 276w, https://miro.medium.com/max/1104/1*FcQhGavMhNpcrHlHIk0_uA.png 552w, https://miro.medium.com/max/1280/1*FcQhGavMhNpcrHlHIk0_uA.png 640w, https://miro.medium.com/max/1400/1*FcQhGavMhNpcrHlHIk0_uA.png 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci"><a href="https://en.wikipedia.org/wiki/The_dress" class="cl km" target="_blank" rel="noopener">The Dress</a></figcaption></figure><p id="48ec" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Modern cameras are very good at figuring out the white point in most situations. So we pick our white point, run the math, and end up with a perfectly white-balanced image.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz je">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*JBl1Ib7wqrxsyukaHBA1TA.png?q=20" width="1512" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1512" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3024/1*JBl1Ib7wqrxsyukaHBA1TA.png" width="1512" height="2016" srcSet="https://miro.medium.com/max/552/1*JBl1Ib7wqrxsyukaHBA1TA.png 276w, https://miro.medium.com/max/1104/1*JBl1Ib7wqrxsyukaHBA1TA.png 552w, https://miro.medium.com/max/1280/1*JBl1Ib7wqrxsyukaHBA1TA.png 640w, https://miro.medium.com/max/1400/1*JBl1Ib7wqrxsyukaHBA1TA.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="daf0" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That looks pretty close to what we saw, but the awning in the top of the frame is under-exposed.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz la">
<div class="hw s ho hx">
<div class="kl hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*WwDfyRYXScdIPBlf8ZCm5Q.png?q=20" width="563" height="563"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="563" height="563"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1126/1*WwDfyRYXScdIPBlf8ZCm5Q.png" width="563" height="563" srcSet="https://miro.medium.com/max/552/1*WwDfyRYXScdIPBlf8ZCm5Q.png 276w, https://miro.medium.com/max/1104/1*WwDfyRYXScdIPBlf8ZCm5Q.png 552w, https://miro.medium.com/max/1126/1*WwDfyRYXScdIPBlf8ZCm5Q.png 563w" sizes="563px"/></noscript></div></div></div></figure><p id="9505" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Let’s turn up the exposure to bring out detail…</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz je">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*99yfqbgXyF13DM8ooEJaLQ.png?q=20" width="1512" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1512" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3024/1*99yfqbgXyF13DM8ooEJaLQ.png" width="1512" height="2016" srcSet="https://miro.medium.com/max/552/1*99yfqbgXyF13DM8ooEJaLQ.png 276w, https://miro.medium.com/max/1104/1*99yfqbgXyF13DM8ooEJaLQ.png 552w, https://miro.medium.com/max/1280/1*99yfqbgXyF13DM8ooEJaLQ.png 640w, https://miro.medium.com/max/1400/1*99yfqbgXyF13DM8ooEJaLQ.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="6e98" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">… but unfortunately, now the rest of the image is too bright.</p><p id="b4c6" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The technical name for this problem is “dynamic range.” It’s range of light you can capture or display at one time, measured from the brightest bright to the darkest shadow. You’ll frequently hear this range measured in “stops.”</p><p id="56b9" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Ever see terms like ‘HDR’ or ‘XDR’ being thrown around? The “DR” in “HDR” stands for Dynamic Range. People make quite a fuss about it. This is an especially difficult problem in technology, because the human eye is <strong class="gk ku">incredibly good</strong>.</p><p id="72ef" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It’s easily the most powerful camera in the world, as it can see up to 30 stops of dynamic range. Most screens can <em class="kx">display</em> 8 stops. Digital cameras capture up to 15 stops. When we try to display all this information on a screen with lower dynamic range, it’s going to look wrong, sometimes in unexpected ways.</p><p id="8e48" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Notice the sky has weird patch of cyan, caused by one of the color channels clipping.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lb">
<div class="hw s ho hx">
<div class="lc hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*2wHP6e5hrIxrHXyJ7Pcc1g.png?q=20" width="864" height="328"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="864" height="328"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1728/1*2wHP6e5hrIxrHXyJ7Pcc1g.png" width="864" height="328" srcSet="https://miro.medium.com/max/552/1*2wHP6e5hrIxrHXyJ7Pcc1g.png 276w, https://miro.medium.com/max/1104/1*2wHP6e5hrIxrHXyJ7Pcc1g.png 552w, https://miro.medium.com/max/1280/1*2wHP6e5hrIxrHXyJ7Pcc1g.png 640w, https://miro.medium.com/max/1400/1*2wHP6e5hrIxrHXyJ7Pcc1g.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="089e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The solution to our dynamic range problem is a little more complicated than bringing up shadows and turning down highlights everywhere. That would make the whole image feel flat.</p><p id="9dfc" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Instead, you want to darken and lighten small areas of the image. Fifty years ago, photographers took hours to tweak their negatives using a process called “dodging and burning.”</p><p id="5f1b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Today it’s called “local tone mapping.”</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ld">
<div class="hw s ho hx">
<div class="le hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*3-FXfKG16GUFHMOLOmXMKg.jpeg?q=20" width="3024" height="2016"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="2016"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*3-FXfKG16GUFHMOLOmXMKg.jpeg" width="3024" height="2016" srcSet="https://miro.medium.com/max/552/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 276w, https://miro.medium.com/max/1104/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 552w, https://miro.medium.com/max/1280/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 640w, https://miro.medium.com/max/1456/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 728w, https://miro.medium.com/max/1632/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 816w, https://miro.medium.com/max/1808/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 904w, https://miro.medium.com/max/1984/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 992w, https://miro.medium.com/max/2000/1*3-FXfKG16GUFHMOLOmXMKg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">The image on the right has some gentle tone mapping applied.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="def0" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">With that, our image looks great.</p><p id="8c4a" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Let’s go ahead email it to our friends… oh, oops it’s 100 megabytes. The image still contains all that data we can’t see. Luckily, we don’t need all of that data now that we’re done editing.</p><h2 id="9399" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Step 3: Optimize</h2><p id="0e8a" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">In computer graphics, if you use more bits, your math gets more accurate. While editing, we needed to use 64-bits per pixel to get nice results. Once we’re done editing we can cut it down to 32-bits — halving our file size — and nobody can tell the difference.</p><p id="9792" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Next, we can throw out most of our color information. This cuts it in half <em class="kx">again</em>.</p><p id="037f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Finally, we can apply something called ‘lossy compression’ — the kind you find in JPEGs. We end up with an image that’s only 1.6 mb to share with our friends.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*mt0D2vrarvjazruDvYK7iw.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*mt0D2vrarvjazruDvYK7iw.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*mt0D2vrarvjazruDvYK7iw.png 276w, https://miro.medium.com/max/1104/1*mt0D2vrarvjazruDvYK7iw.png 552w, https://miro.medium.com/max/1280/1*mt0D2vrarvjazruDvYK7iw.png 640w, https://miro.medium.com/max/1456/1*mt0D2vrarvjazruDvYK7iw.png 728w, https://miro.medium.com/max/1632/1*mt0D2vrarvjazruDvYK7iw.png 816w, https://miro.medium.com/max/1808/1*mt0D2vrarvjazruDvYK7iw.png 904w, https://miro.medium.com/max/1984/1*mt0D2vrarvjazruDvYK7iw.png 992w, https://miro.medium.com/max/2000/1*mt0D2vrarvjazruDvYK7iw.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="8ba9" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Phew. Take a breath, and let all that sink in. When you’re ready, we can finally answer…</p><h1 id="098d" class="id ie do cf if ig ih gn ii ij ik gr il im in io ip iq ir is it iu iv iw ix iy el"><strong class="az">What is RAW? Why is it Magic?</strong></h1><p id="dc70" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">We just <strong class="gk ku">developed</strong> a photo. Every step in development is destructive, meaning we lose information. For example, in Step 2, once you shift colors around to fit into that triangle, you can never figure out the original, real-world colors.</p><p id="7f4c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">So what happens when mistakes are made? Let’s go back to California’s recent wildfires that <a href="https://www.nytimes.com/2020/09/09/us/pictures-photos-california-fires.html" class="cl km" target="_blank" rel="noopener">turned the sky orange</a>. I took a photo with a color chart at the time, figuring it would come in handy someday.</p><p id="197b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In real life, it looked like this:</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lf">
<div class="hw s ho hx">
<div class="kz hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*2VfJISrXMInRKX85P3Szhw.jpeg?q=20" width="2048" height="1536"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2048" height="1536"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/4096/1*2VfJISrXMInRKX85P3Szhw.jpeg" width="2048" height="1536" srcSet="https://miro.medium.com/max/552/1*2VfJISrXMInRKX85P3Szhw.jpeg 276w, https://miro.medium.com/max/1104/1*2VfJISrXMInRKX85P3Szhw.jpeg 552w, https://miro.medium.com/max/1280/1*2VfJISrXMInRKX85P3Szhw.jpeg 640w, https://miro.medium.com/max/1400/1*2VfJISrXMInRKX85P3Szhw.jpeg 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="8f9c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The built-in camera app was confused by that orange sky, and tried to make everything neutral — because that’s what the world usually looks like. People were confused to find their cameras refused to take an image of the world as they saw it.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lf">
<div class="hw s ho hx">
<div class="kz hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*oNgdi0nr93vU6ozzkB8l2g.jpeg?q=20" width="2048" height="1536"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2048" height="1536"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/4096/1*oNgdi0nr93vU6ozzkB8l2g.jpeg" width="2048" height="1536" srcSet="https://miro.medium.com/max/552/1*oNgdi0nr93vU6ozzkB8l2g.jpeg 276w, https://miro.medium.com/max/1104/1*oNgdi0nr93vU6ozzkB8l2g.jpeg 552w, https://miro.medium.com/max/1280/1*oNgdi0nr93vU6ozzkB8l2g.jpeg 640w, https://miro.medium.com/max/1400/1*oNgdi0nr93vU6ozzkB8l2g.jpeg 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="28ae" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">A lot of folks used manual settings to override their camera’s automatic white balancing, but let’s say you didn’t. I’ll take that incorrect JPEG into an image editor like Lightroom and try to match it back to the original…</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lf">
<div class="hw s ho hx">
<div class="lg hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*0IYpHEDlusoF171jVWsoMw.jpeg?q=20" width="2048" height="768"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2048" height="768"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/4096/1*0IYpHEDlusoF171jVWsoMw.jpeg" width="2048" height="768" srcSet="https://miro.medium.com/max/552/1*0IYpHEDlusoF171jVWsoMw.jpeg 276w, https://miro.medium.com/max/1104/1*0IYpHEDlusoF171jVWsoMw.jpeg 552w, https://miro.medium.com/max/1280/1*0IYpHEDlusoF171jVWsoMw.jpeg 640w, https://miro.medium.com/max/1400/1*0IYpHEDlusoF171jVWsoMw.jpeg 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">The actual scene on the left, and an attempt to adjust our poorly white-balanced image on the right.</figcaption></figure><p id="65ea" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Hmm. Notice that how it’s messed up some colors, shifting blue to purple.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz lh">
<div class="hw s ho hx">
<div class="li hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg?q=20" width="622" height="301"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="622" height="301"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1244/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg" width="622" height="301" srcSet="https://miro.medium.com/max/552/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg 276w, https://miro.medium.com/max/1104/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg 552w, https://miro.medium.com/max/1244/1*sJwJVYYqkmU1Z21M4IJB4A.jpeg 622w" sizes="622px"/></noscript></div></div></div></figure><p id="f24a" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Trying to un-process a processed image like a JPEG is like trying to un-bake a cake. When your camera produces JPEGs, you’d better love the choices it made, because there’s no going back.</p><p id="1068" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Now imagine if instead of saving a JPEG, your camera saved the original sensor values. Now you can make totally different processing decisions yourself, like white balance. You get the raw data.</p><p id="ca0b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Welcome to RAW files.</p><p id="0b8d" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We have written about RAW a lot on this blog. We love it. Shooting RAW <a class="cl km" target="_blank" rel="noopener" href="/the-power-of-raw-on-iphone-part-1-shooting-raw-ef02becb7002">gives you magical powers</a>. With a few knobs, you can rescue photos that you thought were lost to poor exposure.</p><p id="1652" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">So far, we’ve only talked about mistakes, but RAWs also give you the freedom to experiment with radically different choices for artistic effect, letting you <strong class="gk ku">develop</strong> the photo so it looks how you experienced it.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lj">
<div class="hw s ho hx">
<div class="lk hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*F-HXEeVByR8TXVwszChZkQ.jpeg?q=20" width="1400" height="933"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1400" height="933"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2800/1*F-HXEeVByR8TXVwszChZkQ.jpeg" width="1400" height="933" srcSet="https://miro.medium.com/max/552/1*F-HXEeVByR8TXVwszChZkQ.jpeg 276w, https://miro.medium.com/max/1104/1*F-HXEeVByR8TXVwszChZkQ.jpeg 552w, https://miro.medium.com/max/1280/1*F-HXEeVByR8TXVwszChZkQ.jpeg 640w, https://miro.medium.com/max/1400/1*F-HXEeVByR8TXVwszChZkQ.jpeg 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">These are the kind of edits that a RAW file enables.</figcaption></figure><p id="e4d4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Remember old film cameras? Their photos also had to be developed from a negative. RAW data is usually stored in the DNG file format, which stands for <strong class="gk ku">D</strong>igital <strong class="gk ku">N</strong>e<strong class="gk ku">g</strong>ative. Some camera makers have their own formats, but those companies are jerks.</p><p id="1643" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">DNG is an open standard, so anyone can build software that reads and writes DNGs. Best of all, the file format continues to evolve, as we’ll see shortly.</p><p id="5471" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">First, we have some bad news.</p><h2 id="696e" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">RAWs are Great Until They Aren’t</h2><p id="8cfb" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">We make a <a href="http://halide.cam" class="cl km" target="_blank" rel="noopener">RAW camera app</a>, so of course we’re fans of RAW. That also means we get plenty of support emails about it. By far, the most common question is, “Why do my RAWs look worse than the built-in camera app?”</p><p id="84d1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">iPhone cameras got a lot better over time. At first, these were leaps in hardware: better and bigger sensors and lenses allowed sharper shots. Eventually, though, the processors got faster and the cameras couldn’t get larger. The camera got smarter.</p><p id="99c1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">iPhones take many photos and combine it into one shot, picking detail in the shadows from one photo, the right exposure on your dog’s face from another, and a few other ones for extra detail. This is merged into the final result in milliseconds without requiring any effort on your behalf. It’s very clever stuff, with cool names like Smart HDR and Deep Fusion.</p><p id="cd12" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">On the other hand, iPhone RAW files are still just one image. Which means they look quite… different.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ll">
<div class="hw s ho hx">
<div class="lm hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*m6P-96kVDOdsqXt7tz7LDg.jpeg?q=20" width="4000" height="2400"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="4000" height="2400"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/8000/1*m6P-96kVDOdsqXt7tz7LDg.jpeg" width="4000" height="2400" srcSet="https://miro.medium.com/max/552/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 276w, https://miro.medium.com/max/1104/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 552w, https://miro.medium.com/max/1280/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 640w, https://miro.medium.com/max/1456/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 728w, https://miro.medium.com/max/1632/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 816w, https://miro.medium.com/max/1808/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 904w, https://miro.medium.com/max/1984/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 992w, https://miro.medium.com/max/2000/1*m6P-96kVDOdsqXt7tz7LDg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="f49b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’re coming from the built-in iPhone camera app, switching over to manual processing is like switching from a car with automatic transmission to a stick-shift.</p><p id="6947" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">There’s a steep learning curve, and that’s why we built something called “Instant RAW” into our latest Mark II update, so you don’t have to spend an afternoon in an image editor tweaking your shot to get a nice result.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ln">
<div class="hw s ho hx">
<div class="lo hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*HvMYRDSzB0hSBNK7wW_F6g.png?q=20" width="6000" height="3029"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="6000" height="3029"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/12000/1*HvMYRDSzB0hSBNK7wW_F6g.png" width="6000" height="3029" srcSet="https://miro.medium.com/max/552/1*HvMYRDSzB0hSBNK7wW_F6g.png 276w, https://miro.medium.com/max/1104/1*HvMYRDSzB0hSBNK7wW_F6g.png 552w, https://miro.medium.com/max/1280/1*HvMYRDSzB0hSBNK7wW_F6g.png 640w, https://miro.medium.com/max/1456/1*HvMYRDSzB0hSBNK7wW_F6g.png 728w, https://miro.medium.com/max/1632/1*HvMYRDSzB0hSBNK7wW_F6g.png 816w, https://miro.medium.com/max/1808/1*HvMYRDSzB0hSBNK7wW_F6g.png 904w, https://miro.medium.com/max/1984/1*HvMYRDSzB0hSBNK7wW_F6g.png 992w, https://miro.medium.com/max/2000/1*HvMYRDSzB0hSBNK7wW_F6g.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="c1c5" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">But even with Instant RAW or editing, sometimes RAWs still look different than what comes out of the built-in camera app, and it has nothing to do with your skills. RAWs lacks that critical piece of the puzzle: Apple’s smarts. Computational photography like Smart HDR and Deep Fusion.</p><p id="bef1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Even if Apple handed these algorithms to third-party apps, they work on <em class="kx">bursts</em> of photos, fusing together the best parts of each image. iPhone RAWs are 12mb each. If you want to reproduce the results of Apple’s camera using one of these algorithms, you’re looking at ten times the storage needed per photo.</p><p id="9ae4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Oh, and there’s one more problem: neither the front-facing camera or ultra-wide camera can shoot RAW.</p><p id="6e45" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">ProRAW elegantly solves all of these problems and more. You can finally reproduce the results of the first-party camera, while retaining most of the editing latitude from traditional RAWs.</p><h1 id="2b46" class="id ie do cf if ig ih gn ii ij ik gr il im in io ip iq ir is it iu iv iw ix iy el">Enter ProRAW</h1><p id="b9af" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">Technically, there’s no such thing as a ProRAW file. ProRAW images are regular DNG files that take advantage of some little known features in the specification, and introduce a few new ones.</p><p id="09d1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Remember how DNG is an open file format? Apple worked with Adobe to introduce a few new tags. DNG released the 1.6 specification, with details about these tags, the very day ProRAW went into public beta tests.</p><p id="c413" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This may be surprising to some: ProRAW is not a proprietary or closed format. Credit where it is due: Apple deserves kudos for bringing their improvements to the DNG standard. When you shoot with ProRAW, there’s absolutely nothing locking your photos into the Apple ecosystem.</p><p id="52d0" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Let’s dive into what makes ProRAW files different than RAWs of days past…</p><h2 id="d615" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Demosaic is Already Done</h2><p id="166f" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">ProRAWs store pixel values <em class="kx">after</em> the demosaic step. It’s like they took the output Step 1, from earlier, and stored those values. We’ll talk about why they’re doing this in a bit.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*XvEBPGtrYv6-bD-VvTjujg.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*XvEBPGtrYv6-bD-VvTjujg.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*XvEBPGtrYv6-bD-VvTjujg.png 276w, https://miro.medium.com/max/1104/1*XvEBPGtrYv6-bD-VvTjujg.png 552w, https://miro.medium.com/max/1280/1*XvEBPGtrYv6-bD-VvTjujg.png 640w, https://miro.medium.com/max/1456/1*XvEBPGtrYv6-bD-VvTjujg.png 728w, https://miro.medium.com/max/1632/1*XvEBPGtrYv6-bD-VvTjujg.png 816w, https://miro.medium.com/max/1808/1*XvEBPGtrYv6-bD-VvTjujg.png 904w, https://miro.medium.com/max/1984/1*XvEBPGtrYv6-bD-VvTjujg.png 992w, https://miro.medium.com/max/2000/1*XvEBPGtrYv6-bD-VvTjujg.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="f17f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It’s just important to understand that these demosaiced color values still represent the <strong class="gk ku">scene</strong>, not your display. They contain all the original dynamic range. They contain all the out-of-range colors. They retain all the flexibility of working with a “true” RAW. They just skip step one.</p><p id="8374" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In theory, you lose flexibility in choosing specific demosaic algorithms. In practice, most expert photographers don’t bother.</p><p id="cdee" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It’s also quite possible that iOS can do a better job demosaicing your images than any third-party RAW editor. Apple’s greatest strength is its unity of hardware and software, so they know exactly the sensor you’re using, and how it behaves with different ISO setttings. In theory, they could even apply image recognition as part of the process; if iOS detects a night sky, it could automatically pick a star-friendly demosaic algorithm.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="kz hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*iBZPgEFTrzK6J2uYMa6CTA.jpeg?q=20" width="1500" height="1125"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1500" height="1125"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3000/1*iBZPgEFTrzK6J2uYMa6CTA.jpeg" width="1500" height="1125" srcSet="https://miro.medium.com/max/552/1*iBZPgEFTrzK6J2uYMa6CTA.jpeg 276w, https://miro.medium.com/max/1000/1*iBZPgEFTrzK6J2uYMa6CTA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="kz hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*K6xZ3e_TqggD5SSVBVh7cA.jpeg?q=20" width="1500" height="1125"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1500" height="1125"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3000/1*K6xZ3e_TqggD5SSVBVh7cA.jpeg" width="1500" height="1125" srcSet="https://miro.medium.com/max/552/1*K6xZ3e_TqggD5SSVBVh7cA.jpeg 276w, https://miro.medium.com/max/1000/1*K6xZ3e_TqggD5SSVBVh7cA.jpeg 500w" sizes="500px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci lt ho lu lv">Photographer<a href="https://t.co/VQGZpdaA4B?amp=1" class="cl km" target="_blank" rel="noopener"> Austin Mann</a> shows of how ProRAW already changes the game for nighttime photography <a href="https://t.co/VQGZpdaA4B?amp=1" class="cl km" target="_blank" rel="noopener">in this fantastic look at ProRAW</a>.</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="d7c0" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This sly move also gives Apple greater control over the image sensors they use in the future. Earlier, I said <em class="kx">most</em> cameras use a Bayer pattern. Some camera manufacturers use different patterns, which require different algorithms. Fujifilm invented the <a href="https://en.wikipedia.org/wiki/Fujifilm_X-Trans_sensor" class="cl km" target="_blank" rel="noopener">X-Trans</a> sensor that creates sharper images, with more film-like grain. There’s even a <a href="https://blog.sigmaphoto.com/2011/faqs-the-sigma-camera-and-its-foveon-x3-direct-image-sensor/" class="cl km" target="_blank" rel="noopener">Foveon</a> digital sensor that stacks color filters on top of each other.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz lw">
<div class="hw s ho hx">
<div class="lx hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*geXtGrkS1PXytzJ1kr8p6w.jpeg?q=20" width="602" height="380"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="602" height="380"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1204/1*geXtGrkS1PXytzJ1kr8p6w.jpeg" width="602" height="380" srcSet="https://miro.medium.com/max/552/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 276w, https://miro.medium.com/max/1104/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 552w, https://miro.medium.com/max/1204/1*geXtGrkS1PXytzJ1kr8p6w.jpeg 602w" sizes="602px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">It’s marketing, so take it with a grain of salt.</figcaption></figure><p id="23be" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Apple is now a company that designs its own silicon, and it’s very good at it. Cameras are a huge driving factor in phone purchases. It seems inevitable for Apple to innovate in the realm of sensors. Taking over the demosaic step would smooth such a transition. Hypothetically speaking, they could swap out their current bayer sensors with an “Apple C1,” and so long as it saves in ProRAW, it would work from day one in every pro photography process and app like Lightroom without having to wait for Adobe to write a new demosaic algorithm.</p><p id="9eb1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We got a taste for these benefits with the surprise reveal that <strong class="gk ku">ProRAW is available on all four cameras.</strong></p><p id="0358" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Previously, you could not shoot RAW with the front-facing or ultra-wide cameras. Apple is vague on the technical limitations, but indicated that even if third-party developers had access to the RAW data, they wouldn’t know what to do with it. With ProRAW, they can handle the annoying bits, and leave apps like editors to deal with what they’re better at: editing.</p><h2 id="6135" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Bridging the Algorithm Gap</h2><p id="bc2c" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">So now we have the data, but what about the local tone mapping and other computational photography goodies? Apple could open up their algorithms to third-party apps, but that isn’t as useful as you think. For starters, you’d need a save a ton of RAW files. We’d be back at that 100 megabyte file.</p><p id="85c6" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I also think there are reasonable questions about producing consistent results down the road as these algorithms evolve. It would be surprising to return to a photo a year from now to find Apple’s AI produces different results.</p><p id="d496" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Instead, ProRAW stores results of computational photography right inside the RAW. This is another reason they need to store demosaiced data, as these algorithms operate on color, not RAW data. Once you demosaic, there’s no going back. 
I mean, what would you even call that, remosaic?</p><p id="4e7e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Smart HDR does this in the least destructive way. Apple worked with Adobe to <a href="https://helpx.adobe.com/photoshop/kb/dng-specification-tags.html" class="cl km" target="_blank" rel="noopener">introduce a new type of tag</a> into the DNG standard, called a “Profile Gain Table Map.” This data gives your editor everything it needs to know to tone map your photo image and end up with results identical to the first party camera. Because it’s separate data, you can turn down its strength, or turn it off completely.</p><p id="299d" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That’s what we do in Halide when you’re looking at a ProRAW image with Instant RAW disabled.</p></div></div>
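<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Conceptually, applying a gain map with an adjustable strength looks something like this sketch. (The names here are made up for illustration; the real Profile Gain Table Map stores a low-resolution grid of gains that the editor interpolates per pixel, per the DNG 1.6 specification.)</p><pre><code>// Illustrative only: blend a per-pixel tone-mapping gain into linear image data.
// strength 1 reproduces the first-party look; strength 0 disables the local
// tone mapping entirely, leaving the untouched HDR data.
func applyGainMap(linear: [Float], gains: [Float], strength: Float) -&gt; [Float] {
    precondition(linear.count == gains.count, "one gain per sample")
    return zip(linear, gains).map { value, gain in
        value * (1 + strength * (gain - 1))
    }
}</code></pre></div></div>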
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*y-uuWP8T138MVIvGcq_rMA.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*y-uuWP8T138MVIvGcq_rMA.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*y-uuWP8T138MVIvGcq_rMA.png 276w, https://miro.medium.com/max/1104/1*y-uuWP8T138MVIvGcq_rMA.png 552w, https://miro.medium.com/max/1280/1*y-uuWP8T138MVIvGcq_rMA.png 640w, https://miro.medium.com/max/1456/1*y-uuWP8T138MVIvGcq_rMA.png 728w, https://miro.medium.com/max/1632/1*y-uuWP8T138MVIvGcq_rMA.png 816w, https://miro.medium.com/max/1808/1*y-uuWP8T138MVIvGcq_rMA.png 904w, https://miro.medium.com/max/1984/1*y-uuWP8T138MVIvGcq_rMA.png 992w, https://miro.medium.com/max/2000/1*y-uuWP8T138MVIvGcq_rMA.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Local tone mapping lifts the shadows on the ground.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="c655" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Even if you opt-out of local tone mapping, you now have the underlying HDR data to work with in an editor, and the results are…</p><figure class="hh hi hj hk hl hg">
<div class="hw s ho">
<div class="ly hz s"></div></div></figure><p id="62a1" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Deep Fusion is a different story. While it’s popularly known as “Sweater Mode,” the more technical objective is “noise reduction in low-light.” Unlike those Gain Table Maps, there’s no elegant way to separate its effects from the final image. If you don’t want Deep Fusion, your only option is to opt-out of the process at the time of capture.</p><p id="d905" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’ve read our articles for a while, you know we’re fans of natural noise. Prior to Deep Fusion, the iPhone JPEGs were notorious for their “watercolor effects.” In this image I took a few years ago, notice faces get smeared into nothing.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz lz">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg?q=20" width="1858" height="929"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1858" height="929"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3716/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg" width="1858" height="929" srcSet="https://miro.medium.com/max/552/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 276w, https://miro.medium.com/max/1104/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 552w, https://miro.medium.com/max/1280/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 640w, https://miro.medium.com/max/1456/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 728w, https://miro.medium.com/max/1632/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 816w, https://miro.medium.com/max/1808/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 904w, https://miro.medium.com/max/1984/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 992w, https://miro.medium.com/max/2000/1*NLfPxx9klj-P3IBbqY3sNQ.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Taken before social distancing was a thing.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="099f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Deep Fusion produces very different results. Instead of just smearing an image, it combines several results together to sort of “average out” the results. It looks way, way more natural.</p><p id="0adb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’re not a fan of Deep Fusion, there’s an API to opt-out. We expose this in Halide under Capture Settings. As I was writing this article, I realized this toggle makes it easy to run an experiment…</p><p id="fef6" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I used a spotlight to create a high-dynamic range scene in my office.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ma">
<div class="hw s ho hx">
<div class="mb hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/24/1*KyKm_rUL60bQheIwZ27KJA.jpeg?q=20" width="390" height="1006"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="390" height="1006"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/780/1*KyKm_rUL60bQheIwZ27KJA.jpeg" width="390" height="1006" srcSet="https://miro.medium.com/max/552/1*KyKm_rUL60bQheIwZ27KJA.jpeg 276w, https://miro.medium.com/max/780/1*KyKm_rUL60bQheIwZ27KJA.jpeg 390w" sizes="390px"/></noscript></div></div></div></div></figure><p id="715b" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I spot metered the two grey cards and measured a difference of 8.3 stops. I then exposed the shot using the top grey card and took three photos:</p><p id="4a3f" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">1) Native Halide RAW</p><p id="4077" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">2) ProRAW, with smart processing disabled</p><p id="6819" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">3) ProRAW with smart processing enabled</p><p id="58aa" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">I took these into Lightroom and boosted the shadows. Let’s zoom in on the test pattern image.</p></div></div>
<div class="hg aj"><figure class="hh hi hj hk hl hg aj paragraph-image">
<div class="hw s ho hx">
<div class="ko hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*x6-_StGFa02oMfajJ3In6g.jpeg?q=20" width="1800" height="600"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1800" height="600"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/3600/1*x6-_StGFa02oMfajJ3In6g.jpeg" width="1800" height="600" srcSet="https://miro.medium.com/max/552/1*x6-_StGFa02oMfajJ3In6g.jpeg 276w, https://miro.medium.com/max/1104/1*x6-_StGFa02oMfajJ3In6g.jpeg 552w, https://miro.medium.com/max/1280/1*x6-_StGFa02oMfajJ3In6g.jpeg 640w, https://miro.medium.com/max/1456/1*x6-_StGFa02oMfajJ3In6g.jpeg 728w, https://miro.medium.com/max/1632/1*x6-_StGFa02oMfajJ3In6g.jpeg 816w, https://miro.medium.com/max/1808/1*x6-_StGFa02oMfajJ3In6g.jpeg 904w, https://miro.medium.com/max/1984/1*x6-_StGFa02oMfajJ3In6g.jpeg 992w, https://miro.medium.com/max/2160/1*x6-_StGFa02oMfajJ3In6g.jpeg 1080w, https://miro.medium.com/max/2700/1*x6-_StGFa02oMfajJ3In6g.jpeg 1350w, https://miro.medium.com/max/3240/1*x6-_StGFa02oMfajJ3In6g.jpeg 1620w, https://miro.medium.com/max/3600/1*x6-_StGFa02oMfajJ3In6g.jpeg 1800w" sizes="1800px"/></noscript></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">1) Native RAW, 2) ProRAW without the algorithms 3) ProRAW with the algorithms</figcaption></figure></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="16ca" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you wonder what camera settings I used to capture this, the answer is a bit complicated. When you enable these algorithms, the iPhone ignores manual exposure settings. After all, Deep Fusion takes multiple exposures with a variety of settings. While the metadata embedded in the final image reported an ISO of 40, it’s unlikely all photos in the burst had that setting.</p><p id="9fec" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As you can see, by disabling this Smart Processing in Halide, we can still skip a lot of the noise reduction in our ProRAW files if we so choose.</p><p id="0283" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Synthetic tests like this are all fine and good, but what about the real world?</p><p id="8787" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Sebastiaan spent the last few weeks testing ProRAW in the field with the Halide beta. He found the dynamic range in ProRAW pretty mind-blowing in the day-to-day editing process:</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ky">
<div class="hw s ho hx">
<div class="le hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*oAPorA4ZnfrWnKaDY48B0g.png?q=20" width="1200" height="800"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1200" height="800"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2400/1*oAPorA4ZnfrWnKaDY48B0g.png" width="1200" height="800" srcSet="https://miro.medium.com/max/552/1*oAPorA4ZnfrWnKaDY48B0g.png 276w, https://miro.medium.com/max/1104/1*oAPorA4ZnfrWnKaDY48B0g.png 552w, https://miro.medium.com/max/1280/1*oAPorA4ZnfrWnKaDY48B0g.png 640w, https://miro.medium.com/max/1456/1*oAPorA4ZnfrWnKaDY48B0g.png 728w, https://miro.medium.com/max/1632/1*oAPorA4ZnfrWnKaDY48B0g.png 816w, https://miro.medium.com/max/1808/1*oAPorA4ZnfrWnKaDY48B0g.png 904w, https://miro.medium.com/max/1984/1*oAPorA4ZnfrWnKaDY48B0g.png 992w, https://miro.medium.com/max/2000/1*oAPorA4ZnfrWnKaDY48B0g.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><h2 id="936d" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Semantic Maps Included</h2><p id="c322" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">ProRAW has one more surprise up its sleeve. A few years ago, Apple began using neural networks to detect interesting parts of an image, such as eyes and hair.</p><p id="d9cf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Apple uses this to, say, add sharpening to <em class="kx">only</em> clouds in the sky. Sharping faces would be quite unflattering.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*qK8PHZG2JudesZzNj_XomA.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*qK8PHZG2JudesZzNj_XomA.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*qK8PHZG2JudesZzNj_XomA.png 276w, https://miro.medium.com/max/1104/1*qK8PHZG2JudesZzNj_XomA.png 552w, https://miro.medium.com/max/1280/1*qK8PHZG2JudesZzNj_XomA.png 640w, https://miro.medium.com/max/1456/1*qK8PHZG2JudesZzNj_XomA.png 728w, https://miro.medium.com/max/1632/1*qK8PHZG2JudesZzNj_XomA.png 816w, https://miro.medium.com/max/1808/1*qK8PHZG2JudesZzNj_XomA.png 904w, https://miro.medium.com/max/1984/1*qK8PHZG2JudesZzNj_XomA.png 992w, https://miro.medium.com/max/2000/1*qK8PHZG2JudesZzNj_XomA.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">An iPhone 12 Pro RAW file from the ultra-wide camera with a semantic matte for the human in the photo. Super cool.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="4644" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">ProRAW files contain these maps! Both semantic maps used on faces and Portrait Effect Mattes that power the background blur in Portrait mode.</p><h2 id="6394" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Flexibility in File Size</h2><p id="8736" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">By storing the demosaiced values, ProRAWs can also tackle unwieldy file sizes in a few interesting ways. That’s really important, because when you shoot ProRAW with the first-party camera app, each file is around 25mb — and can get even heavier than that. That’s an order of magnitude or more than a regular photo. It adds up quick.</p><p id="e2a4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">First, we can fiddle with bit-depth. By default, ProRAW uses 12-bit data, which can be overkill. JPEGs are only 8-bits per color channel, so going 10-bit means 4x the precision in editing. While the first-party camera app doesn’t present this option, it’s there in the API. we’ve added it to Halide and seen files drop as low as 8mb. In practice, you can get most of the ProRAW benefits at half the file size.</p><p id="6120" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you want to further reduce the file size, ProRAW offers lossy compression that can drop these files down to as little as 1mb, but not so fast. These compression APIs currently drop the bit depth to 8-bits. In our opinion, that too great of a tradeoff, as it leaves you with a file that’s only marginally better than a JPEG. We’re certain ProRAW compression will confuse users, so we’ve held off on compression support for the time being. Fortunately Apple’s camera team has been iterating on ProRAW very fast, so we hope 10-bit compression is on the horizon.</p><p id="8e68" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Finally, it turns out that every ProRAW file can also contain the fully-processed JPEG version. This is a fallback image for apps that don’t recognize RAWs— which is <em class="kx">most apps. </em>Even Instagram. The first-party camera doesn’t offer this, which means you cannot share ProRAW shots taken with it to apps that do not support RAW images. We’ve added the option in Halide.</p><p id="bb95" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you’re planning to edit the image, it makes sense to opt-out of these 4mb images. Your RAW editing app will ignore it, and ultimately produce a new JPEG for your Instagrams.</p><h2 id="bfdc" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Batteries Included</h2><p id="ce80" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">The most underrated improvement in iOS 14.3 is that the native Photos app now supports RAW editing. This is huge, because it abstracts away all the complexity of higher-end apps. No fiddling with “black point” and “color profiles.” Casual users who only know how to edit photos in the built-in apps don’t have to do anything different. 
It just works.</p><p id="6067" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">This is critical because we can expect it to take some time for third-party photo editors to adopt this new metadata. Until then, ProRAWs will not look like system JPEGs. Compare the results viewed inside Lightroom CC and Photos.app.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div class="cy cz mc">
<div class="hw s ho hx">
<div class="md hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/42/1*fEpwzq7b2PdTISl5LVMv_w.jpeg?q=20" width="550" height="778"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="550" height="778"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/1100/1*fEpwzq7b2PdTISl5LVMv_w.jpeg" width="550" height="778" srcSet="https://miro.medium.com/max/552/1*fEpwzq7b2PdTISl5LVMv_w.jpeg 276w, https://miro.medium.com/max/1100/1*fEpwzq7b2PdTISl5LVMv_w.jpeg 550w" sizes="550px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Left: Photos.app, Right: Lightroom CC</figcaption></figure><p id="ee06" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It looks wrong because Lightroom doesn’t yet detect the local tone mapping metadata. However, given Adobe participated in the designing these tags, we can expect an update in the near future.</p><p id="965e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As for other iOS editors, Apple has updated its RAW processing frameworks to support these new tags. For the most part, it “just works,” so you’ll find ProRAWs display properly inside Halide’s photo gallery. You can toggle tone mapping on and off with the Instant RAW button.</p><p id="d3f2" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">That said, not all metadata is exposed by Apple’s frameworks. They don’t even tell you whether the DNG is a native RAW or ProRAW. We’re certain this is just a matter running out of time. They launched this two weeks before Christmas, so this is clearly a feature that was coming down to the wire.</p><p id="40e9" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">To get around this, we built our own proprietary DNG parser we call Dingus.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 276w, https://miro.medium.com/max/1104/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 552w, https://miro.medium.com/max/1280/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 640w, https://miro.medium.com/max/1456/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 728w, https://miro.medium.com/max/1632/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 816w, https://miro.medium.com/max/1808/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 904w, https://miro.medium.com/max/1984/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 992w, https://miro.medium.com/max/2000/1*iXBvbnD1qKTiHD4qJAHuIg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="a85c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Dingus lets us interrogate low level tags within DNGs, which aren’t yet exposed by Apple. We’re using this to expose the bit depth and type of RAW in our metadata reviewer, but it’s also been useful for poking and prodding at ProRAW internals.</p><h1 id="8297" class="id ie do cf if ig ih gn ii ij ik gr il im in io ip iq ir is it iu iv iw ix iy el">Is ProRAW Perfect?</h1><p id="c77f" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">ProRAW is a leap forward for everyone, but it will be especially impactful for beginning photographers.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*2xM5pAf53HtUxZAtMhaV9g.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*2xM5pAf53HtUxZAtMhaV9g.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*2xM5pAf53HtUxZAtMhaV9g.jpeg 276w, https://miro.medium.com/max/668/1*2xM5pAf53HtUxZAtMhaV9g.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg 276w, https://miro.medium.com/max/668/1*ejqAfKY8rUOvgB7oVRkkkw.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*FNxf_68aztIWmO3E8KNx_Q.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*FNxf_68aztIWmO3E8KNx_Q.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*FNxf_68aztIWmO3E8KNx_Q.jpeg 276w, https://miro.medium.com/max/668/1*FNxf_68aztIWmO3E8KNx_Q.jpeg 334w" sizes="334px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci mf ho mg lv">ProRAW in and around San Francisco</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="71c4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you know nothing about RAW, but want more flexibility in editing, shoot ProRAW. For true professionals, the decision is a bit more nuanced. Sometimes you’ll want to switch back to regular RAW.</p><p id="c395" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">First, ProRAW is only available on “Pro” level iPhones. Despite the usual conspiracy theories, Apple isn’t just flipping a bit in software to force you to spend more.</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz mh">
<div class="hw s ho hx">
<div class="mi hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*vmkySLtfmoeVkRBE2fMUlQ.png?q=20" width="1144" height="752"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1144" height="752"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2288/1*vmkySLtfmoeVkRBE2fMUlQ.png" width="1144" height="752" srcSet="https://miro.medium.com/max/552/1*vmkySLtfmoeVkRBE2fMUlQ.png 276w, https://miro.medium.com/max/1104/1*vmkySLtfmoeVkRBE2fMUlQ.png 552w, https://miro.medium.com/max/1280/1*vmkySLtfmoeVkRBE2fMUlQ.png 640w, https://miro.medium.com/max/1400/1*vmkySLtfmoeVkRBE2fMUlQ.png 700w" sizes="700px"/></noscript></div></div></div></div></figure><p id="5b12" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In the past, these algorithms did their math with lower bit depths, because their output was just low bit depth JPEGs. Outputting images in a higher bit depth requires twice the memory. Apple’s Pro iPhones have way more memory than non-Pro models, and ProRAW needs all of that. It’s that simple.</p><p id="fc3c" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Once you get RAW in your hands, the first thing you’ll notice is capture speed. A traditional RAW capture takes as little as 50 milliseconds. ProRAW takes between two and three <em class="kx">seconds </em>to finish processing.</p><p id="c730" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The built in iPhone camera does a great job hiding this, apparently processing each photo in the background in a queue. However, we’ve found the shutter stalls after firing three shots in quick succession.</p><p id="4b96" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">ProRAW isn’t coming to burst mode anytime soon. This makes things difficult if you’re covering sports, have small children who refuse to stand still, or you’re a portrait photographer who takes hundreds of photos in a single session. There’s a chance you might miss that perfect shot.</p><p id="7f13" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In Halide, we decided to take a conservative approach at launch, and only let you capture one photo at a time. We’re a week away from the App Store shutting down for Christmas, so this is the worst possible time to contend with memory crashes. But we expect to speed things up soon.</p><p id="b428" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The next issue we’ve found is sharpness and noise reduction. No multi-photo fusion is perfect. If you want the sharpest images with natural noise, and you’re not planning to boost your shadows through the roof, you might find “native” RAW is still the way to go.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz mj">
<div class="hw s ho hx">
<div class="mk hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg?q=20" width="2815" height="2840"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="2815" height="2840"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/5630/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg" width="2815" height="2840" srcSet="https://miro.medium.com/max/552/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 276w, https://miro.medium.com/max/1104/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 552w, https://miro.medium.com/max/1280/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 640w, https://miro.medium.com/max/1456/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 728w, https://miro.medium.com/max/1632/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 816w, https://miro.medium.com/max/1808/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 904w, https://miro.medium.com/max/1984/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 992w, https://miro.medium.com/max/2000/1*2TjK0rucDUtN_UOcDzZ8yg.jpeg 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">The top image is Halide’s ‘Native’ RAW — which still captures more detail at times, but at the expense of a bit of noise.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="22fb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">In the field, we often found that in some conditions you can still get more detail in a shot with that quick-and-noisy regular RAW file.</p><p id="4e11" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Then there’s file size. A 12-bit ProRAW is 25mb, while a 12-bit native RAW is only around 12mb. This is almost certainly why the “RAW” button in the first party camera app defaults to off, and returns to off if you leave and return to the app. A casual photographer might leave it on all the time and eat up their iCloud storage in an afternoon.</p><p id="a6a3" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Finally, there’s compatibility. Without something like Halide’s ProRAW+ setting, apps have to be updated to support DNG files. Sharing your ProRAW shot to Instagram doesn’t work:</p><figure class="hh hi hj hk hl hg cy cz paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz ml">
<div class="hw s ho hx">
<div class="mm hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*SH8eErzpFu7UHggFEtmkXQ.png?q=20" width="1390" height="1826"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1390" height="1826"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2780/1*SH8eErzpFu7UHggFEtmkXQ.png" width="1390" height="1826" srcSet="https://miro.medium.com/max/552/1*SH8eErzpFu7UHggFEtmkXQ.png 276w, https://miro.medium.com/max/1104/1*SH8eErzpFu7UHggFEtmkXQ.png 552w, https://miro.medium.com/max/1280/1*SH8eErzpFu7UHggFEtmkXQ.png 640w, https://miro.medium.com/max/1400/1*SH8eErzpFu7UHggFEtmkXQ.png 700w" sizes="700px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">You can wait for a long moment — Instagram does not support DNG files, so you can’t open your ProRAW shots unless you shoot ProRAW + JPG in an app like Halide.</figcaption></figure><p id="5caf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">And while Apple has done an amazing job supporting ProRAW development within its own ecosystem — just hop over to Apple’s Photos app, tap the “Edit” button, and you can edit a ProRAW the same way you edit a JPEG file — the DNG spec was only updated a month ago, so there’s no telling how long it will take for your favorite third-party RAW editors to adopt the new tags.</p><p id="0fdf" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If you already know how to develop RAW files, and you aren’t shooting in a scenario where computational photography shines, you may find native RAWs give you more bang for your bytes.</p><h2 id="42ad" class="jr ie do cf if js jt ju ii jv jw jx il jy jz ka ip kb kc kd it ke kf kg ix kh el">Introducing ProRAW for Halide</h2><p id="bec8" class="gi gj do gk b gl iz gn go gp ja gr gs gt jb gv gw gx jc gz ha hb jd hd he hf dg el">We’re excited to announce Halide’s ProRAW support. We didn’t just add ProRAW and ticked the box. By actually extensively using ProRAW in the field and testing it we found how to make the best possible camera for it.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg 276w, https://miro.medium.com/max/668/1*KFD1EkOmp-M6wyzRXuWhwQ.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg 276w, https://miro.medium.com/max/668/1*-EH6tH3PDodnyNdAtlb3Eg.jpeg 334w" sizes="334px"/></noscript></div></div></div></figure><figure class="cs hg me lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*3mirb6ZPPpfrlqqR3MU44A.jpeg?q=20" width="1080" height="1440"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="1080" height="1440"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/2160/1*3mirb6ZPPpfrlqqR3MU44A.jpeg" width="1080" height="1440" srcSet="https://miro.medium.com/max/552/1*3mirb6ZPPpfrlqqR3MU44A.jpeg 276w, https://miro.medium.com/max/668/1*3mirb6ZPPpfrlqqR3MU44A.jpeg 334w" sizes="334px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci mf ho mg lv">Some shots taken with Halide 2.1 with ProRAW on iPhone 12 Pro Max</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="2a50" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">It starts with ProRAW+. When set to this mode, Halide will take a ProRAW photo along with a JPG file so you can quickly share and view images in apps that do not (yet) support RAW files. This makes it a lot easier to just leave ProRAW on and not run into any issues in other apps.</p><p id="c8bb" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As we mentioned, ProRAW is great, but there’s tradeoffs.</p></div></div>
<div class="hg aj"><figure class="hh hi hj hk hl hg aj paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg?q=20" width="6000" height="3000"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="6000" height="3000"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/12000/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg" width="6000" height="3000" srcSet="https://miro.medium.com/max/552/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 276w, https://miro.medium.com/max/1104/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 552w, https://miro.medium.com/max/1280/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 640w, https://miro.medium.com/max/1456/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 728w, https://miro.medium.com/max/1632/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 816w, https://miro.medium.com/max/1808/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 904w, https://miro.medium.com/max/1984/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 992w, https://miro.medium.com/max/2160/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1080w, https://miro.medium.com/max/2700/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1350w, https://miro.medium.com/max/3240/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1620w, https://miro.medium.com/max/3780/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 1890w, https://miro.medium.com/max/4320/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 2160w, https://miro.medium.com/max/4800/1*Ls9ZjreNdAu3ZA4Pup1xqA.jpeg 2400w" sizes="100vw"/></noscript></div></div></div></figure></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="fb98" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As we beta tested our ProRAW support, it became obvious we had to make it easer to fiddle with capture settings without diving into Settings. Enter the new format picker menu. Just long-press on the RAW button, and you’ll be able to choose between RAW and ProRAW, your desired bit-depth, and whether you wish to save the processed version alongside your DNG.</p><p id="cf2e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">Being able to quickly change your shooting format allows in-the-moment decisions depending on your exact needs.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="hy hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*ihfv06lwin5C3KQae1P-hg.png?q=20" width="3000" height="1500"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1500"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*ihfv06lwin5C3KQae1P-hg.png" width="3000" height="1500" srcSet="https://miro.medium.com/max/552/1*ihfv06lwin5C3KQae1P-hg.png 276w, https://miro.medium.com/max/1104/1*ihfv06lwin5C3KQae1P-hg.png 552w, https://miro.medium.com/max/1280/1*ihfv06lwin5C3KQae1P-hg.png 640w, https://miro.medium.com/max/1456/1*ihfv06lwin5C3KQae1P-hg.png 728w, https://miro.medium.com/max/1632/1*ihfv06lwin5C3KQae1P-hg.png 816w, https://miro.medium.com/max/1808/1*ihfv06lwin5C3KQae1P-hg.png 904w, https://miro.medium.com/max/1984/1*ihfv06lwin5C3KQae1P-hg.png 992w, https://miro.medium.com/max/2000/1*ihfv06lwin5C3KQae1P-hg.png 1000w" sizes="1000px"/></noscript></div></div></div></div></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="5574" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">As mentioned earlier, we allow you to customize your bit-depth, and disable capturing the JPEG version of your photo, inside Capture Settings. Together, you can expect to cut ProRAW file size in half without trading too much in your editing flexibility.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj"><figure class="hh hi hj hk hl hg jp jq paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="cy cz jo">
<div class="hw s ho hx">
<div class="mn hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/60/1*ZfXagfeuHx9es1Z-KhP6XQ.png?q=20" width="3000" height="1257"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3000" height="1257"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6000/1*ZfXagfeuHx9es1Z-KhP6XQ.png" width="3000" height="1257" srcSet="https://miro.medium.com/max/552/1*ZfXagfeuHx9es1Z-KhP6XQ.png 276w, https://miro.medium.com/max/1104/1*ZfXagfeuHx9es1Z-KhP6XQ.png 552w, https://miro.medium.com/max/1280/1*ZfXagfeuHx9es1Z-KhP6XQ.png 640w, https://miro.medium.com/max/1456/1*ZfXagfeuHx9es1Z-KhP6XQ.png 728w, https://miro.medium.com/max/1632/1*ZfXagfeuHx9es1Z-KhP6XQ.png 816w, https://miro.medium.com/max/1808/1*ZfXagfeuHx9es1Z-KhP6XQ.png 904w, https://miro.medium.com/max/1984/1*ZfXagfeuHx9es1Z-KhP6XQ.png 992w, https://miro.medium.com/max/2000/1*ZfXagfeuHx9es1Z-KhP6XQ.png 1000w" sizes="1000px"/></noscript></div></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci">Fine-grained ProRAW settings for fine RAW appreciators.</figcaption></figure></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="00b4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We also ensure to remember and persist your ProRAW settings. With great power comes great iCloud consumption, so please use this responsibly.</p><p id="e1f4" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">We’re still absorbing all of the implications of ProRAW, so we expect continue to iterate on our features over the next few months. We have a list of things we want to improve after the App Store holiday shutdown, so if you’re shooting ProRAW, 2021 is going to be an amazing year.</p><p id="34ff" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">However, native RAW is not going away.</p><p id="df31" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The vast majority of users can’t shoot ProRAW, there are circumstances where regular single-shot RAWs will be superior, and there are certain computational photography algorithms that rely on bayer-level information. We’ve got enough native RAW features planned to keep us busy for quite some time — and to keep bringing fantastic features to all iPhones that can run Halide.</p></div></div>
<div class="hg">
<div class="n p">
<div class="jg jh ji jj jk jl af jm ag jn ai aj">
<div class="hh hi hj hk hl n lp"><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*vUVodwaon3ojvmb92eAILA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*vUVodwaon3ojvmb92eAILA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*vUVodwaon3ojvmb92eAILA.jpeg 276w, https://miro.medium.com/max/1000/1*vUVodwaon3ojvmb92eAILA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg 276w, https://miro.medium.com/max/1000/1*UFBjSexxr5Yfvp7mo7_WHA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure></div>
<div class="n lp"><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg 276w, https://miro.medium.com/max/1000/1*8ewT4cLPtJZYI8Qf_VFegA.jpeg 500w" sizes="500px"/></noscript></div></div></div></figure><figure class="cs hg lq lr jq jp ls paragraph-image">
<div role="button" tabindex="0" class="hm hn ho hp aj hq">
<div class="hw s ho hx">
<div class="jf hz s">
<div class="hr hs t u v ht aj bl hu hv"><img alt="Image for post" class="t u v ht aj ia ib ic" src="https://miro.medium.com/max/46/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg?q=20" width="3024" height="4032"/></div><img alt="Image for post" class="hr hs t u v ht aj c" width="3024" height="4032"/><noscript><img alt="Image for post" class="t u v ht aj" src="https://miro.medium.com/max/6048/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg" width="3024" height="4032" srcSet="https://miro.medium.com/max/552/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg 276w, https://miro.medium.com/max/1000/1*xwWdZJQpXmyrW7C_9lwYDA.jpeg 500w" sizes="500px"/></noscript></div></div></div><figcaption class="kp kq da cy cz kr ks cf b ev ch ci lt ho lu lv">Maybe the real Pro RAW was the shots we made along the way.</figcaption></figure></div></div></div></div>
<div class="n p">
<div class="ab ac ae af ag dl ai aj"><p id="fe1e" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">The best part of building <a href="http://halide.cam/download" class="cl km" target="_blank" rel="noopener">Halide</a>, and writing these articles, is seeing what folks do with this stuff. If you’re proud of your ProRAW photo, be sure to tag us, because we’d love to see what you can do with it.</p><p id="1933" class="gi gj do gk b gl gm gn go gp gq gr gs gt gu gv gw gx gy gz ha hb hc hd he hf dg el">If a picture is worth a thousand words, congratulations on reading over three pictures. <a href="http://halide.cam/download" class="cl km" target="_blank" rel="noopener">Now get shooting!</a></p></div></div></section></div></article>

+ 783
- 0
cache/2021/ef2067bf42482ed7c48e1d166cde117a/index.html View File

@@ -0,0 +1,783 @@
<!doctype html><!-- This is a valid HTML5 document. -->
<!-- Screen readers, SEO, extensions and so on. -->
<html lang="fr">
<!-- Has to be within the first 1024 bytes, hence before the <title>
See: https://www.w3.org/TR/2012/CR-html5-20121217/document-metadata.html#charset -->
<meta charset="utf-8">
<!-- Why no `X-UA-Compatible` meta: https://stackoverflow.com/a/6771584 -->
<!-- The viewport meta is quite crowded and we are responsible for that.
See: https://codepen.io/tigt/post/meta-viewport-for-2015 -->
<meta name="viewport" content="width=device-width,initial-scale=1">
<!-- Required to make a valid HTML5 document. -->
<title>Cameras and Lenses (archive) — David Larlet</title>
<meta name="description" content="Publication mise en cache pour en conserver une trace.">
<!-- That good ol' feed, subscribe :). -->
<link rel="alternate" type="application/atom+xml" title="Feed" href="/david/log/">
<!-- Generated from https://realfavicongenerator.net/ such a mess. -->
<link rel="apple-touch-icon" sizes="180x180" href="/static/david/icons2/apple-touch-icon.png">
<link rel="icon" type="image/png" sizes="32x32" href="/static/david/icons2/favicon-32x32.png">
<link rel="icon" type="image/png" sizes="16x16" href="/static/david/icons2/favicon-16x16.png">
<link rel="manifest" href="/static/david/icons2/site.webmanifest">
<link rel="mask-icon" href="/static/david/icons2/safari-pinned-tab.svg" color="#07486c">
<link rel="shortcut icon" href="/static/david/icons2/favicon.ico">
<meta name="msapplication-TileColor" content="#f0f0ea">
<meta name="msapplication-config" content="/static/david/icons2/browserconfig.xml">
<meta name="theme-color" content="#f0f0ea">
<!-- Documented, feel free to shoot an email. -->
<link rel="stylesheet" href="/static/david/css/style_2020-06-19.css">
<!-- See https://www.zachleat.com/web/comprehensive-webfonts/ for the trade-off. -->
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t4_poly_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: light), (prefers-color-scheme: no-preference)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_regular.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_bold.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<link rel="preload" href="/static/david/css/fonts/triplicate_t3_italic.woff2" as="font" type="font/woff2" media="(prefers-color-scheme: dark)" crossorigin>
<script>
function toggleTheme(themeName) {
document.documentElement.classList.toggle(
'forced-dark',
themeName === 'dark'
)
document.documentElement.classList.toggle(
'forced-light',
themeName === 'light'
)
}
const selectedTheme = localStorage.getItem('theme')
if (selectedTheme !== 'undefined') {
toggleTheme(selectedTheme)
}
</script>

<meta name="robots" content="noindex, nofollow">
<meta content="origin-when-cross-origin" name="referrer">
<!-- Canonical URL for SEO purposes -->
<link rel="canonical" href="https://ciechanow.ski/cameras-and-lenses/">

<body class="remarkdown h1-underline h2-underline h3-underline em-underscore hr-center ul-star pre-tick">

<article>
<header>
<h1>Cameras and Lenses</h1>
</header>
<nav>
<p class="center">
<a href="/david/" title="Aller à l’accueil">🏠</a> •
<a href="https://ciechanow.ski/cameras-and-lenses/" title="Lien vers le contenu original">Source originale</a>
</p>
</nav>
<hr>
<p>Pictures have always been a meaningful part of the human experience. From the first cave drawings, to sketches and paintings, to modern photography, we&rsquo;ve mastered the art of recording what we see.</p>

<p>Cameras and the lenses inside them may seem a little mystifying. In this blog post I&rsquo;d like to explain not only how they work, but also how adjusting a few tunable parameters can produce fairly different results:</p>

<div class="drawer_container double_drawer" id="lens_hero"></div>

<div class="lens_yellow" id="lens_hero_sl2"></div>

<div class="lens_blue" id="lens_hero_sl1"></div>

<div class="lens_black" id="lens_hero_sl0"></div>

<p>Over the course of this article we&rsquo;ll build a simple camera from first principles. Our first steps will be very modest – we&rsquo;ll simply try to take any picture. To do that we need to have a sensor capable of detecting and measuring light that shines onto it.</p>

<h1 id="recording-light">Recording Light<a href="https://ciechanow.ski/cameras-and-lenses/#recording-light" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Before the dawn of the digital era, photographs were taken on a piece of film covered in crystals of <a href="https://en.wikipedia.org/wiki/Silver_halide">silver halide</a>. Those compounds are light-sensitive and when exposed to light they form a speck of metallic silver that can later be developed with further chemical processes.</p>

<p>For better or for worse, I&rsquo;m not going to discuss analog devices – these days most cameras are digital. Before we continue our discussion of light we&rsquo;ll use the classic trick of turning the illumination off. Don&rsquo;t worry though, we&rsquo;re not going to stay in darkness for too long.</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>

<div class="dark_light_bg"><div class="bg_content">

<p><br></p>

<p>The <a href="https://en.wikipedia.org/wiki/Image_sensor">image sensor</a> of a digital camera consists of a grid of photodetectors. A&nbsp;photodetector converts photons into electric current that can be measured – the more photons hitting the detector the higher the signal.</p>

<p>In the demonstration below you can observe how photons fall onto the arrangement of detectors represented by small squares. After some processing, the value read by each detector is converted to the brightness of the resulting image pixels which you can see on the right side. I&rsquo;m also symbolically showing which <em>photosite</em> was hit with a short highlight. The slider below controls the flow of time:</p>

<div class="drawer_container double_drawer" id="lens_detector"></div>
<div class="long_slider" id="lens_detector_sl0"></div>

<p>The longer we collect photons, the more of them hit the detectors and the brighter the resulting pixels in the image become. When we don&rsquo;t gather enough photons the image is <a href="#" class="link_button" onclick="lens_under_exp();return false;">underexposed</a>, but if we allow the photon collection to run for too long the image will be <a href="#" class="link_button" onclick="lens_over_exp();return false;">overexposed</a>.</p>
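<p>In code, this toy model of exposure is tiny – a sketch with made-up units, not a real sensor model:</p>

<pre><code>// Toy model: each photosite counts photons at some rate; exposure
// time scales the count, and values clip at "full well" capacity,
// which is exactly what a blown-out, overexposed pixel is.
func exposedPixels(photonRates: [Double], shutterTime: Double) -> [Double] {
    return photonRates.map { rate in
        let collected = rate * shutterTime
        return min(collected / 1000.0, 1.0)  // 1000 photons = full brightness
    }
}

// Short exposure: dark, underexposed pixels.
let dark = exposedPixels(photonRates: [200, 800, 1500], shutterTime: 0.5)
// Long exposure: bright areas clip to pure white.
let blown = exposedPixels(photonRates: [200, 800, 1500], shutterTime: 5.0)
</code></pre>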

<p>While the photons have the &ldquo;color&rdquo; of their <a href="https://ciechanow.ski/color-spaces/#and-there-was-light">wavelength</a>, the photodetectors don&rsquo;t see that hue – they only measure the total intensity, which results in a black and white image. To record the color information we need to separate the incoming photons into distinct groups. We can put tiny color filters on top of the detectors so that they will only accept, more or less, red, green, or blue light:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_rgb_filter"></div>
<div id="lens_rgb_filter_sl0"></div>

<p>This <a href="https://en.wikipedia.org/wiki/Color_filter_array">color filter array</a> can be arranged in many different formations. One of the simplest is a <a href="https://en.wikipedia.org/wiki/Bayer_filter">Bayer filter</a> which uses one red, one blue, and <em>two</em> green filters arranged in a 2x2 grid:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_bayer"></div>

<p>A Bayer filter uses two green filters because light in the green part of the spectrum heavily <a href="https://en.wikipedia.org/wiki/Luminosity_function">correlates</a> with perceived brightness. If we now repeat this pattern across the entire sensor we&rsquo;re able to collect color information. For the next demo we will also double the resolution to an astonishing 1 kilopixel arranged in a 32x32 grid:</p>

<div class="drawer_container double_drawer" id="lens_detector_rgbg"></div>
<div class="long_slider" id="lens_detector_rgbg_sl0"></div>

<p>Note that the individual sensors themselves still only see the intensity, and not the color, but knowing the arrangement of the filters we can recreate the colored intensity of each sensor, as shown on the right side of the simulation.</p>

<p>The final step of obtaining a normal image is called <a href="https://en.wikipedia.org/wiki/Demosaicing"><em>demosaicing</em></a>. During demosaicing we want to reconstruct the full color information by filling in the gaps in the captured RGB values. One of the simplest ways to do it is to just linearly interpolate the values between the existing neighbors. I&rsquo;m not going to focus on the details of the many other available demosaicing algorithms and I&rsquo;ll just present the resulting image created by the process:</p>

<div class="drawer_container double_drawer" id="lens_detector_rgb"></div>
<div class="long_slider" id="lens_detector_rgb_sl0"></div>

<p>Notice that yet again the overall brightness of the image depends on the length of time for which we let the photons through. That duration is known as <a href="https://en.wikipedia.org/wiki/Shutter_speed"><em>shutter speed</em></a> or exposure time. For most of this presentation I will ignore the time component and we will simply assume that the shutter speed has been set <em>just right</em> so that the image is well exposed.</p>

<p>The examples we&rsquo;ve discussed so far were very convenient – we were surrounded by complete darkness with the photons neatly hitting the pixels to form a coherent image. Unfortunately, we can&rsquo;t count on the photon paths to be as favorable in real environments, so let&rsquo;s see how the sensor performs in more realistic scenarios.</p>

<p><br></p>


</div>
</div>

<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>

<p>Over the course of this article we will be taking pictures of this simple scene. The almost white background of this website is also a part of the scenery – it represents a bright overcast sky. You can drag around the demo to see it from other directions:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_scene"></div>

<p>Let&rsquo;s try to see what sort of picture would be taken by a sensor that is placed near the objects without any enclosure. I&rsquo;ll also significantly increase the sensor&rsquo;s resolution to make the pixels of the final image align with the pixels of your display. In the demonstration below the left side represents a view of the scene with the small greenish sensor present, while the right one shows the taken picture:</p>

<div class="drawer_container double_drawer" id="lens_bare_film"></div>

<p>This is not a mistake. As you can see, the obtained image doesn&rsquo;t really resemble anything. To understand why this happens let&rsquo;s first look at the light radiated from the scene.</p>

<p>If you had a chance to explore how <a href="https://ciechanow.ski/lights-and-shadows/#reflections">surfaces reflect light</a>, you may recall that most matte surfaces scatter the incoming light in every direction. While I&rsquo;m only showing a few examples, <em>every</em> point on every surface of this scene reflects the photons it receives from the whiteish background light source all around itself:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_scene_rays"></div>

<p>The red sphere ends up radiating red light, the green sphere radiates green light, and the gray checkerboard floor reflects white light of lesser intensity. Most importantly, however, the light emitted from the background is <em>also</em> visible to the sensor.</p>

<p>The problem with our current approach to taking pictures is that every pixel of the sensor is exposed to the <em>entire</em> environment. Light radiated from every point of the scene and the white background hits every point of the sensor. In the simulation below you can witness how light from different directions hits one point on the surface of the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_sensor_rays"></div>

<p>Clearly, to obtain a discernible image we have to limit the range of directions that affect a given pixel on the sensor. With that in mind, let&rsquo;s put the sensor in a box that has a small hole in it. The first slider controls the <span class="lens_black">diameter</span> of the hole, while the second one controls the <span class="lens_yellow">distance</span> between the opening and the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box"></div>

<div class="lens_black" id="lens_box_sl0"></div>

<div class="lens_yellow" id="lens_box_sl1"></div>

<p>While not shown here, the inner sides of the walls are all black so that no light is reflected inside the box. I also put the sensor on the back wall so that the light from the hole shines onto it. We&rsquo;ve just built a <a href="https://en.wikipedia.org/wiki/Pinhole_camera"><em>pinhole camera</em></a> – let&rsquo;s see how it performs. Observe what happens to the taken image as we tweak the <span class="lens_black">diameter</span> of the hole with the first slider, or change the <span class="lens_yellow">distance</span> between the opening and the sensor with the second one:</p>

<div class="drawer_container double_drawer" id="lens_film"></div>

<div class="lens_black" id="lens_film_sl0"></div>

<div class="lens_yellow" id="lens_film_sl1"></div>

<p>There are so many interesting things happening here! The most pronounced effect is that the image is inverted. To understand why this happens let&rsquo;s look at the schematic view of the scene that shows the light rays radiated from the objects, going through the hole, and hitting the sensor:</p>

<div class="drawer_container double_drawer move_cursor" id="lens_film_invert"></div>

<p>As you can see, the rays cross over in the hole and the formed image is a horizontal and a vertical reflection of the actual scene. Those two flips end up forming a 180&deg; rotation. Since rotated images aren&rsquo;t convenient to look at, all cameras automatically rotate the image for presentation, and for the rest of this article I will do so as well.</p>

<p>When we change the <span class="lens_yellow">distance</span> between the hole and the sensor the viewing angle changes drastically. If we trace the rays falling on the corner pixels of the sensor we can see that they define the extent of the visible section of the scene:</p>

<div class="drawer_container double_drawer" id="lens_frustum"></div>

<div class="lens_yellow" id="lens_frustum_sl0"></div>

<p>Rays of light coming from outside of that shape still go through the pinhole, but they land outside of the sensor and aren&rsquo;t recorded. As the hole moves further away from the sensor, the angle, and thus the <a href="https://en.wikipedia.org/wiki/Field_of_view">field of view</a> visible to the sensor, gets smaller. We can see this in a top-down view of the camera:</p>

<div class="drawer_container double_drawer" id="lens_field"></div>

<div class="lens_yellow" id="lens_field_sl0"></div>

<p>Coincidentally, that top-down diagram also helps us explain two other effects. Firstly, in the photograph the red sphere looks almost as big as the green one, even though the scene view shows that the latter is much larger. However, both spheres end up occupying roughly <a href="#" class="link_button" onclick="lens_field_0();return false;">the same span</a> on the sensor and their size in the picture is similar. It&rsquo;s also worth noting that the spheres seem to grow when the field of view gets narrower because their light covers a larger part of the sensor.</p>

<p>Secondly, notice that different pixels of the sensor have different distance and relative orientation to the hole. The pixels right in the center of the sensor see the pinhole straight on, but pixels positioned at an angle to the main axis see a distorted pinhole that is further away. The ellipse in the bottom right corner of the demonstration below shows how a pixel positioned at the blue point sees the pinhole:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_hole_pixel_view"></div>

<div class="lens_blue" id="lens_hole_pixel_view_sl0"></div>

<p>This change in the visible area of the hole causes the darkening we see in the corners of the photograph. The value of the <em>cosine</em> of the angle I&rsquo;ve marked with a <span class="lens_yellow">yellow color</span> is quite important as it contributes to the reduction of visible light in four different ways:</p>

<ul>
<li>Two cosine factors from the increased distance to the hole – this is essentially the <a href="https://ciechanow.ski/lights-and-shadows/#inverse_square">inverse square law</a></li>
<li>A cosine factor from the side squeeze of the circular hole seen at an angle</li>
<li>A cosine factor from the relative <a href="https://ciechanow.ski/lights-and-shadows/#cosine_factor">tilt of the receptor</a></li>
</ul>

<p>These four factors conspire together to reduce the illumination by a factor of <strong>cos<sup>4</sup>(&alpha;)</strong> in what is known as <em>cosine-fourth-power law</em>, also described as <a href="https://en.wikipedia.org/wiki/Vignetting#Natural_vignetting">natural vignetting</a>.</p>

<p>Since we know the relative geometry of the camera and the opening, we can correct for this effect by simply dividing by the falloff factor, and from this point on I will make sure that the images don&rsquo;t have darkened corners.</p>
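
<p>The correction itself is a one-liner. As a rough sketch, assuming an idealized pinhole geometry and a pixel at some offset from the optical axis:</p>

<pre><code>import math

def vignetting_gain(pixel_offset, hole_distance):
    # Angle between the camera axis and the line from the pixel
    # to the center of the hole.
    alpha = math.atan(pixel_offset / hole_distance)
    # Dividing a pixel's measured value by cos^4(alpha) undoes the
    # natural falloff and flattens the corners.
    return 1 / math.cos(alpha) ** 4
</code></pre>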

<p>The final effect we can observe is that when the hole gets smaller the image gets sharper. Let&rsquo;s see how the light radiated from two points of the scene ends up going through the camera depending on the <span class="lens_black">diameter</span> of the pinhole:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_hole_solid_angle"></div>

<div class="lens_black" id="lens_hole_solid_angle_sl0"></div>

<p>We can already see that a larger hole ends up creating a bigger spread on the sensor. Let&rsquo;s see this situation up close on a simple grid of detecting cells. Notice what happens to the size of the final circle hitting the sensor as the <span class="lens_black">diameter</span> of the hole changes:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_hole_sharpness"></div>

<div class="lens_black" id="lens_hole_sharpness_sl0"></div>

<p>When the hole is <a href="#" class="link_button" onclick="lens_sharp_0();return false;">small enough</a> rays from the source only manage to hit one pixel on the sensor. However, at <a href="#" class="link_button" onclick="lens_sharp_1();return false;">larger radii</a> the light spreads onto other pixels, and a tiny point in the scene is no longer represented by a single pixel, causing the image to no longer be sharp.</p>
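
<p>The size of that spread follows from similar triangles. Ignoring diffraction, a sketch with made-up dimensions:</p>

<pre><code>def pinhole_blur_diameter(hole_diameter, source_distance, sensor_distance):
    # Rays from a point source passing through the opposite edges of
    # the hole keep diverging behind it, so the hole's "shadow" grows.
    return hole_diameter * (1 + sensor_distance / source_distance)

# A 0.5 mm hole, a source 1000 mm in front, a sensor 50 mm behind:
print(pinhole_blur_diameter(0.5, 1000, 50))  # 0.525 mm blur spot
</code></pre>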

<p>It&rsquo;s worth pointing out that sharpness is ultimately arbitrary – it depends on the size at which the final image is seen, viewing conditions, and visual acuity of the observer. The same photograph that looks sharp on a postage stamp may in fact be very blurry when seen on a big display.</p>

<p>By reducing the size of the cone of light we can make sure that the source light affects a limited number of pixels. Here, however, lies the problem. The sensor we&rsquo;ve been using so far has been an idealized detector capable of flawless adjustment of its sensitivity to the lighting conditions. If we instead fixed the sensor&rsquo;s sensitivity, the captured image would look more like this:</p>

<div class="drawer_container double_drawer" id="lens_film_exposure"></div>

<div class="lens_black" id="lens_film_exposure_sl0"></div>

<div class="lens_yellow" id="lens_film_exposure_sl1"></div>

<p>As the relative size of the hole visible to the pixels of the sensor gets smaller, be it due to reduced <span class="lens_black">diameter</span> or increased <span class="lens_yellow">distance</span>, fewer photons hit the surface and the image gets dimmer.</p>

<p>To increase the number of photons we capture we could extend the duration of collection, but increasing the exposure time comes with its own problems – if the photographed object moves or the camera isn&rsquo;t held steady we risk introducing some <a href="https://en.wikipedia.org/wiki/Motion_blur">motion blur</a>.</p>

<p>Alternatively, we could increase the <a href="https://en.wikipedia.org/wiki/Film_speed">sensitivity</a> of the sensor, which is described using the ISO rating. However, boosting the ISO may introduce a higher level of <a href="https://en.wikipedia.org/wiki/Image_noise">noise</a>. Even with these problems solved, an image obtained through smaller and smaller holes would eventually start getting blurry again due to the <a href="https://en.wikipedia.org/wiki/Diffraction">diffraction</a> of light.</p>

<p>If you recall how diffuse surfaces reflect light you may also realize how incredibly inefficient a pinhole camera is. A single point on the surface of an object radiates light into its surrounding hemisphere, however, the pinhole captures only a tiny portion of that light.</p>

<p>More importantly, however, a pinhole camera gives us minimal artistic control over <em>which</em> parts of the picture are blurry. In the demonstration below you can witness how changing which object is in focus heavily affects the primary target of attention in the photograph:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo"></div>

<div id="lens_focus_demo_sl0"></div>

<p>Let&rsquo;s try to build an optical device that would solve both of these problems: we want to find a way to harness a bigger part of the energy radiated by the objects and also control what is blurry and <em>how</em> blurry it is. For the objects in the scene that are supposed to be sharp we want to collect a big chunk of their light and make it converge to the smallest possible point. In essence, we&rsquo;re looking for an instrument that will do something like this:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_device"></div>

<p>We could then put the sensor at the focus point and obtain a sharp image. Naturally, the contraption we&rsquo;ll try to create has to be transparent so that the light can pass through it and get to the sensor, so let&rsquo;s begin the investigation by looking at a piece of glass.</p>

<h1 id="glass">Glass<a href="https://ciechanow.ski/cameras-and-lenses/#glass" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>In the demonstration below I put a red stick behind a pane of glass. You can adjust the thickness of this pane with the <span class="lens_gray">gray slider</span> below:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_glass"></div>

<div id="lens_glass_sl0"></div>

<p>When you look at the stick through the surface of a thick glass <a href="#" class="link_button" onclick="lens_glass_0();return false;">straight on</a>, everything looks normal. However, as your viewing direction <a href="#" class="link_button" onclick="lens_glass_1();return false;">changes</a> the stick seen through the glass seems out of place. The thicker the glass and the steeper the viewing angle the bigger the offset.</p>

<p>Let&rsquo;s focus on one point on the surface of the stick and see how the rays of light radiated from its surface propagate through the subsection of the glass. The <span class="lens_red">red slider</span> controls the position of the source and the <span class="lens_gray">gray slider</span> controls the thickness. You can drag the demo around to see it from different viewpoints:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_glass_rays"></div>

<div class="lens_red" id="lens_glass_rays_sl0"></div>

<div id="lens_glass_rays_sl1"></div>

<p>For some reason the rays passing through glass at an angle are <a href="#" class="link_button" onclick="lens_glass_rays_0();return false;">deflected off their paths</a>. The change of direction happens whenever the ray enters or leaves the glass.</p>

<p>To understand <em>why</em> the light changes direction we have to peek under the covers of <a href="https://en.wikipedia.org/wiki/Classical_electromagnetism">classical electromagnetism</a> and talk a bit more about waves.</p>

<h1 id="waves">Waves<a href="https://ciechanow.ski/cameras-and-lenses/#waves" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>It&rsquo;s impossible to talk about wave propagation without involving the time component, so the simulations in this section are animated – you can play and pause them by <span class="click_word">clicking</span><span class="tap_word">tapping</span> on the button in their bottom left corner.</p>

<p>By default all animations are <span id="global_animate_on">enabled, but if you find them distracting, or if you want to save power, you can <a href="#" class="link_button" onclick="global_animate(false);return false;">globally pause</a> all the following demonstrations.</span><span id="global_animate_off" class="hidden">disabled, but if you&rsquo;d prefer to have things moving as you read you can <a href="#" class="link_button" onclick="global_animate(true);return false;">globally unpause</a> them and see all the waves oscillating.</span></p>

<p>Let&rsquo;s begin by introducing the simplest sinusoidal wave:</p>

<div class="drawer_container double_drawer" id="lens_sine"></div>

<div class="lens_black" id="lens_sine_sl0"></div>

<div id="lens_sine_sl1"></div>

<p>A wave like this can be characterized by two components. <a href="https://en.wikipedia.org/wiki/Wavelength">Wavelength</a> <strong>&lambda;</strong> is the distance over which the shape of the wave repeats. Period <strong>T</strong> defines how much time a full cycle takes.</p>

<p><a href="https://en.wikipedia.org/wiki/Frequency">Frequency</a> <strong>f</strong>, is just a reciprocal of period and it&rsquo;s more commonly used – it defines how many waves per second have passed over some fixed point. Wavelength and frequency define <a href="https://en.wikipedia.org/wiki/Phase_velocity">phase velocity</a> <strong>v<sub>p</sub></strong> which describes how quickly a point on a wave, e.g. a peak, moves:</p>

<div class="equation">
<span class="equation_frac">v<sub>p</sup></span> = <span class="equation_frac">&lambda;</span> &middot; <span class="equation_frac">f</span>
</div>

<p>The sinusoidal wave is the building block of a polarized electromagnetic plane wave. As the name implies electromagnetic radiation is an interplay of oscillations of electric field <strong>E</strong> and magnetic field <strong>B</strong>:</p>

<div class="drawer_container double_drawer move_cursor" id="lens_em"></div>

<p>In an electromagnetic wave the magnetic field is tied to the electric field, so I&rsquo;m going to hide the former and just visualize the latter. Observe what happens to the electric component of the field as it passes through a block of glass. I need to note that the dimensions of the wavelengths are <em>not</em> to scale:</p>

<div class="drawer_container move_cursor" id="lens_wave_glass"></div>

<p>Notice that the wave remains continuous at the boundary, and inside the glass the frequency of the passing wave remains constant. However, the wavelength, and thus the phase velocity, are reduced – you can see it clearly <a href="#" class="link_button" onclick="lens_wave_glass_0();return false;">from the side</a>.</p>

<p>The microscopic reason for the phase velocity change is <a href="https://en.wikipedia.org/wiki/Ewald–Oseen_extinction_theorem">quite complicated</a>, but it can be quantified using the <a href="https://en.wikipedia.org/wiki/Refractive_index"><em>index of refraction</em></a> <strong>n</strong>, which is the ratio of the speed of light <strong>c</strong> to the phase velocity <strong>v<sub>p</sub></strong> of the lightwave in that medium:</p>

<div class="equation">
<span class="equation_frac">n</span> = <span class="equation_frac"><span>c</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">v<sub>p</sub></span>
</span>
</div>

<p>The higher the index of refraction the <em>slower</em> light propagates through the medium. In the table below I&rsquo;ve presented a few different indices of refraction for some materials:</p>

<table>
<tr><td class="lens_list_item_material">vacuum</td><td class="lens_list_item_index">1.00</td></tr>
<tr><td class="lens_list_item_material">air</td><td class="lens_list_item_index">1.0003</td></tr>
<tr><td class="lens_list_item_material">water</td><td class="lens_list_item_index">1.33</td></tr>
<tr><td class="lens_list_item_material">glass</td><td class="lens_list_item_index">1.53</td></tr>
<tr><td class="lens_list_item_material">diamond</td><td class="lens_list_item_index">2.43</td></tr>
</table>
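
<p>Since the frequency is unchanged at the boundary, each of these indices translates directly into a phase velocity and an in-medium wavelength. A quick sketch:</p>

<pre><code>c = 299_792_458  # speed of light in vacuum, in m/s

def in_medium(vacuum_wavelength, n):
    # Frequency stays the same, so both the phase velocity and the
    # wavelength shrink by the factor n.
    return c / n, vacuum_wavelength / n

# 550 nm green light entering glass:
v_p, wavelength = in_medium(550e-9, 1.53)
print(v_p)         # about 1.96e8 m/s
print(wavelength)  # about 359 nm inside the glass
</code></pre>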

<p>Light traveling through air barely slows down, but in a diamond it&rsquo;s over twice as slow. Now that we understand how <span class="lens_black">index of refraction</span> affects the wavelength in the glass, let&rsquo;s see what happens when we change the <span class="lens_gray">direction</span> of the incoming wave:</p>

<div class="drawer_container move_cursor" id="lens_wave_glass2"></div>

<div class="lens_black" id="lens_wave_glass2_sl0"></div>

<div id="lens_wave_glass2_sl1"></div>

<p>The wave in the glass has a shorter wavelength, but it still has to match the positions of its peaks and valleys across the boundary. As such, the direction of propagation <a href="#" class="link_button" onclick="lens_wave_glass_2();return false;">must change</a> to ensure that continuity.</p>

<p>I need to note that the previous two demonstrations presented a two dimensional wave, since that allowed me to show the sinusoidal component oscillating into the third dimension. In the real world lightwaves are three dimensional and I can&rsquo;t really visualize the sinusoidal component without using the fourth dimension, which has <a href="https://ciechanow.ski/tesseract/">its own set of complications</a>.</p>

<p>The alternative way of presenting waves is to use <a href="https://en.wikipedia.org/wiki/Wavefront"><em>wavefronts</em></a>. Wavefronts connect the points of the same phase of the wave, e.g. all the peaks or valleys. In two dimensions wavefronts are represented by lines:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_2d"></div>

<p>In three dimensions the wavefronts are represented by <em>surfaces</em>. In the demonstration below a single source emits a spherical wave, points of the same phase in the wave are represented by the moving shells:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_3d"></div>

<p>By drawing lines that are perpendicular to the surface of the wavefront we create the familiar rays. In this interpretation rays simply show the local direction of wave propagation which can be seen in this example of a section of a spherical 3D wave:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_rays"></div>

<p>I will continue to use the ray analogy to quantify the change in direction of light passing through materials. The relation between the angle of incidence <strong>&theta;<sub>1</sub></strong> and angle of refraction <strong>&theta;<sub>2</sub></strong> can be formalized with the equation known as <a href="https://en.wikipedia.org/wiki/Snell%27s_law">Snell&rsquo;s law</a>:</p>

<div class="equation">
<span class="lens_blue">n<sub>1</sub></span> &middot; sin(&theta;<sub>1</sub>) = <span class="lens_yellow">n<sub>2</sub></span> &middot; sin(&theta;<sub>2</sub>)
</div>

<p>It describes how a ray of light changes direction relative to the surface normal on the border between two different media. Let&rsquo;s see it in action:</p>

<div class="drawer_container square_drawer small_drawer" id="lens_snell"></div>

<div class="lens_black" id="lens_snell_sl0"></div>

<div class="lens_blue" id="lens_snell_sl1"></div>

<div class="lens_yellow" id="lens_snell_sl2"></div>

<p>When traveling from a less to more refractive material the ray bends <a href="#" class="link_button" onclick="lens_snell_0();return false;"><em>towards</em> the normal</a>, but when the ray exits the object with higher index of refraction it bends <a href="#" class="link_button" onclick="lens_snell_1();return false;"><em>away</em> from the normal</a>.</p>

<p>Notice that in <a href="#" class="link_button" onclick="lens_snell_2();return false;">some configurations</a> the refracted ray completely disappears, however, this doesn&rsquo;t paint a full picture because we&rsquo;re currently completely ignoring reflections.</p>

<p>All transparent objects reflect some amount of light. You may have noticed that reflection on the surface of a calm lake, or even on the other side of the glass in the demonstration at the beginning of the <a href="#glass">previous section</a>. The intensity of that reflection depends on the index of refraction of the material and the angle of the incident ray. Here&rsquo;s a more realistic demonstration of how light would get refracted <em>and</em> reflected between two media:</p>

<div class="drawer_container square_drawer small_drawer" id="lens_snell2"></div>

<div class="lens_black" id="lens_snell2_sl0"></div>

<div class="lens_blue" id="lens_snell2_sl1"></div>

<div class="lens_yellow" id="lens_snell2_sl2"></div>

<p>The relation between <em>transmittance</em> and <em>reflectance</em> is determined by the <a href="https://en.wikipedia.org/wiki/Fresnel_equations">Fresnel equations</a>. Observe that the curious case of missing light that we saw previously <a href="#" class="link_button" onclick="lens_snell_3();return false;">no longer occurs</a> – that light is actually reflected. The transition from partial reflection and refraction to complete reflection is continuous, but near the end it&rsquo;s very rapid, and at some point the refraction <a href="#" class="link_button" onclick="lens_snell_4();return false;">completely disappears</a> in an effect known as <a href="https://en.wikipedia.org/wiki/Total_internal_reflection">total internal reflection</a>.</p>
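
<p>The full Fresnel equations track the two polarization components separately; in computer graphics the reflectance is often ballparked with <a href="https://en.wikipedia.org/wiki/Schlick%27s_approximation">Schlick&rsquo;s approximation</a> instead – exact at normal incidence, only a rough fit elsewhere:</p>

<pre><code>def schlick_reflectance(cos_theta, n1, n2):
    # Reflectance at normal incidence.
    r0 = ((n1 - n2) / (n1 + n2)) ** 2
    # This approximation holds when entering the denser medium; it does
    # not model total internal reflection on the way out.
    return r0 + (1 - r0) * (1 - cos_theta) ** 5

# Looking straight at a pane of glass only about 4% of the light
# is reflected back:
print(schlick_reflectance(1.0, 1.0, 1.53))  # ~0.044
</code></pre>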

<p>The <a href="https://en.wikipedia.org/wiki/Total_internal_reflection#Critical_angle"><em>critical angle</em></a> at which the total internal reflection starts to happen depends on the indices of refraction of the boundary materials. Since that coefficient is low for air, but very high for diamond a <a href="https://en.wikipedia.org/wiki/Brilliant_(diamond_cut)">proper cut</a> of the faces <a href="https://physics.stackexchange.com/questions/43361/why-do-diamonds-shine/43373#43373">makes diamonds</a> very shiny.</p>

<p>While interesting on its own, reflection in glass isn&rsquo;t very relevant to our discussion and for the rest of this article we&rsquo;re not going to pay much attention to it. Instead, we&rsquo;ll simply assume that the materials we&rsquo;re using are covered with high quality <a href="https://en.wikipedia.org/wiki/Anti-reflective_coating">anti-reflective coating</a>.</p>

<h1 id="manipulating-rays">Manipulating Rays<a href="https://ciechanow.ski/cameras-and-lenses/#manipulating-rays" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Let&rsquo;s go back to the example that started the discussion of light and glass. When both sides of a piece of glass are parallel, the ray is shifted, but it still travels in the same direction. Observe what happens to the ray when we change the relative angle of the surfaces of the glass:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_parallel"></div>

<div id="lens_parallel_sl0"></div>

<p>When we make the two surfaces of the glass <em>not</em> parallel we gain the ability to change the direction of the rays. Recall that we&rsquo;re trying to make the rays hitting the optical device <em>converge</em> at a certain point. To do that we have to bend the rays in the upper part down and, conversely, bend the rays in the lower part up.</p>

<p>Let&rsquo;s see what happens if we shape the glass to have different angles between its walls at different heights. In the demonstration below you can control how many distinct segments a piece of glass is shaped into:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_subdiv"></div>

<div id="lens_subdiv_seg0"></div>

<p>As the number of segments <a href="#" class="link_button" onclick="lens_subdiv_0();return false;">approaches infinity</a> we end up with a continuous surface without any edges. If we look at the crossover point <a href="#" class="link_button" onclick="lens_subdiv_1();return false;">from the side</a> you may notice that we&rsquo;ve managed to converge the rays across one axis, but the top-down view <a href="#" class="link_button" onclick="lens_subdiv_2();return false;">reveals</a> that we&rsquo;re not done yet. To focus all the rays we need to replicate that smooth shape across <em>all</em> possible directions – we need rotational symmetry:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_rotational"></div>

<p>We&rsquo;ve created a <em>convex</em> <a href="https://en.wikipedia.org/wiki/Thin_lens">thin lens</a>. This lens is idealized – in a later part of the article we&rsquo;ll discuss how real lenses aren&rsquo;t as perfect, but for now it will serve us very well. Let&rsquo;s see what happens to the focus point when we change the position of the <span class="lens_red">red</span> source:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_rotational_focal"></div>

<div class="lens_red" id="lens_rotational_focal_sl0"></div>

<p>When the source is positioned <a href="#" class="link_button" onclick="lens_inf();return false;">very far away</a> the incoming rays become parallel, and after passing through the lens they converge at a certain distance away from the center. That distance is known as the <a href="https://en.wikipedia.org/wiki/Focal_length"><em>focal length</em></a>.</p>

<p>The previous demonstration also shows two more general distances: <strong>s<sub>o</sub></strong> which is the distance between the <strong>o</strong>bject, or source, and the lens, as well as <strong>s<sub>i</sub></strong> which is the distance between the <strong>i</strong>mage and the lens. These two values and the focal length <strong>f</strong> are related by the <a href="https://en.wikipedia.org/wiki/Thin_lens#Image_formation"><em>thin lens equation</em></a>:</p>

<div class="equation">
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">s<sub>o</sub></span>
</span>
+
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">s<sub>i</sub></span>
</span>
=
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">f</span>
</span>
</div>
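
<p>The equation is easy to put to work. A small sketch that solves it for the image distance, with example numbers of my own:</p>

<pre><code>def image_distance(s_o, f):
    # Thin lens equation 1/s_o + 1/s_i = 1/f, solved for s_i.
    return 1 / (1 / f - 1 / s_o)

# A 50 mm lens focused on an object 2000 mm away:
s_i = image_distance(2000, 50)
print(s_i)         # ~51.28 mm behind the lens
print(s_i / 2000)  # magnification: the image is ~0.026x the object
</code></pre>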

<p>The focal length of a lens depends on both the <span class="lens_black">index of refraction</span> of the material from which the lens is made and its <span class="lens_blue">shape</span>:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_focal_length"></div>

<div class="lens_black" id="lens_focal_length_sl1"></div>

<div class="lens_blue" id="lens_focal_length_sl0"></div>

<p>Now that we understand how a simple convex lens works we&rsquo;re ready to mount it into the hole of our camera. We will still control the <span class="lens_yellow">distance</span> between the sensor and the lens, but instead of controlling the diameter of the lens we&rsquo;ll control its <span class="lens_blue">focal length</span>:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box_lens"></div>

<div class="lens_blue" id="lens_box_lens_sl0"></div>

<div class="lens_yellow" id="lens_box_lens_sl1"></div>

<p>When you look at the lens <a href="#" class="link_button" onclick="lens_camera_lens();return false;">from the side</a> you may observe how the <span class="lens_blue">focal length</span> change is tied to the shape of the lens. Let&rsquo;s see how this new camera works in action:</p>

<div class="drawer_container double_drawer" id="lens_basic"></div>

<div class="lens_blue" id="lens_basic_sl0"></div>

<div class="lens_yellow" id="lens_basic_sl1"></div>

<p>Once again, a lot of things are going on here! Firstly, let&rsquo;s try to understand how the image is formed in the first place. The demonstration below shows paths of rays from two separate points in the scene. After going through the lens they end up hitting the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_lens_solid_angle"></div>

<div class="lens_blue" id="lens_lens_solid_angle_sl0"></div>

<div class="lens_yellow" id="lens_lens_solid_angle_sl1"></div>

<p>Naturally, this process happens for <em>every</em> single point in the scene, which creates the final image. Similarly to a pinhole, a convex lens creates an inverted picture – I&rsquo;m still correcting for this by showing you a rotated photograph.</p>

<p>Secondly, notice that the distance between the lens and the sensor still controls the field of view. As a reminder, the focal length of a lens simply defines the distance from the lens at which the rays coming from infinity converge. To achieve a sharp image, the sensor has to be placed at the location where the rays focus and <em>that&rsquo;s</em> what&rsquo;s causing the field of view to change.</p>

<p>In the demonstration below I&rsquo;ve visualized how rays from a very far object focus through a lens of adjustable <span class="lens_blue">focal length</span>. Notice that to obtain a sharp image we must change the <span class="lens_yellow">distance</span> between the lens and the sensor, which in turn causes the field of view to change:</p>

<div class="drawer_container double_drawer" id="lens_field2"></div>

<div class="lens_blue" id="lens_field2_sl1"></div>

<div class="lens_yellow" id="lens_field2_sl0"></div>

<p>If we want to change the object on which a camera with a lens of a fixed focal length is focused, we have to move the image plane closer or further away from the lens, which affects the angle of view. This effect is called <a href="https://en.wikipedia.org/wiki/Breathing_(lens)">focus breathing</a>:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo2"></div>

<div class="lens_yellow" id="lens_focus_demo2_sl0"></div>

<p>A lens with a fixed focal length like the one above is often called a <em>prime</em> lens, while lenses with adjustable focal length are called <em>zoom</em> lenses. While the lenses in our eyes do dynamically adjust their focal lengths by changing their shape, rigid glass can&rsquo;t do that, so zoom lenses use a system of multiple glass elements that change their relative position to achieve this effect.</p>

<p>In the simulation above notice the difference in sharpness between the red and green spheres. To understand why this happens let&rsquo;s analyze the rays emitted from two points on the surface of the spheres. In the demonstration below the right side shows the light seen by the sensor <em>just</em> from the two marked points on the spheres:</p>

<div class="drawer_container double_drawer" id="lens_lens_solid_angle2"></div>

<div class="lens_yellow" id="lens_lens_solid_angle2_sl0"></div>

<p>The light from the point in focus converges to a point, while the light from an out-of-focus point spreads onto a circle. For larger objects the multitude of overlapping out-of-focus circles creates a smooth blur called
<a href="https://en.wikipedia.org/wiki/Bokeh"><em>bokeh</em></a>. With tiny and bright light sources that circle itself is often visible – you may have seen effects like the one in the demonstration below in some photographs captured in darker environments:</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>

<div class="dark_light_bg"><div class="bg_content">

<p><br></p>

<div class="drawer_container double_drawer" id="lens_bokeh"></div>
<br>
<div class="lens_yellow" id="lens_bokeh_sl0"></div>

<p><br></p>
</div>
</div>

<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>
<p>Notice that the circular shape is visible for lights both in front of and behind the focused distance. As the object is positioned closer or further away from the lens, the image plane &ldquo;slices&rdquo; the cone of light at a different location:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_slice"></div>

<div class="lens_red" id="lens_cone_slice_sl0"></div>

<p>That circular spot is called a <a href="https://en.wikipedia.org/wiki/Circle_of_confusion"><em>circle of confusion</em></a>. While in many circumstances the blurriness of the background or the foreground looks very appealing, it would be very useful to control how much blur there is.</p>
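
<p>With the thin lens equation in hand we can even estimate the size of that circle. A sketch, assuming an idealized thin lens and millimeter units throughout:</p>

<pre><code>def coc_diameter(aperture, f, focus_distance, object_distance):
    # Image distances for the plane in focus and for the object.
    v_focus  = 1 / (1 / f - 1 / focus_distance)
    v_object = 1 / (1 / f - 1 / object_distance)
    # The cone converging at v_object gets sliced by the sensor sitting
    # at v_focus; similar triangles give the diameter of the disk.
    return aperture * abs(v_focus - v_object) / v_object

# 50 mm lens with a 25 mm aperture focused at 2 m; a light 5 m away:
print(coc_diameter(25, 50, 2000, 5000))  # ~0.38 mm on the sensor
</code></pre>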

<p>Unfortunately, we don&rsquo;t have total freedom here – we still want the primary photographed object to remain in focus so its light has to converge to a point. We just want to change the size of the circle of out-of-focus objects without moving the central point. We can accomplish that by changing the <em>angle</em> of the cone of light:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_angle"></div>

<div id="lens_cone_angle_sl0"></div>

<p>There are two methods we can use to modify that angle. Firstly, we can change the focal length of the lens – you may recall that with longer focal lengths the cone of light also gets longer. However, changing the focal length and keeping the primary object in focus requires moving the image plane, which in turn changes how the picture is framed.</p>

<p>The alternative way of reducing the angle of the cone of light is to simply ignore some of the &ldquo;outer&rdquo; rays. We can achieve that by introducing a stop with a hole in the path of light:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_aperture"></div>

<div class="lens_black" id="lens_cone_aperture_sl0"></div>

<p>This hole is called an <a href="https://en.wikipedia.org/wiki/Aperture"><em>aperture</em></a>. In fact, even the hole in which the lens is mounted is an aperture of some sort, but what we&rsquo;re introducing is an <em>adjustable</em> aperture:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box_aperture"></div>

<div class="lens_black" id="lens_box_aperture_sl0"></div>

<div class="lens_yellow" id="lens_box_aperture_sl1"></div>

<p>Let&rsquo;s try to see how an aperture affects the photographs taken with our camera:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo3"></div>

<div class="lens_black" id="lens_focus_demo3_sl0"></div>

<div class="lens_yellow" id="lens_focus_demo3_sl1"></div>

<p>In real camera lenses an adjustable aperture is often constructed from a set of overlapping blades that constitute an <em>iris</em>. The movement of those blades changes the size of the aperture:</p>

<div class="drawer_container square_drawer" id="lens_blades"></div>

<div id="lens_blades_sl0"></div>

<p>The shape of the aperture also defines the shape of bokeh. This is the reason why bokeh sometimes has a polygonal shape – it&rsquo;s simply the shape of the &ldquo;cone&rdquo; of light after passing through the blades of the aperture. Next time you watch a movie pay close attention to the shape of out-of-focus highlights – they&rsquo;re often polygonal:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_hex"></div>

<div id="lens_cone_hex_sl0"></div>

<p>As the aperture diameter decreases, larger and larger areas of the photographed scene remain sharp. The term <a href="https://en.wikipedia.org/wiki/Depth_of_field"><em>depth of field</em></a> is used to define the length of the region over which the objects are acceptably sharp. When describing the depth of field we&rsquo;re trying to conceptually demarcate those two boundary planes and see how far apart they are from each other.</p>

<p>Let&rsquo;s see the depth of field in action. The <span class="lens_black">black slider</span> controls the aperture, the <span class="lens_blue">blue slider</span> controls the focal length, and the <span class="lens_red">red slider</span> changes the position of the object relative to the camera. The <span style="color:#68C626"><strong>green dot</strong></span> shows the place of perfect focus, while the <span style="color:#3E53A7"><strong>dark blue dots</strong></span> show the limits, or the depth, of positions between which the image of the red light source will be reasonably sharp, as shown by a single outlined pixel on the sensor:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_cone_dof"></div>

<div class="lens_black" id="lens_cone_dof_sl0"></div>

<div class="lens_blue" id="lens_cone_dof_sl1"></div>

<div class="lens_red" id="lens_cone_dof_sl2"></div>

<p>Notice that <a href="#" class="link_button" onclick="lens_dof_0();return false;">the larger</a> the <span class="lens_black">diameter of aperture</span> and <a href="#" class="link_button" onclick="lens_dof_1();return false;">the shorter</a> the <span class="lens_blue">focal length</span> the shorter the distance between the <span style="color:#3E53A7"><strong>dark blue dots</strong></span> and thus the <em>shallower</em> the depth of field becomes. If you recall our discussion of sharpness this demonstration should make it easier to understand why reducing the angle of the cone <em>increases</em> the depth of field.</p>

<p>If you don&rsquo;t have perfect vision you may have noticed that squinting your eyes makes you see things a little better. Your eyelids covering some part of your iris simply act as an aperture that decreases the angle of the cone of light falling into your eyes, making things slightly less blurry on your retina.</p>

<p>An interesting observation is that the aperture defines the diameter of the base of the captured cone of light that is emitted from the object. An aperture with twice the diameter captures roughly <em>four</em> times more light due to the increased <a href="https://ciechanow.ski/lights-and-shadows/#solid-angles">solid angle</a>. In practice, the actual size of the aperture as seen from the point of view of the scene, or the <a href="https://en.wikipedia.org/wiki/Entrance_pupil"><em>entrance pupil</em></a>, depends on all the lenses in front of it, as the shaped glass may scale the perceived size of the aperture.</p>

<p>On the other hand, when a lens is focused correctly, the focal length defines how large a source object is in the picture. By doubling the focal length we double the width <em>and</em> the height of the object on the sensor, thus increasing its area by a factor of four. The light from the source is more spread out and each individual pixel receives less light.</p>

<p>The total amount of light hitting each pixel is proportional to the <em>ratio</em> between the focal length <strong>f</strong> and the diameter of the entrance pupil <strong>D</strong>. This ratio is known as the <a href="https://en.wikipedia.org/wiki/F-number"><em>f-number</em></a>:</p>

<div class="equation">
<span class="equation_frac">N</span> = <span class="equation_frac"><span>f</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">D</span>
</span>
</div>

<p>A lens with a focal length of 50 mm and an entrance pupil of 25 mm would have <strong>N</strong> equal to 2, and its <em>f</em>-number would be written as <em>f</em>/2. Since the amount of light getting to each pixel of the sensor increases with the diameter of the aperture and decreases with the focal length, the <em>f</em>-number controls the brightness of the projected image.</p>

<p>The <em>f</em>-number with which commercial lenses are marked usually defines the maximum aperture a lens can achieve, and the smaller the <em>f</em>-number, the more light the lens passes through. A bigger amount of incoming light allows for a shorter exposure time, so the smaller the <em>f</em>-number, the <a href="https://en.wikipedia.org/wiki/Lens_speed"><em>faster</em></a> the lens. By reducing the size of the aperture we can modify the <em>f</em>-number with which a picture is taken.</p>

<p>The <em>f</em>-numbers are often multiples of 1.4, which is an approximation of <span class="sqrt">2</span>. Scaling the diameter of an adjustable aperture by <span class="sqrt">2</span> scales its <em>area</em> by 2, which is a convenient factor to use. Increasing the <em>f</em>-number by a so-called <a href="https://en.wikipedia.org/wiki/F-number#Stops,_f-stop_conventions,_and_exposure"><em>stop</em></a> halves the amount of received light. The demonstration below shows the relative sizes of the aperture through which light is being seen:</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>

<div class="dark_light_bg"><div class="bg_content">

<p><br></p>

<div class="drawer_container square_drawer" id="lens_f"></div>
<div id="lens_f_seg0"></div>

<p>To maintain the overall brightness of the image when <a href="https://en.wikipedia.org/wiki/Stopping_down">stopping down</a> we&rsquo;d have to either increase the exposure time or the sensitivity of the sensor.</p>

<p><br></p>

</div>
</div>

<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>
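<p>To make the stop arithmetic concrete, here&rsquo;s a tiny sketch that generates the full-stop series and the relative amount of light each one passes:</p>

<pre><code>import math

# Each full stop multiplies the f-number by sqrt(2), which halves the
# aperture area and therefore the collected light.
for i in range(9):
    N = math.sqrt(2) ** i
    print(f"f/{N:.1f}  relative light: {1 / N ** 2:.3f}")

# Real lenses round the series to the familiar
# 1, 1.4, 2, 2.8, 4, 5.6, 8, 11, 16.
</code></pre>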
<p>While aperture settings let us easily control the depth of field, that change comes at a cost. When the <em>f</em>-number increases and the aperture diameter gets smaller we effectively start approaching a pinhole camera with all its related complications.</p>

<p>In the final part of this article we will discuss the entire spectrum of another class of problems that we&rsquo;ve been conveniently avoiding all this time.</p>

<h1 id="aberrations">Aberrations<a href="https://ciechanow.ski/cameras-and-lenses/#aberrations" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>In our examples so far we&rsquo;ve been using a perfect idealized lens that did exactly what we want and in all the demonstrations I&rsquo;ve relied on a certain simplification known as the <a href="https://en.wikipedia.org/wiki/Paraxial_approximation">paraxial approximation</a>. However, the physical world is a bit more complicated.</p>

<p>The most common types of lenses are <em>spherical</em> lenses – their curved surfaces are sections of spheres of different radii. These types of lenses are easier to manufacture, however, they actually don&rsquo;t perfectly converge the rays of incoming light. In the demonstration below you can observe how fuzzy the focus point is for various lens radii:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_spherical"></div>

<div id="lens_spherical_sl0"></div>

<p>This imperfection is known as <a href="https://en.wikipedia.org/wiki/Spherical_aberration"><em>spherical aberration</em></a>. This specific flaw can be corrected with <a href="https://en.wikipedia.org/wiki/Aspheric_lens"><em>aspheric lenses</em></a>, but unfortunately there are other types of problems that may not be easily solved by a single lens. In general, for monochromatic light there are five primary types of aberrations: <a href="https://en.wikipedia.org/wiki/Spherical_aberration">spherical aberration</a>, <a href="https://en.wikipedia.org/wiki/Coma_(optics)">coma</a>, <a href="https://en.wikipedia.org/wiki/Astigmatism_(optical_systems)">astigmatism</a>, <a href="https://en.wikipedia.org/wiki/Petzval_field_curvature">field curvature</a>, and <a href="https://en.wikipedia.org/wiki/Distortion_(optics)">distortion</a>.</p>

<p>We&rsquo;re still not out of the woods even if we manage to minimize these problems. In normal environments light is very <em>non</em>-monochromatic, and nature places another hurdle in the way of optical system design. Let&rsquo;s quickly go back to the dark environment, as we&rsquo;ll be discussing a single beam of white light.</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>

<div class="dark_light_bg"><div class="bg_content">

<p><br></p>

<p>Observe what happens to that beam when it hits a piece of glass. You can make the sides non-parallel by using the slider:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_prism"></div>
<div id="lens_prism_sl0"></div>

<p>What we perceive as white light is a combination of lights of different wavelengths. In fact, the index of refraction of materials <em>depends</em> on the wavelength of the light. This phenomenon, called <a href="https://en.wikipedia.org/wiki/Dispersion_(optics)"><em>dispersion</em></a>, splits what seems to be a uniform beam of white light into a fan of color bands. The very same mechanism that we see here is also responsible for rainbows.</p>
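
<p>For visible light this wavelength dependence is often modeled with simple empirical fits. A sketch using <a href="https://en.wikipedia.org/wiki/Cauchy%27s_equation">Cauchy&rsquo;s equation</a>, with coefficients that roughly match a common borosilicate glass – treat them as illustrative:</p>

<pre><code>def refractive_index(wavelength_um, A=1.5046, B=0.00420):
    # Cauchy's empirical equation: n = A + B / lambda^2.
    return A + B / wavelength_um ** 2

print(refractive_index(0.45))  # blue:  ~1.525 – bends the most
print(refractive_index(0.55))  # green: ~1.518
print(refractive_index(0.65))  # red:   ~1.514 – bends the least
</code></pre>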

<p>In a lens this causes different wavelengths of light to focus at different offsets – the effect known as <a href="https://en.wikipedia.org/wiki/Chromatic_aberration"><em>chromatic aberration</em></a>. We can easily visualize the <em>axial</em> chromatic aberration even on a lens with spherical aberration fixed. I&rsquo;ll only use red, green, and blue dispersed rays to make things less crowded, but remember that other colors of the spectrum are present in between. Using the slider you can control the amount of dispersion the lens material introduces:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_chromatic"></div>
<div id="lens_chromatic_sl0"></div>

<p>Chromatic aberration may be corrected with an <a href="https://en.wikipedia.org/wiki/Achromatic_lens">achromatic lens</a>, usually in the form of a <a href="https://en.wikipedia.org/wiki/Doublet_(lens)">doublet</a> with two different types of glass fused together.</p>

<p><br></p>


</div>
</div>

<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>

<p>To minimize the impact of the aberrations, camera lenses use more than one optical element on their pathways. In this article I&rsquo;ve only shown you simple lens systems, but a high-end camera lens may consist of <a href="https://en.wikipedia.org/wiki/File:Objective_Zeiss_Cut.jpg">a lot of elements</a> that were carefully designed to balance the optical performance, weight, and cost.</p>

<p>While we, in our world of computer simulations on this website, can maintain the illusion of simple and perfect systems devoid of aberrations, <a href="https://en.wikipedia.org/wiki/Vignetting">vignetting</a>, and <a href="https://en.wikipedia.org/wiki/Lens_flare">lens flares</a>, real cameras and lenses have to deal with all these problems to make the final pictures look good.</p>

<h1 id="further-watching-and-reading">Further Watching and Reading<a href="https://ciechanow.ski/cameras-and-lenses/#further-watching-and-reading" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Over on YouTube <a href="https://www.youtube.com/channel/UCSFAYalJ2Q7Tm_WmLgetmeg">Filmmaker IQ channel</a> has a lot of great content related to lenses and movie making. Two videos especially fitting here are <a href="https://www.youtube.com/watch?v=1YIvvXxsR5Y">The History and Science of Lenses</a> and <a href="https://www.youtube.com/watch?v=lte9pa3RtUk">Focusing on Depth of Field and Lens Equivalents</a>.</p>

<p><a href="https://www.youtube.com/watch?v=q1n2DR6H7mk">What Makes Cinema Lenses So Special!?</a> on <a href="https://www.youtube.com/channel/UCNJe8uQhM2G4jJFRWiM89Wg">Potato Jet channel</a> is a great interview with Art Adams from <a href="https://www.arri.com/en/">ARRI</a>. The video goes over many interesting details of high-end cinema lens design, for example, how the lenses <a href="https://youtu.be/q1n2DR6H7mk?t=370">compensate for focus breathing</a>, or how much attention is paid to the <a href="https://youtu.be/q1n2DR6H7mk?t=899">quality of bokeh</a>.</p>

<p>For a deeper dive on bokeh itself, Jakub Trávník&rsquo;s <a href="https://jtra.cz/stuff/essays/bokeh/index.html">On Bokeh</a> is a great article on the subject. The author explains how aberrations may cause bokeh of non-uniform intensity and shows many photographs from real cameras and lenses.</p>

<p>In this article I&rsquo;ve mostly been using <a href="https://en.wikipedia.org/wiki/Geometrical_optics">geometrical optics</a> with some soft touches of electromagnetism. For a more modern look at the nature of light and its interaction with matter I recommend Richard Feynman&rsquo;s <a href="https://en.wikipedia.org/wiki/QED:_The_Strange_Theory_of_Light_and_Matter">QED: The Strange Theory of Light and Matter</a>. The book is written in a very approachable style suited for a general audience, but it still lets Feynman&rsquo;s wit and brilliance shine right through.</p>

<h1 id="final-words">Final Words<a href="https://ciechanow.ski/cameras-and-lenses/#final-words" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>We’ve barely scratched the surface of optics and camera lens design, but even the most complex systems end up serving the same purpose: to tell light where to go. In some sense optical engineering is all about taming the nature of light.</p>

<p>The simple act of pressing the shutter button in a camera app on a smartphone or on the body of a high-end DSLR is effortless, but it’s at this moment when, through carefully guided rays hitting an array of photodetectors, we immortalize reality by painting with light.</p>

</div>
</div></div></div></div>
</article>


<hr>

<footer>
<p>
<a href="/david/" title="Aller à l’accueil">🏠</a> •
<a href="/david/log/" title="Accès au flux RSS">🤖</a> •
<a href="http://larlet.com" title="Go to my English profile" data-instant>🇨🇦</a> •
<a href="mailto:david%40larlet.fr" title="Envoyer un courriel">📮</a> •
<abbr title="Hébergeur : Alwaysdata, 62 rue Tiquetonne 75002 Paris, +33184162340">🧚</abbr>
</p>
<template id="theme-selector">
<form>
<fieldset>
<legend>Thème</legend>
<label>
<input type="radio" value="auto" name="chosen-color-scheme" checked> Auto
</label>
<label>
<input type="radio" value="dark" name="chosen-color-scheme"> Foncé
</label>
<label>
<input type="radio" value="light" name="chosen-color-scheme"> Clair
</label>
</fieldset>
</form>
</template>
</footer>
<script>
function loadThemeForm(templateName) {
const themeSelectorTemplate = document.querySelector(templateName)
const form = themeSelectorTemplate.content.firstElementChild
themeSelectorTemplate.replaceWith(form)

form.addEventListener('change', (e) => {
const chosenColorScheme = e.target.value
localStorage.setItem('theme', chosenColorScheme)
toggleTheme(chosenColorScheme)
})

const selectedTheme = localStorage.getItem('theme')
if (selectedTheme && selectedTheme !== 'undefined') {
form.querySelector(`[value="${selectedTheme}"]`).checked = true
}
}

const prefersColorSchemeDark = '(prefers-color-scheme: dark)'
window.addEventListener('load', () => {
let hasDarkRules = false
for (const styleSheet of Array.from(document.styleSheets)) {
let mediaRules = []
for (const cssRule of styleSheet.cssRules) {
if (cssRule.type !== CSSRule.MEDIA_RULE) {
continue
}
// WARNING: Safari does not support `conditionText`.
if (cssRule.conditionText) {
if (cssRule.conditionText !== prefersColorSchemeDark) {
continue
}
} else {
if (cssRule.cssText.startsWith(prefersColorSchemeDark)) {
continue
}
}
mediaRules = mediaRules.concat(Array.from(cssRule.cssRules))
}

// WARNING: do not try to insert a Rule to a styleSheet you are
// currently iterating on, otherwise the browser will be stuck
// in an infinite loop…
for (const mediaRule of mediaRules) {
styleSheet.insertRule(mediaRule.cssText)
hasDarkRules = true
}
}
if (hasDarkRules) {
loadThemeForm('#theme-selector')
}
})
</script>
</body>
</html>

+ 571
- 0
cache/2021/ef2067bf42482ed7c48e1d166cde117a/index.md View File

@@ -0,0 +1,571 @@
title: Cameras and Lenses
url: https://ciechanow.ski/cameras-and-lenses/
hash_url: ef2067bf42482ed7c48e1d166cde117a

<p>Pictures have always been a meaningful part of the human experience. From the first cave drawings, to sketches and paintings, to modern photography, we&rsquo;ve mastered the art of recording what we see.</p>

<p>Cameras and the lenses inside them may seem a little mystifying. In this blog post I&rsquo;d like to explain not only how they work, but also how adjusting a few tunable parameters can produce fairly different results:</p>

<div class="drawer_container double_drawer" id="lens_hero"></div>
<div class="lens_yellow" id="lens_hero_sl2"></div>
<div class="lens_blue" id="lens_hero_sl1"></div>
<div class="lens_black" id="lens_hero_sl0"></div>

<p>Over the course of this article we&rsquo;ll build a simple camera from first principles. Our first steps will be very modest – we&rsquo;ll simply try to take any picture. To do that we need to have a sensor capable of detecting and measuring light that shines onto it.</p>

<h1 id="recording-light">Recording Light<a href="https://ciechanow.ski/cameras-and-lenses/#recording-light" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Before the dawn of the digital era, photographs were taken on a piece of film covered in crystals of <a href="https://en.wikipedia.org/wiki/Silver_halide">silver halide</a>. Those compounds are light-sensitive and when exposed to light they form a speck of metallic silver that can later be developed with further chemical processes.</p>

<p>For better or for worse, I&rsquo;m not going to discuss analog devices – these days most cameras are digital. Before we continue the discussion relating to light we&rsquo;ll use the classic trick of turning the illumination off. Don&rsquo;t worry though, we&rsquo;re not going to stay in darkness for too long.</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>
<div class="dark_light_bg"><div class="bg_content">


<p><br></p>

<p>The <a href="https://en.wikipedia.org/wiki/Image_sensor">image sensor</a> of a digital camera consists of a grid of photodetectors. A&nbsp;photodetector converts photons into electric current that can be measured – the more photons hitting the detector the higher the signal.</p>

<p>In the demonstration below you can observe how photons fall onto the arrangement of detectors represented by small squares. After some processing, the value read by each detector is converted to the brightness of the resulting image pixels which you can see on the right side. I&rsquo;m also symbolically showing which <em>photosite</em> was hit with a short highlight. The slider below controls the flow of time:</p>

<div class="drawer_container double_drawer" id="lens_detector"></div>
<div class="long_slider" id="lens_detector_sl0"></div>

<p>The longer the time of collection of photons the more of them are hitting the detectors and the brighter the resulting pixels in the image. When we don&rsquo;t gather enough photons the image is <a href="#" class="link_button" onclick="lens_under_exp();return false;">underexposed</a>, but if we allow the photon collection to run for too long the image will be <a href="#" class="link_button" onclick="lens_over_exp();return false;">overexposed</a>.</p>

<p>While the photons have the &ldquo;color&rdquo; of their <a href="https://ciechanow.ski/color-spaces/#and-there-was-light">wavelength</a>, the photodetectors don&rsquo;t see that hue – they only measure the total intensity which results in a black and white image. To record the color information we need to separate the incoming photons into distinct groups. We can put tiny color filters on top of the detectors so that they will only accept, more or less, red, green, or blue light:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_rgb_filter"></div>
<div id="lens_rgb_filter_sl0"></div>

<p>This <a href="https://en.wikipedia.org/wiki/Color_filter_array">color filter array</a> can be arranged in many different formations. One of the simplest is a <a href="https://en.wikipedia.org/wiki/Bayer_filter">Bayer filter</a> which uses one red, one blue, and <em>two</em> green filters arranged in a 2x2 grid:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_bayer"></div>

<p>A Bayer filter uses two green filters because light in the green part of the spectrum heavily <a href="https://en.wikipedia.org/wiki/Luminosity_function">correlates</a> with perceived brightness. If we now repeat this pattern across the entire sensor we&rsquo;re able to collect color information. For the next demo we will also double the resolution to an astonishing 1 kilopixel arranged in a 32x32 grid:</p>

<div class="drawer_container double_drawer" id="lens_detector_rgbg"></div>
<div class="long_slider" id="lens_detector_rgbg_sl0"></div>

<p>Note that the individual sensors themselves still only see the intensity, and not the color, but knowing the arrangement of the filters we can recreate the colored intensity of each sensor, as shown on the right side of the simulation.</p>

<p>The final step of obtaining a normal image is called <a href="https://en.wikipedia.org/wiki/Demosaicing"><em>demosaicing</em></a>. During demosaicing we want to reconstruct the full color information by filling in the gaps in the captured RGB values. One of the simplest ways to do it is to just linearly interpolate the values between the existing neighbors. I&rsquo;m not going to focus on the details of the many other available demosaicing algorithms – I&rsquo;ll just present the resulting image created by the process:</p>

<div class="drawer_container double_drawer" id="lens_detector_rgb"></div>
<div class="long_slider" id="lens_detector_rgb_sl0"></div>

<p>Notice that yet again the overall brightness of the image depends on the length of time for which we let the photons through. That duration is known as <a href="https://en.wikipedia.org/wiki/Shutter_speed"><em>shutter speed</em></a> or exposure time. For most of this presentation I will ignore the time component and we will simply assume that the shutter speed has been set <em>just right</em> so that the image is well exposed.</p>

<p>The examples we&rsquo;ve discussed so far were very convenient – we were surrounded by complete darkness with the photons neatly hitting the pixels to form a coherent image. Unfortunately, we can&rsquo;t count on the photon paths to be as favorable in real environments, so let&rsquo;s see how the sensor performs in more realistic scenarios.</p>

<p><br></p>


</div>
</div>
<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>

<p>Over the course of this article we will be taking pictures of this simple scene. The almost white background of this website is also a part of the scenery – it represents a bright overcast sky. You can drag around the demo to see it from other directions:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_scene"></div>

<p>Let&rsquo;s try to see what sort of picture would be taken by a sensor that is placed near the objects without any enclosure. I&rsquo;ll also significantly increase the sensor&rsquo;s resolution to make the pixels of the final image align with the pixels of your display. In the demonstration below the left side represents a view of the scene with the small greenish sensor present, while the right one shows the taken picture:</p>

<div class="drawer_container double_drawer" id="lens_bare_film"></div>

<p>This is not a mistake. As you can see, the obtained image doesn&rsquo;t really resemble anything. To understand why this happens let&rsquo;s first look at the light radiated from the scene.</p>

<p>If you had a chance to explore how <a href="https://ciechanow.ski/lights-and-shadows/#reflections">surfaces reflect light</a>, you may recall that most matte surfaces scatter the incoming light in every direction. While I&rsquo;m only showing a few examples, <em>every</em> point on every surface of this scene reflects the photons it receives from the whiteish background light source all around itself:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_scene_rays"></div>

<p>The red sphere ends up radiating red light, the green sphere radiates green light, and the gray checkerboard floor reflects white light of lesser intensity. Most importantly, however, the light emitted from the background is <em>also</em> visible to the sensor.</p>

<p>The problem with our current approach to taking pictures is that every pixel of the sensor is exposed to the <em>entire</em> environment. Light radiated from every point of the scene and the white background hits every point of the sensor. In the simulation below you can witness how light from different directions hits one point on the surface of the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_sensor_rays"></div>

<p>Clearly, to obtain a discernible image we have to limit the range of directions that affect a given pixel on the sensor. With that in mind, let&rsquo;s put the sensor in a box that has a small hole in it. The first slider controls the <span class="lens_black">diameter</span> of the hole, while the second one controls the <span class="lens_yellow">distance</span> between the opening and the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box"></div>
<div class="lens_black" id="lens_box_sl0"></div>
<div class="lens_yellow" id="lens_box_sl1"></div>

<p>While not shown here, the inner sides of the walls are all black so that no light is reflected inside the box. I also put the sensor on the back wall so that the light from the hole shines onto it. We&rsquo;ve just built a <a href="https://en.wikipedia.org/wiki/Pinhole_camera"><em>pinhole camera</em></a> – let&rsquo;s see how it performs. Observe what happens to the taken image as we tweak the <span class="lens_black">diameter</span> of the hole with the first slider, or change the <span class="lens_yellow">distance</span> between the opening and the sensor with the second one:</p>

<div class="drawer_container double_drawer" id="lens_film"></div>
<div class="lens_black" id="lens_film_sl0"></div>
<div class="lens_yellow" id="lens_film_sl1"></div>

<p>There are so many interesting things happening here! The most pronounced effect is that the image is inverted. To understand why this happens let&rsquo;s look at the schematic view of the scene that shows the light rays radiated from the objects, going through the hole, and hitting the sensor:</p>

<div class="drawer_container double_drawer move_cursor" id="lens_film_invert"></div>

<p>As you can see the rays cross over in the hole and the formed image is a horizontal and a vertical reflection of the actual scene. Those two flips end up forming a 180&deg; rotation. Since rotated images aren&rsquo;t convenient to look at, all cameras automatically rotate the image for presentation and for the rest of this article I will do so as well.</p>

<p>When we change the <span class="lens_yellow">distance</span> between the hole and the sensor the viewing angle changes drastically. If we trace the rays falling on the corner pixels of the sensor we can see that they define the extent of the visible section of the scene:</p>

<div class="drawer_container double_drawer" id="lens_frustum"></div>
<div class="lens_yellow" id="lens_frustum_sl0"></div>

<p>Rays of light coming from outside of that shape still go through the pinhole, but they land outside of the sensor and aren&rsquo;t recorded. As the hole moves further away from the sensor, the angle, and thus the <a href="https://en.wikipedia.org/wiki/Field_of_view">field of view</a> visible to the sensor, gets smaller. We can see this in a top-down view of the camera:</p>

<div class="drawer_container double_drawer" id="lens_field"></div>
<div class="lens_yellow" id="lens_field_sl0"></div>

<p>Coincidentally, the top-down diagram also helps us explain two other effects. Firstly, in the photograph the red sphere looks almost as big as the green one, even though the scene view shows the latter is much larger. However, both spheres end up occupying roughly <a href="#" class="link_button" onclick="lens_field_0();return false;">the same span</a> on the sensor, so their sizes in the picture are similar. It&rsquo;s also worth noting that the spheres seem to grow when the field of view gets narrower because their light covers a larger part of the sensor.</p>

<p>Secondly, notice that different pixels of the sensor have different distances and relative orientations to the hole. The pixels right in the center of the sensor see the pinhole straight on, but pixels positioned at an angle to the main axis see a distorted pinhole that is further away. The ellipse in the bottom right corner of the demonstration below shows how a pixel positioned at the blue point sees the pinhole:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_hole_pixel_view"></div>
<div class="lens_blue" id="lens_hole_pixel_view_sl0"></div>

<p>This change in the visible area of the hole causes the darkening we see in the corners of the photograph. The value of the <em>cosine</em> of the angle I&rsquo;ve marked with a <span class="lens_yellow">yellow color</span> is quite important as it contributes to the reduction of visible light in four different ways:</p>

<ul>
<li>Two cosine factors from the increased distance to the hole – this is essentially the <a href="https://ciechanow.ski/lights-and-shadows/#inverse_square">inverse square law</a></li>
<li>A cosine factor from the side squeeze of the circular hole seen at an angle</li>
<li>A cosine factor from the relative <a href="https://ciechanow.ski/lights-and-shadows/#cosine_factor">tilt of the receptor</a></li>
</ul>

<p>These four factors conspire together to reduce the illumination by a factor of <strong>cos<sup>4</sup>(&alpha;)</strong> in what is known as <em>cosine-fourth-power law</em>, also described as <a href="https://en.wikipedia.org/wiki/Vignetting#Natural_vignetting">natural vignetting</a>.</p>

<p>Since we know the relative geometry of the camera and the opening we can correct for this effect by simply dividing by the falloff factor, and from this point on I will make sure that the images don&rsquo;t have darkened corners.</p>
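<p>As a rough Python sketch of that correction – assuming a simple pinhole geometry in which each pixel&rsquo;s offset from the main axis is known – we can divide every measured value by its <strong>cos<sup>4</sup>(&alpha;)</strong> falloff:</p>

<pre><code>from math import atan2, cos, hypot

def falloff(x, y, hole_distance):
    """Cosine-fourth-power falloff for a pixel at (x, y) on the sensor,
    relative to the pixel that faces the pinhole straight on."""
    alpha = atan2(hypot(x, y), hole_distance)   # angle off the main axis
    return cos(alpha) ** 4

def corrected(measured, x, y, hole_distance):
    """Undo natural vignetting by dividing by the known falloff."""
    return measured / falloff(x, y, hole_distance)

print(falloff(0, 0, 20))     # 1.0 at the center of the sensor
print(falloff(10, 10, 20))   # ~0.44 – noticeably darker towards a corner
</code></pre>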

<p>The final effect we can observe is that when the hole gets smaller the image gets sharper. Let&rsquo;s see how the light radiated from two points of the scene ends up going through the camera depending on the <span class="lens_black">diameter</span> of the pinhole:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_hole_solid_angle"></div>
<div class="lens_black" id="lens_hole_solid_angle_sl0"></div>

<p>We can already see that a larger hole ends up creating a bigger spread on the sensor. Let&rsquo;s see this situation up close on a simple grid of detecting cells. Notice what happens to the size of the final circle hitting the sensor as the <span class="lens_black">diameter</span> of the hole changes:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_hole_sharpness"></div>
<div class="lens_black" id="lens_hole_sharpness_sl0"></div>

<p>When the hole is <a href="#" class="link_button" onclick="lens_sharp_0();return false;">small enough</a> rays from the source only manage to hit one pixel on the sensor. However, at <a href="#" class="link_button" onclick="lens_sharp_1();return false;">larger radii</a> the light spreads onto other pixels and a tiny point in the scene is no longer represented by a single pixel, so the image stops being sharp.</p>

<p>It&rsquo;s worth pointing out that sharpness is ultimately arbitrary – it depends on the size at which the final image is seen, viewing conditions, and visual acuity of the observer. The same photograph that looks sharp on a postage stamp may in fact be very blurry when seen on a big display.</p>

<p>By reducing the size of the cone of light we can make sure that the source light affects a limited number of pixels. Here, however, lies the problem. The sensor we&rsquo;ve been using so far has been an idealized detector capable of flawless adjustment of its sensitivity to the lighting conditions. If we instead fixed the sensor&rsquo;s sensitivity, the captured image would look more like this:</p>

<div class="drawer_container double_drawer" id="lens_film_exposure"></div>
<div class="lens_black" id="lens_film_exposure_sl0"></div>
<div class="lens_yellow" id="lens_film_exposure_sl1"></div>

<p>As the relative size of the hole visible to the pixels of the sensor gets smaller, be it due to reduced <span class="lens_black">diameter</span> or increased <span class="lens_yellow">distance</span>, fewer photons hit the surface and the image gets dimmer.</p>

<p>To increase the number of photons we capture we could extend the duration of collection, but increasing the exposure time comes with its own problems – if the photographed object moves or the camera isn&rsquo;t held steady we risk introducing some <a href="https://en.wikipedia.org/wiki/Motion_blur">motion blur</a>.</p>

<p>Alternatively, we could increase the <a href="https://en.wikipedia.org/wiki/Film_speed">sensitivity</a> of the sensor which is described using the ISO rating. However, boosting the ISO may introduce a higher level of <a href="https://en.wikipedia.org/wiki/Image_noise">noise</a>. Even with these problems solved, an image obtained through smaller and smaller holes would eventually start getting blurry again due to <a href="https://en.wikipedia.org/wiki/Diffraction">diffraction</a> of light.</p>

<p>If you recall how diffuse surfaces reflect light you may also realize how incredibly inefficient a pinhole camera is. A single point on the surface of an object radiates light into its surrounding hemisphere; the pinhole, however, captures only a tiny portion of that light.</p>

<p>More importantly, however, a pinhole camera gives us minimal artistic control over <em>which</em> parts of the picture are blurry. In the demonstration below you can witness how changing which object is in focus heavily affects the primary target of attention in the photograph:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo"></div>
<div id="lens_focus_demo_sl0"></div>

<p>Let&rsquo;s try to build an optical device that would solve both of these problems: we want to find a way to harness a bigger part of the energy radiated by the objects and also control what is blurry and <em>how</em> blurry it is. For the objects in the scene that are supposed to be sharp we want to collect a big chunk of their light and make it converge to the smallest possible point. In essence, we&rsquo;re looking for an instrument that will do something like this:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_device"></div>

<p>We could then put the sensor at the focus point and obtain a sharp image. Naturally, the contraption we&rsquo;ll try to create has to be transparent so that the light can pass through it and get to the sensor, so let&rsquo;s begin the investigation by looking at a piece of glass.</p>

<h1 id="glass">Glass<a href="https://ciechanow.ski/cameras-and-lenses/#glass" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>In the demonstration below I put a red stick behind a pane of glass. You can adjust the thickness of this pane with the <span class="lens_gray">gray slider</span> below:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_glass"></div>
<div id="lens_glass_sl0"></div>

<p>When you look at the stick through a thick pane of glass <a href="#" class="link_button" onclick="lens_glass_0();return false;">straight on</a>, everything looks normal. However, as your viewing direction <a href="#" class="link_button" onclick="lens_glass_1();return false;">changes</a> the stick seen through the glass seems out of place. The thicker the glass and the steeper the viewing angle the bigger the offset.</p>

<p>Let&rsquo;s focus on one point on the surface of the stick and see how the rays of light radiated from its surface propagate through the subsection of the glass. The <span class="lens_red">red slider</span> controls the position of the source and the <span class="lens_gray">gray slider</span> controls the thickness. You can drag the demo around to see it from different viewpoints:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_glass_rays"></div>
<div class="lens_red" id="lens_glass_rays_sl0"></div>
<div id="lens_glass_rays_sl1"></div>

<p>For some reason the rays passing through glass at an angle are <a href="#" class="link_button" onclick="lens_glass_rays_0();return false;">deflected off their paths</a>. The change of direction happens whenever the ray enters or leaves the glass.</p>

<p>To understand <em>why</em> the light changes direction we have to peek under the covers of <a href="https://en.wikipedia.org/wiki/Classical_electromagnetism">classical electromagnetism</a> and talk a bit more about waves.</p>

<h1 id="waves">Waves<a href="https://ciechanow.ski/cameras-and-lenses/#waves" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>It&rsquo;s impossible to talk about wave propagation without involving the time component, so the simulations in this section are animated – you can play and pause them by <span class="click_word">clicking</span><span class="tap_word">tapping</span> on the button in their bottom left corner.</p>

<p>By default all animations are <span id="global_animate_on">enabled, but if you find them distracting, or if you want to save power, you can <a href="#" class="link_button" onclick="global_animate(false);return false;">globally pause</a> all the following demonstrations.</span><span id="global_animate_off" class="hidden">disabled, but if you&rsquo;d prefer to have things moving as you read you can <a href="#" class="link_button" onclick="global_animate(true);return false;">globally unpause</a> them and see all the waves oscillating.</span></p>

<p>Let&rsquo;s begin by introducing the simplest sinusoidal wave:</p>

<div class="drawer_container double_drawer" id="lens_sine"></div>
<div class="lens_black" id="lens_sine_sl0"></div>
<div id="lens_sine_sl1"></div>

<p>A wave like this can be characterized by two components. <a href="https://en.wikipedia.org/wiki/Wavelength">Wavelength</a> <strong>&lambda;</strong> is the distance over which the shape of the wave repeats. Period <strong>T</strong> defines how much time a full cycle takes.</p>

<p><a href="https://en.wikipedia.org/wiki/Frequency">Frequency</a> <strong>f</strong>, is just a reciprocal of period and it&rsquo;s more commonly used – it defines how many waves per second have passed over some fixed point. Wavelength and frequency define <a href="https://en.wikipedia.org/wiki/Phase_velocity">phase velocity</a> <strong>v<sub>p</sub></strong> which describes how quickly a point on a wave, e.g. a peak, moves:</p>

<div class="equation">
<span class="equation_frac">v<sub>p</sup></span> = <span class="equation_frac">&lambda;</span> &middot; <span class="equation_frac">f</span>
</div>

<p>The sinusoidal wave is the building block of a polarized electromagnetic plane wave. As the name implies electromagnetic radiation is an interplay of oscillations of electric field <strong>E</strong> and magnetic field <strong>B</strong>:</p>

<div class="drawer_container double_drawer move_cursor" id="lens_em"></div>

<p>In an electromagnetic wave the magnetic field is tied to the electric field so I&rsquo;m going to hide the former and just visualize the latter. Observe what happens to the electric component of the field as it passes through a block of glass. I need to note that the dimensions of the wavelengths are <em>not</em> to scale:</p>

<div class="drawer_container move_cursor" id="lens_wave_glass"></div>

<p>Notice that the wave remains continuous at the boundary and inside the glass the frequency of the passing wave remains constant. However, the wavelength and thus the phase velocity are reduced – you can see it clearly <a href="#" class="link_button" onclick="lens_wave_glass_0();return false;">from the side</a>.</p>

<p>The microscopic reason for the phase velocity change is <a href="https://en.wikipedia.org/wiki/Ewald–Oseen_extinction_theorem">quite complicated</a>, but it can be quantified using the <a href="https://en.wikipedia.org/wiki/Refractive_index"><em>index of refraction</em></a> <strong>n</strong>, which is the ratio of the speed of light <strong>c</strong> to the phase velocity <strong>v<sub>p</sub></strong> of a lightwave in that medium:</p>

<div class="equation">
<span class="equation_frac">n</span> = <span class="equation_frac"><span>c</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">v<sub>p</sub></span>
</span>
</div>

<p>The higher the index of refraction the <em>slower</em> light propagates through the medium. In the table below I&rsquo;ve presented a few different indices of refraction for some materials:</p>

<table>
<tr><td class="lens_list_item_material">vacuum</td><td class="lens_list_item_index">1.00</td></tr>
<tr><td class="lens_list_item_material">air</td><td class="lens_list_item_index">1.0003</td></tr>
<tr><td class="lens_list_item_material">water</td><td class="lens_list_item_index">1.33</td></tr>
<tr><td class="lens_list_item_material">glass</td><td class="lens_list_item_index">1.53</td></tr>
<tr><td class="lens_list_item_material">diamond</td><td class="lens_list_item_index">2.43</td></tr>
</table>
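<p>Since the frequency stays fixed, the index of refraction directly gives both the phase velocity and the shortened wavelength inside a medium. A small Python sketch using the values from the table:</p>

<pre><code>C = 299_792_458  # speed of light in vacuum, m/s

indices = {'vacuum': 1.00, 'air': 1.0003, 'water': 1.33,
           'glass': 1.53, 'diamond': 2.43}

for name, n in indices.items():
    v_p = C / n            # phase velocity in the medium
    wavelength = 550 / n   # 550 nm green light gets compressed
    print(f"{name:8} v_p = {v_p:.3e} m/s  wavelength = {wavelength:.0f} nm")
</code></pre>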

<p>Light traveling through air barely slows down, but in a diamond it&rsquo;s over twice as slow. Now that we understand how <span class="lens_black">index of refraction</span> affects the wavelength in the glass, let&rsquo;s see what happens when we change the <span class="lens_gray">direction</span> of the incoming wave:</p>

<div class="drawer_container move_cursor" id="lens_wave_glass2"></div>
<div class="lens_black" id="lens_wave_glass2_sl0"></div>
<div id="lens_wave_glass2_sl1"></div>

<p>The wave in the glass has a shorter wavelength, but it still has to match the positions of its peaks and valleys across the boundary. As such, the direction of propagation <a href="#" class="link_button" onclick="lens_wave_glass_2();return false;">must change</a> to ensure that continuity.</p>

<p>I need to note that the previous two demonstrations presented a two-dimensional wave since that allowed me to show the sinusoidal component oscillating into the third dimension. In the real world lightwaves are three-dimensional and I can&rsquo;t really visualize the sinusoidal component without using the fourth dimension which has <a href="https://ciechanow.ski/tesseract/">its own set of complications</a>.</p>

<p>The alternative way of presenting waves is to use <a href="https://en.wikipedia.org/wiki/Wavefront"><em>wavefronts</em></a>. Wavefronts connect the points of the same phase of the wave, e.g. all the peaks or valleys. In two dimensions wavefronts are represented by lines:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_2d"></div>

<p>In three dimensions the wavefronts are represented by <em>surfaces</em>. In the demonstration below a single source emits a spherical wave, points of the same phase in the wave are represented by the moving shells:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_3d"></div>

<p>By drawing lines that are perpendicular to the surface of the wavefront we create the familiar rays. In this interpretation rays simply show the local direction of wave propagation which can be seen in this example of a section of a spherical 3D wave:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_wave_rays"></div>

<p>I will continue to use the ray analogy to quantify the change in direction of light passing through materials. The relation between the angle of incidence <strong>&theta;<sub>1</sub></strong> and angle of refraction <strong>&theta;<sub>2</sub></strong> can be formalized with the equation known as <a href="https://en.wikipedia.org/wiki/Snell%27s_law">Snell&rsquo;s law</a>:</p>

<div class="equation">
<span class="lens_blue">n<sub>1</sub></span> &middot; sin(&theta;<sub>1</sub>) = <span class="lens_yellow">n<sub>2</sub></span> &middot; sin(&theta;<sub>2</sub>)
</div>

<p>It describes how a ray of light changes direction relative to the surface normal on the border between two different media. Let&rsquo;s see it in action:</p>

<div class="drawer_container square_drawer small_drawer" id="lens_snell"></div>
<div class="lens_black" id="lens_snell_sl0"></div>
<div class="lens_blue" id="lens_snell_sl1"></div>
<div class="lens_yellow" id="lens_snell_sl2"></div>

<p>When traveling from a less to a more refractive material the ray bends <a href="#" class="link_button" onclick="lens_snell_0();return false;"><em>towards</em> the normal</a>, but when the ray exits the object with the higher index of refraction it bends <a href="#" class="link_button" onclick="lens_snell_1();return false;"><em>away</em> from the normal</a>.</p>

<p>Notice that in <a href="#" class="link_button" onclick="lens_snell_2();return false;">some configurations</a> the refracted ray completely disappears. This doesn&rsquo;t paint the full picture, however, because so far we&rsquo;ve been completely ignoring reflections.</p>
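<p>Snell&rsquo;s law is straightforward to turn into code. The Python sketch below – a bare-bones helper, not the code behind these demos – also reports when no refracted ray exists, which is exactly that disappearing case:</p>

<pre><code>from math import asin, degrees, radians, sin

def refract(theta1_deg, n1, n2):
    """Snell's law n1*sin(t1) = n2*sin(t2), solved for the refracted
    angle. Returns None when sin(t2) would exceed 1 and no refracted
    ray exists (total internal reflection)."""
    s = n1 * sin(radians(theta1_deg)) / n2
    if s > 1:
        return None
    return degrees(asin(s))

print(refract(30, 1.0003, 1.53))   # air -> glass: ~19.1, towards the normal
print(refract(30, 1.53, 1.0003))   # glass -> air: ~49.9, away from it
print(refract(60, 2.43, 1.0003))   # steep exit from diamond: None
</code></pre>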

<p>All transparent objects reflect some amount of light. You may have noticed that reflection on the surface of a calm lake or even on the other side of the glass demonstration at the beginning of the <a href="#glass">previous section</a>. The intensity of that reflection depends on the index of refraction of the material and the angle of the incident ray. Here&rsquo;s a more realistic demonstration of how light would get refracted <em>and</em> reflected between two media:</p>

<div class="drawer_container square_drawer small_drawer" id="lens_snell2"></div>
<div class="lens_black" id="lens_snell2_sl0"></div>
<div class="lens_blue" id="lens_snell2_sl1"></div>
<div class="lens_yellow" id="lens_snell2_sl2"></div>

<p>The relation between <em>transmittance</em> and <em>reflectance</em> is determined by <a href="https://en.wikipedia.org/wiki/Fresnel_equations">Fresnel equations</a>. Observe that the curious case of missing light that we saw previously <a href="#" class="link_button" onclick="lens_snell_3();return false;">no longer occurs</a> – that light is actually reflected. The transition from partial reflection and refraction to complete reflection is continuous, but near the end it&rsquo;s very rapid and at some point the refraction <a href="#" class="link_button" onclick="lens_snell_4();return false;">completely disappears</a> in the effect known as <a href="https://en.wikipedia.org/wiki/Total_internal_reflection">total internal reflection</a>.</p>
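<p>For unpolarized light the reflected fraction works out to the average of the s- and p-polarized Fresnel reflectances. Here&rsquo;s a Python sketch of those textbook formulas – note the familiar result that glass reflects roughly 4% of light head-on:</p>

<pre><code>from math import asin, cos, radians, sin

def reflectance(theta1_deg, n1, n2):
    """Fraction of unpolarized light reflected at a boundary: the
    average of the s- and p-polarized Fresnel reflectances."""
    t1 = radians(theta1_deg)
    s = n1 * sin(t1) / n2
    if s > 1:
        return 1.0   # total internal reflection
    t2 = asin(s)
    r_s = ((n1 * cos(t1) - n2 * cos(t2)) / (n1 * cos(t1) + n2 * cos(t2))) ** 2
    r_p = ((n1 * cos(t2) - n2 * cos(t1)) / (n1 * cos(t2) + n2 * cos(t1))) ** 2
    return (r_s + r_p) / 2

# Glass reflects ~4% head-on and almost everything near grazing angles:
for angle in (0, 45, 80, 89):
    print(angle, round(reflectance(angle, 1.0, 1.53), 3))
</code></pre>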

<p>The <a href="https://en.wikipedia.org/wiki/Total_internal_reflection#Critical_angle"><em>critical angle</em></a> at which the total internal reflection starts to happen depends on the indices of refraction of the boundary materials. Since that coefficient is low for air, but very high for diamond a <a href="https://en.wikipedia.org/wiki/Brilliant_(diamond_cut)">proper cut</a> of the faces <a href="https://physics.stackexchange.com/questions/43361/why-do-diamonds-shine/43373#43373">makes diamonds</a> very shiny.</p>

<p>While interesting on its own, reflection in glass isn&rsquo;t very relevant to our discussion and for the rest of this article we&rsquo;re not going to pay much attention to it. Instead, we&rsquo;ll simply assume that the materials we&rsquo;re using are covered with high quality <a href="https://en.wikipedia.org/wiki/Anti-reflective_coating">anti-reflective coating</a>.</p>

<h1 id="manipulating-rays">Manipulating Rays<a href="https://ciechanow.ski/cameras-and-lenses/#manipulating-rays" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Let&rsquo;s go back to the example that started the discussion of light and glass. When both sides of a piece of glass are parallel, the ray is shifted, but it still travels in the same direction. Observe what happens to the ray when we change the relative angle of the surfaces of the glass:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_parallel"></div>
<div id="lens_parallel_sl0"></div>

<p>When we make two surfaces of the glass <em>not</em> parallel we gain the ability to change the direction of the rays. Recall that we&rsquo;re trying to make the rays hitting the optical device <em>converge</em> at a certain point. To do that we have to bend the rays in the upper part down and, conversely, bend the rays in the lower part up.</p>

<p>Let&rsquo;s see what happens if we shape the glass to have different angles between its walls at different heights. In the demonstration below you can control into how many distinct segments the piece of glass is shaped:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_subdiv"></div>
<div id="lens_subdiv_seg0"></div>

<p>As the number of segments <a href="#" class="link_button" onclick="lens_subdiv_0();return false;">approaches infinity</a> we end up with a continuous surface without any edges. If we look at the crossover point <a href="#" class="link_button" onclick="lens_subdiv_1();return false;">from the side</a> you may notice that we&rsquo;ve managed to converge the rays across one axis, but the top-down view <a href="#" class="link_button" onclick="lens_subdiv_2();return false;">reveals</a> that we&rsquo;re not done yet. To focus all the rays we need to replicate that smooth shape across <em>all</em> possible directions – we need rotational symmetry:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_rotational"></div>

<p>We&rsquo;ve created a <em>convex</em> <a href="https://en.wikipedia.org/wiki/Thin_lens">thin lens</a>. This lens is idealized – in a later part of the article we&rsquo;ll discuss how real lenses aren&rsquo;t as perfect – but for now it will serve us very well. Let&rsquo;s see what happens to the focus point when we change the position of the <span class="lens_red">red</span> source:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_rotational_focal"></div>
<div class="lens_red" id="lens_rotational_focal_sl0"></div>

<p>When the source is positioned <a href="#" class="link_button" onclick="lens_inf();return false;">very far away</a> the incoming rays become parallel and after passing through the lens they converge at a certain distance away from the center. That distance is known as <a href="https://en.wikipedia.org/wiki/Focal_length"><em>focal length</em></a>.</p>

<p>The previous demonstration also shows two more general distances: <strong>s<sub>o</sub></strong> which is the distance between the <strong>o</strong>bject, or source, and the lens, as well as <strong>s<sub>i</sub></strong> which is the distance between the <strong>i</strong>mage and the lens. These two values and the focal length <strong>f</strong> are related by the <a href="https://en.wikipedia.org/wiki/Thin_lens#Image_formation"><em>thin lens equation</em></a>:</p>

<div class="equation">
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">s<sub>o</sub></span>
</span>
+
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">s<sub>i</sub></span>
</span>
=
<span class="equation_frac"><span>1</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">f</span>
</span>
</div>
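<p>Solved for the image distance, the thin lens equation becomes a one-line helper – a Python sketch with made-up distances in millimeters:</p>

<pre><code>def image_distance(s_o, f):
    """Thin lens equation 1/s_o + 1/s_i = 1/f, solved for s_i.
    The object must sit beyond the focal length for a real image."""
    return 1 / (1 / f - 1 / s_o)

print(image_distance(s_o=1e9, f=50))   # a far object focuses at ~f
print(image_distance(s_o=500, f=50))   # a closer one focuses further back
</code></pre>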

<p>The focal length of a lens depends on both the <span class="lens_black">index of refraction</span> of the material from which the lens is made and its <span class="lens_blue">shape</span>:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_focal_length"></div>
<div class="lens_black" id="lens_focal_length_sl1"></div>
<div class="lens_blue" id="lens_focal_length_sl0"></div>

<p>Now that we understand how a simple convex lens works we&rsquo;re ready to mount it into the hole of our camera. We will still control the <span class="lens_yellow">distance</span> between the sensor and the lens, but instead of controlling the diameter of the lens we&rsquo;ll instead control its <span class="lens_blue">focal length</span>:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box_lens"></div>
<div class="lens_blue" id="lens_box_lens_sl0"></div>
<div class="lens_yellow" id="lens_box_lens_sl1"></div>

<p>When you look at the lens <a href="#" class="link_button" onclick="lens_camera_lens();return false;">from the side</a> you may observe how the <span class="lens_blue">focal length</span> change is tied to the shape of the lens. Let&rsquo;s see how this new camera works in action:</p>

<div class="drawer_container double_drawer" id="lens_basic"></div>
<div class="lens_blue" id="lens_basic_sl0"></div>
<div class="lens_yellow" id="lens_basic_sl1"></div>

<p>Once again, a lot of things are going on here! Firstly, let&rsquo;s try to understand how the image is formed in the first place. The demonstration below shows paths of rays from two separate points in the scene. After going through the lens they end up hitting the sensor:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_lens_solid_angle"></div>
<div class="lens_blue" id="lens_lens_solid_angle_sl0"></div>
<div class="lens_yellow" id="lens_lens_solid_angle_sl1"></div>

<p>Naturally, this process happens for <em>every</em> single point in the scene which creates the final image. Similarly to a pinhole, a convex lens creates an inverted picture – I&rsquo;m still correcting for this by showing you a rotated photograph.</p>

<p>Secondly, notice that the distance between the lens and the sensor still controls the field of view. As a reminder, the focal length of a lens simply defines the distance from the lens at which the rays coming from infinity converge. To achieve a sharp image, the sensor has to be placed at the location where the rays focus and <em>that&rsquo;s</em> what&rsquo;s causing the field of view to change.</p>

<p>In the demonstration below I&rsquo;ve visualized how rays from a very far object focus through a lens of adjustable <span class="lens_blue">focal length</span>. Notice that to obtain a sharp image we must change the <span class="lens_yellow">distance</span> between the lens and the sensor, which in turn causes the field of view to change:</p>

<div class="drawer_container double_drawer" id="lens_field2"></div>
<div class="lens_blue" id="lens_field2_sl1"></div>
<div class="lens_yellow" id="lens_field2_sl0"></div>

<p>If we want to change the object on which a camera with a lens of a fixed focal length is focused, we have to move the image plane closer or further away from the lens which affects the angle of view. This effect is called <a href="https://en.wikipedia.org/wiki/Breathing_(lens)">focus breathing</a>:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo2"></div>
<div class="lens_yellow" id="lens_focus_demo2_sl0"></div>

<p>A lens with a fixed focal length like the one above is often called a <em>prime</em> lens, while lenses with adjustable focal length are called <em>zoom</em> lenses. While the lenses in our eyes do dynamically adjust their focal lengths by changing their shape, rigid glass can&rsquo;t do that so zoom lenses use a system of multiple glass elements that change their relative position to achieve this effect.</p>

<p>In the simulation above notice the difference in sharpness between the red and green spheres. To understand why this happens let&rsquo;s analyze the rays emitted from two points on the surface of the spheres. In the demonstration below the right side shows the light seen by the sensor <em>just</em> from the two marked points on the spheres:</p>

<div class="drawer_container double_drawer" id="lens_lens_solid_angle2"></div>
<div class="lens_yellow" id="lens_lens_solid_angle2_sl0"></div>

<p>The light from the point in focus converges to a point, while the light from an out-of-focus point spreads onto a circle. For larger objects the multitude of overlapping out-of-focus circles creates a smooth blur called <a href="https://en.wikipedia.org/wiki/Bokeh"><em>bokeh</em></a>. With tiny and bright light sources that circle itself is often visible – you may have seen effects like the one in the demonstration below in some photographs captured in darker environments:</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>
<div class="dark_light_bg"><div class="bg_content">


<p><br></p>

<div class="drawer_container double_drawer" id="lens_bokeh"></div>
<br>
<div class="lens_yellow" id="lens_bokeh_sl0"></div>

<p>
</div>
</div>
<div class="dark_light_bg_grad_bottom"></div>
<br></p>

<p>Notice that the circular shape is visible for lights both in front of and behind the focused distance. As the object is positioned closer or further away from the lens, the image plane &ldquo;slices&rdquo; the cone of light at different locations:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_slice"></div>
<div class="lens_red" id="lens_cone_slice_sl0"></div>

<p>That circular spot is called a <a href="https://en.wikipedia.org/wiki/Circle_of_confusion"><em>circle of confusion</em></a>. While in many circumstances the blurriness of the background or the foreground looks very appealing, it would be very useful to control how much blur there is.</p>
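<p>With the thin lens equation we can sketch the size of that circle: put the sensor where the focused object&rsquo;s light converges, then check where the cone from a point at another distance gets &ldquo;sliced&rdquo;. A Python illustration with made-up distances:</p>

<pre><code>def image_distance(s_o, f):
    """Thin lens equation solved for the image distance."""
    return 1 / (1 / f - 1 / s_o)

def circle_of_confusion(focused_at, point_at, f, aperture):
    """Diameter of the blur spot for a point light at distance point_at
    when the sensor sits at the plane that keeps focused_at sharp."""
    sensor = image_distance(focused_at, f)
    image = image_distance(point_at, f)
    return aperture * abs(image - sensor) / image

# A 50 mm lens with a 25 mm aperture focused at 2 m (units in mm):
print(circle_of_confusion(2000, 2000, 50, 25))   # 0.0 – in focus
print(circle_of_confusion(2000, 1000, 50, 25))   # in front: blurry
print(circle_of_confusion(2000, 4000, 50, 25))   # behind: blurry too
</code></pre>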

<p>Unfortunately, we don&rsquo;t have total freedom here – we still want the primary photographed object to remain in focus so its light has to converge to a point. We just want to change the size of the circle of out-of-focus objects without moving the central point. We can accomplish that by changing the <em>angle</em> of the cone of light:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_angle"></div>
<div id="lens_cone_angle_sl0"></div>

<p>There are two methods we can use to modify that angle. Firstly, we can change the focal length of the lens – you may recall that with longer focal lengths the cone of light also gets longer. However, changing the focal length and keeping the primary object in focus requires moving the image plane which in turn changes how the picture is framed.</p>

<p>The alternative way of reducing the angle of the cone of light is to simply ignore some of the &ldquo;outer&rdquo; rays. We can achieve that by introducing a stop with a hole in the path of light:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_aperture"></div>
<div class="lens_black" id="lens_cone_aperture_sl0"></div>

<p>This hole is called an <a href="https://en.wikipedia.org/wiki/Aperture"><em>aperture</em></a>. In fact, even the hole in which the lens is mounted is an aperture of some sort, but what we&rsquo;re introducing is an <em>adjustable</em> aperture:</p>

<div class="drawer_container square_drawer small_drawer move_cursor" id="lens_box_aperture"></div>
<div class="lens_black" id="lens_box_aperture_sl0"></div>
<div class="lens_yellow" id="lens_box_aperture_sl1"></div>

<p>Let&rsquo;s try to see how an aperture affects the photographs taken with our camera:</p>

<div class="drawer_container double_drawer" id="lens_focus_demo3"></div>
<div class="lens_black" id="lens_focus_demo3_sl0"></div>
<div class="lens_yellow" id="lens_focus_demo3_sl1"></div>

<p>In real camera lenses an adjustable aperture is often constructed from a set of overlapping blades that constitute an <em>iris</em>. The movement of those blades changes the size of the aperture:</p>

<div class="drawer_container square_drawer" id="lens_blades"></div>
<div id="lens_blades_sl0"></div>

<p>The shape of the aperture also defines the shape of bokeh. This is the reason why bokeh sometimes has a polygonal shape – it&rsquo;s simply the shape of the &ldquo;cone&rdquo; of light after passing through the blades of the aperture. Next time you watch a movie pay close attention to the shape of out-of-focus highlights – they&rsquo;re often polygonal:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_cone_hex"></div>
<div id="lens_cone_hex_sl0"></div>

<p>As the aperture diameter decreases, larger and larger areas of the photographed scene remain sharp. The term <a href="https://en.wikipedia.org/wiki/Depth_of_field"><em>depth of field</em></a> is used to define the length of the region over which the objects are acceptably sharp. When describing the depth of field we&rsquo;re trying to conceptually demarcate those two boundary planes and see how far apart they are from each other.</p>

<p>Let&rsquo;s see the depth of field in action. The <span class="lens_black">black slider</span> controls the aperture, the <span class="lens_blue">blue slider</span> controls the focal length, and the <span class="lens_red">red slider</span> changes the position of the object relative to the camera. The <span style="color:#68C626"><strong>green dot</strong></span> shows the place of perfect focus, while the <span style="color:#3E53A7"><strong>dark blue dots</strong></span> show the limits, or the depth, of positions between which the image of the red light source will be reasonably sharp, as shown by a single outlined pixel on the sensor:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_cone_dof"></div>
<div class="lens_black" id="lens_cone_dof_sl0"></div>
<div class="lens_blue" id="lens_cone_dof_sl1"></div>
<div class="lens_red" id="lens_cone_dof_sl2"></div>

<p>Notice that <a href="#" class="link_button" onclick="lens_dof_0();return false;">the larger</a> the <span class="lens_black">diameter of the aperture</span> and <a href="#" class="link_button" onclick="lens_dof_1();return false;">the shorter</a> the <span class="lens_blue">focal length</span>, the shorter the distance between the <span style="color:#3E53A7"><strong>dark blue dots</strong></span> and thus the <em>shallower</em> the depth of field becomes. If you recall our discussion of sharpness this demonstration should make it easier to understand why reducing the angle of the cone <em>increases</em> the depth of field.</p>
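<p>The textbook thin-lens formulas make this trend computable: given an acceptable circle of confusion of diameter <strong>c</strong>, the <em>hyperfocal distance</em> <strong>H&nbsp;=&nbsp;f&middot;A/c&nbsp;+&nbsp;f</strong> for an aperture of diameter <strong>A</strong> yields the near and far limits of sharpness. A Python sketch with illustrative numbers:</p>

<pre><code>def depth_of_field(s, f, aperture, c):
    """Near and far limits of acceptable sharpness for a lens of focal
    length f focused at distance s, with an aperture of the given
    diameter and an acceptable circle of confusion of diameter c."""
    H = f * aperture / c + f                     # hyperfocal distance
    near = s * (H - f) / (H + s - 2 * f)
    far = s * (H - f) / (H - s) if s &lt; H else float('inf')
    return near, far

# A 50 mm lens focused at 2 m, with c = 0.03 mm (units in mm):
print(depth_of_field(2000, 50, 17.9, 0.03))   # wide aperture: shallow
print(depth_of_field(2000, 50, 3.1, 0.03))    # small aperture: deep
</code></pre>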

<p>If you don&rsquo;t have perfect vision you may have noticed that squinting your eyes makes you see things a little better. Your eyelids covering some part of your iris simply act as an aperture that decreases the angle of the cone of light falling into your eyes, making things slightly less blurry on your retina.</p>

<p>An interesting observation is that the aperture defines the diameter of the base of the captured cone of light that is emitted from the object. An aperture with twice the diameter captures roughly <em>four</em> times more light due to the increased <a href="https://ciechanow.ski/lights-and-shadows/#solid-angles">solid angle</a>. In practice, the actual size of the aperture as seen from the point of view of the scene, or the <a href="https://en.wikipedia.org/wiki/Entrance_pupil"><em>entrance pupil</em></a>, depends on all the lenses in front of it as the shaped glass may scale the perceived size of the aperture.</p>

<p>On the other hand, when a lens is focused correctly, the focal length defines how large a source object is in the picture. By doubling the focal length we double the width <em>and</em> the height of the object on the sensor, thus increasing its area by a factor of four. The light from the source is more spread out and each individual pixel receives less light.</p>

<p>The total amount of light hitting each pixel is governed by the <em>ratio</em> between the focal length <strong>f</strong> and the diameter of the entrance pupil <strong>D</strong>. This ratio is known as the <a href="https://en.wikipedia.org/wiki/F-number"><em>f-number</em></a>:</p>

<div class="equation">
<span class="equation_frac">N</span> = <span class="equation_frac"><span>f</span>
<span class="equation_div_symbol">/</span>
<span class="lns_div_bottom">D</span>
</span>
</div>

<p>A lens with a focal length of 50 mm and the entrance pupil of 25 mm would have <strong>N</strong> equal to 2 and the <em>f</em>-number would be known as <em>f</em>/2. Since the amount of light getting to each pixel of the sensor increases with the diameter of the aperture and decreases with the focal length, the <em>f</em>-number controls the brightness of the projected image.</p>
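<p>In code form these relations are tiny – a Python sketch using the 50&nbsp;mm example from above:</p>

<pre><code>def f_number(focal_length, pupil_diameter):
    return focal_length / pupil_diameter

def light_ratio(n_from, n_to):
    """Light per pixel scales with the aperture area, i.e. with 1/N**2."""
    return (n_from / n_to) ** 2

print(f_number(50, 25))      # 2.0 – the f/2 lens from the example
print(light_ratio(2, 2.8))   # ~0.5: one stop down halves the light
print(light_ratio(2, 4))     # 0.25: two stops down quarters it
</code></pre>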

<p>The <em>f</em>-number with which commercial lenses are marked usually defines the maximum aperture a lens can achieve and the smaller the <em>f</em>-number the more light the lens passes through. A bigger amount of incoming light allows for a shorter exposure time, so the smaller the <em>f</em>-number the <a href="https://en.wikipedia.org/wiki/Lens_speed"><em>faster</em></a> the lens is. By reducing the size of the aperture we can modify the <em>f</em>-number with which a picture is taken.</p>

<p>The <em>f</em>-numbers are often multiples of 1.4 which is an approximation of <span class="sqrt">2</span>. Scaling the diameter of an adjustable aperture by <span class="sqrt">2</span> scales its <em>area</em> by 2 which is a convenient factor to use. Increasing the <em>f</em>-number by a so-called <a href="https://en.wikipedia.org/wiki/F-number#Stops,_f-stop_conventions,_and_exposure"><em>stop</em></a> halves the amount of received light. The demonstration below shows the relative sizes of the aperture through which light is being seen:</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>
<div class="dark_light_bg"><div class="bg_content">


<p><br></p>

<div class="drawer_container square_drawer" id="lens_f"></div>
<div id="lens_f_seg0"></div>

<p>To maintain the overall brightness of the image when <a href="https://en.wikipedia.org/wiki/Stopping_down">stopping down</a> we&rsquo;d have to either increase the exposure time or the sensitivity of the sensor.</p>

<p><br></p>

<p>
</div>
</div>
<div class="dark_light_bg_grad_bottom"></div>
<br></p>

<p>While aperture settings let us easily control the depth of field, that change comes at a cost. When the <em>f</em>-number increases and the aperture diameter gets smaller we effectively start approaching a pinhole camera with all its related complications.</p>

<p>In the final part of this article we will discuss the entire spectrum of another class of problems that we&rsquo;ve been conveniently avoiding all this time.</p>

<h1 id="aberrations">Aberrations<a href="https://ciechanow.ski/cameras-and-lenses/#aberrations" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>In our examples so far we&rsquo;ve been using a perfect idealized lens that did exactly what we want and in all the demonstrations I&rsquo;ve relied on a certain simplification known as the <a href="https://en.wikipedia.org/wiki/Paraxial_approximation">paraxial approximation</a>. However, the physical world is a bit more complicated.</p>

<p>The most common types of lenses are <em>spherical</em> lenses – their curved surfaces are sections of spheres of different radii. These types of lenses are easier to manufacture, however, they actually don&rsquo;t perfectly converge the rays of incoming light. In the demonstration below you can observe how fuzzy the focus point is for various lens radii:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_spherical"></div>
<div id="lens_spherical_sl0"></div>

<p>This imperfection is known as <a href="https://en.wikipedia.org/wiki/Spherical_aberration"><em>spherical aberration</em></a>. This specific flaw can be corrected with <a href="https://en.wikipedia.org/wiki/Aspheric_lens"><em>aspheric lenses</em></a>, but unfortunately there are other types of problems that may not be easily solved by a single lens. In general, for monochromatic light there are five primary types of aberrations: <a href="https://en.wikipedia.org/wiki/Spherical_aberration">spherical aberration</a>, <a href="https://en.wikipedia.org/wiki/Coma_(optics)">coma</a>, <a href="https://en.wikipedia.org/wiki/Astigmatism_(optical_systems)">astigmatism</a>, <a href="https://en.wikipedia.org/wiki/Petzval_field_curvature">field curvature</a>, and <a href="https://en.wikipedia.org/wiki/Distortion_(optics)">distortion</a>.</p>

<p>We&rsquo;re still not out of the woods even if we manage to minimize these problems. In normal environments light is very <em>non</em>-monochromatic and nature throws another hurdle into optical system design. Let&rsquo;s quickly go back to the dark environment as we&rsquo;ll be discussing a single beam of white light.</p>

<p><br></p>

<div class="dark_light_bg_grad_top"></div>
<div class="dark_light_bg"><div class="bg_content">


<p><br></p>

<p>Observe what happens to that beam when it hits a piece of glass. You can make the sides non-parallel by using the slider:</p>

<div class="drawer_container lens_long_drawer medium_drawer move_cursor" id="lens_prism"></div>
<div id="lens_prism_sl0"></div>

<p>What we perceive as white light is a combination of lights of different wavelengths. In fact, the index of refraction of materials <em>depends</em> on the wavelength of the light. This phenomenon, called <a href="https://en.wikipedia.org/wiki/Dispersion_(optics)"><em>dispersion</em></a>, splits what seems to be a uniform beam of white light into a fan of color bands. The very same mechanism that we see here is also responsible for a rainbow.</p>
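<p>Dispersion is often modeled with Cauchy&rsquo;s empirical equation <strong>n(&lambda;) = A + B/&lambda;<sup>2</sup></strong>. A Python sketch using coefficients commonly quoted for BK7 glass – illustrative values, not the glass used in these demos:</p>

<pre><code>def cauchy_n(wavelength_um, A=1.5046, B=0.00420):
    """Cauchy's equation n = A + B / wavelength**2, with coefficients
    commonly quoted for BK7 glass (wavelength in micrometers)."""
    return A + B / wavelength_um ** 2

for name, lam in (('red', 0.70), ('green', 0.55), ('blue', 0.45)):
    print(f"{name:5} {lam} um  n = {cauchy_n(lam):.4f}")
# Blue light sees a higher index, so it bends more and focuses closer.
</code></pre>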

<p>In a lens this causes different wavelengths of light to focus at different offsets – the effect known as <a href="https://en.wikipedia.org/wiki/Chromatic_aberration"><em>chromatic aberration</em></a>. We can easily visualize the <em>axial</em> chromatic aberration even on a lens with spherical aberration fixed. I&rsquo;ll only use red, green, and blue dispersed rays to make things less crowded, but remember that other colors of the spectrum are present in between. Using the slider you can control the amount of dispersion the lens material introduces:</p>

<div class="drawer_container lens_very_long_drawer medium_drawer move_cursor" id="lens_chromatic"></div>
<div id="lens_chromatic_sl0"></div>

<p>Chromatic aberration may be corrected with an <a href="https://en.wikipedia.org/wiki/Achromatic_lens">achromatic lens</a>, usually in the form of a <a href="https://en.wikipedia.org/wiki/Doublet_(lens)">doublet</a> with two different types of glass fused together.</p>

<p><br></p>


</div>
</div>
<div class="dark_light_bg_grad_bottom"></div>

<p><br></p>

<p>To minimize the impact of the aberrations, camera lenses use more than one optical element on their pathways. In this article I&rsquo;ve only shown you simple lens systems, but a high-end camera lens may consist of <a href="https://en.wikipedia.org/wiki/File:Objective_Zeiss_Cut.jpg">a lot of elements</a> that were carefully designed to balance the optical performance, weight, and cost.</p>

<p>While we, in our world of computer simulations on this website, can maintain the illusion of simple and perfect systems devoid of aberrations, <a href="https://en.wikipedia.org/wiki/Vignetting">vignetting</a>, and <a href="https://en.wikipedia.org/wiki/Lens_flare">lens flares</a>, real cameras and lenses have to deal with all these problems to make the final pictures look good.</p>

<h1 id="further-watching-and-reading">Further Watching and Reading<a href="https://ciechanow.ski/cameras-and-lenses/#further-watching-and-reading" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>Over on YouTube <a href="https://www.youtube.com/channel/UCSFAYalJ2Q7Tm_WmLgetmeg">Filmmaker IQ channel</a> has a lot of great content related to lenses and movie making. Two videos especially fitting here are <a href="https://www.youtube.com/watch?v=1YIvvXxsR5Y">The History and Science of Lenses</a> and <a href="https://www.youtube.com/watch?v=lte9pa3RtUk">Focusing on Depth of Field and Lens Equivalents</a>.</p>

<p><a href="https://www.youtube.com/watch?v=q1n2DR6H7mk">What Makes Cinema Lenses So Special!?</a> on <a href="https://www.youtube.com/channel/UCNJe8uQhM2G4jJFRWiM89Wg">Potato Jet channel</a> is a great interview with Art Adams from <a href="https://www.arri.com/en/">ARRI</a>. The video goes over many interesting details of high-end cinema lens design, for example, how the lenses <a href="https://youtu.be/q1n2DR6H7mk?t=370">compensate for focus breathing</a>, or how much attention is paid to the <a href="https://youtu.be/q1n2DR6H7mk?t=899">quality of bokeh</a>.</p>

<p>For a deeper dive on bokeh itself Jakub Trávník&rsquo;s <a href="https://jtra.cz/stuff/essays/bokeh/index.html">On Bokeh</a> is a great article on the subject. The author explains how aberrations may cause bokeh of non-uniform intensity and shows many photographs taken with real cameras and lenses.</p>

<p>In this article I&rsquo;ve mostly been using <a href="https://en.wikipedia.org/wiki/Geometrical_optics">geometrical optics</a> with some soft touches of electromagnetism. For a more modern look at the nature of light and its interaction with matter I recommend Richard Feynman&rsquo;s <a href="https://en.wikipedia.org/wiki/QED:_The_Strange_Theory_of_Light_and_Matter">QED: The Strange Theory of Light and Matter</a>. The book is written in a very approachable style suited for a general audience, but it still lets Feynman&rsquo;s wit and brilliance shine right through.</p>

<h1 id="final-words">Final Words<a href="https://ciechanow.ski/cameras-and-lenses/#final-words" class="hanchor" ariaLabel="Anchor"><img src="https://ciechanow.ski/images/anchor.png" width="16px" height="8px"/></a> </h1>

<p>We’ve barely scratched the surface of optics and camera lens design, but even the most complex systems end up serving the same purpose: to tell light where to go. In some sense optical engineering is all about taming the nature of light.</p>

<p>The simple act of pressing the shutter button in a camera app on a smartphone or on the body of a high-end DSLR is effortless, but it’s at this moment when, through carefully guided rays hitting an array of photodetectors, we immortalize reality by painting with light.</p></div>
</div></div></div></sub></span></div>

+ 4
- 0
cache/2021/index.html View File

@@ -121,12 +121,16 @@
<li><a href="/david/cache/2020/fd776407232cd6fd7627bac7dba39755/" title="Accès à l’article dans le cache local : Épuiser la pratique">Épuiser la pratique</a> (<a href="https://www.quaternum.net/2020/02/29/epuiser-la-pratique/" title="Accès à l’article original distant : Épuiser la pratique">original</a>)</li>
<li><a href="/david/cache/2020/ef2067bf42482ed7c48e1d166cde117a/" title="Accès à l’article dans le cache local : Cameras and Lenses">Cameras and Lenses</a> (<a href="https://ciechanow.ski/cameras-and-lenses/" title="Accès à l’article original distant : Cameras and Lenses">original</a>)</li>
<li><a href="/david/cache/2020/9e5d68c7459c77716c44dd1463be36d8/" title="Accès à l’article dans le cache local : Plaintext HTTP in a Modern World">Plaintext HTTP in a Modern World</a> (<a href="https://jcs.org/2021/01/06/plaintext" title="Accès à l’article original distant : Plaintext HTTP in a Modern World">original</a>)</li>
<li><a href="/david/cache/2020/cfcd10768187ce1c3e598136cd8838b2/" title="Accès à l’article dans le cache local : Bad News Wrapped in Protein: Inside the Coronavirus Genome">Bad News Wrapped in Protein: Inside the Coronavirus Genome</a> (<a href="https://www.nytimes.com/interactive/2020/04/03/science/coronavirus-genome-bad-news-wrapped-in-protein.html" title="Accès à l’article original distant : Bad News Wrapped in Protein: Inside the Coronavirus Genome">original</a>)</li>
<li><a href="/david/cache/2020/c177668b263f39d20788f002446d2a47/" title="Accès à l’article dans le cache local : 6-month consequences of COVID-19 in patients discharged from hospital: a cohort study">6-month consequences of COVID-19 in patients discharged from hospital: a cohort study</a> (<a href="https://www.thelancet.com/journals/lancet/article/PIIS0140-6736(20)32656-8/fulltext" title="Accès à l’article original distant : 6-month consequences of COVID-19 in patients discharged from hospital: a cohort study">original</a>)</li>
<li><a href="/david/cache/2020/0c6966a8e9543b52c361ac6de68f08e4/" title="Accès à l’article dans le cache local : Understanding ProRAW">Understanding ProRAW</a> (<a href="https://blog.halide.cam/understanding-proraw-4eed556d4c54" title="Accès à l’article original distant : Understanding ProRAW">original</a>)</li>
<li><a href="/david/cache/2020/aeb0a60038b91bf1fbbbd39b358366fb/" title="Accès à l’article dans le cache local : ☕️ Journal : Statu quo">☕️ Journal : Statu quo</a> (<a href="https://oncletom.io/2021/01/05/statu-quo/" title="Accès à l’article original distant : ☕️ Journal : Statu quo">original</a>)</li>
<li><a href="/david/cache/2020/595df9c1f986df0678e4fc7d6aa34ef1/" title="Accès à l’article dans le cache local : Qui sont les dix auteurs de SF de la « Red Team » du ministère des Armées ?">Qui sont les dix auteurs de SF de la « Red Team » du ministère des Armées ?</a> (<a href="https://www.lepoint.fr/high-tech-internet/qui-sont-les-dix-auteurs-de-sf-de-la-red-team-du-ministere-des-armees-04-12-2020-2404230_47.php" title="Accès à l’article original distant : Qui sont les dix auteurs de SF de la « Red Team » du ministère des Armées ?">original</a>)</li>
