<?xml version='1.0' encoding='UTF-8'?><?xml-stylesheet href="http://www.blogger.com/styles/atom.css" type="text/css"?><feed xmlns='http://www.w3.org/2005/Atom' xmlns:openSearch='http://a9.com/-/spec/opensearchrss/1.0/' xmlns:blogger='http://schemas.google.com/blogger/2008' xmlns:georss='http://www.georss.org/georss' xmlns:gd="http://schemas.google.com/g/2005" xmlns:thr='http://purl.org/syndication/thread/1.0'><id>tag:blogger.com,1999:blog-1109038746813902833</id><updated>2022-08-18T14:21:52.628-07:00</updated><category term="YouTube Engineering &amp; Developers Blog"/><category term="announcements"/><category term="googleio"/><category term="devs"/><category term="embedding"/><category term="youtube direct"/><category term="embed"/><category term="io2011"/><category term="tutorials"/><category term="youtube"/><category term="authentication"/><category term="code"/><category term="docs"/><category term="events"/><category term="mobile"/><category term="oauth"/><category term="playlists"/><category term="samples"/><category term="clientlogin"/><category term="io12"/><category term="uploads"/><category term="video"/><category term="ytd"/><category term="android"/><category term="as3"/><category term="captions"/><category term="channels"/><category term="deprecation"/><category term="iphone"/><category term="mashups"/><category term="player"/><category term="YouTube Reporting API"/><category term="accessibility"/><category term="appengine"/><category term="as2"/><category term="authsub"/><category term="chrome"/><category term="chromeless"/><category term="dotnet"/><category term="feeds"/><category term="flash"/><category term="html5"/><category term="https"/><category term="iframe"/><category term="irc"/><category term="java"/><category term="javascript"/><category term="json"/><category term="oauth2"/><category term="php"/><category term="python"/><category term="releases"/><category term="resumable"/><category term="sandbox"/><category term="staging"/><category 
term="sup"/><category term="youtube api"/><category term=".net"/><category term="360"/><category term="Documentation RSS"/><category term="Live Streaming API"/><category term="LiveBroadcasts API"/><category term="NAB 2016"/><category term="Super Chat API"/><category term="VR"/><category term="YouTube Data API"/><category term="YouTube IFrame Player API"/><category term="YouTube live"/><category term="acceleration"/><category term="access control"/><category term="actionscript"/><category term="activities"/><category term="activity"/><category term="apis"/><category term="app engine"/><category term="apps script"/><category term="atom"/><category term="authorization"/><category term="best practices"/><category term="blackops"/><category term="blur faces"/><category term="bootcamp"/><category term="categories"/><category term="charts"/><category term="client library"/><category term="clientlibraries"/><category term="color"/><category term="comments"/><category term="compositing"/><category term="create"/><category term="curation"/><category term="custom player"/><category term="decommission"/><category term="default"/><category term="direct"/><category term="discovery"/><category term="education"/><category term="extension"/><category term="format"/><category term="friendactivity"/><category term="friends"/><category term="fun"/><category term="gears"/><category term="google developers live"/><category term="google group"/><category term="googlegamedev"/><category term="insight"/><category term="ios"/><category term="issue tracker"/><category term="json-c"/><category term="jsonc"/><category term="knight"/><category term="legacy"/><category term="logo"/><category term="machine learning"/><category term="media:keywords keywords tags metadata"/><category term="metadata"/><category term="mozilla"/><category term="news"/><category term="office hours"/><category term="open source"/><category term="partial"/><category term="partial response"/><category term="partial 
update"/><category term="partners"/><category term="patch"/><category term="policy"/><category term="previews"/><category term="pubsubhubbub"/><category term="push"/><category term="quota"/><category term="rails"/><category term="rendering"/><category term="reports"/><category term="responses"/><category term="ruby"/><category term="shortform"/><category term="ssl https certificate staging stage"/><category term="stack overflow"/><category term="stage video"/><category term="standard feeds"/><category term="storify"/><category term="storyful"/><category term="subscription"/><category term="survey"/><category term="tdd"/><category term="theme"/><category term="tos"/><category term="updates"/><category term="v2"/><category term="v3"/><category term="video files"/><category term="video transcoding"/><category term="virtual reality"/><category term="voting"/><category term="watch history"/><category term="watchlater"/><category term="webvtt"/><category term="youtube developers live"/><title type='text'>YouTube Engineering and Developers Blog</title><subtitle type='html'>What&#39;s happening with engineering and developers at YouTube</subtitle><link rel='http://schemas.google.com/g/2005#feed' type='application/atom+xml' href='http://youtube-eng.googleblog.com/feeds/posts/default'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default?alt=atom&amp;redirect=false'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/'/><link rel='hub' href='http://pubsubhubbub.appspot.com/'/><link rel='next' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default?alt=atom&amp;start-index=26&amp;max-results=25&amp;redirect=false'/><author><name>ewood</name><uri>http://www.blogger.com/profile/12341551220176883769</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' 
src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><generator version='7.00' uri='http://www.blogger.com'>Blogger</generator><openSearch:totalResults>211</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-3823388909896663367</id><published>2020-11-09T10:07:00.007-08:00</published><updated>2020-11-09T10:09:55.503-08:00</updated><title type='text'>Visit our new blog destination</title><content type='html'>Our YouTube Blog has been on Blogger for the past 15 years, so this past August, we figured it was time to &lt;a href=&quot;https://blog.youtube/news-and-events/introduction-to-our-blog-redesign/&quot; target=&quot;_blank&quot;&gt;completely change things up&lt;/a&gt;. We created an entirely &lt;a href=&quot;https://blog.youtube/&quot; target=&quot;_blank&quot;&gt;new site&lt;/a&gt; and redesign.&amp;nbsp;&lt;div&gt;&lt;br /&gt;&lt;/div&gt;&lt;div&gt;&lt;br /&gt;&lt;/div&gt;&lt;div&gt;In 30 days, we’re going to redirect the Engineering Blog over to our new YouTube Official Blog as the final piece of the redesign strategy. 
We hope you enjoy the blog’s new home!&lt;/div&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/3823388909896663367'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/3823388909896663367'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2020/11/visit-our-new-blog-destination.html' title='Visit our new blog destination'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-5649737044541017068</id><published>2019-08-29T11:05:00.000-07:00</published><updated>2020-08-31T12:14:12.002-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Abbreviated public-facing subscriber counts</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;&lt;span id=&quot;docs-internal-guid-91b4e6a9-7fff-973a-9ad0-fdf8521916b3&quot;&gt;&lt;span style=&quot;color: #212121; font-family: &amp;quot;roboto&amp;quot; , sans-serif; font-size: 12pt; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Following our &lt;/span&gt;&lt;a href=&quot;https://support.google.com/youtube/thread/6543166&quot; style=&quot;text-decoration-line: none;&quot; target=&quot;_blank&quot;&gt;&lt;span style=&quot;color: #1155cc; font-family: &amp;quot;roboto&amp;quot; , sans-serif; font-size: 12pt; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;announcement&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;color: #212121; font-family: &amp;quot;roboto&amp;quot; , sans-serif; font-size: 12pt; vertical-align: baseline; 
white-space: pre-wrap;&quot;&gt; in May, we&#39;ll be abbreviating subscriber counts across YouTube, starting the week of September 2; and the public YouTube Data API Service, starting the week of September 9. Read more about what this means for the public YouTube Data API Service in this &lt;a href=&quot;https://support.google.com/youtube/thread/6543166?msgid=13119244&quot; target=&quot;_blank&quot;&gt;updated Help Community post&lt;/a&gt;.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5649737044541017068'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5649737044541017068'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2019/08/abbreviated-public-facing-subscriber.html' title='Abbreviated public-facing subscriber counts'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-1507315394990239663</id><published>2019-04-12T11:54:00.000-07:00</published><updated>2020-08-31T12:14:10.161-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Launching a YouTube dataset of user-generated content</title><content type='html'>We are excited to launch a large-scale dataset of public user-generated content (UGC) videos uploaded to YouTube under a Creative Commons license. This dataset is intended to aid the advancement of research on video compression and quality evaluation.  &lt;br /&gt;&lt;br /&gt;We created this dataset to help baseline research efforts, as well as foster algorithmic development. 
We hope that this dataset will help the industry better comprehend UGC quality and tackle UGC challenges at scale.&lt;br /&gt;&lt;br /&gt;&lt;h3&gt;What is UGC? &lt;/h3&gt;&lt;br /&gt;User-generated content (UGC)  videos are uploaded by users and creators. These videos are not always professionally curated and could exhibit perceptual artifacts. For the purpose of this dataset, we&#39;ve selected original videos with specific and perceptual quality issues, like blockiness, blur, banding, noise, jerkiness, and so on. &lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-R28oMVFRhyo/XLDLxijb64I/AAAAAAAAAmo/ZUKOR9NohJ85Yw2piRyxfJDUl4Ikb4_OQCLcBGAs/s1600/qset_collage.gif&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;360&quot; data-original-width=&quot;640&quot; height=&quot;360&quot; src=&quot;https://1.bp.blogspot.com/-R28oMVFRhyo/XLDLxijb64I/AAAAAAAAAmo/ZUKOR9NohJ85Yw2piRyxfJDUl4Ikb4_OQCLcBGAs/s640/qset_collage.gif&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;br /&gt;&lt;br /&gt;These videos have a wide array of categories, such as “how to” videos, technology reviews, gaming, pets, etc.   &lt;br /&gt;&lt;br /&gt;Since these videos are often captured in environments without controlled lighting, with ambient noise, or on low-end capture devices, they may end up exhibiting various video quality issues, such as camera shaking, low visibility, or jarring audio. &lt;br /&gt;&lt;br /&gt;Before sharing these videos, creators may edit the video for aesthetics and generally compress the captured video for a faster upload (e.g. depending on the network conditions). Creators also may annotate the video or add additional overlays. The editing, annotating, and overlaying processes change the underlying video data at the pixel and/or frame levels. 
Additionally, any associated compression may introduce visible compression artifacts within the video such as blockiness, banding, or ringing. &lt;br /&gt;&lt;br /&gt;For these reasons, in our experience, UGC should be evaluated and treated differently from traditional, professional video. &lt;br /&gt;&lt;br /&gt;&lt;h3&gt;The challenges with UGC &lt;/h3&gt;&lt;br /&gt;Processing and encoding UGC video presents a variety of challenges that are less prevalent in traditional video. &lt;br /&gt;&lt;br /&gt;For instance, look at these clips shown below that are heavily ridden with blockiness and noise. Many modern video codecs would target their encoding algorithms based on reference-based metrics, such as PSNR or SSIM. These metrics measure the fidelity of accurately reproducing the original content roughly pixel for pixel, including artifacts. The assumption here is that the video that acts as the reference is “pristine,” but for UGC, this assumption often breaks down. &lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://2.bp.blogspot.com/-pv7EZFcvwJU/XLDMLLfutHI/AAAAAAAAAmw/bwnCBlGyE1EhFSdI9bGFYiIWcYSbtBH3ACLcBGAs/s1600/unnamed.gif&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;156&quot; data-original-width=&quot;512&quot; height=&quot;194&quot; src=&quot;https://2.bp.blogspot.com/-pv7EZFcvwJU/XLDMLLfutHI/AAAAAAAAAmw/bwnCBlGyE1EhFSdI9bGFYiIWcYSbtBH3ACLcBGAs/s640/unnamed.gif&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;br /&gt;&lt;a href=&quot;https://3.bp.blogspot.com/-9z4ZKv_KQZA/XLDMWf2p_KI/AAAAAAAAAm0/psjodwYMA5c27yZ8l6XX5TdJfc-nM7GfQCLcBGAs/s1600/YNXJzjTis14-2295.04-Lecture_1080_10m_1m_cropped.gif&quot; imageanchor=&quot;1&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;251&quot; data-original-width=&quot;818&quot; height=&quot;196&quot; 
src=&quot;https://3.bp.blogspot.com/-9z4ZKv_KQZA/XLDMWf2p_KI/AAAAAAAAAm0/psjodwYMA5c27yZ8l6XX5TdJfc-nM7GfQCLcBGAs/s640/YNXJzjTis14-2295.04-Lecture_1080_10m_1m_cropped.gif&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;br /&gt;&lt;br /&gt;In this case, the video on the left ends up having 5 Mbps bitrate to faithfully represent the originally uploaded user video content. However, the heavily compressed video on the right has a bitrate of only 1 Mbps, but looks similar when compared to the 5 Mbps counterpart.  &lt;br /&gt;&lt;br /&gt;Another unconventional challenge can come from a lack of understanding of the provided quality of the uploaded video. With traditional video, quite often a lower quality is a result of heavy editing or processing and an un-optimized encoding. However, this is not always true for UGC, where the uploaded video itself could be sufficiently low quality that any number of optimizations on the encoding operation would not increase the quality of the encoded video. &lt;br /&gt;&lt;br /&gt;&lt;h3&gt;How is the dataset put together? &lt;/h3&gt;&lt;br /&gt;This dataset is sampled from millions of YouTube uploaded videos licensed under a Creative Commons license. Only publicly shared videos from uploaders are sampled. &lt;br /&gt;&lt;br /&gt;The sample space the videos were chosen from can be divided into four discrete dimensions: Spatial, Motion, Color, and Chunk-level variations. We believe that this dataset reasonably represents the variety of content that we observe as uploads within these dimensions.  &lt;br /&gt;&lt;br /&gt;For technical details on how this dataset was composed, the coverage correlation scores and more, please refer to our &lt;a href=&quot;https://storage.cloud.google.com/ugc-dataset/ugc_dataset.pdf&quot;&gt;paper&lt;/a&gt; on dataset generation in arxiv (also submitted to ICIP 2019).  &lt;br /&gt;&lt;br /&gt;&lt;h3&gt;Where can I see and download it? 
&lt;/h3&gt;&lt;br /&gt;This UGC dataset can be explored over various content categories and resolutions in the explore tab of &lt;a href=&quot;https://media.withyoutube.com/&quot; target=&quot;_blank&quot;&gt;media.withyoutube.com.&lt;/a&gt; The video preview will be shown when you mouse-over the video, along with an overlay of the attribution. &lt;br /&gt;&lt;br /&gt;Various content categories are separated out for simplicity of selection. HDR and VR formats are available in addition for each resolution. Though some high frame rate content is present as part of the offering, it is not currently separated out as a category. Frame rate information is embedded in the video metadata and can be obtained when the corresponding video is downloaded.   &lt;br /&gt;&lt;br /&gt;Videos can be downloaded from the download tab of &lt;a href=&quot;https://media.withyoutube.com/&quot; target=&quot;_blank&quot;&gt;media.withyoutube.com&lt;/a&gt;&amp;nbsp;page. Here you will also notice the &lt;a href=&quot;https://creativecommons.org/licenses/by/3.0/legalcode&quot; target=&quot;_blank&quot;&gt;CC BY&lt;/a&gt; creative commons attribution file for the whole set of videos. Details about the video download format along with the link to the &lt;a href=&quot;https://cloud.google.com/&quot; target=&quot;_blank&quot;&gt;Google Cloud Platform&lt;/a&gt; location are available on this page. &lt;br /&gt;&lt;br /&gt;Additionally, three no-reference metrics that have been computed on the UGC video dataset by the YouTube Media Algorithms team are available to download from this page. These three metrics are Noise, Banding, and SLEEQ. Explanations of each were published in ICIPs and ACM Multimedia Conferences. 
&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Posted by Balu Adsumilli, Sasi Inguva, Yilin Wang, Jani Huoponen, Ross Wolf&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1507315394990239663'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1507315394990239663'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2019/04/launching-youtube-dataset-of-user.html' title='Launching a YouTube dataset of user-generated content'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://1.bp.blogspot.com/-R28oMVFRhyo/XLDLxijb64I/AAAAAAAAAmo/ZUKOR9NohJ85Yw2piRyxfJDUl4Ikb4_OQCLcBGAs/s72-c/qset_collage.gif" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-1395456984731392871</id><published>2018-12-18T09:00:00.000-08:00</published><updated>2020-08-31T12:14:10.092-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Add stereo music or narration to VR videos</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;&lt;div style=&quot;text-align: center;&quot;&gt;&lt;div style=&quot;text-align: left;&quot;&gt;We introduced a new feature which allows YouTube creators to mix together spatial audio with stereo audio content, like music and/or narration, when they work on VR experiences. Viewers can already enjoy this feature on YouTube mobile apps as well as desktop web browsers. 
&lt;/div&gt;&lt;/div&gt;&lt;br /&gt;&lt;h3 style=&quot;text-align: left;&quot;&gt;Why we need spatial audio in VR&lt;/h3&gt;&lt;br /&gt;Sound has been part of making movies great almost from the start.  Live music performance went along with silent movies even before 1927, when &lt;a href=&quot;https://www.youtube.com/watch?v=XZpX1B6n5Fc&quot; target=&quot;_blank&quot;&gt;&quot;The Jazz Singer&quot;&lt;/a&gt; brought &lt;a href=&quot;https://en.wikipedia.org/wiki/Sound_film&quot; target=&quot;_blank&quot;&gt;talkies&lt;/a&gt; to the big screen. In the early days, movie sound reproduction was very primitive and typically played over a single loudspeaker. Little consideration was given to the relationship between recorded sounds and the objects or actors on the screen. As technology progressed, people realized that making sound stereo&amp;nbsp;&lt;span style=&quot;background-color: white; color: #222222; font-family: &amp;quot;roboto&amp;quot; , &amp;quot;arial&amp;quot; , sans-serif; font-size: 16px;&quot;&gt;—&amp;nbsp;&lt;/span&gt;putting some sounds to the left, some to the right, and some moving from side-to-side across the screen&amp;nbsp;&lt;span style=&quot;background-color: white; color: #222222; font-family: &amp;quot;roboto&amp;quot; , &amp;quot;arial&amp;quot; , sans-serif; font-size: 16px;&quot;&gt;—&lt;/span&gt;&amp;nbsp;added another dimension to the experience. It&#39;s easier to get immersed in a movie when the sound and the picture fit together!&lt;br /&gt;&lt;br /&gt;We like when the sound of an engine follows the car we are seeing. We get excited if our attention is suddenly drawn to another part of the screen by a door squeak, or a gunshot or an explosion. Although stereo sound creates an immersive experience, a typical loudspeaker set-up places the speakers on either side of the screen, which largely confines the movement of sound to within the screen itself. 
One of the limitations of this is that it doesn&#39;t match what we&#39;re used to in real life. We&#39;re used to hearing sounds from all around us, even when we don&#39;t see where they&#39;re coming from. &lt;br /&gt;&lt;br /&gt;The need for more accurate real-life sound reproduction was recognized even before stereo was perfected for film in the production of &quot;Fantasia&quot; and its groundbreaking multi-speaker: &lt;a href=&quot;https://en.wikipedia.org/wiki/Fantasound&quot; target=&quot;_blank&quot;&gt;Fantasound&lt;/a&gt; system. Through the use of multiple speakers, &quot;Fantasia&quot; pushed sound reproduction off the screen and into three-dimensions, putting the audience at the center of a 3-D sound experience. Since this early work, sound technology has advanced to more complex multi-speaker surround sound systems, but also 3-D headphone sound. &lt;br /&gt;&lt;br /&gt;More recently, we&#39;ve seen the emergence of VR, which aims to improve immersive experiences further by giving the audience not just a 3-D audio experience, but an entire 3-D video experience too. That&#39;s why the VR teams at Google and YouTube have been working to provide YouTube users with VR experiences with immersive spatial audio. &lt;br /&gt;&lt;br /&gt;&lt;h3 style=&quot;text-align: left;&quot;&gt;Spatial audio in VR &lt;/h3&gt;&lt;br /&gt;One of the challenges in VR production is sound design and spatial audio production. A major task for sound designers is to accurately associate sounds in 3-D space with visible objects within the 3-D video scene. Like the engine sound we mentioned before, a sound designer needs to correctly position the audio to accurately follow the visible position of the car in the scene. The car in this example is what is known as a diegetic sound source, because its position is visible or implied within the video scene. 
In a typical cinematic production, though, there will also be sounds that don&#39;t directly correspond to positions within the video scene, like voice-overs or narration, for example. Voiced narration (i.e. &#39;Red&#39; in &quot;The Shawshank Redemption&quot;) is typically not associated with any object within the video scene. This type of sound is known as a non-diegetic sound source. Another example would be background music, which has been present in cinematic experiences since the very beginning. &lt;br /&gt;&lt;br /&gt;&lt;h3 style=&quot;text-align: left;&quot;&gt;How does it work?&lt;/h3&gt;&lt;br /&gt;When you watch a VR video on a Head Mounted Display (HMD) like the &lt;a href=&quot;https://vr.google.com/daydream/smartphonevr/&quot; target=&quot;_blank&quot;&gt;Daydream View&lt;/a&gt;, the spatial audio rendering needs to accurately reproduce the intentions of the sound designer. How does it achieve this? Firstly, a spatial audio rendering engine needs to treat non-diegetic and diegetic sounds differently. &lt;br /&gt;&lt;br /&gt;The audio processing for diegetic sounds is conceptually quite simple: The device knows how your head moves, and hence, how all the sounds need to be filtered, so that what you hear over headphones precisely reflects what is happening around you at that very moment. It is like creating a unique headphone mix especially for you every time you watch a movie. This way you can experience all the sounds with their true depth and spatial location, coming from all around you! &lt;br /&gt;&lt;br /&gt;When it comes to non-diegetic sounds, the situation is quite different. These should be rendered as a standard stereophonic track, alongside immersive spatial audio content and preserve the original fidelity of music or narrator&#39;s voice. The viewer should experience them the same way that we are used to: in left/right stereo. 
(This is why you may hear the phrase &quot;head-locked stereo.&quot;)  &lt;br /&gt;&lt;br /&gt;&lt;h3 style=&quot;text-align: left;&quot;&gt;Create spatial audio with head-locked stereo and upload to YouTube &lt;/h3&gt;&lt;br /&gt;YouTube now allows creators to join these two concepts together and augment immersive spatial audio with more traditional stereo content. When creators add two extra channels to their uploaded spatial audio soundtrack, they will now be interpreted as a head-locked stereo and won&#39;t go through the same processing algorithms that YouTube uses for spatial audio. In other words, it will sound exactly the same as more traditional audio uploaded to YouTube. See this &lt;a href=&quot;https://support.google.com/youtube/answer/6395969?co=GENIE.Platform%3DDesktop&amp;amp;hl=en&quot; target=&quot;_blank&quot;&gt;YouTube Spatial Audio help page&lt;/a&gt; for a more detailed guide on how to prepare and upload spatial audio and head-locked stereo to YouTube. Also, make sure to check out the example video here:  &lt;br /&gt;&lt;br /&gt;&lt;iframe allow=&quot;accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture&quot; allowfullscreen=&quot;&quot; frameborder=&quot;0&quot; height=&quot;315&quot; src=&quot;https://www.youtube.com/embed/KEXsXsEGQGg&quot; width=&quot;560&quot;&gt;&lt;/iframe&gt; &lt;br /&gt;YouTube viewers can already enjoy this new feature on YouTube Android/iOS apps, as well as Chrome, Opera and now also Mozilla Firefox web browsers. For the best experience, we recommend using YouTube VR with the &lt;a href=&quot;https://vr.google.com/daydream/smartphonevr/&quot; target=&quot;_blank&quot;&gt;Daydream View&lt;/a&gt;.   &lt;br /&gt;&lt;br /&gt;&lt;i&gt;Marcin Gorzel and Damien Kelly, Software Engineers, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=T_s4ZLGQISs&quot; target=&quot;_blank&quot;&gt;Ecuadorian Cloud Forest in 360 VR!! 
(2018)&lt;/a&gt;.&quot;&lt;/i&gt;&lt;/div&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1395456984731392871'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1395456984731392871'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2018/12/add-stereo-music-or-narration-to-vr.html' title='Add stereo music or narration to VR videos'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://img.youtube.com/vi/KEXsXsEGQGg/default.jpg" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-5087154068109238670</id><published>2018-08-01T11:00:00.002-07:00</published><updated>2020-08-31T12:14:11.725-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube IFrame Player API"/><title type='text'>Control your 360 videos with the YouTube IFrame Player API</title><content type='html'>Ever since we launched &lt;a href=&quot;https://youtube-creators.googleblog.com/2015/03/a-new-way-to-see-and-share-your-world.html&quot; target=&quot;_blank&quot;&gt;360° videos&lt;/a&gt; in 2015, we&#39;ve been exploring ways to unleash the full potential of this new video format, including &lt;a href=&quot;https://youtube-creators.googleblog.com/2015/11/youtube-presses-play-on-virtual-reality.html&quot; target=&quot;_blank&quot;&gt;Cardboard mode&lt;/a&gt;, &lt;a href=&quot;https://youtube.googleblog.com/2016/04/one-step-closer-to-reality-introducing.html&quot; 
target=&quot;_blank&quot;&gt;360° live streams&lt;/a&gt;, and &lt;a href=&quot;https://youtube-eng.googleblog.com/2017/03/improving-vr-videos.html&quot; target=&quot;_blank&quot;&gt;improved video quality&lt;/a&gt;. We are excited to share with you some new APIs for controlling 360° videos in embedded videos.&lt;br /&gt;&lt;br /&gt;The &lt;a href=&quot;https://developers.google.com/youtube/iframe_api_reference#Spherical_Video_Controls&quot; target=&quot;_blank&quot;&gt;Spherical Video Control API&lt;/a&gt; gives developers full control over the user’s perspective when using the YouTube &lt;a href=&quot;https://developers.google.com/youtube/iframe_api_reference&quot; target=&quot;_blank&quot;&gt;IFrame Player SDK&lt;/a&gt;. Developers can get and set the view’s current yaw, pitch, roll, and field-of-view. This opens the door to many different scenarios such as narration-driven tours, custom controllers, multi-display installations all via JavaScript.&lt;br /&gt;&lt;br /&gt;&lt;style&gt;#alienButton {        position: relative;        display:block;        margin: 15px auto;        border-width: 0;        outline: none;        border-radius: 10px;        box-shadow: 0 3px 6px rgba(0, 0, 0, .24);        background-color: #f00;        color: #fff;        transition: background-color .2s;  }  #alienButton:hover {        background-color: #F44336;  }  #alienButton:active {        box-shadow: 0 2px 6px rgba(0, 0, 0, .24);        background-color: #EF9A9A;  }  #alienButton:disabled {        box-shadow: 0 1px 3px rgba(0, 0, 0, .24);        background-color: #9E9E9E;  }  #alienButton span {        display: block;        padding: 12px 24px;        font-size: 20px; } &lt;/style&gt;  &lt;br /&gt;&lt;div style=&quot;width: 100%;&quot;&gt;&lt;iframe frameborder=&quot;0&quot; height=&quot;350&quot; id=&quot;alien-iframe-embed&quot; onload=&quot;(function() {if(window.location.search.indexOf(&#39;m=1&#39;)&amp;gt;=0){return;} jsScript = 
document.getElementById(&#39;alien-js-script&#39;); eval(jsScript.innerHTML);})()&quot; src=&quot;https://www.youtube.com/embed/G-XZhKqQAHU?enablejsapi=1&quot; style=&quot;border: 0px;&quot; width=&quot;100%&quot;&gt;&lt;/iframe&gt;&lt;button disabled=&quot;&quot; id=&quot;alienButton&quot;&gt;&lt;span id=&quot;alienButtonText&quot;&gt;Alien!!!&lt;/span&gt;&lt;/button&gt;    &lt;/div&gt;&lt;br /&gt;Here is a simple example of using the API. &lt;a href=&quot;https://atap.google.com/spotlight-stories/&quot; target=&quot;_blank&quot;&gt;Google Spotlight Stories&lt;/a&gt;, collaborating with Justin Lin, brought us this wonderful story centered around a mysterious alien. We loved the experience, but it is easy to lose track of the alien while exploring the surroundings, so we added an “Alien” button to the video. Try wandering through the story, using your mouse to look around, and using the button to bring the alien back to the center of the scene.&lt;br /&gt;&lt;br /&gt;We hope this helps you to incorporate 360° videos as an integral part of your applications and to create new and novel 360° experiences. To get you started, this short script will create an embed that pans in the horizontal direction while oscillating vertically.  
&lt;br /&gt;&lt;br /&gt;&lt;link href=&quot;https://fonts.googleapis.com/css?family=Roboto+Mono&quot; rel=&quot;stylesheet&quot;&gt;&lt;/link&gt; &lt;div style=&quot;overflow: auto; padding: 0.2em 0.6em; width: auto;&quot;&gt;&lt;pre style=&quot;color: black; font-family: &amp;quot;roboto mono&amp;quot; , monospace; line-height: 125%; margin: 0;&quot;&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;lt;div&lt;/span&gt; &lt;span style=&quot;color: #bb4444;&quot;&gt;id=&quot;player&quot;&lt;/span&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;gt;&amp;lt;/div&amp;gt;&lt;/span&gt;&lt;br /&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;lt;script &lt;/span&gt;&lt;span style=&quot;color: #bb4444;&quot;&gt;src=&quot;https://www.youtube.com/iframe_api&quot;&lt;/span&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;gt;&amp;lt;/script&amp;gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;lt;script&amp;gt;&lt;/span&gt;&lt;br /&gt;    &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;let&lt;/span&gt; player;&lt;br /&gt;    &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;let&lt;/span&gt; panStarted = &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;false&lt;/span&gt;;&lt;br /&gt;&lt;br /&gt;    &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;function&lt;/span&gt; onYouTubeIframeAPIReady() {&lt;br /&gt;        player = &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;new&lt;/span&gt; YT.Player(&lt;span style=&quot;color: #bb4444;&quot;&gt;&#39;player&#39;&lt;/span&gt;, {&lt;br /&gt;            videoId&lt;span style=&quot;color: #666666;&quot;&gt;:&lt;/span&gt; &lt;span style=&quot;color: #bb4444;&quot;&gt;&#39;FAtdv94yzp4&#39;&lt;/span&gt;,&lt;br /&gt;            events&lt;span style=&quot;color: #666666;&quot;&gt;:&lt;/span&gt; {&lt;br /&gt;            
    &lt;span style=&quot;color: #bb4444;&quot;&gt;&#39;onStateChange&#39;&lt;/span&gt;&lt;span style=&quot;color: #666666;&quot;&gt;:&lt;/span&gt; onPlayerStateChange&lt;br /&gt;            }&lt;br /&gt;        });&lt;br /&gt;    }&lt;br /&gt;&lt;br /&gt;    &lt;span style=&quot;color: #008800; font-style: italic;&quot;&gt;// Start animation when video starts playing.&lt;/span&gt;&lt;br /&gt;    &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;function&lt;/span&gt; onPlayerStateChange(event) {&lt;br /&gt;        &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;if&lt;/span&gt; (event.data == 1 &amp;amp;&amp;amp; !panStarted) {&lt;br /&gt;            requestAnimationFrame(panVideo);&lt;br /&gt;            panStarted = &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;true&lt;/span&gt;;&lt;br /&gt;        }&lt;br /&gt;    }&lt;br /&gt;&lt;br /&gt;    &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;function&lt;/span&gt; panVideo() {&lt;br /&gt;        &lt;span style=&quot;color: #008800; font-style: italic;&quot;&gt;// 20 seconds per rotation.&lt;/span&gt;&lt;br /&gt;        &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;const&lt;/span&gt; yaw = (performance.now() / 1000 / 20 * 360) % 360;&lt;br /&gt;        &lt;span style=&quot;color: #008800; font-style: italic;&quot;&gt;// 2 up-down cycle per rotation.&lt;/span&gt;&lt;br /&gt;        &lt;span style=&quot;color: #aa22ff; font-weight: bold;&quot;&gt;const&lt;/span&gt; pitch = 20 * &lt;span style=&quot;color: #aa22ff;&quot;&gt;Math&lt;/span&gt;.sin(2 * yaw / 360 * 2 * &lt;span style=&quot;color: #aa22ff;&quot;&gt;Math&lt;/span&gt;.PI);&lt;br /&gt;        player.setSphericalProperties({&lt;br /&gt;            yaw&lt;span style=&quot;color: #666666;&quot;&gt;:&lt;/span&gt; yaw,&lt;br /&gt;            pitch&lt;span style=&quot;color: #666666;&quot;&gt;:&lt;/span&gt; pitch&lt;br /&gt;        });&lt;br /&gt;        
requestAnimationFrame(panVideo);&lt;br /&gt;    }&lt;br /&gt;&lt;span style=&quot;color: green; font-weight: bold;&quot;&gt;&amp;lt;/script&amp;gt;&lt;/span&gt;&lt;br /&gt;&lt;/pre&gt;&lt;/div&gt;&lt;br /&gt;&lt;i&gt;Yingyu Yao, Software Engineer, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=_tjt8WT5mRs&quot; target=&quot;_blank&quot;&gt;The Earth&#39;s Internet: How Fungi Help Plants Communicate&lt;/a&gt;&quot;. &lt;/i&gt; &lt;script id=&quot;alien-js-script&quot; type=&quot;text/javascript&quot;&gt;// Initialize the iframe embed&#39;s API.  const tag = document.createElement(&#39;script&#39;); tag.src = &quot;https://www.youtube.com/iframe_api&quot;; const firstScriptTag = document.getElementsByTagName(&#39;script&#39;)[0]; firstScriptTag.parentNode.insertBefore(tag, firstScriptTag); let ytplayer; window.onYouTubeIframeAPIReady = function() {     ytplayer = new YT.Player(&#39;alien-iframe-embed&#39;, {         events: {             &#39;onStateChange&#39;: onPlayerStateChange,         },     }); }  // Disable the button by default.  // When video goes into PLAYING state, enable the button.  const alienButton = document.getElementById(&quot;alienButton&quot;); alienButton.disabled = true; window.onPlayerStateChange = function(event) {     if (event.data == 1) {         alienButton.disabled = false;     } }  // This function constructs a polynomial using a list of input weights. function poly(weights) {     return function(t) {         let val = 0;         let x = 1;         for (let w of weights) {             val += x * w;             x *= t;         }         return val;     } }  // These are hand fitted polynomials that track the movement of the alien // throughout the video. // For yaw, we had to change between several different polynomials to keep // things performant.  // For pitch, a single polynomial over the entire duration of the video // was sufficient.  
const yawTrack = function() {     const yaw11 = poly([-3.04631909e+03, 1.19219406e+03, -1.88145883e+02,         1.55743418e+01, -7.41740578e-01, 2.10484076e-02,         -3.51373142e-04, 3.18408210e-06, -1.20830410e-08     ]);     const yaw58 = poly([2.14359903e+08, -1.95479428e+07, 7.41961802e+05,         -1.50035469e+04, 1.70475072e+02, -1.03194757e+00,         2.60000087e-03     ]);     const yaw74 = poly([-1.46264739e+06, 9.00087573e+04, -2.28526587e+03,         3.06432819e+01, -2.28920640e-01, 9.03520918e-04,         -1.47226423e-06     ]);     const yaw129 = poly([1.39044612e+05, -3.12771539e+03, 2.33594151e+01,         -5.81151009e-02     ]);     const yaw146 = poly([5.35468540e+04, -9.57843251e+02, 4.42486931e-01,         9.96639354e-02, -8.72828330e-04, 3.01098624e-06,         -3.82035076e-09     ]);     const yaw209 = poly([-1.00415969e+05, -2.56340987e+03, 7.66464454e+01,         -6.85017122e-01, 2.88569970e-03, -5.93185145e-06,         4.80999119e-09     ]);     return function(t) {         let val;         if (t &lt; 11) {             val = 0;         } else if (t &lt; 58) {             val = yaw11(t);         } else if (t &lt; 74) {             val = yaw58(t);         } else if (t &lt; 129) {             val = yaw74(t);         } else if (t &lt; 146) {             val = yaw129(t);         } else if (t &lt; 209) {             val = yaw146(t);         } else {             val = yaw209(t);         }         return (val % 360 + 360) % 360;     }; }(); const pitchTrack = poly([-1.45284489e+00, 1.60253082e+00, -2.48931512e-01,     1.19719311e-02, -2.92248673e-04, 4.13528625e-06,     -3.56801619e-08, 1.89765163e-10, -6.05548401e-13,     1.06076048e-15, -7.82844242e-19 ])  // When the button is clicked, we starts an animation motion.  // It essentially &quot;drags&quot; the view toward the target using // a predefined spring constant.  
alienButton.onclick = function() {     const springConstant = -0.8;     let previousYaw = null;     let previousPitch = null;     let initialTime = null;     let animate = false;      function onAnimationFrame() {         const currentSetting = ytplayer.getSphericalProperties();         const currentYaw = currentSetting.yaw;         const currentPitch = currentSetting.pitch;         const currentTime = performance.now();         const videoTime = ytplayer.getCurrentTime();          if (!initialTime) {             if (currentSetting.yaw != null) {                 initialTime = currentTime;                 animate = true;                 alienButton.disabled = true;             }         }          if (animate) {             const targetYaw = yawTrack(videoTime);             const targetPitch = pitchTrack(videoTime);             let yawDiff = targetYaw - currentYaw;             if (yawDiff &gt; 180) {                 yawDiff -= 360;             } else if (yawDiff &lt; -180) {                 yawDiff += 360;             }             const pitchDiff = targetPitch - currentPitch;              if (Math.max(Math.abs(yawDiff), Math.abs(pitchDiff)) &lt; 1) {                 animate = false;             }              deltaTime = (currentTime - initialTime) / 1000;              let newYaw = targetYaw - yawDiff * Math.exp(springConstant * deltaTime);             newYaw = (newYaw % 360 + 360) % 360;             let newPitch = targetPitch - pitchDiff * Math.exp(springConstant * deltaTime);              ytplayer.setSphericalProperties({                 yaw: newYaw,                 pitch: newPitch,                 // Keeps the demo working on phones.                 // In practical use, calling this once is sufficient.                 // We are just piggybacking it here to keep things simple.                  
enableOrientationSensor: false             });         }          if (animate) {             requestAnimationFrame(onAnimationFrame);         } else {             alienButton.disabled = false;             initialTime = null;         }     }      return onAnimationFrame; }(); &lt;/script&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5087154068109238670'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5087154068109238670'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2018/08/control-your-360-videos-with-youtube.html' title='Control your 360 videos with the YouTube IFrame Player API'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://img.youtube.com/vi/G-XZhKqQAHU/default.jpg" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-2359817941447804071</id><published>2018-04-24T16:15:00.000-07:00</published><updated>2020-08-31T12:14:10.509-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Making high quality video efficient</title><content type='html'>YouTube works hard to provide the best looking video at the lowest bandwidth.  One way we&#39;re doing that is by optimizing videos with bandwidth in mind.  
We recently made videos stream better -- giving you higher-quality video by improving our videos so they are more likely to fit into your available bandwidth.&lt;br /&gt;&lt;br /&gt;When you watch a video the YouTube player measures the bandwidth on the client and adaptively chooses chunks of video that can be downloaded fast enough, up to the limits of the device’s viewport, decoding, and processing capability. YouTube makes multiple versions of each video at different resolutions, with bigger resolutions having higher encoding bitrates.&lt;br /&gt;&lt;br /&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;img alt=&quot;01.png&quot; height=&quot;244&quot; src=&quot;https://lh5.googleusercontent.com/xJIWEedbKcHYXytN5SOClQKt5jdWx3GhoL6og9hVzUqu1lZoCBZPPDNg7vHY3HLvYC3G4MrW5Tiq3awDLmbPQ07ziDTZGVOZiiI7dU2DxttX4-glKliJAWMZfxE7s6OBv9f_d7nA&quot; style=&quot;border: none; margin-left: auto; margin-right: auto; transform: rotate(0rad);&quot; width=&quot;624&quot; /&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Figure 1: HTTP-based Adaptive Video Streaming.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: white; font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;YouTube chooses how many bits are used to encode a particular resolution (within the limits that the codecs provide). A higher bitrate generally leads to better video quality for a given resolution but only up to a point.  
After that, a higher bitrate just makes the chunk bigger even though it doesn’t look better. When we choose the encoding bitrate for a resolution, we select the sweet spot on the corresponding bitrate-quality curve (see Figure 2) at the point where adding more data rate stops making the picture look meaningfully better.&lt;br /&gt;&lt;br /&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;img alt=&quot;02.png&quot; height=&quot;397&quot; src=&quot;https://lh3.googleusercontent.com/wiMxh7FATEyRyD7RJoAyoYPNBcx2n3vlSfe4DOuZ25eaYAnCmt5xw2GaCSNmVUo6oF5-e2NcoNkuEjIiyUEeGFVRolbO61JXCl7dV31UGiwFN6exzflUChVcehzy7c3G3i1-yHfo&quot; style=&quot;border: none; margin-left: auto; margin-right: auto; transform: rotate(0rad);&quot; width=&quot;624&quot; /&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Figure 2: Rate-quality curves of a video chunk for a given video codec at different encoding resolutions.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: white; font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;We found these sweet spots, but observing how people watch videos made us realize we could deliver great looking video even more efficiently.&lt;br /&gt;&lt;br /&gt;These sweet spots assume that viewers are not bandwidth limited but if we set our encoding bitrates based only on those sweet spots for best looking video, we see that in practice video quality is often constrained by viewers’ bandwidth limitations. 
However, if we consider an operating point (other than the sweet spot) given our users’ bandwidth distribution (what we call &lt;i&gt;&lt;strong&gt;streaming bandwidth&lt;/strong&gt;&lt;/i&gt;), we end up providing better looking video (what we call &lt;i&gt;&lt;strong&gt;delivered video quality&lt;/strong&gt;&lt;/i&gt;).&lt;br /&gt;&lt;br /&gt;A way to think about this is to imagine the bandwidth available to a user, as a pipe shown in Figure 3.  Given the pipe’s capacity fits a 360p chunk but not a 480p chunk, we could tweak the 480p chunk size to be more likely to fit within that pipe by estimating the streaming bandwidth, thereby increasing the resolution users see. We solved the resulting constrained optimization problem to make sure there was no perceivable impact to video quality. In short, by analyzing aggregated playback statistics, and correspondingly altering the bitrates for various resolutions, we worked out how to stream higher quality video to more users.&lt;sup&gt;&lt;span style=&quot;font-size: xx-small;&quot;&gt;1&lt;/span&gt;&lt;/sup&gt;&lt;br /&gt;&lt;br /&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;img height=&quot;274&quot; src=&quot;https://lh4.googleusercontent.com/EKkfPN8WYQtKxhkWCv6MqAKfNkNRlDgCsZEJ7uWkSLwgt0STQYPFX7MUpfjxkQztdHhfwnzhXzSpbRBaXO5P2OIRH8ZMlvZcHgck03etEYAxeY5Xwg5da2_B9D08ZqgsMGC3baLC&quot; style=&quot;border: none; margin-left: auto; margin-right: auto; transform: rotate(0rad);&quot; width=&quot;568&quot; /&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Figure 3: Efficient streaming scenario before and after our proposal&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: 
both; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: white; font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;To understand how streaming bandwidth is different from an individual viewer’s bandwidth, consider the example in Figure 4 below. Given the measured distribution of viewers’ available bandwidth, the playback distribution can be estimated using the areas between the encoding bitrates of neighboring resolutions.&lt;br /&gt;&lt;br /&gt;Using playback statistics, we are able to model the behavior of the player as it switches  between resolutions. This allows us in effect to predict when an increased bitrate would be more likely to cause a player to switch to a lower resolution and thereby cancel the effect of bitrate increase in any one resolution. With this model, we are able to find better operating points for each video in the real world.&lt;sup&gt;&lt;span style=&quot;font-size: xx-small;&quot;&gt;1&lt;/span&gt;&lt;/sup&gt;&lt;br /&gt;&lt;br /&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;img alt=&quot;04-new.png&quot; height=&quot;791&quot; src=&quot;https://lh5.googleusercontent.com/XxnM3_0Yuo4IbXXZ6sfluumg43KLtT3ZyRjQzGfALi1oZsCy4z_GPdjNhCzeM60LRp-gFwOqmcawKRK-NbPnYUvWcqW6mY7dWTpq5FrF0UBkSNT48a3k8P_lIzCLAMAsqFJWWxJa&quot; style=&quot;border: none; margin-left: auto; margin-right: auto; transform: rotate(0rad);&quot; width=&quot;624&quot; /&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Figure 4: For a given resolution 720p for example, the playback distribution across resolutions can be estimated 
from the probability density function of bandwidth. Partitioning the bandwidth using encoding bitrates of the different representations, the probability of watching a representation can then be estimated with the corresponding area under the bandwidth curve.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: white; font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;Another complication here is that the operating points provide an estimate of delivered quality, which is different from encoded quality. If the available bandwidth of a viewer decreases, then the viewer is more likely to switch down to a lower resolution, and therefore land on a different operating point. This doesn’t influence the encoded quality per resolution, but changes the delivered quality.&lt;br /&gt;&lt;br /&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;img alt=&quot;05.png&quot; height=&quot;279&quot; src=&quot;https://lh5.googleusercontent.com/xzZHWlVJA7QRWV5pji7iiA1sTH3pUktWwnFsVN4X3TxW-DYoiT1ia9thhf98fzjnEvMX2z4iYZONlvnKXEIZoFv6Zs85rrPcHInuCvNBvs5YDrwyRqyEZCvKF5ci9TZF3Z_eZx-_&quot; style=&quot;border: none; margin-left: auto; margin-right: auto; transform: rotate(0rad);&quot; width=&quot;624&quot; /&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Fig.5 Our system for encoder optimization&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span 
style=&quot;background-color: white; font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;In Figure 5, the Rate-quality analyzer takes the video to be encoded and generates rate-quality curves for each resolution.  The Performance Estimator takes these curves and the distributions of viewer resolutions and streaming bandwidth to estimate possible operation points, so the Non-linear optimizer can choose the best possible set.&lt;br /&gt;&lt;br /&gt;The output is a set of optimized operation points, one for each resolution. The optimization algorithm can be configured to minimize average streaming bandwidth subject to a constraint of delivered video quality or to maximize delivered video quality subject to a streaming bandwidth budget.&lt;br /&gt;&lt;br /&gt;When we used this system to process HD videos, we delivered a reduction of 14 percent in the streaming bandwidth in YouTube playbacks. This reduction in bandwidth is expected to help the viewers to lower their data consumption when watching YouTube videos, which is especially helpful for those on limited data plans. We also saw watch time for the HD resolution increase by more than 6 percent as more people were able to stream higher-resolution videos on both fixed and mobile networks.&lt;br /&gt;&lt;br /&gt;Another big benefit of this method is improved viewer experience. In addition to very low impact on delivered quality, these videos loaded up to 5 percent faster with 12 percent fewer rebuffering events.&lt;br /&gt;&lt;br /&gt;We have made progress towards better video streaming efficiency. But we still want to do more.&lt;br /&gt;&lt;br /&gt;Our optimization approach is currently based on global distribution of viewers’ bandwidth and player resolutions. But videos sometimes are viewed regionally. 
For example, a popular Indian music video may be less likely to be as popular in Brazil or a Spanish sporting event may not be played many times in Vietnam. Bandwidth and player resolution distributions vary from country to country. If we can accurately predict the geographic regions in which a video will become popular, then we could integrate the local bandwidth statistics to do a better job with those videos. We&#39;re looking into this now to try to make your video playback experience even better!&lt;br /&gt;&lt;br /&gt;&lt;i&gt;-- Balu Adsumilli, Steve Benting, Chao Chen, Anil Kokaram, and Yao-Chung Lin&lt;/i&gt;&lt;br /&gt;&lt;br /&gt;&lt;sup&gt;&lt;span style=&quot;font-size: xx-small;&quot;&gt;1&lt;/span&gt;&lt;/sup&gt;Chao Chen, Yao-Chung Lin, Anil Kokaram and Steve Benting, &quot;&lt;a href=&quot;https://arxiv.org/abs/1709.08763&quot;&gt;Encoding Bitrate Optimization Using Playback Statistics for HTTP-based Adaptive Video Streaming&lt;/a&gt;,&quot; Arxiv, 2017</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2359817941447804071'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2359817941447804071'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2018/04/making-high-quality-video-efficient.html' title='Making high quality video efficient'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh5.googleusercontent.com/xJIWEedbKcHYXytN5SOClQKt5jdWx3GhoL6og9hVzUqu1lZoCBZPPDNg7vHY3HLvYC3G4MrW5Tiq3awDLmbPQ07ziDTZGVOZiiI7dU2DxttX4-glKliJAWMZfxE7s6OBv9f_d7nA=s72-c" height="72" 
width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-5223572348553392660</id><published>2017-11-06T09:00:00.000-08:00</published><updated>2020-08-31T12:14:11.795-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Resonance Audio: Multi-platform spatial audio at scale</title><content type='html'>&lt;i&gt;Cross-posted from the&amp;nbsp;&lt;a href=&quot;https://www.blog.google/products/google-vr/resonance-audio-multi-platform-spatial-audio-scale/&quot;&gt;VR Blog&lt;/a&gt;&lt;/i&gt;&lt;br /&gt;&lt;em&gt;&lt;br /&gt;&lt;/em&gt;&lt;em&gt;Posted by Eric Mauskopf, Product Manager&lt;/em&gt;&lt;br /&gt;&lt;div class=&quot;blogimg1&quot;&gt;&lt;a href=&quot;https://3.bp.blogspot.com/-HtJoT8UZ2cc/WgB7oQp2KmI/AAAAAAAAEBI/iRPEbuIAkL4r3MMAzhFtCU8jf4z1C-CUACLcBGAs/s1600/hero%2Bbanner%2Bwith%2Blogo%2Bfor%2Bblog.png&quot; imageanchor=&quot;1&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;681&quot; data-original-width=&quot;1600&quot; src=&quot;https://3.bp.blogspot.com/-HtJoT8UZ2cc/WgB7oQp2KmI/AAAAAAAAEBI/iRPEbuIAkL4r3MMAzhFtCU8jf4z1C-CUACLcBGAs/s1600/hero%2Bbanner%2Bwith%2Blogo%2Bfor%2Bblog.png&quot; /&gt;&lt;/a&gt;&lt;/div&gt;As humans, we rely on sound to guide us through our environment, help us communicate with others and connect us with what&#39;s happening around us. Whether walking along a busy city street or attending a packed music concert, we&#39;re able to hear hundreds of sounds coming from different directions. So when it comes to AR, VR, games and even 360 video, you need rich sound to create an engaging immersive experience that makes you feel like you&#39;re really there. Today, we&#39;re releasing a new spatial audio software development kit (SDK) called &lt;a href=&quot;https://developers.google.com/resonance-audio&quot;&gt;Resonance Audio&lt;/a&gt;. 
It&#39;s based on technology from Google&#39;s VR Audio SDK, and it works at scale across mobile and desktop platforms. &lt;br /&gt;&lt;center&gt;&lt;iframe allowfullscreen=&quot;&quot; frameborder=&quot;0&quot; height=&quot;480&quot; src=&quot;https://www.youtube.com/embed/IYdx9cnHN8I&quot; width=&quot;720&quot;&gt;&lt;/iframe&gt;&lt;/center&gt;&lt;br /&gt;&lt;center&gt;&lt;em&gt;Experience spatial audio in our Audio Factory VR app for &lt;a href=&quot;https://play.google.com/store/apps/details?id=com.google.vr.audiofactory&quot;&gt;Daydream&lt;/a&gt; and &lt;a href=&quot;http://store.steampowered.com/app/722590/Audio_Factory/&quot;&gt;SteamVR&lt;/a&gt; &lt;/em&gt;&lt;/center&gt;&lt;br /&gt;&lt;h3&gt;Performance that scales on mobile and desktop&lt;/h3&gt;Bringing rich, dynamic audio environments into your VR, AR, gaming, or video experiences without affecting performance can be challenging. There are often few CPU resources allocated for audio, especially on mobile, which can limit the number of simultaneous high-fidelity 3D sound sources for complex environments. The SDK uses highly optimized digital signal processing algorithms based on higher order Ambisonics to spatialize hundreds of simultaneous 3D sound sources, without compromising audio quality, even on mobile. We&#39;re also introducing a new feature in Unity for precomputing highly realistic reverb effects that accurately match the acoustic properties of the environment, reducing CPU usage significantly during playback. 
&lt;br /&gt;&lt;center&gt;&lt;/center&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://2.bp.blogspot.com/-m0_ROXjZHfk/WgCd4WLh7WI/AAAAAAAAAUI/_toemMUjd-EPdp8XffkEvCrYuAR1C1qxQCLcBGAs/s1600/geometric_reverb_baking_unity%2Bfor%2Bblog.gif&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;400&quot; data-original-width=&quot;640&quot; src=&quot;https://2.bp.blogspot.com/-m0_ROXjZHfk/WgCd4WLh7WI/AAAAAAAAAUI/_toemMUjd-EPdp8XffkEvCrYuAR1C1qxQCLcBGAs/s1600/geometric_reverb_baking_unity%2Bfor%2Bblog.gif&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;center&gt;&lt;em&gt;&lt;br /&gt;&lt;/em&gt;&lt;/center&gt;&lt;center&gt;&lt;em&gt;Using geometry-based reverb by assigning acoustic materials to a cathedral in Unity&lt;/em&gt;&lt;/center&gt;&lt;br /&gt;&lt;h3&gt;Multi-platform support for developers and sound designers&lt;/h3&gt;&lt;br /&gt;We know how important it is that audio solutions integrate seamlessly with your preferred audio middleware and sound design tools. With Resonance Audio, we&#39;ve released cross-platform SDKs for the most popular game engines, audio engines, and digital audio workstations (DAW) to streamline workflows, so you can focus on creating more immersive audio. The SDKs run on Android, iOS, Windows, MacOS and Linux platforms and provide integrations for Unity, Unreal Engine, FMOD, Wwise and DAWs. We also provide native APIs for C/C++, Java, Objective-C and the web. This multi-platform support enables developers to implement sound designs once, and easily deploy their project with consistent sounding results across the top mobile and desktop platforms. Sound designers can save time by using our new DAW plugin for accurately monitoring spatial audio that&#39;s destined for YouTube videos or apps developed with Resonance Audio SDKs. 
Web developers get the open source Resonance Audio Web SDK that works in the top web browsers by using the &lt;a href=&quot;https://webaudio.github.io/web-audio-api/&quot;&gt;Web Audio API&lt;/a&gt;. &lt;br /&gt;&lt;div class=&quot;blogimg2&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-VHacpXyUAxQ/WgB-sagtX5I/AAAAAAAAECE/4AwIbkaJzVckzPu9pxv3JE2fHmdcMQgDgCLcBGAs/s1600/DAW%2Bplugin%2Bimage%2Bfor%2Bblog.png&quot; imageanchor=&quot;1&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;1031&quot; data-original-width=&quot;1600&quot; src=&quot;https://1.bp.blogspot.com/-VHacpXyUAxQ/WgB-sagtX5I/AAAAAAAAECE/4AwIbkaJzVckzPu9pxv3JE2fHmdcMQgDgCLcBGAs/s1600/DAW%2Bplugin%2Bimage%2Bfor%2Bblog.png&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;em&gt;DAW plugin for sound designers to monitor audio destined for YouTube 360 videos or apps developed with the SDK&lt;/em&gt;&lt;br /&gt;&lt;h3&gt;Cutting edge features to model complex Sound Environments&lt;/h3&gt;By providing powerful tools for accurately modeling complex sound environments, Resonance Audio goes beyond basic 3D spatialization. The SDK enables developers to control the direction acoustic waves propagate from sound sources. For example, when standing behind a guitar player, it can sound quieter than when standing in front. And when facing the direction of the guitar, it can sound louder than when your back is turned. 
&lt;br /&gt;&lt;div class=&quot;blogimg3&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-n4faMrcZLb4/WgCBfCnayUI/AAAAAAAAECo/WR_hVQjWJ-Ep6-zBxxwaLUkawHt7eOKXgCLcBGAs/s1600/acoustic%2Bguitar%2Bdirectivity%2Bfor%2Bblog.gif&quot; imageanchor=&quot;1&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;360&quot; data-original-width=&quot;640&quot; src=&quot;https://1.bp.blogspot.com/-n4faMrcZLb4/WgCBfCnayUI/AAAAAAAAECo/WR_hVQjWJ-Ep6-zBxxwaLUkawHt7eOKXgCLcBGAs/s1600/acoustic%2Bguitar%2Bdirectivity%2Bfor%2Bblog.gif&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;br /&gt;&lt;center&gt;&lt;em&gt;Controlling sound wave directivity for an acoustic guitar using the SDK&lt;/em&gt;&lt;/center&gt;&lt;br /&gt;Another SDK feature is automatically rendering near-field effects when sound sources get close to a listener&#39;s head, providing an accurate perception of distance, even when sources are close to the ear. The SDK also enables sound source spread, by specifying the width of the source, allowing sound to be simulated from a tiny point in space up to a wall of sound. We&#39;ve also released an Ambisonic recording tool to spatially capture your sound design directly within Unity, save it to a file, and use it anywhere Ambisonic soundfield playback is supported, from game engines to YouTube videos. &lt;br /&gt;If you&#39;re interested in creating rich, immersive soundscapes using cutting-edge spatial audio technology, check out the Resonance Audio documentation on our &lt;a href=&quot;https://developers.google.com/resonance-audio&quot;&gt;developer site&lt;/a&gt;, let us know what you think through &lt;a href=&quot;https://github.com/resonance-audio&quot;&gt;GitHub&lt;/a&gt;, and show us what you build with #ResonanceAudio on social media; we&#39;ll be resharing our favorites. 
&lt;br /&gt;&lt;style&gt; .blogimg1 img { width: 100%; border: 0; margin: 0; padding: 20px 0 10px 0; }  .blogimg2 img { width: 100%; border: 0; margin: 0; padding: 20px 0 10px 0; }  .blogimg3 img { width: 100%; border: 0; margin: 0; padding: 20px 0 10px 0; }   &lt;/style&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5223572348553392660'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5223572348553392660'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/11/resonance-audio-multi-platform-spatial_6.html' title='Resonance Audio: Multi-platform spatial audio at scale'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://3.bp.blogspot.com/-HtJoT8UZ2cc/WgB7oQp2KmI/AAAAAAAAEBI/iRPEbuIAkL4r3MMAzhFtCU8jf4z1C-CUACLcBGAs/s72-c/hero%2Bbanner%2Bwith%2Blogo%2Bfor%2Bblog.png" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-2737908665191737412</id><published>2017-09-07T10:00:00.000-07:00</published><updated>2020-08-31T12:14:10.614-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Variable speed playback on mobile</title><content type='html'>Variable speed playback was launched on the web several years ago and is one of our most highly requested features on mobile. 
&lt;a href=&quot;https://youtube.googleblog.com/2017/08/a-new-youtube-look-that-works-for-you.html&quot;&gt;Now, it’s here!&lt;/a&gt; You can speed up or slow down videos in the YouTube app on iOS and on Android devices running Android 5.0+. Playback speed can be adjusted from 0.25x (quarter speed) to 2x (double speed) in the overflow menu of the player controls.&lt;br /&gt;&lt;br /&gt;The most commonly used speed setting on the web is 1.25x, closely followed by 1.5x. &lt;a href=&quot;http://blogs.seattletimes.com/monica-guzman/2014/05/24/how-to-binge-on-an-entire-tv-series-in-half-the-time/&quot;&gt;Speed watching&lt;/a&gt; is the new &lt;a href=&quot;https://www.theatlantic.com/technology/archive/2015/06/the-rise-of-speed-listening/396740/&quot;&gt;speed listening&lt;/a&gt; which was the new &lt;a href=&quot;https://www.youtube.com/watch?v=1nMP4U8JQDo&quot;&gt;speed reading&lt;/a&gt;, especially when consuming long lectures or interviews. But variable speed isn’t just useful for skimming through content to save time, it can also be an important tool for investigating finer details. For example, you might want to slow down a tutorial to learn some new &lt;a href=&quot;https://www.youtube.com/watch?v=nEXXYdKwhWM&amp;amp;t=3m58s&quot;&gt;choreography&lt;/a&gt; or figure out &lt;a href=&quot;https://www.youtube.com/watch?v=a6IUFKHZW1Q&amp;amp;t=0m53s&quot;&gt;a guitar strumming pattern&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;To speed up or slow down audio while retaining its comprehensibility, our main challenge was to efficiently change the duration of the audio signal without affecting the pitch or introducing distortion. This process is called &lt;a href=&quot;https://en.wikipedia.org/wiki/Audio_time_stretching_and_pitch_scaling&quot;&gt;time stretching&lt;/a&gt;. 
Without time stretching, an audio signal that was originally at 100 Hz becomes 200 Hz at double speed causing that &lt;a href=&quot;https://en.wikipedia.org/wiki/Alvin_and_the_Chipmunks#Recording_technique&quot;&gt;chipmunk effect&lt;/a&gt;. Similarly, slowing down the speed will lower the pitch. Time stretching can be achieved using a &lt;a href=&quot;https://en.wikipedia.org/wiki/Phase_vocoder&quot;&gt;phase vocoder&lt;/a&gt;, which transforms the signal into its frequency domain representation to make phase adjustments before producing a lengthened or shortened version. Time stretching can also be done in the time domain by carefully selecting windows from the original signal to be assembled into the new one. On Android, we used the &lt;a href=&quot;https://github.com/waywardgeek/sonic&quot;&gt;Sonic&lt;/a&gt; library for our audio manipulation in ExoPlayer. Sonic uses PICOLA, a time domain based algorithm. On iOS, AVplayer has a &lt;a href=&quot;https://developer.apple.com/documentation/avfoundation/avplayer/1388846-rate?preferredLanguage=occ&quot;&gt;built in playback rate&lt;/a&gt; feature with &lt;a href=&quot;https://developer.apple.com/documentation/avfoundation/playback_recording_mixing_and_processing/audio_settings/time_pitch_algorithm_settings&quot;&gt;configurable time stretching&lt;/a&gt;. Here, we have chosen to use the spectral (frequency domain) algorithm.&lt;br /&gt;&lt;br /&gt;To speed up or slow down video, we render the video frames in alignment with the modified audio timestamps. Video frames are not necessarily encoded chronologically, so for the video to stay in sync with the audio playback, the video decoder needs to work faster than the rate at which the video frames need to be rendered. This is especially pertinent at higher playback speeds. On mobile, there are also often more network and hardware constraints than on desktop that limit our ability to decode video as fast as necessary. 
For example, less reliable wireless links will affect how quickly and accurately we can download video data, and then battery, CPU speed, and memory size will limit the processing power we can spend on decoding it. To address these issues, we adapt the video quality to be only as high as we can download dependably. The video decoder can also skip forward to the next &lt;a href=&quot;https://en.wikipedia.org/wiki/Video_compression_picture_types&quot;&gt;key frame&lt;/a&gt; if it has fallen behind the renderer, or the renderer can drop already decoded frames to catch up to the audio track.&lt;br /&gt;&lt;br /&gt;If you want to check out the feature, try this: turn up your volume and play the classic dramatic chipmunk at 0.5x to see an EVEN MORE dramatic chipmunk. Enjoy!&lt;br /&gt;&lt;br /&gt;&lt;iframe allowfullscreen=&quot;&quot; frameborder=&quot;0&quot; height=&quot;315&quot; src=&quot;https://www.youtube.com/embed/y8Kyi0WNg40&quot; width=&quot;560&quot;&gt;&lt;/iframe&gt; &lt;i&gt;&lt;br /&gt;&lt;/i&gt;&lt;i&gt;Posted by Pallavi Powale, Software Engineer, recently watched “&lt;a href=&quot;https://www.youtube.com/watch?v=y8Kyi0WNg40&quot;&gt;Dramatic Chipmunk&lt;/a&gt;” at 0.5x speed.&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2737908665191737412'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2737908665191737412'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/09/variable-speed-playback-on-mobile.html' title='Variable speed playback on mobile'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" 
url="https://img.youtube.com/vi/y8Kyi0WNg40/default.jpg" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-5599203697942531157</id><published>2017-08-21T10:00:00.000-07:00</published><updated>2020-08-31T12:14:11.968-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="blur faces"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Blur select faces with the updated Blur Faces tool</title><content type='html'>In 2012 we launched &lt;a href=&quot;https://youtube.googleblog.com/2012/07/face-blurring-when-footage-requires.html&quot;&gt;face blurring&lt;/a&gt; as a visual anonymity feature, allowing creators to obscure all faces in their video. Last February we followed up with &lt;a href=&quot;https://youtube-creators.googleblog.com/2016/02/blur-moving-objects-in-your-video-with.html&quot;&gt;custom blurring&lt;/a&gt; to let creators blur any objects in their video, even as they move. Since then we’ve been hard at work improving our  face blurring tool.&lt;br /&gt;&lt;br /&gt;Today we’re launching a new and improved version of Blur Faces, allowing creators to easily and accurately blur specific faces in their videos. 
The tool now displays images of the faces in the video, and creators simply click an image to blur that individual throughout their video.&lt;br /&gt;&lt;br /&gt;&lt;span id=&quot;docs-internal-guid-5d00b9f3-e72b-4c40-6cbe-f0753f814bda&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;english_us_short (3).gif&quot; height=&quot;280&quot; src=&quot;https://lh6.googleusercontent.com/Ow5xlOo_UH6ZNnPpGdGYpn1-yDYo7Jz2t3F6kSbr-d5hnJXWngbINrzP1NPpY8wUl2EGSk_fKhEFBUjBPt_wmndCRQy3n5H5zdbdTUp2p3fm9ppy0efCNQwAsEskHPjJ9PmJnego&quot; style=&quot;-webkit-transform: rotate(0.00rad); border: none; transform: rotate(0.00rad);&quot; width=&quot;624&quot; /&gt;&lt;/span&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;To introduce this feature, we had to improve the accuracy of our face detection tools, allowing for recognition of the same person across an entire video. The tool is designed for a wide array of situations that we see in YouTube videos, including users wearing glasses, occlusion (the face being blocked, for example, by a hand), and people leaving the video and coming back later.&lt;br /&gt;&lt;br /&gt;Instead of having to use video editing software to manually create feathered masks and motion tracks, our &lt;a href=&quot;https://support.google.com/youtube/answer/1388383#blur_faces&quot;&gt;Blur Faces tool&lt;/a&gt; automatically handles motion and presents creators with a thumbnail that encapsulates all instances of that individual recognized by our technology. Creators can apply these blurring edits to already uploaded videos without losing views, likes, and comments by choosing to “Save” the edits in-place. Applying the effect using “Save As New” and deleting the original video will remove the original unblurred video from YouTube for an extra level of privacy. 
The blur applied to the published video cannot be practically reversed, but keep in mind that blurring does not guarantee absolute anonymity.&lt;br /&gt;&lt;br /&gt;To get to Blur Faces, go to the Enhance tool for a video you own. This can be done from the Video Manager or watch page. The Blur Faces tool can be found under the “Blurring Effects” tab of Enhancements. The following image shows how to get there.&lt;br /&gt;&lt;br /&gt;&lt;span id=&quot;docs-internal-guid-5d00b9f3-e72b-7968-0404-0cb110ec7ff2&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;english_us_gettofeature.gif&quot; height=&quot;410&quot; src=&quot;https://lh6.googleusercontent.com/CmB_x2vWbbb9Fyr3FGQJHkOR6jLQcpuMSt2T5bgRPHjgHlWwdvSLczlIoMEbGBBtWcmtqpWZf2Q6KbM964GWEQlqG1kWatrQwbnfpCkFM3V7umdaDPSOZeakA8CqEqo420oJeKxA&quot; style=&quot;-webkit-transform: rotate(0.00rad); border: none; transform: rotate(0.00rad);&quot; width=&quot;588&quot; /&gt;&lt;/span&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;When you open the Blur Faces tool on your video for the first time, we start processing your video for faces. During processing, we break your video up into chunks of frames, and start detecting faces on each frame individually. 
We use a high-quality face detection model to increase our accuracy, and at the same time, we look for scene changes and compute motion vectors throughout the video which we will use later.&lt;br /&gt;&lt;br /&gt;&lt;span id=&quot;docs-internal-guid-5d00b9f3-e72b-abeb-004f-bd1d7e5faec8&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;english_us_short2.gif&quot; height=&quot;271&quot; src=&quot;https://lh6.googleusercontent.com/XSLsx706Yg6QfsXAlIHYzJgxbInXSawslUFM4g7Ppv81YjIQhTF2bhQ954XUgDGmx89VQ5kmIh1-9qm5LIqxaCHapz9D-bBIdRnmWAfm0ptSnQ0FvbyKwVZ9jM7AiiuiJouYye0F&quot; style=&quot;-webkit-transform: rotate(0.00rad); border: none; transform: rotate(0.00rad);&quot; width=&quot;594&quot; /&gt;&lt;/span&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;Once we’ve detected the faces in each frame of your video, we start matching face detections within a single scene of the video, relying on both the visual characteristics of the face as well as the face’s motion. To compute motion, we use the same technology that powers our &lt;a href=&quot;https://youtube-creators.googleblog.com/2016/02/blur-moving-objects-in-your-video-with.html&quot;&gt;Custom Blurring feature&lt;/a&gt;. Face detections aren’t perfect, so we use a few techniques to help us hone in on edge cases such as tracking motion through occlusions (see the water bottle in the above GIF) and near the edge of the video frame. Finally, we compute visual similarity across what we found in each scene, pick the best face to show as a thumbnail, and present it to you.&lt;br /&gt;&lt;br /&gt;Before publishing your changes, we encourage you to preview the video. 
As we cannot guarantee 100 percent accuracy in every video, you can use our Custom Blurring tool to further enhance the automated face blurring edits in the same interface.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Ryan Stevens, Software Engineer, recently watched &lt;a href=&quot;https://www.youtube.com/watch?v=G2_Q9FoD-oQ&amp;amp;t=1s&quot;&gt;158,962,555,217,826,360,000 (Enigma Machine)&lt;/a&gt;, and Ian Pudney, Software Engineer, recently watched &lt;a href=&quot;https://www.youtube.com/watch?v=BuG1oNRQnyI&quot;&gt;Wood burning With Lightning. Lichtenberg Figures!&lt;/a&gt;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5599203697942531157'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/5599203697942531157'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/08/blur-select-faces-with-updated-blur.html' title='Blur select faces with the updated Blur Faces tool'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh6.googleusercontent.com/Ow5xlOo_UH6ZNnPpGdGYpn1-yDYo7Jz2t3F6kSbr-d5hnJXWngbINrzP1NPpY8wUl2EGSk_fKhEFBUjBPt_wmndCRQy3n5H5zdbdTUp2p3fm9ppy0efCNQwAsEskHPjJ9PmJnego=s72-c" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-843965047556708120</id><published>2017-03-23T10:00:00.000-07:00</published><updated>2020-08-31T12:14:13.184-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="captions"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Visualizing Sound 
Effects</title><content type='html'>At YouTube, we understand the power of video to tell stories, move people, and leave a lasting impression. One part of storytelling that many people take for granted is sound, yet sound adds color to the world around us. Just imagine not being able to hear music, the joy of a baby laughing, or the roar of a crowd. But this is often a reality for the &lt;a href=&quot;http://www.who.int/mediacentre/factsheets/fs300/en/&quot;&gt;360 million people&lt;/a&gt; around the world who are deaf and hard of hearing. Over the last decade, we have been working to change that.&lt;br /&gt;&lt;br /&gt;The first step came over ten years ago with the launch of &lt;a href=&quot;http://googlevideo.blogspot.com/2006/09/finally-caption-playback.html&quot;&gt;captions&lt;/a&gt;. And in an effort to scale this technology, &lt;a href=&quot;https://googleblog.blogspot.com/2009/11/automatic-captions-in-youtube.html&quot;&gt;automated captions&lt;/a&gt; came a few years later. The success of that effort has been astounding, and a few weeks ago we &lt;a href=&quot;https://youtube.googleblog.com/2017/02/one-billion-captioned-videos.html&quot;&gt;announced&lt;/a&gt; that the number of videos with automatic captions now exceeds 1 billion. Moreover, people watch videos with automatic captions more than 15 million times per day. And we have made meaningful improvements to quality, resulting in a 50 percent leap in accuracy for automatic captions in English, which is getting us closer and closer to human transcription error rates.&lt;br /&gt;&lt;br /&gt;But there is more to sound and the enjoyment of a video than words. In a joint effort between YouTube, Sound Understanding, and Accessibility teams, we embarked on the task of developing the first ever automatic sound effect captioning system for YouTube. 
This means finding a way to identify and label all those other sounds in the video without manual input.&lt;br /&gt;&lt;br /&gt;We started this project by taking on a wide variety of challenges, such as how to best design the sound effect recognition system and what sounds to prioritize. At the heart of the work was utilizing thousands of hours of videos to train a deep neural network model to achieve high quality recognition results. There are more details in a companion post &lt;a href=&quot;https://research.googleblog.com/2017/03/adding-sound-effect-information-to.html&quot;&gt;here&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;As a result, we can now automatically detect the existence of these sound effects in a video and transcribe them to appropriate classes or sound labels. With so many sounds to choose from, we started with [APPLAUSE], [MUSIC] and [LAUGHTER], since these were among the most frequent manually captioned sounds, and they can add meaningful context for viewers who are deaf and hard of hearing.&lt;br /&gt;&lt;br /&gt;So what does this actually look like when you are watching a YouTube video? The sound effect is merged with the automatic speech recognition track and shown as part of standard automatic captions.&lt;br /&gt;&lt;br /&gt;&lt;iframe allowfullscreen=&quot;&quot; frameborder=&quot;0&quot; height=&quot;315&quot; src=&quot;https://www.youtube.com/embed/oOtqbAxRkyM?start=128&amp;amp;end=178&quot; width=&quot;560&quot;&gt;&lt;/iframe&gt; &lt;br /&gt;&lt;div style=&quot;text-align: center;&quot;&gt;&lt;i&gt;Click the CC button to see the sound effect captioning system in action&lt;/i&gt;&lt;/div&gt;&lt;br /&gt;We are still in the early stages of this work, and we are aware that these captions are fairly simplistic. However, the infrastructural backend to this system will allow us to expand and easily apply this framework to other sound classes. 
Future challenges might include adding other common sound classes like ringing, barking and knocking, which present particular problems -- for example, with ringing we need to be able to decipher if this is an alarm clock, a door or a phone as described &lt;a href=&quot;https://research.googleblog.com/2017/03/adding-sound-effect-information-to.html&quot;&gt;here&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;Since the addition of sound effect captions presented a number of unique challenges on both the machine learning end as well as the user experience, we continue to work to better understand the effect of the captioning system on the viewing experience, how viewers use sound effect information, and how useful it is to them. From our initial user studies, two-thirds of participants said these sound effect captions really enhance the overall experience, especially when they added crucial “invisible” sound information that people cannot tell from the visual cues. Overall, users reported that their experience wouldn&#39;t be impacted by the system making occasional mistakes as long as it was able to provide good information more often than not.&lt;br /&gt;&lt;br /&gt;We are excited to support automatic sound effect captioning on YouTube, and we hope this system helps us make information useful and accessible for everyone.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Noah Wang, software engineer, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=BKorP55Aqvg&quot;&gt;The Expert (Short Comedy Sketch)&lt;/a&gt;.&quot;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/843965047556708120'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/843965047556708120'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/03/visualizing-sound-effects.html' title='Visualizing Sound 
Effects'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://img.youtube.com/vi/oOtqbAxRkyM/default.jpg" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-7389909808453602105</id><published>2017-03-14T09:00:00.000-07:00</published><updated>2020-08-31T12:14:12.697-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="360"/><category scheme="http://www.blogger.com/atom/ns#" term="VR"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Improving VR videos</title><content type='html'>At YouTube, we are focused on enabling the kind of immersive and interactive experiences that only VR can provide, making digital video as immersive as it can be. In March 2015, we launched support for &lt;a href=&quot;https://youtube-creators.googleblog.com/2015/03/a-new-way-to-see-and-share-your-world.html&quot;&gt;360-degree videos&lt;/a&gt; shortly followed by &lt;a href=&quot;https://youtube.googleblog.com/2015/11/youtube-presses-play-on-virtual-reality.html&quot;&gt;VR (3D 360) videos&lt;/a&gt;. 
In 2016 we brought &lt;a href=&quot;https://youtube.googleblog.com/2016/04/one-step-closer-to-reality-introducing.html&quot;&gt;360 live streaming and spatial audio&lt;/a&gt; and &lt;a href=&quot;https://youtube.googleblog.com/2016/11/youtube-vr-whole-new-way-to-watch-and.html&quot;&gt;a dedicated YouTube VR app&lt;/a&gt; to our users.&lt;br /&gt;&lt;br /&gt;Now, &lt;a href=&quot;https://blog.google/products/google-vr/bringing-pixels-front-and-center-vr-video/&quot;&gt;in a joint effort between YouTube and Daydream&lt;/a&gt;, we&#39;re adding new ways to make 360 and VR videos look even more realistic.&lt;br /&gt;&lt;br /&gt;360 videos need a large number of pixels per video frame to achieve a compelling immersive experience. In the ideal scenario, we would match human &lt;a href=&quot;https://en.wikipedia.org/wiki/Visual_acuity&quot;&gt;visual acuity&lt;/a&gt; which is 60 pixels per degree of immersive content. We are however limited by user internet connection speed and device capabilities. One way to bridge the gap between these limitations and the human visual acuity is to use better projection methods.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;&lt;span style=&quot;font-size: large;&quot;&gt;Better Projections&lt;/span&gt;&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;A Projection is the mapping used to fit a 360-degree world view onto a rectangular video surface. The world map is a good example of a spherical earth projected on a rectangular piece of paper. A commonly used projection is called &lt;a href=&quot;https://en.wikipedia.org/wiki/Equirectangular_projection&quot;&gt;equirectangular projection&lt;/a&gt;. 
Initially, we chose this projection when we launched 360 videos because it is easy to produce by camera software and easy to edit.&lt;br /&gt;&lt;br /&gt;However, equirectangular projection has some drawbacks:&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;It has high quality at the poles (top and bottom of image) where people don’t look as much – typically, sky overhead and ground below are not that interesting to look at.&lt;/li&gt;&lt;li&gt;It has lower quality at the equator or horizon where there is typically more interesting content.&lt;/li&gt;&lt;li&gt;It has fewer vertical pixels for 3D content.&lt;/li&gt;&lt;li&gt;A straight line motion in the real world does not result in a straight line motion in equirectangular projection, making videos hard to compress.&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;261&quot; src=&quot;https://lh3.googleusercontent.com/-5Qfraa9_HCRpe17TTVoJZpZVZDcAueHzsdnwyG9Y4pCQNKr9hKzKyuy32vlAwUittM-BS6HHqu6pj3P3SD2AcZIl3FG1Dl5jMIEaRBCiBkK09UZzK7qm_tGzMDAWjpweZXhUExk&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;479&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-a24b2771-c9fe-bdaa-a620-5c12df80f754&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;div style=&quot;text-align: center;&quot;&gt;Drawbacks of equirectangular (EQ) projection&lt;/div&gt;&lt;br /&gt;These drawbacks made us look for better projection types for 360-degree videos. To compare different projection types we used saturation maps. A saturation map shows the ratio of video pixel density to display pixel density. The color coding goes from red (low) to orange, yellow, green and finally blue (high). 
Green indicates optimal pixel density of near 1:1. Yellow and orange indicate insufficient density (too few video pixels for the available display pixels) and blue indicates wasted resources (too many video pixels for the available display pixels). The ideal projection would lead to a saturation map that is uniform in color. At sufficient video resolution it would be uniformly green.&lt;br /&gt;&lt;br /&gt;We investigated cubemaps as a potential candidate. Cubemaps have been used by computer games for a long time to display the &lt;a href=&quot;https://en.wikipedia.org/wiki/Skybox_(video_games)&quot;&gt;skybox&lt;/a&gt; and other special effects.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;eqr_saturation.png&quot; height=&quot;154&quot; src=&quot;https://lh3.googleusercontent.com/Tt8SG1Au0uzuDJkLfrCF9GPNQ7LHXSLkUEl6KXbUhXecLxZ7wMQDugHUFYinwxGEQkzydtwlUb1g0LysWtLgDvNYFtx21EftDxl_3eOmbG1dt8lqYbAVm-DEW3TU7X6R4GHTev-e&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;311&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-a24b2771-c9ff-bde7-bc49-7dc10b2804ff&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;div style=&quot;text-align: center;&quot;&gt;Equirectangular projection saturation map&lt;/div&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;cubemap_saturation.png&quot; height=&quot;155&quot; 
src=&quot;https://lh6.googleusercontent.com/1jY2ci87yC6tFQ_GUgYGs9xGGllWnEgiFHA1Jh5qkI7CN5VLSQKj0Bk1fVQZm22DNJ41N72Geo6jGMqzVW3xjwkBqtoGnSzoxJw_EMtmrIn9iHFRFsdYshPGokRBR_3rY77sd3f_&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;309&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-a24b2771-ca00-16c1-6c73-f65e47947fec&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;div style=&quot;text-align: center;&quot;&gt;Cubemap projection saturation map&lt;/div&gt;&lt;br /&gt;In the equirectangular saturation map the poles are blue, indicating wasted pixels. The equator (horizon) is orange, indicating an insufficient number of pixels. In contrast, the cubemap has green (good) regions nearer to the equator, and the wasteful blue regions at the poles are gone entirely. However, the cubemap results in large orange regions (not good) at the equator because a cubemap samples more pixels at the corners than at the center of the faces.&lt;br /&gt;&lt;br /&gt;We achieved a substantial improvement using an approach we call &lt;b&gt;Equi-angular Cubemap&lt;/b&gt; or &lt;b&gt;EAC&lt;/b&gt;. 
The EAC projection’s saturation is significantly more uniform than the previous two, while further improving quality at the equator:&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 11pt; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;eac_saturation.png&quot; height=&quot;161&quot; src=&quot;https://lh4.googleusercontent.com/mVXeFE-luJSTFaX-BdBVmh-k0_QuEaB56DjlrnKAj7MdlqrYThGqHFmf3Fqpq2IjzEfkZ3OuKrGi3mmX5UIOsObn7Ne-TFg3vvRj6KvkufYkjEGd6WOpbi0YtrFxDfQqIU3XlMfW&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;325&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-a24b2771-ca00-7993-edd6-48711d8334ce&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;div style=&quot;text-align: center;&quot;&gt;Equi-angular Cubemap - EAC&lt;/div&gt;&lt;br /&gt;As opposed to traditional cubemap, which distributes equal pixels for equal distances on the cube surface, equi-angular cubemap distributes equal pixels for equal angular change.&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;/div&gt;&lt;br /&gt;The saturation maps seemed promising, but we wanted to see if people could tell the difference. So we asked people to rate the quality of each without telling them which projection they were viewing. People generally rated EAC as higher quality compared to other projections. 
Here is an example comparison: &lt;br /&gt;&lt;br /&gt;&lt;!-- Comparison Tool Begin --&gt;&lt;!-- Licensed under the MIT License (http://opensource.org/licenses/MIT) --&gt;&lt;style&gt;div#EQvsEAC { width: 60vw; height: 60vw; max-width: 640px; max-height: 360px; overflow: hidden; } div#EQvsEAC figure { background-image: url(https://4.bp.blogspot.com/-LksAiIjxW3E/WMcyYPPjUVI/AAAAAAAAASE/xIcJBPIg4RAEDlIrUBpJdMV1-K2tORJJQCLcB/s1600/EAC.png); background-size: cover; position: relative; font-size: 0; width: 100%; height: 100%; margin: 0; } div#EQvsEAC figure &gt; img { position: relative; width: 100%; } div#EQvsEAC figure div { background-image: url(https://4.bp.blogspot.com/-IBWF3S0VTvs/WMcyYOj4YVI/AAAAAAAAASA/RGvPl1MiyFE312oFPfvZ4saPKkPyF9lWgCLcB/s1600/EQ.png); background-size: cover; position: absolute; width: 50%; box-shadow: 2px 0px white; overflow: hidden; bottom: 0; height: 100%; }  input[type=range]{ -webkit-appearance:none; -moz-appearance:none; position: relative; top: -2rem; left: -1%; background-color: rgba(255,255,255,0.1); width: 102%; } input[type=range]:focus { outline: none; } input[type=range]:active { outline: none; }  input[type=range]::-moz-range-track { -moz-appearance:none; height:20px; width: 98%; background-color: rgba(255,255,255,0.1); position: relative; outline: none; } input[type=range]::active { border: none; outline: none; } input[type=range]::-webkit-slider-thumb { -webkit-appearance:none; width: 20px; height: 20px; background: white; border-radius: 0; } input[type=range]::-moz-range-thumb { -moz-appearance: none; width: 20px; height: 20px; background: white; border-radius: 0; } input[type=range]:focus::-webkit-slider-thumb { background: gray; } input[type=range]:focus::-moz-range-thumb { background: gray; } &lt;/style&gt;&lt;br /&gt;&lt;div id=&quot;EQvsEAC&quot;&gt;&lt;br /&gt;&lt;figure&gt;&lt;div id=&quot;divider&quot;&gt;&lt;/div&gt;&lt;/figure&gt;&lt;input autocomplete=&quot;off&quot; id=&quot;slider&quot; max=&quot;100&quot; 
min=&quot;0&quot; oninput=&quot;var divider,slider;if(divider==null) divider=document.getElementById(&#39;divider&#39;);if(slider==null)slider=document.getElementById(&#39;slider&#39;);divider.style.width=slider.value+&#39;%&#39;;&quot; type=&quot;range&quot; value=&quot;50&quot; /&gt;&lt;/div&gt;&lt;!-- Comparison Tool End --&gt; EAC vs EQ&lt;br /&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;&lt;span style=&quot;font-size: large;&quot;&gt;Creating Industry Standards&lt;/span&gt;&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;We’re just beginning to see innovative new projections for 360 video. We’ve worked with Equirectangular and Cube Map, and now EAC. We think a standardized way to represent arbitrary projections will help everyone innovate, so we’ve developed a Projection Independent Mesh.&lt;br /&gt;&lt;br /&gt;A Projection Independent Mesh describes the projection by including a 3D mesh along with its texture mapping in the video container. The video rendering software simply renders this mesh as per the texture mapping specified and does not need to understand the details of the projection used. This gives us infinite possibilities. We published our &lt;a href=&quot;https://github.com/google/spatial-media/blob/master/docs/spherical-video-v2-rfc.md&quot;&gt;mesh format draft standard&lt;/a&gt; on github inviting industry experts to comment and are hoping to turn this into a widely agreed upon industry standard.&lt;br /&gt;&lt;br /&gt;Some 360-degree cameras do not capture the entire field of view. For example, they may not have a lens to capture the top and bottom or may only capture a 180-degree scene. Our proposal supports these cameras and allows replacing the uncaptured portions of the field of view by a static geometry and image. Our proposal allows compressing the mesh using deflate or other compression. 
We designed the mesh format with compression efficiency in mind and were able to fit EAC projection within a 4 KB payload.&lt;br /&gt;&lt;br /&gt;The projection independent mesh allows us to continue improving on projections and deploy them with ease since our renderer is now projection independent.&lt;br /&gt;&lt;br /&gt;Spherical video playback on Android now benefits from EAC projection streamed using a projection independent mesh. We automatically convert uploaded videos to EAC mesh. This will soon be available on iOS and desktop too. Our ingestion format continues to be based on equirect projection as mentioned in our &lt;a href=&quot;https://support.google.com/youtube/answer/6178631&quot;&gt;upload recommendations&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Anjali Wheeler, Software Engineer, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=u9Dg-g7t2l4&quot;&gt;Disturbed - The Sound Of Silence&lt;/a&gt;.&quot;&lt;/i&gt;&lt;br /&gt;&lt;i&gt;&lt;br /&gt;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7389909808453602105'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7389909808453602105'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/03/improving-vr-videos.html' title='Improving VR videos'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh3.googleusercontent.com/-5Qfraa9_HCRpe17TTVoJZpZVZDcAueHzsdnwyG9Y4pCQNKr9hKzKyuy32vlAwUittM-BS6HHqu6pj3P3SD2AcZIl3FG1Dl5jMIEaRBCiBkK09UZzK7qm_tGzMDAWjpweZXhUExk=s72-c" height="72" 
width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-7671781959196132120</id><published>2017-01-12T10:00:00.000-08:00</published><updated>2020-08-31T12:14:12.906-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Super Chat API"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Supercharge your YouTube live tools with the new Super Chat API</title><content type='html'>In December 2015, we &lt;a href=&quot;https://youtube-eng.googleblog.com/2015/12/chat-it-up-streamers-new-live-chat-fan.html&quot;&gt;launched an array of API services&lt;/a&gt; that let developers access a wealth of data about live streams, chat, and fan funding. Since then, we’ve seen thousands of creators use the tools listed on our &lt;a href=&quot;https://support.google.com/youtube/answer/7060842?hl=en&quot;&gt;Tools for Gaming Streamers page&lt;/a&gt; to enhance their streams by adding chatbots, overlays, polls and more.&lt;br /&gt;&lt;br /&gt;Today, we &lt;a href=&quot;https://youtube.googleblog.com/2017/01/can-we-chat-bye-bye-fan-funding-hello.html&quot;&gt;announced&lt;/a&gt; a new live feature for fans and creators, Super Chat, which lets anybody watching a live stream stand out from the crowd and get a creator’s attention by purchasing highlighted chat messages. We’re also announcing a new API service for this feature: the &lt;b&gt;Super Chat API&lt;/b&gt;, designed to allow developers to access real-time information about Super Chat purchases.&lt;br /&gt;&lt;br /&gt;The launch of this new API service will be followed by the shutdown of our Fan Funding API. 
To that end, developers using the Fan Funding API need to move to the new Super Chat API as soon as possible.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;On January 31, 2017&lt;/b&gt;, we’ll begin offering replacements for the two ways developers currently get information about Fan Funding:&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;&lt;a href=&quot;https://developers.google.com/youtube/v3/live/docs/liveChatMessages/list&quot;&gt;LiveChatMessages.list&lt;/a&gt; will gain a new message type, &lt;b&gt;superChatMessage&lt;/b&gt;, which will contain details about Super Chats purchased during an active live stream&lt;/li&gt;&lt;li&gt;A new endpoint, &lt;b&gt;SuperChats.list&lt;/b&gt;, will be made available to list a channel’s Super Chat purchases&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;&lt;b&gt;On February 28, 2017&lt;/b&gt;, we’ll be turning down the two existing Fan Funding methods:&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;LiveChatMessages.list will no longer return messages of type &lt;b&gt;fanFundingEvent&lt;/b&gt;&lt;/li&gt;&lt;li&gt;FanFundingEvents.list will no longer return data&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;During the transition period between Super Chats and Fan Funding, &lt;b&gt;SuperChats.list&lt;/b&gt; will provide information about &lt;b&gt;both&lt;/b&gt; Super Chat events &lt;i&gt;and&lt;/i&gt; Fan Funding events, so we encourage all developers to switch to the new API as soon as it becomes available. 
Keep your eye on the &lt;a href=&quot;https://developers.google.com/youtube/v3/revision_history&quot;&gt;YouTube Data API v3 Revision History&lt;/a&gt; to get the documentation for this service as soon as we post it.&lt;br /&gt;&lt;br /&gt;If you’ve got questions on this, please feel free to ask the community on our &lt;a href=&quot;http://stackoverflow.com/questions/tagged/youtube-api&quot;&gt;Stack Overflow tag&lt;/a&gt; or send us a tweet at @YouTubeDev and we’ll do our best to answer.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Marc Chambers, Developer Relations, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=YZ8nTwVkr88&quot;&gt;Show of the Week: New Games for 2017&lt;/a&gt;.&quot;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7671781959196132120'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7671781959196132120'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2017/01/supercharge-your-youtube-live-tools.html' title='Supercharge your YouTube live tools with the new Super Chat API'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-206062137920682582</id><published>2016-11-08T09:00:00.000-08:00</published><updated>2020-08-31T12:14:10.474-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Reporting API"/><title type='text'>Download your ad revenue reports through the YouTube Reporting API service</title><content type='html'>With the launch of the &lt;a 
href=&quot;https://developers.google.com/youtube/reporting/v1/reports/&quot;&gt;YouTube Reporting API&lt;/a&gt; last year, we introduced a mechanism to download raw YouTube Analytics data. It generates a set of predefined reports in the form of CSV files that contain YouTube Analytics data for content owners. Once activated, reports are generated regularly, and each one contains data for a unique, 24-hour period. We heard that you also wanted more data to be accessible via the YouTube Reporting API service.&lt;br /&gt;&lt;br /&gt;So today, &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/system_managed/&quot;&gt;we are making a set of system-managed ad revenue reports available to content owners&lt;/a&gt;. Previously, this data was only available via manually downloadable reports in Creator Studio. The system-managed reports released via the YouTube Reporting API maintain the same breakdowns as downloadable reports, but the schema is optimized to align to other reports available via this API.&lt;br /&gt;&lt;br /&gt;These new reports are generated automatically for eligible YouTube Partners. Thus, if you are an eligible YouTube partner, you don&#39;t even need to create reporting jobs. Just follow the instructions below to find out whether the reports are available to you and to download the reports themselves.&lt;br /&gt;&lt;br /&gt;We also want to let you know that more reports will be available via the YouTube Reporting API service in the coming weeks and months. 
Please keep an eye on the &lt;a href=&quot;https://developers.google.com/youtube/analytics/revision_history&quot;&gt;revision history&lt;/a&gt; to find out when additional reports become available.&lt;br /&gt;&lt;br /&gt;&lt;span style=&quot;font-size: large;&quot;&gt;How to start using the new reports&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Check what new report types are available to you&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;ol&gt;&lt;li&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/guides/authorization&quot;&gt;Get an OAuth token&lt;/a&gt; (authentication credentials)&lt;/li&gt;&lt;li&gt;Call the &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/reportTypes/list&quot;&gt;reportTypes.list&lt;/a&gt; method with the &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;includeSystemManaged&lt;/span&gt; parameter set to true.&lt;/li&gt;&lt;li&gt;The response lists all report types available to you. As you can’t use the new report types to create reporting jobs yourself, their &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;systemManaged&lt;/span&gt; property is set to true.&lt;/li&gt;&lt;/ol&gt;&lt;br /&gt;&lt;b&gt;Check if system-managed jobs have been created for you&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;ol&gt;&lt;li&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/guides/authorization&quot;&gt;Get an OAuth token&lt;/a&gt; (authentication credentials)&lt;/li&gt;&lt;li&gt;Call the &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/list&quot;&gt;jobs.list&lt;/a&gt; method with the &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;includeSystemManaged&lt;/span&gt; parameter set to true. This will return a list of the available reporting jobs. 
All jobs with the &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;systemManaged&lt;/span&gt; property set to true are jobs for the new report types.&lt;/li&gt;&lt;li&gt;Store the IDs of the jobs you want to download reports for.&lt;/li&gt;&lt;/ol&gt;&lt;br /&gt;&lt;b&gt;Download reports&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;&lt;ol&gt;&lt;li&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/guides/authorization&quot;&gt;Get an OAuth token&lt;/a&gt; (authentication credentials)&lt;/li&gt;&lt;li&gt;Call the &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs.reports/list&quot;&gt;reports.list&lt;/a&gt; method with the &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;jobId&lt;/span&gt; parameter set to the ID found in the previous section to retrieve a list of downloadable reports created by that job.&lt;/li&gt;&lt;li&gt;Choose a report from the list and download it using its &lt;span style=&quot;font-family: Courier New, Courier, monospace;&quot;&gt;downloadUrl&lt;/span&gt;.&lt;/li&gt;&lt;/ol&gt;&lt;br /&gt;&lt;span style=&quot;font-size: large;&quot;&gt;Client libraries and sample code&lt;/span&gt;&lt;br /&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/tools/&quot;&gt;Client libraries&lt;/a&gt; exist for many different programming languages to help you use the YouTube Reporting API. Our &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/code_samples/&quot;&gt;Java, PHP, and Python code samples&lt;/a&gt; will help you get started. 
The &lt;a href=&quot;https://developers.google.com/apis-explorer/#p/youtubereporting/v1/&quot;&gt;API Explorer&lt;/a&gt; lets you try out sample calls before writing any code.&lt;br /&gt;&lt;i&gt;&lt;br /&gt;&lt;/i&gt;&lt;i&gt;Posted by &lt;a href=&quot;https://www.google.com/+MarkusLanthaler&quot;&gt;Markus Lanthaler&lt;/a&gt;, Tech Lead YouTube Analytics APIs, recently watched “&lt;a href=&quot;https://www.youtube.com/watch?v=os9qYgJ6Nw8&quot;&gt;Crushing gummy bears with hydraulic press&lt;/a&gt;” and &lt;a href=&quot;https://plus.google.com/+MihirKulkarni31&quot;&gt;Mihir Kulkarni&lt;/a&gt;, Software Engineer, recently watched “&lt;a href=&quot;https://www.youtube.com/watch?v=84WIaK3bl_s&quot;&gt;The $21,000 first class airplane seat&lt;/a&gt;.”&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/206062137920682582'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/206062137920682582'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/11/download-your-ad-revenue-reports.html' title='Download your ad revenue reports through the YouTube Reporting API service'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-4620531339287436806</id><published>2016-10-10T10:00:00.000-07:00</published><updated>2020-08-31T12:14:11.551-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Data API"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Saying goodbye to the YouTube v2 Uploads API service</title><content 
type='html'>If you’re already using or migrated to the &lt;a href=&quot;http://apiblog.youtube.com/2012/12/the-simpler-yet-more-powerful-new.html&quot;&gt;YouTube Data API v3&lt;/a&gt;, you can stop reading.&lt;br /&gt;&lt;br /&gt;If you develop a tool, script, plugin, or any other code that uploads video to YouTube, we have an important update for you! On October 31, 2016, we’ll be shutting down the ability to upload videos through the old YouTube Data API (v2) service. This shutdown is in accordance with our prior deprecation announcements for the &lt;a href=&quot;https://youtube-eng.googleblog.com/2014/03/committing-to-youtube-data-api-v3_4.html&quot;&gt;YouTube Data API (v2) service&lt;/a&gt; in 2014 and &lt;a href=&quot;https://developers.google.com/youtube/v3/guides/moving_to_oauth&quot;&gt;ClientLogin authentication&lt;/a&gt; in 2013.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;If you’re using this service, unless changes are made to your API Client(s), your users will no longer be able to upload videos using your integration starting October 31, 2016.&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;We announced this deprecation over two years ago to give our developer community time to adjust. If you haven’t already updated, please update your integration as soon as possible. 
The supported method for programmatically uploading videos to YouTube is the &lt;a href=&quot;https://developers.google.com/youtube/v3/&quot;&gt;YouTube Data API v3 service&lt;/a&gt;, with &lt;a href=&quot;https://developers.google.com/youtube/v3/guides/authentication&quot;&gt;OAuth2 for authentication&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;You can find a complete guide to uploading videos using this method, as well as sample Python code, &lt;a href=&quot;https://developers.google.com/youtube/v3/guides/uploading_a_video&quot;&gt;on the Google Developers site&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Did you already update your integration to use the YouTube Data API v3 service and OAuth2?&lt;/b&gt; It’s possible there are users who may still be on old versions of your software. You may want to reach out to your users and let them know about this. We may also reach out to YouTube creators who are using these old versions and let them know about this as well.&lt;br /&gt;&lt;br /&gt;If you have questions about this shutdown or about the YouTube Data API v3 service, please post them to our &lt;a href=&quot;http://stackoverflow.com/questions/tagged/youtube-api&quot;&gt;Stack Overflow tag&lt;/a&gt;. 
You can also send us a tweet at &lt;a href=&quot;http://twitter.com/youtubedev&quot;&gt;@YouTubeDev&lt;/a&gt;, and follow us for the latest updates.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Posted by Marc Chambers, YouTube Developer Relations&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4620531339287436806'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4620531339287436806'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/10/saying-goodbye-to-youtube-v2-uploads.html' title='Saying goodbye to the YouTube v2 Uploads API service'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-4222872462408424256</id><published>2016-08-11T14:26:00.000-07:00</published><updated>2020-08-31T12:14:11.343-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="youtube api"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>An updated Terms of Service and New Developer Policies for the YouTube API Services</title><content type='html'>&lt;i&gt;The updated YouTube API Services Terms and Policies are effective starting today (February 10, 2017)&lt;/i&gt;&lt;br /&gt;&lt;br /&gt;Today we are announcing changes to the YouTube API Services Terms of Service and introducing new Developer Policies to guide their implementation. 
These updated &lt;a href=&quot;https://developers.google.com/youtube/terms/api-services-terms-of-service&quot;&gt;Terms of Service&lt;/a&gt; and new &lt;a href=&quot;https://developers.google.com/youtube/terms/developer-policies&quot;&gt;Developer Policies&lt;/a&gt; will take effect in six months so that you have time to understand and implement them.&lt;br /&gt;&lt;br /&gt;The YouTube API Services Terms of Service are developers’ rules of the road, and like any rules of the road, they need to be updated over time as usage evolves. As we&#39;ve grown, so has an entire ecosystem of companies that support users, creators and advertisers, many of them built on top of YouTube’s API Services. We haven’t had major updates to our API Services Terms of Service in over four years, so during the past several months we&#39;ve been speaking to developers and studying how our API Services are being used to make sure that our terms make sense for the YouTube of today. We updated the &lt;a href=&quot;https://developers.google.com/youtube/terms/api-services-terms-of-service&quot;&gt;YouTube API Services Terms of Service&lt;/a&gt; to keep up with usage growth, strengthen user controls and protections even further, and address misuse. You can find the updated terms &lt;a href=&quot;https://developers.google.com/youtube/terms/api-services-terms-of-service&quot;&gt;here&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;In order to provide more guidance to developers, which has been a key ask, we are introducing new Developer Policies. They aim to provide operational guidelines for accessing and using our API Services, covering user privacy and data protection, data storage, interface changes, uploads, comments, and more. 
You can read the full Developer Policies &lt;a href=&quot;https://developers.google.com/youtube/terms/developer-policies&quot;&gt;here&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;In addition to the new terms, we&#39;re also announcing the upcoming &lt;a href=&quot;https://www.youtube.com/yt/serviceproviders/measurement.html&quot;&gt;YouTube’s Measurement Program&lt;/a&gt;. This new certification program will help participants provide accurate, consistent, and relevant YouTube measurement data to their clients and users, thereby helping them make informed decisions about YouTube. We’ll launch the program with a few initial partners before scaling it more broadly. Please visit the YouTube’s Measurement Program &lt;a href=&quot;https://www.youtube.com/yt/serviceproviders/measurement.html&quot;&gt;website&lt;/a&gt; to learn more.&lt;br /&gt;&lt;br /&gt;We developed these updates with a few core principles in mind:&lt;br /&gt;&lt;ul&gt;&lt;li&gt;&lt;b&gt;Improving the YouTube experience for users and creators.&lt;/b&gt; Every month, we update our app and site with dozens of new features for users and creators. We want to make sure that every application or website takes advantage of the latest and greatest YouTube functionalities. That’s why we’re introducing a &lt;a href=&quot;https://developers.google.com/youtube/terms/required-minimum-functionality&quot;&gt;Requirement of Minimum Functionality&lt;/a&gt;, which is designed to ensure users have a set of basic functionality around core parts of their YouTube experience, like video playback, comment management, video upload, and other services.&lt;/li&gt;&lt;li&gt;&lt;b&gt;Strengthening user data and privacy.&lt;/b&gt; We want to help foster innovative products while giving users even more control around data privacy and security. These updated terms serve to strengthen our existing user controls and protections even further. 
For example, we now require developers to have a privacy policy that clearly explains to users what user info is accessed, collected and stored.&lt;/li&gt;&lt;li&gt;&lt;b&gt;Fostering a healthy YouTube ecosystem.&lt;/b&gt; While we want to continue to encourage growth of our ecosystem, we also need to make sure our terms limit misuse. As the YouTube developer ecosystem evolved, we saw some fantastic uses of our API Services. Sadly, with amazing uses, there have also been a handful of applications that have misused our API Services. These updated terms serve to further protect against misuse and protect users, creators, and advertisers.&lt;/li&gt;&lt;/ul&gt;It&#39;s been great to see all the ways developer websites and applications have integrated with YouTube. We are committed to the YouTube API Services and we continue to invest with new features that will improve the product, such as expanding the Reporting API service with Payment reports, and Custom reports, launching later this year.&lt;br /&gt;&lt;br /&gt;While we understand these updated terms and new policies may require some adjustment by developers, we believe they’ll help ensure our ecosystem remains strong and poised for growth. Again, to ensure developers have sufficient time to understand and adapt to these changes, the updated &lt;a href=&quot;https://developers.google.com/youtube/terms/api-services-terms-of-service&quot;&gt;YouTube API Services Terms of Service&lt;/a&gt; and the new &lt;a href=&quot;https://developers.google.com/youtube/terms/developer-policies&quot;&gt;Developer Policies&lt;/a&gt; will take effect six months from now, on February 10, 2017. Please do take the time to read and become familiar with them. 
If you have any questions please get in touch with us via &lt;a href=&quot;mailto:yt-api-tos-questions@google.com&quot;&gt;yt-api-tos-questions@google.com&lt;/a&gt;.&lt;br /&gt;&lt;i&gt;&lt;br /&gt;&lt;/i&gt;&lt;i&gt;Posted by Shalini GovilPai, Global Head of Technology Solutions&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4222872462408424256'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4222872462408424256'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/08/an-updated-terms-of-service-and-new.html' title='An updated Terms of Service and New Developer Policies for the YouTube API Services'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-8859426344370245893</id><published>2016-08-01T11:00:00.000-07:00</published><updated>2020-08-31T12:14:13.254-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="https"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>YouTube&#39;s road to HTTPS</title><content type='html'>Today we added YouTube to Google&#39;s &lt;a href=&quot;https://www.google.com/transparencyreport/https?hl=en&quot;&gt;HTTPS transparency report&lt;/a&gt;.  
We&#39;re proud to announce that in the last two years, we steadily rolled out encryption using HTTPS to 97 percent of YouTube&#39;s traffic.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: Arial; font-size: 13.3333px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;229&quot; src=&quot;https://lh4.googleusercontent.com/YPT5SV7V87NwT3AlcIobVyM_lQrZmJyN9j4X33vG4gba1bggDLyFS04X9lnNnhLODlw73VbCjq7A_n4OHLOVosnSEibpFFE_-xMacZSmoi7SavNZ9D8jCKV5EN4--Iiw1GdGS6PT&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;720&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-0be1d1df-46cb-524f-1031-325543078a18&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;a href=&quot;https://developers.google.com/web/fundamentals/security/encrypt-in-transit/why-https?hl=en&quot;&gt;HTTPS&lt;/a&gt; provides critical security and data integrity for the web and for all web users. So what took us so long? As we gradually moved YouTube to HTTPS, we faced several unique challenges:&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;&lt;b&gt;Lots of traffic!&lt;/b&gt; Our CDN, the &lt;a href=&quot;https://peering.google.com/#/options/google-global-cache&quot;&gt;Google Global Cache&lt;/a&gt;, serves a massive amount of video, and migrating it all to HTTPS is no small feat. Luckily, hardware acceleration for AES is widespread, so we were able to encrypt virtually all video serving without adding machines. (Yes, &lt;a href=&quot;https://istlsfastyet.com/&quot;&gt;HTTPS is fast now&lt;/a&gt;.)&lt;/li&gt;&lt;li&gt;&lt;b&gt;Lots of devices!&lt;/b&gt; You watch YouTube videos on everything from flip phones to smart TVs. We A/B tested HTTPS on every device to ensure that users would not be negatively impacted. 
We found that HTTPS improved quality of experience on most clients: by ensuring content integrity, we virtually eliminated many types of streaming errors.&lt;/li&gt;&lt;li&gt;&lt;b&gt;Lots of requests!&lt;/b&gt; Mixed content—any insecure request made in a secure context—poses a challenge for any large website or app. We get an alert when an insecure request is made from any of our clients and will block all mixed content using &lt;a href=&quot;https://en.wikipedia.org/wiki/Content_Security_Policy&quot;&gt;Content Security Policy&lt;/a&gt; on the web, &lt;a href=&quot;https://developer.apple.com/library/ios/releasenotes/General/WhatsNewIniOS/Articles/iOS9.html#//apple_ref/doc/uid/TP40016198-SW14&quot;&gt;App Transport Security&lt;/a&gt; on iOS, and &lt;a href=&quot;https://developer.android.com/guide/topics/manifest/application-element.html#usesCleartextTraffic&quot;&gt;CleartextTraffic&lt;/a&gt; on Android. Ads on YouTube have used HTTPS &lt;a href=&quot;https://security.googleblog.com/2015/04/ads-take-step-towards-https-everywhere.html&quot;&gt;since 2014&lt;/a&gt;.&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;&lt;br /&gt;We&#39;re also proud to be using &lt;b&gt;&lt;a href=&quot;https://security.googleblog.com/2016/07/bringing-hsts-to-wwwgooglecom.html&quot;&gt;HTTP Strict Transport Security (HSTS)&lt;/a&gt;&lt;/b&gt; on youtube.com to cut down on HTTP to HTTPS redirects. This improves both security and latency for end users. Our HSTS lifetime is one year, and we hope to preload this soon in web browsers.&lt;br /&gt;&lt;br /&gt;97 percent is pretty good, but why isn&#39;t YouTube at 100 percent? In short, some devices do not fully support modern HTTPS. Over time, to keep YouTube users as safe as possible, we will gradually phase out insecure connections.&lt;br /&gt;&lt;br /&gt;In the real world, we know that any non-secure HTTP traffic could be vulnerable to attackers. 
All websites and apps should be protected with HTTPS — if you’re a developer that hasn’t yet migrated, &lt;a href=&quot;https://developers.google.com/web/fundamentals/security/encrypt-in-transit/?hl=en&quot;&gt;get started&lt;/a&gt; today.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Sean Watson, Software Engineer, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=QMy4SeShfvA&quot;&gt;GoPro: Fire Vortex Cannon with the Backyard Scientist&lt;/a&gt;.&quot;&lt;/i&gt;&lt;br /&gt;&lt;i&gt;Jon Levine, Product Manager, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=jOyfZex7B3E&quot;&gt;Sega Saturn CD - Cracked after 20 years&lt;/a&gt;.&quot;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/8859426344370245893'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/8859426344370245893'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/08/youtubes-road-to-https.html' title='YouTube&#39;s road to HTTPS'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh4.googleusercontent.com/YPT5SV7V87NwT3AlcIobVyM_lQrZmJyN9j4X33vG4gba1bggDLyFS04X9lnNnhLODlw73VbCjq7A_n4OHLOVosnSEibpFFE_-xMacZSmoi7SavNZ9D8jCKV5EN4--Iiw1GdGS6PT=s72-c" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-2822311802688910848</id><published>2016-05-13T10:00:00.000-07:00</published><updated>2020-08-31T12:14:10.683-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="machine learning"/><category scheme="http://www.blogger.com/atom/ns#" term="video 
transcoding"/><category scheme="http://www.blogger.com/atom/ns#" term="youtube"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Machine learning for video transcoding</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;At YouTube we care about the quality of the pixels we deliver to our users. With many millions of devices uploading to our servers every day, the content variability is so huge that delivering an acceptable audio and video quality in all playbacks is a considerable challenge. Nevertheless, our goal has been to continuously improve quality by reducing the amount of compression artifacts that our users see on each playback. While we could do this by increasing the bitrate for every file we create, that would quite easily exceed the capacity of many of the network connections available to you. Another approach is to optimize the parameters of our video processing algorithms to meet bitrate budgets and minimum quality standards. 
While Google’s compute and storage resources are huge, they are finite and so we must temper our algorithms to &lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;also&lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; fit within compute requirements. The hard problem then is to adapt our pipeline to create the best quality output for each clip you upload to us, within constraints of quality, bitrate and compute cycles. &lt;/span&gt;&lt;/div&gt;&lt;b id=&quot;docs-internal-guid-2b3707e9-a755-dc73-ea20-6e5f04a617ce&quot; style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;This is a well known triad in the world of video compression and transcoding. The problem is usually solved by finding a sweet spot of transcoding parameters that seem to work well on average for a large number of clips. That sweet spot is sometimes found by trying every possible set of parameters until one is found that satisfies all the constraints. Recently, others have been using this “exhaustive search” idea to tune parameters on a per clip basis. 
&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;What we’d like to show you in this blog post is a new technology we have developed that adapts our parameter set for each clip automatically using Machine Learning. We’ve been using this over the last year for improving the quality of movies you see on YouTube and Google Play.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;h2 dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 4pt; margin-top: 18pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 18.666666666666664px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The good and bad about parallel processing&lt;/span&gt;&lt;/h2&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;We ingest more than 400 hours of video per minute. 
Each file must be transcoded from the uploaded video format into a number of other video formats with different codecs so we can support playback on any device you might have. The only way we can keep up with that rate of ingest and quickly show you your transcoded video in YouTube is to break each file into pieces called “chunks,” and process these in parallel. Every chunk is processed independently and simultaneously by CPUs in our Google cloud infrastructure. The complexity involved in chunking and recombining the transcoded segments is significant. Quite aside from the mechanics of assembling the processed chunks, maintaining the quality of the video in each chunk is a challenge. This is because to have as speedy a pipeline as possible, our chunks don’t overlap, and are also very small; just a few seconds. So the good thing about parallel processing is increased speed and reduced latency. But the bad thing is that without the information about the video in the neighboring chunks, it’s now difficult to control chunk quality so that there is no visible difference between the chunks when we tape them back together. 
Small chunks don’t give the encoder much time to settle into a stable state hence each encoder treats each chunk slightly differently.&lt;/span&gt;&lt;/div&gt;&lt;h2 dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 4pt; margin-top: 18pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 18.666666666666664px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Smart parallel processing&lt;/span&gt;&lt;/h2&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;You could say that we are shooting ourselves in the foot before starting the race. Clearly, if we communicate information about chunk complexity between the chunks, each encoder can adapt to what’s happening in the chunks after or before it. 
But inter-process communication increases overall system complexity and requires some extra iterations in processing each chunk.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Actually, OK, truth is we’re stubborn here in Engineering and we wondered how far we could push this idea of “don’t let the chunks talk to each other.”&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The plot below shows an example of the PSNR in dB per frame over two chunks from a 720p video clip, using H.264 as the codec. A higher value of PSNR means better picture quality and a lower value means poorer quality. You can see that one problem is the quality at the start of a chunk is very different from that at the end of the chunk. 
Aside from the average quality level being worse than we would like, this variability in quality causes an annoying pulsing artifact.&lt;/span&gt;&lt;/div&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-R0GA61qcsBA/X0BID86YtSI/AAAAAAAAA1A/Hiw-SGPlBywh2Aq_s4naxMt3aTnJ6qsuQCLcBGAsYHQ/s1600/cqbb_psnr.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;900&quot; data-original-width=&quot;1600&quot; src=&quot;https://1.bp.blogspot.com/-R0GA61qcsBA/X0BID86YtSI/AAAAAAAAA1A/Hiw-SGPlBywh2Aq_s4naxMt3aTnJ6qsuQCLcBGAsYHQ/s1600/cqbb_psnr.png&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Because of small chunk sizes, we would expect that each chunk behaves like the previous and next one, at least statistically. So we might expect the encoding process to converge to roughly the same result across consecutive chunks. While this is true much of the time, it is not true in this case. One immediate solution is to change the chunk boundaries so that they align with high activity video behavior like fast motion, or a scene cut. Then we would expect that each chunk is relatively homogenous so the encoding result should be more uniform. 
It turns out that this does improve the situation, but not as much as we’d like, and the instability is still often there.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The key is to allow the encoder to process each chunk multiple times, learning on each iteration how to adjust its parameters in anticipation of what happens across the entire chunk instead of just a small part of it. This results in the start and end of each chunk having similar quality, and because the chunks are short, it is now more likely that the differences across chunk boundaries are also reduced. But even then, we noticed that it can take quite a number of iterations for this to happen. We observed that the number of iterations is affected a great deal by the quantization related parameter (CRF) of the encoder on that first iteration. Even better, there is often a “best” CRF that allows us to hit our target bitrate at a desired quality with just one iteration. But this “best” setting is actually different for every clip. That’s the tricky bit. 
If only we could work out what that setting was for each clip, then we’d have a simple way of generating good looking clips without chunking artifacts.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;margin-left: 0pt;&quot;&gt;&lt;table style=&quot;border-collapse: collapse; border: none;&quot;&gt;&lt;colgroup&gt;&lt;col width=&quot;322&quot;&gt;&lt;/col&gt;&lt;col width=&quot;350&quot;&gt;&lt;/col&gt;&lt;/colgroup&gt;&lt;tbody&gt;&lt;tr style=&quot;height: 0px;&quot;&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: top;&quot;&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;/div&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The plot on the right shows the result of many experiments with our encoder at varying CRF (constant quality) settings, over the same 1080p clip. After each experiment we measured the bitrate of the output file and each point shows the CRF, bitrate pair for that experiment. There is a clear relationship between these two values. In fact it is very well modeled as an exponential fit with three parameters, and the plot shows just how good that modeled line is in fitting the observed data points. 
If we knew the parameters of the line for our clip, then we’d see that to create a 5 Mbps version of this clip (for example) we’d need a CRF of about 20.&lt;/span&gt;&lt;/td&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: middle;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;/div&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;318&quot; data-original-width=&quot;512&quot; height=&quot;199&quot; src=&quot;https://4.bp.blogspot.com/-v4QBPJLMvlc/X0BIj5hyceI/AAAAAAAAA1M/Zt6hGEkXasQDPovd_VaMbTDHGIxs-GgkwCLcBGAsYHQ/s320/unnamed.png&quot; width=&quot;320&quot; /&gt;&lt;/span&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;h2 dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 4pt; margin-top: 18pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 18.666666666666664px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Pinky and the Brain&lt;/span&gt;&lt;/h2&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; 
vertical-align: baseline; white-space: pre-wrap;&quot;&gt;What we needed was a way to predict our three curve fitting parameters from low complexity measurements about the video clip. This is a classic problem in machine learning, statistics and signal processing. The gory mathematical details of our solution are in technical papers that we published recently.&lt;sup&gt;&lt;span style=&quot;font-size: xx-small;&quot;&gt;1&lt;/span&gt;&lt;/sup&gt;&lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; You can see there how our thoughts evolved. Anyway, the idea is rather simple: predict the three parameters given things we know about the input video clip, and read off the CRF we need. This prediction is where the “Google Brain” comes in.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The “things we know about the input video clip” are called video “features.” In our case there is a vector of features containing measurements like input bit rate, motion vector bits in the input file, resolution of the video and frame rate. These measurements can also be made from a very fast low quality transcode of the input clip to make them more informative. However, the exact relationship between the features and the curve parameters for each clip is rather more complicated than an equation we could write down. 
So instead of trying to discover that explicitly ourselves, we turned to Machine Learning with Google Brain. We first took about 10,000 video clips and exhaustively tested every quality setting on each, measuring the resulting bitrate from each setting. This gave us 10,000 curves which in turn gave us 4 x 10,000 parameters measured from those curves.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The next step was to extract features from our video clips. Having generated the training data and the feature set, our Machine Learning system learned a “Brain” configuration that could predict the parameters from the features. Actually we used both a simple “regression” technique as well as the Brain. Both outperformed our existing strategy. &amp;nbsp;Although the process of training the Brain is relatively computationally heavy, the resulting system was actually quite simple and required only a few operations on our features. 
That meant that the compute load in production was small.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;margin-left: 0pt;&quot;&gt;&lt;table style=&quot;border-collapse: collapse; border: none;&quot;&gt;&lt;colgroup&gt;&lt;col width=&quot;312&quot;&gt;&lt;/col&gt;&lt;col width=&quot;347&quot;&gt;&lt;/col&gt;&lt;/colgroup&gt;&lt;tbody&gt;&lt;tr style=&quot;height: 0px;&quot;&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: top;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 18.666666666666664px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Does it work?&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The plot on the right shows the performance of the various systems on 10,000 video clips. Each point (x,y) represents the percentage of clips (y-axis) in which the resulting bitrate after compression is within x% of the target bitrate. The blue line shows the best case scenario where we use exhaustive search to get the perfect CRF for each clip. Any system that gets close to that is a good one. 
As you can see at the 20% rate, our old system (green line) would hit the target bitrate 15% of the time. Now with our fancy Brain system we can hit it 65% of the time if we use features from your upload only (red line), and better than 80% of the time (dashed line) using some features from a very fast low quality transcode. &lt;/span&gt;&lt;/div&gt;&lt;/td&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: middle;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-KZL5F82QkX4/X0BJR7aI9bI/AAAAAAAAA1Y/ZYQZCRcWrc8Q_BCQJSTCPfN_63eoeyM-QCLcBGAsYHQ/s1600/nn_wp_prediction.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; data-original-height=&quot;457&quot; data-original-width=&quot;490&quot; height=&quot;298&quot; src=&quot;https://1.bp.blogspot.com/-KZL5F82QkX4/X0BJR7aI9bI/AAAAAAAAA1Y/ZYQZCRcWrc8Q_BCQJSTCPfN_63eoeyM-QCLcBGAsYHQ/s320/nn_wp_prediction.png&quot; width=&quot;320&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;But does this actually look good? You may have noticed that we concentrated on our ability to hit a particular bitrate rather than specifically addressing picture quality. 
Our analysis of the problem showed that this was the root cause. Pictures are the proof of the pudding and you can see some frames from a 720p video clip below (shot from a racing car). The top row shows two frames at the start and end of a typical chunk and you can see that the quality in the first frame is way worse than the last. The bottom row shows the frames in the same chunk using our new automated clip adaptive system. In both cases the measured bitrate is the same at 2.8 Mbps. As you can see, the first frame is much improved and as a bonus the last frame looks better as well. So the temporal fluctuation in quality is gone and we also managed to improve the clip quality overall.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;margin-left: 0pt;&quot;&gt;&lt;/div&gt;&lt;table style=&quot;border-collapse: collapse; border: none; width: 672px;&quot;&gt;&lt;colgroup&gt;&lt;col width=&quot;*&quot;&gt;&lt;/col&gt;&lt;col width=&quot;*&quot;&gt;&lt;/col&gt;&lt;/colgroup&gt;&lt;tbody&gt;&lt;tr style=&quot;height: 0px;&quot;&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: middle;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.2; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;/div&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://2.bp.blogspot.com/-rzQZ1-9mi7A/X0BKqRfAueI/AAAAAAAAA1k/qlygf8Bvb0MN8Uc7bA39DiWx-DdK_J01QCLcBGAsYHQ/s1600/pasted%2Bimage%2B0.png&quot; imageanchor=&quot;1&quot; style=&quot;clear: left; float: left; margin-bottom: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; 
src=&quot;https://2.bp.blogspot.com/-rzQZ1-9mi7A/X0BKqRfAueI/AAAAAAAAA1k/qlygf8Bvb0MN8Uc7bA39DiWx-DdK_J01QCLcBGAsYHQ/s320/pasted%2Bimage%2B0.png&quot; width=&quot;320&quot; height=&quot;180&quot; data-original-width=&quot;1280&quot; data-original-height=&quot;720&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: middle;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.2; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;/div&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://1.bp.blogspot.com/-pUCKBPmvAII/X0BLBYZjpuI/AAAAAAAAA1w/jaF60Zig94EF0Kv64rmpwqM6K79u33YrACLcBGAsYHQ/s1600/pasted%2Bimage%2B0%2B%25282%2529.png&quot; imageanchor=&quot;1&quot; style=&quot;clear: right; float: right; margin-bottom: 1em; margin-left: 1em;&quot;&gt;&lt;img border=&quot;0&quot; src=&quot;https://1.bp.blogspot.com/-pUCKBPmvAII/X0BLBYZjpuI/AAAAAAAAA1w/jaF60Zig94EF0Kv64rmpwqM6K79u33YrACLcBGAsYHQ/s320/pasted%2Bimage%2B0%2B%25282%2529.png&quot; width=&quot;320&quot; height=&quot;180&quot; data-original-width=&quot;1280&quot; data-original-height=&quot;720&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr style=&quot;height: 0px;&quot;&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: top;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.2; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a 
href=&quot;https://3.bp.blogspot.com/-FAp0U3FVC3M/X0BLi8fNBpI/AAAAAAAAA14/GwmbG1bEykg4a63Si_LQ8RXB_xb3wufxwCLcBGAsYHQ/s1600/unnamed%2B%25281%2529.png&quot; imageanchor=&quot;1&quot; style=&quot;clear: left; float: left; margin-bottom: 1em; margin-right: 1em;&quot;&gt;&lt;img border=&quot;0&quot; src=&quot;https://3.bp.blogspot.com/-FAp0U3FVC3M/X0BLi8fNBpI/AAAAAAAAA14/GwmbG1bEykg4a63Si_LQ8RXB_xb3wufxwCLcBGAsYHQ/s320/unnamed%2B%25281%2529.png&quot; width=&quot;320&quot; height=&quot;180&quot; data-original-width=&quot;512&quot; data-original-height=&quot;288&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;&lt;td style=&quot;border-bottom: solid #000000 1px; border-left: solid #000000 1px; border-right: solid #000000 1px; border-top: solid #000000 1px; padding: 7px 7px 7px 7px; vertical-align: top;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.2; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;a href=&quot;https://2.bp.blogspot.com/-R-6r4w7K-tQ/X0BLnYSKq6I/AAAAAAAAA18/OPNs2Xdfh64NE-Ll4vSsU0pk4MPV_KZvQCLcBGAsYHQ/s1600/pasted%2Bimage%2B0%2B%25283%2529.png&quot; imageanchor=&quot;1&quot; style=&quot;clear: right; float: right; margin-bottom: 1em; margin-left: 1em;&quot;&gt;&lt;img border=&quot;0&quot; src=&quot;https://2.bp.blogspot.com/-R-6r4w7K-tQ/X0BLnYSKq6I/AAAAAAAAA18/OPNs2Xdfh64NE-Ll4vSsU0pk4MPV_KZvQCLcBGAsYHQ/s320/pasted%2Bimage%2B0%2B%25283%2529.png&quot; width=&quot;320&quot; height=&quot;180&quot; data-original-width=&quot;1280&quot; data-original-height=&quot;720&quot; /&gt;&lt;/a&gt;&lt;/div&gt;&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: 
&amp;quot;arial&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;This concept has been used in production in our video infrastructure division for about a year. We are delighted to report it has helped us deliver very good quality streams for movies like &quot;Titanic&quot; and most recently &quot;Spectre.&quot; We don’t expect anyone to notice, because they don’t know what it would look like otherwise.&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;But there is always more we can do to improve on video quality. We’re working on it. Stay tuned.&lt;/span&gt;&lt;br /&gt;&lt;i&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;br /&gt;&lt;/span&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Anil Kokaram, Engineering Manager, AV Algorithms Team, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=t179-xxkls0&quot;&gt;Tony Cozier speaking about the West Indies Cricket Heritage Centre&lt;/a&gt;,&quot; Yao Chung Lin, Software Engineer, Transcoder Team, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=Xh9jAD1ofm4&quot;&gt;UNDER ARMOUR | RULE YOURSELF | MICHAEL PHELPS&lt;/a&gt;,&quot; Michelle Covell, Research Scientist, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=0Rnq1NpHdmw&quot;&gt;Last Week Tonight with John Oliver: Scientific Studies (HBO)&lt;/a&gt;&quot; and Sam John, Software Engineer, Transcoder Team, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=pce6OPW4SSQ&quot;&gt;Atlantis Found: The Clue in the Clay | 
History&lt;/a&gt;.&quot;&lt;/span&gt;&lt;/i&gt;&lt;br /&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;br /&gt;&lt;/span&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; &lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;arial&amp;quot;; font-size: 13.333333333333332px; font-style: italic; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;sup&gt;&lt;span style=&quot;font-size: xx-small;&quot;&gt;1&lt;/span&gt;&lt;/sup&gt;Optimizing transcoder quality targets using a neural network with an embedded bitrate model, Michele Covell, Martin Arjovsky, Yao-Chung Lin and Anil Kokaram, Proceedings of the Conference on Visual Information Processing and Communications 2016, San Francisco&lt;/span&gt;&lt;/div&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;span id=&quot;docs-internal-guid-2b3707e9-a75f-5684-60db-7f6d2fce6b74&quot;&gt;&lt;span style=&quot;font-size: 13.3333px; font-style: italic; vertical-align: baseline;&quot;&gt;Multipass Encoding for reducing pulsing artefacts in cloud based video transcoding, Yao-Chung Lin, Anil Kokaram and Hugh Denman, IEEE International Conference on Image Processing, pp 907-911, Quebec 2015&lt;/span&gt;&lt;/span&gt;&lt;/span&gt;</content><link rel='edit' type='application/atom+xml' 
href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2822311802688910848'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/2822311802688910848'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/05/machine-learning-for-video-transcoding.html' title='Machine learning for video transcoding'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://1.bp.blogspot.com/-R0GA61qcsBA/X0BID86YtSI/AAAAAAAAA1A/Hiw-SGPlBywh2Aq_s4naxMt3aTnJ6qsuQCLcBGAsYHQ/s72-c/cqbb_psnr.png" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-190165636953526114</id><published>2016-05-10T09:00:00.000-07:00</published><updated>2020-08-31T12:14:10.301-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Reporting API"/><title type='text'>Because retro is in -- announcing historical data in the YouTube Reporting API</title><content type='html'>YouTube creators rely on data -- data about how their channel is performing, data about their video’s ratings, their earnings. Lots of data. That’s why we &lt;a href=&quot;http://youtube-eng.blogspot.com/2015/10/access-to-youtube-analytics-data-in-bulk.html&quot;&gt;launched the YouTube Reporting API&lt;/a&gt; back in October, which helps you bulk up your data requests while keeping them on a low-quota diet.&lt;br /&gt;&lt;br /&gt;Reports made with the API started from the day you scheduled them, going forward. 
Now that it’s been in the wild, we’ve heard another request loud and clear: you don’t just want current data, you want older data, too. We’re happy to announce that the Reporting API now delivers historical data covering 180 days prior to when the reporting job is first scheduled (or July 1st, 2015, whichever is later.)&lt;br /&gt;&lt;br /&gt;Developers with a keen eye may have already noticed this, as it launched a few weeks ago! Just in case you didn’t, you can find more information on how historical data works by checking out the &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/#historical-data&quot;&gt;Historical Data section&lt;/a&gt; of the Reporting API docs.&lt;br /&gt;&lt;br /&gt;(Hint: if you’ve already got some jobs scheduled, you don’t need to do anything! We’ll generate the data automatically.)&lt;br /&gt;&lt;br /&gt;New to the Reporting API? Tantalized by the possibility of all that historical data? Our documentation explains everything you need to know about scheduling jobs and the types of reports available. 
Try it out with our &lt;a href=&quot;https://developers.google.com/apis-explorer/#p/youtubereporting/v1/&quot;&gt;API Explorer&lt;/a&gt;, then dive into the &lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/code_samples/&quot;&gt;sample code&lt;/a&gt; or write your own with one of our &lt;a href=&quot;https://developers.google.com/youtube/reporting/tools/&quot;&gt;client libraries&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;Happy reporting,&lt;br /&gt;&lt;br /&gt;&lt;i&gt;YouTube Developer Relations on behalf of &lt;a href=&quot;https://plus.google.com/109924082343916685028&quot;&gt;Alvin Cham&lt;/a&gt;, &lt;a href=&quot;https://www.google.com/+MarkusLanthaler&quot;&gt;Markus Lanthaler&lt;/a&gt;, &lt;a href=&quot;https://plus.google.com/+MatteoAgosti&quot;&gt;Matteo Agosti&lt;/a&gt;, and &lt;a href=&quot;https://plus.google.com/116663323979440162292/&quot;&gt;Andy Diamondstein&lt;/a&gt;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/190165636953526114'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/190165636953526114'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/05/because-retro-is-in-announcing.html' title='Because retro is in -- announcing historical data in the YouTube Reporting API'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-6778964210115782819</id><published>2016-04-27T10:00:00.000-07:00</published><updated>2020-08-31T12:14:12.456-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title 
type='text'>Announcing the Mobile Data Plan API </title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;text-align: left;&quot; trbidi=&quot;on&quot;&gt;More than half of YouTube watch time happens on mobile devices, with a large and rapidly increasing fraction of this time spent on cellular networks. At the same time, it is common for users to have mobile data plans with usage limits. Users who exhaust their quota can incur overage charges, have their data connections turned off and speeds reduced. When this happens, application performance suffers and user satisfaction decreases.&lt;br /&gt;&lt;br /&gt;At the root of this problem lies the fact that users do not have an easy manner to share data plan information with an application, and, in turn, applications cannot optimize the user’s experience. In an effort to address this limitation we have worked with a few partners in the mobile ecosystem to specify an API that improves data transparency.&lt;br /&gt;&lt;br /&gt;At a high level, the API comprises two parts. First, a mechanism for applications to establish an anonymous identifier of the user’s data plan. This new, Carrier Plan Identifier (CPID), protects the user’s identity and privacy. Second, a mechanism that allows applications, after establishing a CPID, to request information about the user’s data plan from the mobile network operator (MNO). Applications communicate with MNOs using HTTPS and the API encodes data plan information in an extensible JSON-based format.&lt;br /&gt;&lt;br /&gt;We believe the API will improve transparency and Quality of Experience (QoE) for mobile applications such as YouTube. For example, the cost of data can depend on the time of day, where users get discounts for using the network during off-peak hours. 
For another example consider that while users with unlimited data plans may prefer high resolution videos, users who are about to exceed their data caps or are in a busy network may be better served by reduced data rate streams that extend the life of the data plan while still providing good quality.&lt;br /&gt;&lt;br /&gt;Cellular network constraints are even more acute in countries where the cost of data is high, users have small data budgets, and networks are overutilized. With more than 80% of views from outside the United States, YouTube is the first Google application conducting field trials of the Mobile Data Plan API in countries, such as Malaysia, Thailand, the Philippines and Guatemala, where these characteristics are more prominent. These trials aim to bring data plan information as an additional real-time input to YouTube’s decision engine tuned to improve QoE.&lt;br /&gt;&lt;br /&gt;We believe the same data plan information will lay the foundation for other applications and mobile operators to innovate together. This collaboration can make data usage more transparent to users, incentivize efficient use of mobile networks, and optimize user experience.&lt;br /&gt;&lt;br /&gt;We designed the API in cooperation with a number of key partners in the mobile ecosystem, including Telenor Group, Globe Telecom and Tigo, all of which have already adopted and implemented this API. Google also worked with Ericsson to support the Mobile Data Plan API in their OTT Cloud Connect platform. We invite other operators and equipment vendors to implement this solution and offer applicable products and services to their customers.&lt;br /&gt;&lt;br /&gt;The Mobile Data Plan API specification is available from this &lt;a href=&quot;https://docs.google.com/document/d/1LU3qzT-vpK38P2DKF-hLJlXV3hWo_fDPRB00WZUIK_4/edit&quot;&gt;link&lt;/a&gt;.  
We are looking forward to your comments and we are available at: [&lt;a href=&quot;mailto:data-plan-api@google.com&quot;&gt;data-plan-api@google.com&lt;/a&gt;].&lt;br /&gt;&lt;br /&gt;&lt;span id=&quot;docs-internal-guid-e25dc29e-54dd-5f22-54fb-50d293d51756&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 16px; font-style: italic; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Posted by Andreas Terzis, technical lead at Google Access, &amp;amp; Jessica Xu, product manager at YouTube.&lt;/span&gt;&lt;/span&gt;&lt;/div&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/6778964210115782819'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/6778964210115782819'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/04/announcing-mobile-data-plan-api.html' title='Announcing the Mobile Data Plan API '/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-4253663455654146799</id><published>2016-04-20T10:00:00.000-07:00</published><updated>2020-08-31T12:14:11.379-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="video files"/><category scheme="http://www.blogger.com/atom/ns#" term="youtube"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>A look into YouTube’s video file anatomy</title><content type='html'>Over 1 billion people use YouTube, watching hundreds of millions of hours of content all over the world everyday. 
We have been receiving content at a rate exceeding 100 hours/min for the last three years (currently at 400 hours/min). With those kinds of usage statistics what we see on ingest actually says something about the state of video technology today.&lt;br /&gt;&lt;br /&gt;Video files are the currency of video sharing and distribution over the web. Each file contains the video and audio data wrapped up in some container format and associated with metadata that describes the nature of the content in some way. To make sure each user can “Broadcast yourself” we have spent years building systems that can faithfully extract the video and audio data hidden inside almost any kind of file you can imagine. That is why when our users upload to YouTube they have confidence that their video and audio will always appear.&lt;br /&gt;&lt;br /&gt;The video and audio data is typically compressed using a codec and of course the data itself comes in a variety of resolutions, frame rates, sample rates and channels (in the case of audio). As technology evolves, codecs get better, and the nature of the data itself changes, typically toward higher fidelity. But how much variety is there in this landscape and how has that variety changed with time? We’ve been analyzing the anatomy of files you’ve been uploading over the years and think it reflects how video technology has changed.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Audio/video file anatomy&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Audio/video files contain audio and video media which can be played or viewed on some multimedia devices like a TV or desktop or smartphone. Each pixel of video data is associated with values for brightness and color which tells the display how that pixel should appear. A quick calculation on the data rate for the raw video data shows that for 720p video at 30 frames per second the data rate is in excess of 420 Mbits/sec. 
Raw audio data rates are smaller but still significant at about 1.5 MBits/sec for 44.1 KHz sampling with 16 bits per sample. These rates are well in excess of the 10’s of MBits/sec (at most) that many consumers have today. By using compression technology that same &amp;gt; 400 MBits/sec of data can be expressed in less than 5 Mbits/sec.  This means that audio and video compression is a vital part of any practical media distribution system. Without compression we would not be able to stream media over the internet in the way everyone enjoys now.&lt;br /&gt;&lt;br /&gt;There are three main components of media files today: the container, the compressed bitstream itself and finally metadata. The bitstream (called the video and audio “essence”) contains the actual audio and video media in a compressed form. It will also contain information about the size of the pictures and start and end of frames so that the codec knows how to decode the picture data in the right way. This information embedded in the bitstream is still not enough though. The “container” refers to the additional information that helps the decoder work out when a video frame is to be played, and when the audio data should be played relative to the frame. The container often also holds an index to the start of certain frames in the bitstream. This makes it easier for a player system to allow users to “seek” or “fast forward” through the contents. The container will also hold information about the file content itself like the author, and other kinds of “metadata” that could be useful for a rights holder or “menu” on a player for instance. So the bitstream contains the actual picture and audio, but the container lets the player know how that content should be played.&lt;br /&gt;&lt;br /&gt;Standardization of containers and codecs was vital for the digital video industry to take off as it did in the late 1990s. 
The Motion Picture Experts Group (MPEG) was the consortium responsible and they are still active today. The interaction between containers and codecs has been so tight in the past that quite often the container and the codec might have the same name, because they arise from the same standards document. Needless to say, there are many different standards for the various components in a media file. Today we have MPEG and the Alliance for Open Media (AOM) emerging as the two major bodies engaged in creating new media compression and distribution technology. This is what makes the job of YouTube so challenging. We must correctly decode your content despite the endless variety, and despite the frequent errors and missing component in uploaded files. We deal with thousands of combinations of containers and codecs every week.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Containers&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;The plot below shows the percentage of files uploaded having the same container month on month over the last five years. Each container is associated with the same color over time. The legend is ordered from the bottom up. 
The container type used in the largest fraction of uploads is at the bottom.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;438&quot; src=&quot;https://lh5.googleusercontent.com/GjRVpquU5vtQ1wOL7rsV3mSHuwzPZnSM3MX6xCgbsgF_1GReENc0zRlphc5FRoUIXAPyxqZzXXPZcoBdombP284CNFg2o7-fHICf3LrGp-spjdlXLSWXGsM4dUWuTPqTUy2aJMr1&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;739&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff3-7e58-3adb-4fb7764b8abd&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;In 2011, MP4 (.mp4), Audio Video Interleave (.avi), Flash Video (.flv), Advanced Systems Format (.asf) and MPEG Transport Stream (.ts) were more equally distributed than they are now. But over the years MP4 has overtaken them all to become the most common ingest container format. Open source formats like WebM and Matroska seem to have been slowly gaining in popularity since about 2012, which is when we started rolling out the open source VP9 codec. Windows Media files (using the .asf container) and Flash Video have declined significantly. On the other end of the scale, files using Creative Labs video containers (for instance), which were popular before 2011, are hardly ever seen in our ingest today.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Codecs&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;The history of ingested codec types reflects the speed with which new codecs are adopted by hardware manufacturers and the makers of software editing and conforming systems. The chart below looks at the top ten video codecs back in 2011 and reveals how they have fared since then in our ingest profile. 
The VP range of codecs (VP6 - VP8) do still figure in our ingest today and in fact compared to 2011, VP8 ranks seventh in our top ten in 2015. Clearly H.264 is the dominant codec we see in use for upload to YouTube now, but MPEG4 and Windows Media bitstreams are still significant. This is very different from the situation in 2011 when almost every codec had a significant share of our ingest profile. This reflects how heterogeneous the video compression landscape was five years ago, with no dominant compression technology. The chart shows how rapidly the ecosystem moves to adopt a compression technology as soon as it proves itself: just five years. Uploads from mobile devices have also driven this trend as efficient codec technology enables more uploads from low power devices with low bandwidth availability. In that time we have seen the almost complete erosion of Flash Video (FLV) and MPEG1/2 video for upload to YouTube, which all appear to have reached some kind of low volume steady state behavior in our ingest.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;407&quot; src=&quot;https://lh3.googleusercontent.com/v1u2eM8qNz2YrzC5E76yjA1xAF-FsrCuB7o6JPm5qQNcH-eccUi_KAYcsQdJ7IazoI5bZZ4fXeBty84w66wUzaUL85_g3sMuJqNrbMnVc3RF_B7K6Aic3HbJSsu_gBD-UhkcrCI2&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;670&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff4-fe5a-ce83-56fb8c212c3f&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;The situation with audio codecs follows similar trends. The chart below shows the top 15 codecs we see on ingest, measured over 2015. 
Five years ago we saw a very heterogeneous landscape with Raw audio data (PCM), Windows Media (WMA), MPEG and Advanced Audio (AAC)  all contributing significant proportions. Over the intervening time the AAC codec has grown to dominate the profile of audio codecs, but PCM, WMA and MP3 are still significant. It&#39;s interesting that we see a pretty steady rate of media with no audio at all (shown as “No Audio”), although the total proportion is of course small. The use of the VORBIS open source audio codec got a boost in 2012 when the new version was released. Although it is hard to see from the chart, OPUS follows a similar pattern with uploads starting to kick off in late 2012 once the reference software was available and then a boost in uploads in 2013 coinciding with the next API release.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;412&quot; src=&quot;https://lh6.googleusercontent.com/fkV_aLdePTk8n7j_weTgib9Gb3FT72t6lkBCg4Nk2Z67-i657lBqDIGhBm6NYxFxngDUsjlTtkbS9jdBT7HVCCvpvL2I8YvnaVjpbu9s-hRvWgSIQuHcuR-4vPf0c7AQ1rRfExkE&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;702&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff6-152e-29cb-974c926091f3&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Properties&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;But what about the nature of video and audio media itself? Is there evidence to show that capture is increasing in resolution and color fidelity? This section reinforces the law that “in the internet everything gets bigger with time.”&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Picture size&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;The chart below stacks the proportions of each resolution in our ingest against month. 
The legend shows the top ten resolutions by proportion of ingest as measured over the last year, with the topmost label being the largest proportion. There is always some disparity between “standard” picture sizes and the actual uploaded sizes. Those which do not fall into the labels used here are allocated to “OTHER.” Although the vast majority of our ingest shows standard picture sizes, that “OTHER” category has been persistently steady, showing that there will always be about 10 percent of our uploaders who upload non-standard sizes. The trend is clearly toward bigger pictures, with 480p dominating five years ago and HD (720p and 1080p) dominating now. It is interesting that we do not see step changes in behavior but rather a gradual acceleration to higher pixel densities. The 480p resolution does appear to be in a permanent decline however. 720p seems set to replace “vanilla” 480p in about a year.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;393&quot; src=&quot;https://lh6.googleusercontent.com/n3sgLUyaV62MlVRseh_HYsjk-p2krgEJp1Q1I583x5FUG4dRc-np91Ju1UfP12iuLgWKoFGDJnCjN0BZARm5x7eYEJ4IlqhwE-VgYTqQeJ57FX3P-ccBT-RwAtVmYnKr_KsXfW7U&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;713&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff7-3e6e-952c-b16b5f576299&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;With the 4K and 8K formats we see rapid take-up reflected in our ingest. The chart below breaks out just these two resolutions. 
Although understandably small as a proportion of the whole YouTube ingest profile, these formats are still significant and we notice that the take-up accelerated/spiked once announcements were made in 2013 (4K) and 2015 (8K). What is even more interesting is that the upload of 4K content started well before “official” announcement of the support. Our creators are always pushing the limits of our ingest and this is good evidence.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;351&quot; src=&quot;https://lh6.googleusercontent.com/kAPx3Xa5fzFPiEsK3Wgy22eo5aOg1eMmmCqpYAZO71EZ2fOHWHwukQ6dTPkkCEJ65DjNTZTof6cQO0u22VIxIWdzSId975co3fHjSjXe7JigwkkJauNH4MkJ-RlvTQM_57eekUT8&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;537&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff7-fab1-96d7-cb34356df0b3&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Audio channels&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;We observe that an increasing percentage of our media, which contain audio, contain stereo audio tracks as shown below in red. We also show here the relative amount of files having no audio (about 5 percent in 2015), and the trend is similar here as in the audio codec chart shown previously.  A growing proportion of tracks contain 5.1 material but that is swamped by the amount of mono and stereo sound files. 
Making a linear prediction of the curves below would seem to imply that mono audio will decline to less than 5 percent of ingest in just over a year’s time.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;289&quot; src=&quot;https://lh4.googleusercontent.com/J7CZBtEv1CgZq2NG-zWk3UOg2pJ5S79sFmk75L9sMudH5Re_C55pqF9nI9JBk0udESip815EbeUAt-7NBIkhe4jLYHDYqGfxmIm652C86emRk8YS6imdpkwLeiGb9mWuBp0Ne-lZ&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;529&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff8-509b-5d69-3f6bd10dafd1&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Interlacing&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;Interlacing is still with us. This is the legacy TV broadcast practice of constructing a video frame from two half height images that record the odd and even lines of the final frame, but at slightly different times. The fraction of our content that is interlaced on upload appears to be roughly 2-3 percent averaged over the last five years and there is no sign of that actually dwindling. This is perhaps because of the small but significant made-for-TV content that is uploaded. The reasons for the observed rapid changes in some months are intriguing. One suggestion is correlation with unusually large volume TV coverage e.g. 2012 Olympics and the U.S. 
election.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;324&quot; src=&quot;https://lh5.googleusercontent.com/ZnUZok-WvlcLI43r4_3J6Hw1n6314fZFWBs8UmLu6ueevUMhW3Dyl1TAqONQ-TLnXnBUkaitHQgjQhe574fZ4HByGgcoEXVfXBjCn6Ns_5WQ-Ar1DJmZksd_EWlX8MmshZL8Wh4d&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;672&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ff9-39bf-1b3e-bdfcd5dde990&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Color spaces&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;We are continually working on our ability to reproduce color faithfully between ingest and display. This is a notoriously challenging task across the consumer display industry for TV’s, monitors and mobile devices. The first step to color nirvana is the correct specification of the color space in the associated video file. Although color space specifications have been in place for some time, it has taken a long while for file-based content to properly represent this data across a wide range of consumer devices. The chart below reflects our observations of the top five spaces we see. We started collecting information in 2012 and compared to the stability in codecs and containers, the specification of color spaces in video data is clearly still evolving. It is only in the last three years that we have started to observe more consistent color labeling of video data, and as the chart shows below, BT709 (the default color space for HD resolution) has emerged as the dominant color space definition. At the end of 2015 there was still an alarmingly large proportion of video files without any color information, more than 70 percent. 
Note that the vertical axis on the chart below starts from 70 percent. The trend in that proportion is downwards and if we extend our curve of the decline in unspecified color spaces it would appear that it will be about a year before we can expect to see the majority of files having some color specification, and two years for almost all files to contain that metadata. We have just started to observe files expressing the recent BT2020 color space, being ingested at the end of 2015. These of course account for a tiny proportion of ingest (&amp;lt; .005 percent). It does herald the start of the HDR technology rollout though (as BT2020 is a color space associated with that format) and reflects various announcements about HDR capable devices made at CES 2016.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;362&quot; src=&quot;https://lh5.googleusercontent.com/fOjQ4atwcb0uAuesxoN33X_eugAweHbR69Nk03_g3WlVhScqIzbCm5A1qQVLZlnFWojti5tGRUuMP41ysbcKgOMCgWJMq1jIQ82RHPOGJTCQSK8A9N03wpYDX-kiJ-SDCGy0HpJ8&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;705&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ffa-bc16-0258-54766db1b1ef&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Frame rates&lt;/b&gt;&lt;br /&gt;&lt;br /&gt;The chart below shows how the use of a range of frame rates has actually not changed that much over time. As expected the U.S. and EU standards of 30 and 25Hz respectively, dominate the distribution. Less expected is that low frame rates of 15fps and lower also significantly impact our ingest. 
This is because of the relatively large proportion of educational material including slide shows, as well as music slide decks that are uploaded to YouTube. That sort of material tends to be captured at low frame rates. High frame rate (HFR) material (e.g. from 48Hz and upwards) is a steady flow especially since the announcement of HFR support in the YouTube player in 2014. Before 2014, the ceiling of our output target video was 30fps but since then we have raised the ceiling to 60fps. However the trend is not increasing as much as is say 1080p ingest itself. This possibly reflects bandwidth constraints on upload as well as the fact that most capture today especially on mobile devices still defaults to 25 or 30fps.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;arial&amp;quot;; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;433&quot; src=&quot;https://lh5.googleusercontent.com/ilHSkRhChwSKeA-APlnzxvGPHE25yH8XSDm_kvkBGhquF-WBdfboaJsqj7ZHIs_stVu9hX1Isi5SjJosSkJZ-atqpooz19olBusiflV08c5EFcLARDW80KX9Wed2hlx6uZWXzdh9&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;722&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-dc34ce5c-2ffb-6f94-d53f-9a22e1cdb5c0&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;We continuously analyze both a wide angle and close up view of the video file activity worldwide. That has given us a unique perspective on the evolution of video technology. In a sense the data is a reflection of the consensus of device manufacturers and creators in the area of media capture and creation. So we can see the growing agreement around video codecs, frame rates and stereo audio. Color space specification is still very poor however, and some expected consensus have not emerged. 
For example in the area of HFR content creation, 60+ fps is not quite yet on a growth curve as HD resolution has been over the last year.&lt;br /&gt;&lt;br /&gt;The data presented here show that even in the last five years the variability in data types and formats is reducing. However, as with many broadcasters and streaming sites we see enough variability in our ingested file profiles that we remain keen on standardization activities. We look forward to continuing engagement of the YouTube and Google engineering community in SMPTE, MPEG and AOM activities.&lt;br /&gt;&lt;br /&gt;Even with the dominance of certain technologies like H.264/AAC codecs and the MOV type containers, there will always be a small but significant portion of audio video data that falls outside the “consensus.” These small proportions are important to us however, because we want you to be confident that we’re going to do our darndest to help you broadcast yourself no matter what device you use to make your clip.&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Anil Kokaram, Tech Lead/Engineering Manager, AV Algorithms Team, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=T19g7w2NnT0&quot;&gt;Carlos Brathwaite&#39;s 4 sixes&lt;/a&gt;,&quot; Thierry Foucu, Tech Lead Transcoder Team, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=lX_pF03vCSU&quot;&gt;Sale of the Century&lt;/a&gt;,&quot; and Yang Hu, Software Engineer, recently watched &quot;&lt;a href=&quot;https://www.youtube.com/watch?v=619yq7Kf_YE&quot;&gt;MINECRAFT: How to build wooden mansion&lt;/a&gt;.&quot;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4253663455654146799'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/4253663455654146799'/><link rel='alternate' type='text/html' 
href='http://youtube-eng.googleblog.com/2016/04/a-look-into-youtubes-video-file-anatomy.html' title='A look into YouTube’s video file anatomy'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh5.googleusercontent.com/GjRVpquU5vtQ1wOL7rsV3mSHuwzPZnSM3MX6xCgbsgF_1GReENc0zRlphc5FRoUIXAPyxqZzXXPZcoBdombP284CNFg2o7-fHICf3LrGp-spjdlXLSWXGsM4dUWuTPqTUy2aJMr1=s72-c" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-296529588456836994</id><published>2016-04-19T10:00:00.000-07:00</published><updated>2020-08-31T12:14:10.752-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="NAB 2016"/><category scheme="http://www.blogger.com/atom/ns#" term="virtual reality"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube live"/><title type='text'>New YouTube live features: live 360, 1440p, embedded captions, and VP9 ingestion</title><content type='html'>Yesterday at NAB 2016 we announced exciting new live and virtual reality features for YouTube. We’re working to get you one step closer to actually being in the moments that matter while they are happening. 
Let’s dive into the new features and capabilities that we are introducing to make this possible:&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Live 360&lt;/b&gt;: About a year ago we announced the &lt;a href=&quot;http://youtubecreator.blogspot.com/2015/03/a-new-way-to-see-and-share-your-world.html&quot;&gt;launch&lt;/a&gt; of 360-degree videos at YouTube, giving &lt;a href=&quot;https://www.youtube.com/360&quot;&gt;creators&lt;/a&gt; a new way to connect to their audience and share their experiences. This week, we took the next step by introducing support for 360-degree on &lt;a href=&quot;https://support.google.com/youtube/answer/2474026?hl=en&quot;&gt;YouTube live&lt;/a&gt; for all creators and viewers around the globe.&lt;br /&gt;&lt;br /&gt;To make sure creators can tell awesome stories with virtual reality, we’ve been working with several camera and software vendors to support this new feature, such as &lt;a href=&quot;https://allie.camera/&quot;&gt;ALLie&lt;/a&gt; and &lt;a href=&quot;http://www.video-stitch.com/&quot;&gt;VideoStitch&lt;/a&gt;. Manufacturers interested in 360 through our Live API can use our &lt;a href=&quot;https://developers.google.com/youtube/v3/live/docs/liveStreams&quot;&gt;YouTube Live Streaming API&lt;/a&gt; to send 360-degree live streams to YouTube.&lt;br /&gt;&lt;br /&gt;Other 360-degree cameras can also be used to live stream to YouTube as long as they produce compatible output, for example, cameras that can act as a webcam over USB (see &lt;a href=&quot;https://support.google.com/youtube/topic/2853712?hl=en&amp;amp;ref_topic=4355169&quot;&gt;this guide&lt;/a&gt; for details on how to live stream to YouTube). Like 360-degree uploads, 360-degree live streams need to be streamed in the &lt;a href=&quot;https://en.wikipedia.org/wiki/Equirectangular_projection&quot;&gt;equirectangular projection&lt;/a&gt; format. 
Creators can use our &lt;a href=&quot;https://support.google.com/youtube/answer/2853700?hl=en&quot;&gt;Schedule Events&lt;/a&gt; interface to set up 360 live streams using this new option:&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: Roboto; font-size: 13.3333px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img alt=&quot;360_checkbox.png&quot; height=&quot;59&quot; src=&quot;https://lh4.googleusercontent.com/wIU01lWhPJBJX23amQuDZTX3U2_JS4soFAqLWtQHDWMOTWL7sX-s7HqjJ29W_fdEcRSG41SfZP1wWZQInqQv8FZOyW4sIWrIaFrDrtCM1GHgw1wPNK_l1r--lJ95ACw_nsZcZiwJ&quot; style=&quot;border: 3px solid rgb(183, 183, 183); transform: rotate(0rad);&quot; width=&quot;181&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-8b7c14f6-2f31-f5a0-1a8a-1e6e1ef8435e&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;Check out this &lt;a href=&quot;https://support.google.com/youtube/?p=360#topic=4355266&quot;&gt;help center page&lt;/a&gt; for some details.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: Arial; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;194&quot; src=&quot;https://lh5.googleusercontent.com/8nyaRLDxu4Kz1WvNxCDvS6hhhm_JGT50nr1dsDfolpJWwRrUl1Fc4Ob9b1SHmKAT4p6ACSLsFRCsrYCXdOPG5bGNF038jJJzyj_rTgOadlwZ3AYeVsw01y706KL73MvZVxI8L3p2&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;344&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-8b7c14f6-2f32-5891-2be0-a12150246859&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;&lt;b&gt;1440p live streaming&lt;/b&gt;: Content such as live 360 as well as video games are best enjoyed at high resolutions and high frame rates. 
We are also announcing support of 1440p 60fps resolution for live streams on YouTube. Live streams at 1440p have 70 percent more pixels than the standard HD resolution of 1080p. To ensure that your stream can be viewed on the broadest possible range of devices and networks, including those that don’t support such high resolutions or frame rates, we perform full transcoding on all streams and resolutions. A 1440p60 stream gets transcoded to 1440p60, 1080p60 and 720p60 as well as all resolutions from 1440p30 down to 144p30.&lt;br /&gt;&lt;br /&gt;Support for 1440p will be available from our &lt;a href=&quot;https://support.google.com/youtube/answer/2853700?hl=en&quot;&gt;creation dashboard&lt;/a&gt; as well as our &lt;a href=&quot;https://developers.google.com/youtube/v3/live/docs/liveStreams#cdn&quot;&gt;Live API&lt;/a&gt;. Creators interested in using this high resolution should make sure that their encoder is able to encode at such resolutions and that they have sufficient upload bandwidth on their network to sustain successful ingestion. A good rule of thumb is to provision at least twice the video bitrate.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;VP9 ingestion / DASH ingestion&lt;/b&gt;: We are also announcing support for &lt;a href=&quot;http://youtube-eng.blogspot.com/2015/04/vp9-faster-better-buffer-free-youtube.html&quot;&gt;VP9&lt;/a&gt; ingestion. VP9 is a modern video codec that lets creators upload higher resolution videos with lower bandwidth, which is particularly important for high resolution 1440p content. To facilitate the ingestion of this new video codec we are also announcing support for &lt;b&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP&quot;&gt;DASH&lt;/a&gt; ingestion&lt;/b&gt;, which is a simple, codec agnostic HTTP-based protocol. 
DASH ingestion will support &lt;a href=&quot;https://en.wikipedia.org/wiki/H.264/MPEG-4_AVC&quot;&gt;H.264&lt;/a&gt;&amp;nbsp;as well as &lt;a href=&quot;https://en.wikipedia.org/wiki/VP9&quot;&gt;VP9&lt;/a&gt; and &lt;a href=&quot;https://en.wikipedia.org/wiki/VP8&quot;&gt;VP8&lt;/a&gt;. HTTP-based ingestion is more resilient to corporate firewalls and also allows ingestion over &lt;a href=&quot;https://en.wikipedia.org/wiki/HTTPS&quot;&gt;HTTPS&lt;/a&gt;. It is also a simpler protocol to implement for game developers that want to offer in game streaming support with royalty free video codecs. &lt;a href=&quot;http://www.mediaexcel.com/&quot;&gt;MediaExcel&lt;/a&gt; and &lt;a href=&quot;https://www.wowza.com/&quot;&gt;Wowza Media Systems&lt;/a&gt; will both be demoing DASH VP9 encoding with YouTube live at their NAB booths.&lt;br /&gt;&lt;br /&gt;We will soon publish a detailed guide to DASH Ingestion on our support web site. For developers interested in DASH Ingestion, please join &lt;a href=&quot;https://groups.google.com/forum/#!forum/youtube-live-developers&quot;&gt;this Google group&lt;/a&gt; to receive updates.&lt;br /&gt;&lt;br /&gt;&lt;b&gt;Embedded captions&lt;/b&gt;: To provide more support to broadcasters, we now accept embedded &lt;a href=&quot;https://en.wikipedia.org/wiki/EIA-608&quot;&gt;EIA-608&lt;/a&gt;/&lt;a href=&quot;https://en.wikipedia.org/wiki/CEA-708&quot;&gt;CEA-708&lt;/a&gt; captions over RTMP (H.264/AAC). That makes it easier to send captioned video content to YouTube and no longer requires posting caption data over side-band channels. We initially offer caption support for streams while they are live and will soon support the transitioning of caption data to the live recordings as well. 
Visit the &lt;a href=&quot;https://support.google.com/youtube/answer/3068031&quot;&gt;YouTube Help Center&lt;/a&gt; for more information on our live captioning support.&lt;br /&gt;&lt;br /&gt;&lt;div class=&quot;separator&quot; style=&quot;clear: both; text-align: center;&quot;&gt;&lt;span style=&quot;font-family: Arial; font-size: 14.6667px; margin-left: 1em; margin-right: 1em; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;147&quot; src=&quot;https://lh5.googleusercontent.com/Sg4hGN3elb0R886KXnp_zjjVJGlBOWevHYiugyZ_ovQPR5gV9DzMLEfHzdRXhn5x8XcTaM1fuRlutgNYp5A2xg8D7sR06PHR36gG3FyA-vnkJBrVRTYe_Az9VK3gnodMn5FRWsUB&quot; style=&quot;border: 1px solid rgb(0, 0, 0); transform: rotate(0rad);&quot; width=&quot;456&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;span id=&quot;docs-internal-guid-8b7c14f6-2f34-6cea-c289-15e496a342b2&quot;&gt;&lt;/span&gt;&lt;br /&gt;&lt;br /&gt;We first launched live streaming back in 2011, and we’ve live streamed memorable moments: 2012 Olympics, Red Bull Stratos Jump, League of Legends Championship, and Coachella Music Festival. 
We are excited to see what our community can create with these new tools!&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Nils Krahnstoever, Engineering Manager for Live&lt;/i&gt;&lt;br /&gt;&lt;i&gt;Kurt Wilms, Senior Product Manager for VR and Live&lt;/i&gt;&lt;br /&gt;&lt;i&gt;Sanjeev Verma, Product Manager for Video Formats&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/296529588456836994'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/296529588456836994'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2016/04/new-youtube-live-features-live-360.html' title='New YouTube live features: live 360, 1440p, embedded captions, and VP9 ingestion'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh4.googleusercontent.com/wIU01lWhPJBJX23amQuDZTX3U2_JS4soFAqLWtQHDWMOTWL7sX-s7HqjJ29W_fdEcRSG41SfZP1wWZQInqQv8FZOyW4sIWrIaFrDrtCM1GHgw1wPNK_l1r--lJ95ACw_nsZcZiwJ=s72-c" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-1381947135917286815</id><published>2015-12-17T09:00:00.000-08:00</published><updated>2020-08-31T12:14:10.058-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Live Streaming API"/><category scheme="http://www.blogger.com/atom/ns#" term="LiveBroadcasts API"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Chat it up, streamers! 
New Live Chat, Fan Funding &amp; Sponsorships APIs</title><content type='html'>From the moment &lt;a href=&quot;http://gaming.youtube.com/&quot; target=&quot;_blank&quot;&gt;YouTube Gaming&lt;/a&gt; launched in August, we’ve consistently seen a pair of requests from our community: “Where are the chat bots? Where are the stream overlays?” A number of developers were happy to oblige, and some great new tools have launched for YouTube streamers.&lt;br /&gt;&lt;br /&gt;With those new tools have come some feedback on our APIs. Particularly, that there aren’t enough of them. So much is happening on YouTube live streams -- chatting, fan funding, sponsoring -- but there’s no good way to get the data out and into the types of apps that streamers want, like on-screen overlays, chat moderation bots and more.&lt;br /&gt;&lt;br /&gt;Well well, what have we here? A whole bunch of new additions to the &lt;b&gt;&lt;a href=&quot;https://developers.google.com/youtube/v3/live/docs/&quot; target=&quot;_blank&quot;&gt;Live Streaming API&lt;/a&gt;&lt;/b&gt;, getting you access to all those great chat messages, fan funding alerts and new sponsor messages!&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;&lt;b&gt;Fan Funding events&lt;/b&gt;, which occur when a user makes a one-time voluntary payment to support a creator.&lt;/li&gt;&lt;li&gt;&lt;b&gt;Live Chat events&lt;/b&gt;, which allow you to read the content of a YouTube live chat in real time, as well as adding new chat messages on behalf of the authenticated channel.&lt;/li&gt;&lt;li&gt;&lt;b&gt;Live Chat bans&lt;/b&gt;, which enable the automated application of chat “time-outs” and “bans.”&lt;/li&gt;&lt;li&gt;&lt;b&gt;Sponsors&lt;/b&gt;, which allows access to a list of YouTube users that are sponsoring the channel. 
A sponsor provides recurring monetary support to a creator and receives special benefits.&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;In addition, we’ve closed a small gap in the &lt;a href=&quot;https://developers.google.com/youtube/v3/live/docs/liveBroadcasts/list&quot; target=&quot;_blank&quot;&gt;LiveBroadcasts API&lt;/a&gt; by adding the ability to retrieve and modify the LiveBroadcast object for a channel’s “Stream now” stream.&lt;br /&gt;&lt;br /&gt;As part of the development process we gave early access to a few folks, and we’re excited to show off some great integrations that launch today:&lt;br /&gt;&lt;br /&gt;&lt;ul&gt;&lt;li&gt;Using our new Sponsorships feature? &lt;b&gt;&lt;u&gt;&lt;a href=&quot;https://blog.discordapp.com/youtube-gaming-integration/&quot; target=&quot;_blank&quot;&gt;Discord&lt;/a&gt;&lt;/u&gt;&lt;/b&gt; will let you offer your sponsors access to private voice and text servers.&lt;/li&gt;&lt;li&gt;Add live chat, new sponsors and new fan funding announcements to an overlay with the latest beta of &lt;b&gt;&lt;u&gt;&lt;a href=&quot;http://www.gameshow.net/&quot; target=&quot;_blank&quot;&gt;Gameshow&lt;/a&gt;&lt;/u&gt;&lt;/b&gt;.&lt;/li&gt;&lt;li&gt;Looking for some help with moderating and managing your live chat? 
Try out &lt;b&gt;&lt;u&gt;&lt;a href=&quot;https://beta.nightbot.tv/&quot; target=&quot;_blank&quot;&gt;Nightbot&lt;/a&gt;&lt;/u&gt;&lt;/b&gt;, a chat bot that can perform a variety of moderating tasks specifically designed to create a more efficient and friendly environment for your community.&lt;/li&gt;&lt;li&gt;Show off your live chat with an overlay in &lt;b&gt;&lt;u&gt;&lt;a href=&quot;https://www.xsplit.com/blog/youtube-live-chat-viewer-is-now-available-in-xsplit-broadcaster&quot; target=&quot;_blank&quot;&gt;XSplit Broadcaster&lt;/a&gt;&lt;/u&gt;&lt;/b&gt; using their new YouTube Live Chat plugin.&lt;/li&gt;&lt;/ul&gt;&lt;br /&gt;We’ve also spotted some libraries and sample code on Github that might help get you started, including &lt;a href=&quot;https://github.com/iopred/ytlivechatapi&quot; target=&quot;_blank&quot;&gt;this chat library in Go&lt;/a&gt; and this one in &lt;a href=&quot;https://github.com/shughes-uk/python-youtubechat&quot; target=&quot;_blank&quot;&gt;Python&lt;/a&gt;.&lt;br /&gt;&lt;br /&gt;We hope these new APIs can bring whole new categories of tools to the creator community. We’re excited to see what you build!&lt;br /&gt;&lt;br /&gt;&lt;i&gt;Marc Chambers, Developer Relations, recently watched “&lt;a href=&quot;https://gaming.youtube.com/watch?v=zjPgDPi99kc&quot; target=&quot;_blank&quot;&gt;ArmA 3| Episode 1|Pilot the CH53 E SS&lt;/a&gt;.”&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1381947135917286815'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1381947135917286815'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2015/12/chat-it-up-streamers-new-live-chat-fan.html' title='Chat it up, streamers! 
New Live Chat, Fan Funding &amp; Sponsorships APIs'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-631408610317243115</id><published>2015-11-10T06:30:00.000-08:00</published><updated>2020-08-31T12:14:12.211-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="chrome"/><category scheme="http://www.blogger.com/atom/ns#" term="compositing"/><category scheme="http://www.blogger.com/atom/ns#" term="rendering"/><category scheme="http://www.blogger.com/atom/ns#" term="video"/><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Smoother &lt;video&gt; in Chrome</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; line-height: 1.38; white-space: pre-wrap;&quot;&gt;Video quality matters, and when an HD or HFR playback isn’t smooth, we notice. Chrome noticed. YouTube noticed. 
So we got together to make YouTube video playback smoother in Chrome, and we call it Project Butter.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;For some context, our brains fill in the motion in between frames if each frame is onscreen the same amount of time - this is called &lt;/span&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/Motion_interpolation&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;motion interpolation&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;. In other words, a 30 frames per second video won’t appear smooth unless each frame is spaced evenly each 1/30th of a second. 
Smoothness is more complicated than just this - you can read more about it in this &lt;/span&gt;&lt;a href=&quot;http://blogs.valvesoftware.com/abrash/why-virtual-isnt-real-to-your-brain-judder/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;article by Michael Abrash at Valve&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;. &lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Frame rates, display refresh rates and cadence&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Your device’s screen redraws itself at a certain frame rate. Videos present frames at a certain rate. These rates are often not the same. 
At YouTube we commonly see videos authored at &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=1XP6Rpy7KCw&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;24&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=xZc4Vv-vQYM&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;25&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, 29.97, 30, 48, &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=GqqsKO4-D4I&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;50&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: 
transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, 59.94, and &lt;/span&gt;&lt;a href=&quot;http://youtube.com/watch?v=ZGIiXmAi14w&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;60&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; frames per second (fps) and these videos are viewed on displays with different &lt;/span&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/Refresh_rate&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;refresh rates&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; - the most common being 50Hz (Europe) and 60Hz (USA). 
&amp;nbsp;&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;For a video to be smooth we need to figure out the best, most regular way to display the frames - the best cadence. The ideal cadence is calculated as the ratio of the display rate to frame rate. For example, if we have a 60Hz display (a 1/60 second display interval) and a 30 fps clip, 60 / 30 == 2 which means each video frame should be displayed for two display intervals of total duration 2 * 1/60 second.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;We played videos a bunch of different ways and scored them on smoothness. 
&amp;nbsp;&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Smoothness score&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Using off the shelf &lt;/span&gt;&lt;a href=&quot;https://www.google.com/webhp?#tbm=shop&amp;amp;q=elgato+HD60&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;HDMI capture hardware&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; and some &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=ztgXC1e6mJI&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; 
font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;special video clips&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; we computed a percentage score based on the number of times each video frame was displayed relative to a calculated optimal display count. The higher the score, the more frames aligned with the optimal display frequency. Below is a figure showing how Chrome 43 performed when playing a 30fps clip back on a 60Hz display:&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;170px;&quot; src=&quot;https://lh3.googleusercontent.com/8BcJoJflL8ksvpip_eMVgjZskFh1PTuaV0-wQSTOL7CHtuafsoYAmnpVpu_thiN-nOV2FAnOcomGZvyf2FEkssif-h3-eaWP6gCMXhndCHIs7pqG1rVZkHe_lfjzmedyxn7Nq00&quot; style=&quot;-webkit-transform: rotate(0.00rad); border: none; transform: rotate(0.00rad);&quot; width=&quot;650px;&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; 
font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Smoothness: 68.49%, ~Dropped: 5 / 900 (0.555556%)&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The y-axis is the number of times each frame was displayed, while the x-axis is the frame number. As mentioned previously the calculated ideal display count for a 30fps clip on a 60Hz display is 2. So, in an ideal situation, the graph should be a flat horizontal line at 2, yet Chrome dropped many frames and displayed certain frames for as many as 4 display cycles! The smoothness score reflects this - &amp;nbsp;only 68.49 percent of frames were displayed correctly. 
How could we track down what was going on?&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Using some of the &lt;/span&gt;&lt;a href=&quot;https://www.chromium.org/developers/how-tos/trace-event-profiling-tool&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;performance tracing tools&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; built into Chrome, we identified &lt;/span&gt;&lt;a href=&quot;https://code.google.com/p/chromium/issues/detail?id=439548&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;timing issues&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; 
font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; inherent to the existing design for video rendering as the culprit. These issues resulted in both missed and irregular video frames on a regular basis.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;430&quot; src=&quot;https://lh5.googleusercontent.com/qwi7ll3RjfiRLKXllBIFv90yEnZ6Nluo99GzEJ15hI1XKP3uNPAcLMkxGz0K237W7riRxGmNTMcAjcxKx1mPoSSV-leIHIOwP6CjyJvd2Fn1SF_ZIA2B32OdBXHE-CYv7lM6UA0&quot; style=&quot;border: none; transform: rotate(0rad);&quot; width=&quot;640&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;br /&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;There were two main problems in the interactions between Chrome’s &lt;/span&gt;&lt;a href=&quot;https://en.wikipedia.org/wiki/Compositing_window_manager&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 
13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;compositor&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; (responsible for drawing frames) and its media pipeline (responsible for generating frames) -- &amp;nbsp;&lt;/span&gt;&lt;/div&gt;&lt;ol style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: &#39;Open Sans&#39;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The compositor didn’t have a timely way of knowing when a video frame needed display. 
Video frames were selected on the media pipeline thread while the compositor would occasionally come along looking for them on the compositor thread, but if the compositor thread was busy it wouldn’t get the notification on time.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: &#39;Open Sans&#39;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Chrome’s media pipeline didn’t know &lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 700; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;when&lt;/span&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; the compositor would be ready to draw its next new frame. 
This led to the media pipeline sometimes picking a frame that was too old by the time the compositor displayed it.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ol&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;In Chrome 44, we re-architected the media and compositor pipelines to communicate carefully about the intent to generate and display. Additionally, we also improved which video frames to pick by using the optimal display count information. With these changes, Chrome 44 significantly improved on smoothness scores across all video frame rates and display refresh rates:&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; &lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;img height=&quot;170px;&quot; 
src=&quot;https://lh4.googleusercontent.com/nD7fdQPhVYDCdCpWIRpqFwL1azhwIW8C8ezIMclD-h9QKAxtCuqZW7uTuGTK2iubOdvWOsr-Eku0RdhC0yym00I-3pexTralRNybrWwqbGySGWgH_RT4P1xLLSOljulXa4Tt0mk&quot; style=&quot;-webkit-transform: rotate(0.00rad); border: none; transform: rotate(0.00rad);&quot; width=&quot;650px;&quot; /&gt;&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt; text-align: center;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Smoothness: 99.33%, ~Dropped: 0 / 900 (0.000000%)&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Smooth like butter. 
Read more in &lt;/span&gt;&lt;a href=&quot;https://docs.google.com/document/d/1oUb_ap0TAa1sDci0wEQ6BEzd_lB7Eghv93NXyZ3952E/preview&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;public design document&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, if you’re interested in further details. &lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt; &lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;i&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Dale Curtis, Software Engineer, recently watched &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=iOztnsBPrAA&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;WARNING: SCARIEST GAME IN YEARS | Five Nights at Freddy&#39;s - Part 1&lt;/span&gt;&lt;/a&gt;&lt;/i&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;i&gt;&lt;span 
style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Richard Leider, Engineering Manager, recently watched &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=a1FNSgqdhNk&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Late Art Tutorial&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;.&lt;/span&gt;&lt;/i&gt;&lt;/div&gt;&lt;i&gt;&lt;span style=&quot;font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Renganathan Ramamoorthy, Product Manager, recently watched &lt;/span&gt;&lt;a href=&quot;https://www.youtube.com/watch?v=9skaRCdcphc&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;color: #1155cc; font-family: &amp;quot;open sans&amp;quot;; font-size: 13.3333px; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Video Game High School&lt;/span&gt;&lt;/a&gt;&lt;/i&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/631408610317243115'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/631408610317243115'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2015/11/smoother-in-chrome.html' title='Smoother 
&amp;lt;video&amp;gt; in Chrome'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="https://lh3.googleusercontent.com/8BcJoJflL8ksvpip_eMVgjZskFh1PTuaV0-wQSTOL7CHtuafsoYAmnpVpu_thiN-nOV2FAnOcomGZvyf2FEkssif-h3-eaWP6gCMXhndCHIs7pqG1rVZkHe_lfjzmedyxn7Nq00=s72-c" height="72" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-7799598191369167550</id><published>2015-10-08T09:00:00.000-07:00</published><updated>2020-08-31T12:14:13.011-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Improving YouTube video thumbnails with deep neural nets</title><content type='html'>&lt;p&gt;&lt;a href=&quot;https://support.google.com/youtube/answer/72431?hl=en&quot;&gt;Video thumbnails&lt;/a&gt; are often the first things viewers see when they look for something interesting to watch. A strong, vibrant, and relevant thumbnail draws attention, giving viewers a quick preview of the content of the video, and helps them to find content more easily.  Better thumbnails lead to more clicks and views for video creators. 
&lt;/p&gt; &lt;p&gt; Inspired by the recent remarkable advances of &lt;a href=&quot;https://en.wikipedia.org/wiki/Deep_learning#Deep_neural_networks&quot;&gt;deep neural networks&lt;/a&gt; (DNNs) in computer vision, such as &lt;a href=&quot;http://googleresearch.blogspot.com/2014/09/building-deeper-understanding-of-images.html&quot;&gt;image&lt;/a&gt; and &lt;a href=&quot;http://googleresearch.blogspot.com/2015/04/beyond-short-snippets-deep-networks-for.html&quot;&gt;video&lt;/a&gt; classification, our team has recently launched an improved automatic YouTube &quot;thumbnailer&quot; in order to help creators showcase their video content. Here is how it works.  &lt;/p&gt; &lt;p&gt;&lt;b&gt;The Thumbnailer Pipeline&lt;/b&gt;&lt;br /&gt;While a video is being uploaded to YouTube, we first sample frames from the video at one frame per second. Each sampled frame is evaluated by a &lt;i&gt;quality model&lt;/i&gt; and assigned a single &lt;i&gt;quality score&lt;/i&gt;. The frames with the highest scores are selected, enhanced and rendered as thumbnails with different sizes and aspect ratios. Among all the components, the quality model is the most critical and turned out to be the most challenging to develop. In the latest version of the thumbnailer algorithm, we used a DNN for the quality model. So, what is the &lt;i&gt;quality model&lt;/i&gt; measuring, and how is the score calculated? 
&lt;/p&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;a href=&quot;http://4.bp.blogspot.com/-Q4ld3f5VShg/VhVGKDb3y9I/AAAAAAAAAvE/EwdS61_72c4/s1600/image02.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: auto; margin-right: auto;&quot;&gt;&lt;img border=&quot;0&quot; height=&quot;236&quot; src=&quot;http://4.bp.blogspot.com/-Q4ld3f5VShg/VhVGKDb3y9I/AAAAAAAAAvE/EwdS61_72c4/s640/image02.png&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;The main processing pipeline of the thumbnailer.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt; &lt;p&gt;&lt;b&gt;(Training) The Quality Model&lt;/b&gt;&lt;br /&gt;Unlike the task of identifying if a video contains your favorite animal, judging the visual quality of a video frame can be very subjective - people often have very different opinions and preferences when selecting frames as video thumbnails. One of the main challenges we faced was how to collect a large set of well-annotated training examples to feed into our neural network. Fortunately, on YouTube, in addition to having algorithmically generated thumbnails, many YouTube videos also come with carefully designed custom thumbnails uploaded by creators. Those thumbnails are typically well framed, in-focus, and center on a specific subject (e.g. the main character in the video).  We consider these custom thumbnails from popular videos as positive (high-quality) examples, and randomly selected video frames as negative (low-quality) examples. Some examples of the training images are shown below. 
&lt;p&gt; &lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;a href=&quot;http://4.bp.blogspot.com/-I9DYDBAcJyA/VhVGY3ZuFwI/AAAAAAAAAvM/pk3SlykWvRg/s1600/image00.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: auto; margin-right: auto;&quot;&gt;&lt;img border=&quot;0&quot; height=&quot;210&quot; src=&quot;http://4.bp.blogspot.com/-I9DYDBAcJyA/VhVGY3ZuFwI/AAAAAAAAAvM/pk3SlykWvRg/s640/image00.png&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Example training images.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;The visual quality model essentially solves a problem we call &quot;binary classification&quot;: given a frame, is it of high quality or not? We trained a DNN on this set using a similar architecture to the Inception network in &lt;a href=&quot;http://googleresearch.blogspot.com/2014/09/building-deeper-understanding-of-images.html&quot;&gt;GoogLeNet&lt;/a&gt; that achieved the top performance in the ImageNet 2014 competition.  &lt;p&gt;&lt;b&gt;Results&lt;/b&gt;&lt;br /&gt;Compared to the previous automatically generated thumbnails, the DNN-powered model is able to select frames with much better quality. In a human evaluation, the thumbnails produced by our new models are preferred to those from the previous thumbnailer in more than 65% of side-by-side ratings. 
Here are some examples of how the new quality model performs on YouTube videos: &lt;/p&gt; &lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;a href=&quot;http://3.bp.blogspot.com/-sMNtgX6u28o/VhVG3VriqoI/AAAAAAAAAvU/oyt22qJb3ns/s1600/image03.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: auto; margin-right: auto;&quot;&gt;&lt;img border=&quot;0&quot; height=&quot;206&quot; src=&quot;http://3.bp.blogspot.com/-sMNtgX6u28o/VhVG3VriqoI/AAAAAAAAAvU/oyt22qJb3ns/s640/image03.png&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Example frames with low and high quality score from the DNN quality model, from video “&lt;a href=&quot;https://www.youtube.com/watch?v=Ev394hPypiU&quot;&gt;Grand Canyon Rock Squirrel&lt;/a&gt;”.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt;&lt;table align=&quot;center&quot; cellpadding=&quot;0&quot; cellspacing=&quot;0&quot; class=&quot;tr-caption-container&quot; style=&quot;margin-left: auto; margin-right: auto; text-align: center;&quot;&gt;&lt;tbody&gt;&lt;tr&gt;&lt;td style=&quot;text-align: center;&quot;&gt;&lt;a href=&quot;http://2.bp.blogspot.com/-Z8DNhiLueVI/VhVHDuRGGtI/AAAAAAAAAvc/JPpQkXiXKLo/s1600/image01.png&quot; imageanchor=&quot;1&quot; style=&quot;margin-left: auto; margin-right: auto;&quot;&gt;&lt;img border=&quot;0&quot; height=&quot;160&quot; src=&quot;http://2.bp.blogspot.com/-Z8DNhiLueVI/VhVHDuRGGtI/AAAAAAAAAvc/JPpQkXiXKLo/s640/image01.png&quot; width=&quot;640&quot; /&gt;&lt;/a&gt;&lt;/td&gt;&lt;/tr&gt;&lt;tr&gt;&lt;td class=&quot;tr-caption&quot; style=&quot;text-align: center;&quot;&gt;Thumbnails generated by old vs. 
new thumbnailer algorithm.&lt;/td&gt;&lt;/tr&gt;&lt;/tbody&gt;&lt;/table&gt; &lt;p&gt;We recently launched this new thumbnailer across YouTube, which means creators can start to choose from higher quality thumbnails generated by our new thumbnailer. Next time you see an awesome YouTube thumbnail, don’t hesitate to give it a &lt;i&gt;thumbs up&lt;/i&gt;. ;) &lt;/p&gt; &lt;span class=&quot;byline-author&quot; style=&quot;font-style: oblique;&quot;&gt; &lt;p&gt;Weilong Yang, software engineer, recently watched “&lt;a href=&quot;https://www.youtube.com/watch?v=0rqeR-iLKYA&quot; style=&quot;text-decoration: none;&quot;&gt;Contact Juggling - His Skills are Totally Hypnotizing&lt;/a&gt;”  &lt;br /&gt; Min-hsuan Tsai, software engineer, recently watched “&lt;a href=&quot;https://www.youtube.com/watch?v=skVhHlAixyY&quot; style=&quot;text-decoration: none;&quot;&gt;People Are Awesome 2015&lt;/a&gt;”  &lt;br /&gt; Thanks to the Video Content Analysis and YouTube Creator teams &lt;/p&gt; &lt;/span&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7799598191369167550'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/7799598191369167550'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2015/10/improving-youtube-video-thumbnails-with_8.html' title='Improving YouTube video thumbnails with deep neural nets'/><author><name>YouTube</name><uri>http://www.blogger.com/profile/03369985352764236236</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" url="http://4.bp.blogspot.com/-Q4ld3f5VShg/VhVGKDb3y9I/AAAAAAAAAvE/EwdS61_72c4/s72-c/image02.png" height="72" 
width="72"/></entry><entry><id>tag:blogger.com,1999:blog-1109038746813902833.post-1967743791385555634</id><published>2015-10-02T06:34:00.001-07:00</published><updated>2020-08-31T12:14:10.404-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="YouTube Engineering &amp; Developers Blog"/><title type='text'>Access to YouTube Analytics data in bulk</title><content type='html'>&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;font-family: Arial; font-size: 14.6667px; line-height: 1.38; white-space: pre-wrap;&quot;&gt;Want to get all of your YouTube data in bulk? Are you hitting the quota limits while accessing analytics data one request at a time? Do you want to be able to break down reports by more dimensions? What about accessing assets and revenue data?&lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; &lt;/span&gt;&lt;/div&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;With the new &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; 
vertical-align: baseline; white-space: pre-wrap;&quot;&gt;YouTube Bulk Reports API&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, your authorized application can retrieve bulk data reports in the form of CSV files that contain YouTube Analytics data for a channel or content owner. Once activated, reports are generated daily and contain data for a unique, 24-hour period.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;While the known &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/analytics/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;YouTube Analytics API&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; supports real-time targeted queries of much of the same data as the &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span 
style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;YouTube Bulk Reports API&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, the latter is designed for applications that can retrieve and import large data sets, then use their own tools to filter, sort, and mine that data.&lt;/span&gt;&lt;/div&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;As of now the API supports video, playlist, ad performance, estimated earnings and asset reports.&lt;/span&gt;&lt;/div&gt;&lt;h1 dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 10pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: &#39;Trebuchet MS&#39;; font-size: 21.333333333333332px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;How to start developing&lt;/span&gt;&lt;/h1&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: 
normal; font-variant: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;b&gt;Choose your reports:&lt;/b&gt;&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/content_owner_reports#video-reports&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Video reports&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; provide statistics for all user activity related to a channel&#39;s videos or a content owner&#39;s videos. For example, these metrics include the number of views or ratings that videos received. 
Some video reports for content owners also include earnings and ad performance metrics.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/content_owner_reports#playlist-reports&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Playlist reports&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; provide statistics that are specifically related to video views that occur in the context of a playlist.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/content_owner_reports#ad-performance-reports&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: 
#1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Ad performance reports&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; provide impression-based metrics for ads that ran during video playbacks. These metrics account for each ad impression, and each video playback can yield multiple impressions.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/content_owner_reports#estimated-earnings-reports&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Estimated earnings reports&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; provide the total earnings for videos from Google-sold advertising sources as well as from non-advertising sources. 
These reports also contain some ad performance metrics.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reports/content_owner_reports#asset-reports&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Asset reports&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: #212121; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; provide user activity metrics related to videos that are linked to a content owner&#39;s assets. 
For its data to be included in the report, a video must have been uploaded by the content owner and then claimed as a match of an asset in the YouTube Content ID system.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ul&gt;&lt;/ul&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;b&gt;Schedule reports: &lt;/b&gt;&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ul&gt;&lt;ol style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/guides/authorization&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Get an OAuth token&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; 
font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; (authentication credentials)&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Call the &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/reportTypes/list&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;reportTypes.list&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; method to retrieve a list of the available report types&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: 
baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Create a new reporting job by calling &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/create&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;jobs.create&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; and passing the desired report type (and/or query in the future)&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ol&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;b&gt;Retrieve 
reports:&lt;/b&gt;&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ul&gt;&lt;ol style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/guides/authorization&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Get an OAuth token&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; (authentication credentials)&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Call the &lt;/span&gt;&lt;a 
href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs/list&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;jobs.list&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; method to retrieve a list of the available reporting jobs and remember its ID.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Call the &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/reference/rest/v1/jobs.reports/list&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;reports.list&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: 
black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; method with the jobId filter parameter set to the ID found in the previous step to retrieve a list of downloadable reports that that particular job created.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Creators can check the report’s last modified date to determine whether the report has been updated since the last time it was retrieved.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: decimal; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Fetch the report from the URL obtained in step 3.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ol&gt;&lt;b style=&quot;font-weight: 
normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; list-style-type: disc; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.6667px; font-style: normal; font-variant: normal; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;b&gt;While using our sample code and tools&lt;/b&gt;&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;ul style=&quot;margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/libraries&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Client libraries&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; for many different programming languages can help you implement the YouTube 
Reporting API as well as many other Google APIs.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Don&#39;t write code from scratch! Our&lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/code_samples/java&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; Java&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, &lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/code_samples/php&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;PHP&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; 
font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, and&lt;/span&gt;&lt;a href=&quot;https://developers.google.com/youtube/reporting/v1/code_samples/python&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; Python&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; code&lt;/span&gt;&lt;a href=&quot;https://github.com/youtube/api-samples&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; samples&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; will help you get started.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;li dir=&quot;ltr&quot; style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; list-style-type: circle; text-decoration: none; vertical-align: baseline;&quot;&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; 
margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;The&lt;/span&gt;&lt;a href=&quot;https://developers.google.com/apis-explorer/#p/youtubereporting/v1/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; APIs Explorer&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt; lets you try out sample calls before writing any code.&lt;/span&gt;&lt;/div&gt;&lt;/li&gt;&lt;/ul&gt;&lt;/ul&gt;&lt;b style=&quot;font-weight: normal;&quot;&gt;&lt;br /&gt;&lt;/b&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Cheers,&lt;/span&gt;&lt;/div&gt;&lt;br /&gt;&lt;div dir=&quot;ltr&quot; style=&quot;line-height: 1.38; margin-bottom: 0pt; margin-top: 0pt;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;—&lt;/span&gt;&lt;a 
href=&quot;https://www.google.com/+MarkusLanthaler&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Markus Lanthaler&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, &lt;/span&gt;&lt;a href=&quot;https://plus.google.com/105315392926801270115/&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Paul Harvey&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, &lt;/span&gt;&lt;a href=&quot;https://plus.google.com/104984860183880644209&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Ronnie Falcon&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; 
vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, &lt;/span&gt;&lt;a href=&quot;https://plus.google.com/u/1/+IbrahimUlukaya&quot; style=&quot;text-decoration: none;&quot;&gt;&lt;span style=&quot;background-color: transparent; color: #1155cc; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: underline; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;Ibrahim Ulukaya&lt;/span&gt;&lt;/a&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;, and the YouTube API Team&lt;/span&gt;&lt;/div&gt;&lt;div&gt;&lt;span style=&quot;background-color: transparent; color: black; font-family: Arial; font-size: 14.666666666666666px; font-style: normal; font-variant: normal; font-weight: 400; text-decoration: none; vertical-align: baseline; white-space: pre-wrap;&quot;&gt;&lt;br /&gt;&lt;/span&gt;&lt;/div&gt;</content><link rel='edit' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1967743791385555634'/><link rel='self' type='application/atom+xml' href='http://www.blogger.com/feeds/1109038746813902833/posts/default/1967743791385555634'/><link rel='alternate' type='text/html' href='http://youtube-eng.googleblog.com/2015/10/access-to-youtube-analytics-data-in-bulk.html' title='Access to YouTube Analytics data in bulk'/><author><name>Ibrahim Ulukaya</name><uri>http://www.blogger.com/profile/10711891540978840954</uri><email>noreply@blogger.com</email><gd:image rel='http://schemas.google.com/g/2005#thumbnail' width='16' height='16' src='https://img1.blogblog.com/img/b16-rounded.gif'/></author></entry></feed>