<?xml version="1.0" encoding="UTF-8" standalone="no"?><?xml-stylesheet href="http://www.blogger.com/styles/atom.css" type="text/css"?><feed xmlns="http://www.w3.org/2005/Atom" xmlns:blogger="http://schemas.google.com/blogger/2008" xmlns:gd="http://schemas.google.com/g/2005" xmlns:georss="http://www.georss.org/georss" xmlns:openSearch="http://a9.com/-/spec/opensearchrss/1.0/" xmlns:thr="http://purl.org/syndication/thread/1.0"><id>tag:blogger.com,1999:blog-8698702854482141883</id><updated>2026-04-16T14:48:21.773-07:00</updated><category term="gsoc"/><category term="releases"/><category term="open source"/><category term="student programs"/><category term="gci"/><category term="conference"/><category term="students"/><category term="open source release"/><category term="announcements"/><category term="ghop"/><category term="meetups"/><category term="google summer of code"/><category term="events"/><category term="education"/><category term="Kubernetes"/><category term="security"/><category term="documentation"/><category term="machine learning"/><category term="Python"/><category term="Linux"/><category term="Season of Docs"/><category term="GSoC Meetups"/><category term="technical writing"/><category term="project hosting"/><category term="guest post"/><category term="testing"/><category term="hackathon"/><category term="AI"/><category term="statistics"/><category term="TensorFlow"/><category term="Android"/><category term="Cloud"/><category term="App Engine"/><category term="C++"/><category term="OSCON"/><category term="library"/><category term="wrap-up"/><category term="JavaScript"/><category term="Git"/><category term="Go"/><category term="news"/><category term="twios"/><category term="GKE"/><category term="Java"/><category term="community"/><category term="games"/><category term="accessibility"/><category term="Eclipse"/><category term="Google Cloud Platform"/><category term="KDE"/><category term="compression"/><category term="deep learning"/><category 
term="mentors"/><category term="peer bonus"/><category term="GNOME"/><category term="google open source"/><category term="OpenCensus"/><category term="R"/><category term="bazel"/><category term="database"/><category term="open source programs"/><category term="BSD"/><category term="Chrome"/><category term="Chromium"/><category term="Dart"/><category term="OSS"/><category term="licensing"/><category term="peer bonus program"/><category term="science"/><category term="silicon"/><category term="Google Earth"/><category term="Google I/O"/><category term="HTML5"/><category term="JAX"/><category term="Subversion"/><category term="awards"/><category term="fonts"/><category term="maps"/><category term="research"/><category term="GSoC 10 Things"/><category term="GitHub"/><category term="Selenium"/><category term="SoD"/><category term="VR"/><category term="containers"/><category term="databases"/><category term="distributed tracing"/><category term="fuzzing"/><category term="hardware"/><category term="report card"/><category term="usability"/><category term="BigQuery"/><category term="Django"/><category term="Gerrit"/><category term="Google Brain"/><category term="Haskell"/><category term="Knative"/><category term="OSS-Fuzz"/><category term="Samba"/><category term="audio"/><category term="computer vision"/><category term="contest"/><category term="google"/><category term="google code-in"/><category term="mobile"/><category term="observability"/><category term="projects"/><category term="rust"/><category term="Apache Beam"/><category term="Data"/><category term="Docker"/><category term="Flutter"/><category term="Free Software Foundation"/><category term="GCC"/><category term="GCP"/><category term="Google Season of Docs"/><category term="ML"/><category term="Spanner"/><category term="contributor"/><category term="diversity"/><category term="docs"/><category term="google cloud"/><category term="government"/><category term="ios"/><category term="metrics"/><category 
term="networking"/><category term="open silicon"/><category term="opentitan"/><category term="performance"/><category term="standards"/><category term="API"/><category term="AR"/><category term="Antmicro"/><category term="Creative Commons"/><category term="Explore"/><category term="GNU"/><category term="Gemma"/><category term="Istio"/><category term="JSON"/><category term="Machine Learning Framework"/><category term="Mercurial"/><category term="OpenTelemetry"/><category term="Perl"/><category term="PostgreSQL"/><category term="Vertex AI"/><category term="artificial intelligence"/><category term="compliance"/><category term="graphics"/><category term="instrumentation"/><category term="k8s"/><category term="open source ML"/><category term="open source software"/><category term="privacy"/><category term="protocol buffers"/><category term="reinforcement learning"/><category term="season of usability"/><category term="virtual events"/><category term="webdriver"/><category term="3d"/><category term="Apache"/><category term="Apache Software Foundation"/><category term="Best Practices"/><category term="BioJS"/><category term="C"/><category term="CEL"/><category term="CSS"/><category term="Census"/><category term="Code"/><category term="Dataflow"/><category term="Firebase"/><category term="Gateway API"/><category term="Google Compute Engine"/><category term="Ingress"/><category term="Learn"/><category term="ML Ops"/><category term="Neural Networks"/><category term="PHP"/><category term="RISC-V"/><category term="SDK"/><category term="SLSA"/><category term="SQL"/><category term="Science Journal"/><category term="Tekton"/><category term="Tracing"/><category term="Unicode"/><category term="YouTube"/><category term="bazelcon"/><category term="coding"/><category term="continuous integration"/><category term="differential privacy"/><category term="embedded"/><category term="evaluation"/><category term="fun propulsion lab"/><category term="gaming"/><category 
term="genomics"/><category term="golang"/><category term="hardware security"/><category term="health"/><category term="healthcare"/><category term="images"/><category term="internationalization"/><category term="microcontrollers"/><category term="microservices"/><category term="natural language"/><category term="network"/><category term="open data"/><category term="optimization"/><category term="patents"/><category term="production ML"/><category term="profiles"/><category term="quantum computing"/><category term="scaling ML"/><category term="scanning"/><category term="security scanning"/><category term="translation"/><category term="virtual reality"/><category term="visualization"/><category term="web"/><category term="3D Graphics"/><category term="A2A"/><category term="AI/ML"/><category term="Agents"/><category term="Announcement"/><category term="Apache iceberg"/><category term="Biglake"/><category term="CNCF"/><category term="CPU"/><category term="Cloud Composer"/><category term="Conformance"/><category term="D&amp;I"/><category term="Developers"/><category term="Efabless"/><category term="Etcd"/><category term="FOSDEM"/><category term="FOSSASIA"/><category term="Filesystems"/><category term="GIS"/><category term="GPU"/><category term="Gemini"/><category term="Generative AI"/><category term="Google App Engine"/><category term="Google I/O 2024"/><category term="Google OSS"/><category term="Haiku"/><category term="HashiCorp Vault"/><category term="IAMF"/><category term="Industry Trends"/><category term="JPEG"/><category term="Kotlin"/><category term="Learn Kubernetes with Google"/><category term="Linux.conf.au"/><category term="Logic Programming"/><category term="Natural Language Understanding"/><category term="OSPO"/><category term="Objective-C"/><category term="Open Source Summit"/><category term="Open source peer bonus"/><category term="OpenMPW"/><category term="OpenTelementry"/><category term="OpenTracing"/><category term="OpenXLA"/><category 
term="Oracle"/><category term="PDK"/><category term="POSSE"/><category term="Problem-Solving"/><category term="Programming Languages"/><category term="PyTorch"/><category term="Quantum"/><category term="Release Notes"/><category term="Ruby"/><category term="SIMD"/><category term="Terraform"/><category term="Traffic Splitting"/><category term="Tunix"/><category term="Unity"/><category term="Vault"/><category term="Vulnerabilities"/><category term="WebAssembly"/><category term="ZuriHac"/><category term="analytics"/><category term="australia"/><category term="authentication backend"/><category term="benchmark"/><category term="beta"/><category term="bioinformatics"/><category term="blockly"/><category term="cardboard"/><category term="chemistry"/><category term="cloud native"/><category term="compilers"/><category term="debugging"/><category term="eclipsa"/><category term="energy"/><category term="foundations"/><category term="gVisor"/><category term="geography"/><category term="geometry"/><category term="inclusion"/><category term="information security"/><category term="internships"/><category term="interoperability"/><category term="json-ld"/><category term="junit4"/><category term="kernel"/><category term="language"/><category term="machine perception"/><category term="mentor"/><category term="mentorship"/><category term="metabrainz"/><category term="network scanning"/><category term="parameterized-tests"/><category term="physics"/><category term="programs"/><category term="progression"/><category term="robotics"/><category term="schema.org"/><category term="search"/><category term="secret management"/><category term="serverless"/><category term="steam"/><category term="structured data"/><category term="student"/><category term="sugar labs"/><category term="summer of code"/><category term="textures"/><category term="tilt brush"/><category term="time zones"/><category term="tools"/><category term="typescript"/><category term="validation"/><category 
term="video"/><category term="wafer"/><category term="webvr"/><category term="writing"/><category term="#opensource #PDK #silicon #eda #180nm #efabless #OpenMPW"/><category term=". Web"/><category term=".NET"/><category term="1.17"/><category term="1.19"/><category term="1.21"/><category term="1.30"/><category term="1.31"/><category term="1.32"/><category term="1.33"/><category term="1.34"/><category term="180nm"/><category term="2FA"/><category term="3Daudio"/><category term="90nm"/><category term="A2April"/><category term="ADC"/><category term="AI&#10;Industry Trends"/><category term="AI Innovation"/><category term="AMP"/><category term="AOSSIE"/><category term="APM"/><category term="AWS"/><category term="AWS Trainium"/><category term="Abseil"/><category term="Agent Sandbox"/><category term="Agent2Agent"/><category term="Allstar"/><category term="Anomaly Detection"/><category term="Anthos"/><category term="Apache 2.0"/><category term="Apache Airflow"/><category term="Apache Flink"/><category term="Apache HBase"/><category term="Apache Spark"/><category term="Apigee"/><category term="Application Performance Management"/><category term="BMC"/><category term="Backup"/><category term="Bigtable"/><category term="Business &amp; Leadership"/><category term="C#"/><category term="CFP"/><category term="CICD"/><category term="CIS"/><category term="CQL"/><category term="CVE"/><category term="Cactus"/><category term="Case Study"/><category term="Chinese"/><category term="CircuitVerse"/><category term="Clang"/><category term="Clinical Quality Language"/><category term="Cloud Data Fusion"/><category term="Cloud Monitoring"/><category term="Cloud SQL"/><category term="Cloud Speech API"/><category term="Cloud TPU"/><category term="CloudCV"/><category term="Colab"/><category term="Common Expression Language"/><category term="Contributing"/><category term="Contributors Maintainers"/><category term="Control-M"/><category term="DAW"/><category term="DICOM"/><category 
term="DICOMWeb"/><category term="DRA"/><category term="Dapper"/><category term="Data Analytics"/><category term="Data Lakes"/><category term="Data Lineage"/><category term="Data Science"/><category term="Data analysis"/><category term="Data validation"/><category term="Data-Driven"/><category term="Datalake"/><category term="Dataproc"/><category term="Debian"/><category term="DeepDream"/><category term="DeepMind"/><category term="Demos"/><category term="DevOps"/><category term="DevSecOps"/><category term="Digital Pathology"/><category term="DoubleCheck"/><category term="Dynamic Resource Allocation"/><category term="EDW"/><category term="ESCA"/><category term="EclipsaAudio"/><category term="Embedded Linux"/><category term="Embodied AI"/><category term="Envoy"/><category term="Expression Language"/><category term="FDSOI"/><category term="FPGA"/><category term="Fluent Bit"/><category term="Fraud Detection"/><category term="Fuchsia"/><category term="FuzzBench"/><category term="Fuzzer Benchmarking"/><category term="Fuzzer Evaluation"/><category term="Fuzzer Evaluator"/><category term="GCVE"/><category term="GIF"/><category term="GNU Radio"/><category term="GPL"/><category term="GPS"/><category term="Gemmaverse"/><category term="GenerativeAI"/><category term="Google Ads"/><category term="Google Blocks"/><category term="Google Cloud Console"/><category term="Google Cloud VMware Engine"/><category term="Google Genomics"/><category term="Google My Business"/><category term="Google Play"/><category term="GoogleSQL"/><category term="HPC"/><category term="HUES"/><category term="High-Performance"/><category term="Host offloading"/><category term="Hugging Face"/><category term="IAM"/><category term="ICLR"/><category term="IaC"/><category term="Iceberg"/><category term="InSpec"/><category term="Inclusive"/><category term="Intel Xeon"/><category term="Intermediate"/><category term="IoT"/><category term="JanusGraph"/><category term="Japanese"/><category term="Jaspr"/><category 
term="Java library"/><category term="Jenkins"/><category term="Jigsaw"/><category term="Joomla"/><category term="Jpegli"/><category term="Jupyter"/><category term="Korean"/><category term="Kubernetes Operator"/><category term="LF"/><category term="LIDAR"/><category term="LLM"/><category term="LLM Post-training"/><category term="LLM training"/><category term="LMEval"/><category term="LaTeX"/><category term="LabLua"/><category term="Learn Kubernetes"/><category term="Linux Foundation"/><category term="Liquid Galaxy"/><category term="ML Dev Tools"/><category term="ML systems"/><category term="MLCommons"/><category term="MLLMs"/><category term="Maintainer"/><category term="Maintaining"/><category term="Marin 32B"/><category term="Measure"/><category term="MediaPipe"/><category term="Medical"/><category term="Medical Imaging"/><category term="Mentor Summit"/><category term="Mesh"/><category term="MicroK8s"/><category term="Migration"/><category term="Multi-Platform"/><category term="NGINX"/><category term="NIST"/><category term="NRNB"/><category term="Node.js"/><category term="OAuth"/><category term="OSS hardware"/><category term="OSSEU"/><category term="OSSNA"/><category term="Open Models"/><category term="Open Source Compliance"/><category term="Open source hardware"/><category term="OpenChain"/><category term="OpenFermion"/><category term="OpenLineage"/><category term="OpenMRS"/><category term="OpenMetrics"/><category term="Oppia"/><category term="Optimal Control"/><category term="Orange"/><category term="Pebble"/><category term="Phare Benchmark"/><category term="Policy"/><category term="Population Health"/><category term="PowerShell"/><category term="Public Dataset"/><category term="Public Lab"/><category term="QEMU"/><category term="RDMA"/><category term="RTOS"/><category term="Rails"/><category term="Rails Girls Summer of Code"/><category term="Raspberry Pi Foundation"/><category term="Refresh token"/><category term="Respect"/><category 
term="Respectful"/><category term="Responsible AI"/><category term="Routing"/><category term="Rust security"/><category term="S2"/><category term="SCoRe"/><category term="SFT"/><category term="SLAM"/><category term="SRE"/><category term="SSH authorization open-source"/><category term="STE||AR"/><category term="Sandbox"/><category term="Scene Graphs"/><category term="Scheduler"/><category term="Scratch"/><category term="Secrets Scanning"/><category term="Security Scorecards"/><category term="Semi-Supervised Learning"/><category term="Server-side Apply"/><category term="Service Mesh"/><category term="Site Reliability Engineering"/><category term="SkyWater"/><category term="Smart Buildings"/><category term="Smart Campaign"/><category term="Smartwatches"/><category term="Software"/><category term="Solutions"/><category term="Solve"/><category term="Sorting"/><category term="Sound Separation"/><category term="Sparrow"/><category term="Spatial Reasoning"/><category term="Spinnaker"/><category term="Stable Diffusion"/><category term="Supervised Fine Tuning"/><category term="Supply chain security; OpenSSF; GOSST; Scorecard; SLSA; sos.dev"/><category term="Sustainability"/><category term="Swagless"/><category term="Systers"/><category term="TPU"/><category term="TPU Compute"/><category term="TPU Optimization"/><category term="TPU Performance"/><category term="TPUs"/><category term="TensorFlow-XLA"/><category term="Test Automation"/><category term="Titan"/><category term="Topology Awareness"/><category term="Trademarks"/><category term="Tunix Framework"/><category term="VMware"/><category term="VPN"/><category term="VR/AR"/><category term="Verifiable Games"/><category term="Vision-Language Models"/><category term="WSI"/><category term="Web development"/><category term="WikiLoop"/><category term="Wikipedia"/><category term="Workflow Migration"/><category term="WriteAPI"/><category term="XLA"/><category term="YAML"/><category term="YouTube API"/><category 
term="ZetaSQL"/><category term="ads"/><category term="algorithms"/><category term="angular"/><category term="anniversary"/><category term="annotation guidelines"/><category term="aosp"/><category term="arXiv"/><category term="arduino"/><category term="asf"/><category term="asic"/><category term="astronomy"/><category term="atheris"/><category term="autoML"/><category term="back end development"/><category term="backupdr"/><category term="backups"/><category term="beam"/><category term="beam summit"/><category term="benchmarking"/><category term="benchmarks"/><category term="big data"/><category term="binpacker"/><category term="birthday"/><category term="black-box"/><category term="block coding"/><category term="block-based coding"/><category term="block-based programming"/><category term="blog"/><category term="browsers"/><category term="bug bounty"/><category term="captions"/><category term="chips-alliance"/><category term="cilium"/><category term="clojure"/><category term="cloud computing"/><category term="cloud development"/><category term="cloud ops"/><category term="co-design"/><category term="coala"/><category term="code review"/><category term="code-samples"/><category term="codec"/><category term="common library"/><category term="computeengine"/><category term="computer science"/><category term="conferences"/><category term="container security"/><category term="continuous delivery"/><category term="contribution"/><category term="coronovirus"/><category term="cost optimization"/><category term="covid"/><category term="covid-19"/><category term="crawling"/><category term="creators"/><category term="critical open source projects"/><category term="criticality score"/><category term="crowdsourcing"/><category term="cryptography"/><category term="data migration"/><category term="data mining"/><category term="datacenter"/><category term="dataset"/><category term="datasets"/><category term="developer library"/><category term="developer tools"/><category 
term="development"/><category term="diffusion models"/><category term="disasterrecovery"/><category term="distributed applications"/><category term="draco"/><category term="earthquakes"/><category term="edcation"/><category term="emergency"/><category term="execution"/><category term="external"/><category term="fDPO"/><category term="fhir"/><category term="file type detection"/><category term="foundation"/><category term="foundation model"/><category term="frameworks"/><category term="functional programming"/><category term="fuzzer"/><category term="fuzzing internship"/><category term="fuzzing research"/><category term="gci orgs"/><category term="gci students"/><category term="gcloud"/><category term="geology"/><category term="gmail"/><category term="google developers"/><category term="google peer bonus"/><category term="googlecloud"/><category term="governance"/><category term="graph"/><category term="graphdb"/><category term="gsoc student"/><category term="hl7"/><category term="immersive"/><category term="infrastructure"/><category term="inspiration"/><category term="instance metadata"/><category term="instructional videos"/><category term="interactive music"/><category term="interns"/><category term="junit5"/><category term="keystroke"/><category term="knative 1.0"/><category term="kotlin multiplatform"/><category term="kubecon"/><category term="language models"/><category term="latinx"/><category term="lca"/><category term="learn Istio"/><category term="lisp"/><category term="live panel"/><category term="live transcribe"/><category term="load testing"/><category term="locust"/><category term="logo"/><category term="lowrisc"/><category term="lua"/><category term="machine learning software infrastructure"/><category term="machine translation"/><category term="magika"/><category term="maintainers"/><category term="making"/><category term="mathematical software"/><category term="medical text"/><category term="melange"/><category term="memory"/><category 
term="milestone"/><category term="monitoring"/><category term="monolith"/><category term="multi-cluster"/><category term="musicbrainz"/><category term="mysql"/><category term="nmap"/><category term="npm"/><category term="open access"/><category term="open source award"/><category term="open source fuzzing"/><category term="open source security"/><category term="open-source"/><category term="opensource"/><category term="openssf"/><category term="operating system"/><category term="operations research"/><category term="oss criticality"/><category term="packaging"/><category term="partners"/><category term="peer bonus winner"/><category term="perfkit"/><category term="performance testing"/><category term="perkit benchmarker"/><category term="pigweed"/><category term="pixar"/><category term="pkb"/><category term="podcasting"/><category term="processing"/><category term="proxy"/><category term="quantum virtual machine"/><category term="rare disease"/><category term="recognition"/><category term="recommender systems"/><category term="registry"/><category term="repos"/><category term="response"/><category term="runtime"/><category term="saliency"/><category term="sampling"/><category term="scaling"/><category term="schedviz"/><category term="scorecards"/><category term="seL4"/><category term="secure"/><category term="secure compute"/><category term="sigstore"/><category term="simulation"/><category term="software supply chain"/><category term="spatial"/><category term="spatial audio"/><category term="spatial indexing"/><category term="spatialaudio"/><category term="speech recognition"/><category term="spherical geometry"/><category term="style"/><category term="summer internships in technology"/><category term="supply chain attacks"/><category term="supply chain security"/><category term="systemverilog"/><category term="tags"/><category term="technical documentation"/><category term="technical talks"/><category term="temporal cartography"/><category 
term="tfrecord"/><category term="tracepoints"/><category term="ui automation"/><category term="ulnerability management"/><category term="verification"/><category term="verilog"/><category term="virtual"/><category term="virtualization"/><category term="virtualmachine"/><category term="vm"/><category term="vms"/><category term="vulnerability"/><category term="vulnerability management"/><category term="vulnerability rewards program"/><category term="web fonts"/><category term="websites"/><category term="women in open source"/><category term="workshops"/><category term="zopfli"/><title type="text">Google Open Source Blog</title><subtitle type="html">News about Google's Open Source projects and programs.</subtitle><link href="http://opensource.googleblog.com/feeds/posts/default" rel="http://schemas.google.com/g/2005#feed" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default?redirect=false" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/" rel="alternate" type="text/html"/><link href="http://pubsubhubbub.appspot.com/" rel="hub"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default?start-index=26&amp;max-results=25&amp;redirect=false" rel="next" type="application/atom+xml"/><author><name>Unknown</name><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><generator uri="http://www.blogger.com" version="7.00">Blogger</generator><openSearch:totalResults>1523</openSearch:totalResults><openSearch:startIndex>1</openSearch:startIndex><openSearch:itemsPerPage>25</openSearch:itemsPerPage><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-8465741992892981242</id><published>2026-04-16T13:55:00.000-07:00</published><updated>2026-04-16T14:48:21.688-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="A2A"/><category 
scheme="http://www.blogger.com/atom/ns#" term="A2April"/><category scheme="http://www.blogger.com/atom/ns#" term="Agent2Agent"/><category scheme="http://www.blogger.com/atom/ns#" term="interoperability"/><category scheme="http://www.blogger.com/atom/ns#" term="Linux Foundation"/><title type="text">A year of open collaboration: Celebrating the anniversary of A2A</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Patricia Cruz&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEipL-i-8rPIj_0Pl6t503wSf-bEBAG6noLVPlGUOFtRUb66I7rwOk78noAQ6Au4zLI-fTSAyIQGhDN2KlKZux0iAddwaHGz9vk_v21tqvnxnwn0FF6eGLjvDElh2mJEHVx8vW9lobsIFrluCCFfwQq5je-C13LBXS48aIYDS29xXEBbEfAJ2tO8ol9hJG8/s1600/a2a-birthday.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEipL-i-8rPIj_0Pl6t503wSf-bEBAG6noLVPlGUOFtRUb66I7rwOk78noAQ6Au4zLI-fTSAyIQGhDN2KlKZux0iAddwaHGz9vk_v21tqvnxnwn0FF6eGLjvDElh2mJEHVx8vW9lobsIFrluCCFfwQq5je-C13LBXS48aIYDS29xXEBbEfAJ2tO8ol9hJG8/s1600/a2a-birthday.png"&gt;

&lt;figure class="borderless"&gt;
&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEipL-i-8rPIj_0Pl6t503wSf-bEBAG6noLVPlGUOFtRUb66I7rwOk78noAQ6Au4zLI-fTSAyIQGhDN2KlKZux0iAddwaHGz9vk_v21tqvnxnwn0FF6eGLjvDElh2mJEHVx8vW9lobsIFrluCCFfwQq5je-C13LBXS48aIYDS29xXEBbEfAJ2tO8ol9hJG8/s1600/a2a-birthday.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEipL-i-8rPIj_0Pl6t503wSf-bEBAG6noLVPlGUOFtRUb66I7rwOk78noAQ6Au4zLI-fTSAyIQGhDN2KlKZux0iAddwaHGz9vk_v21tqvnxnwn0FF6eGLjvDElh2mJEHVx8vW9lobsIFrluCCFfwQq5je-C13LBXS48aIYDS29xXEBbEfAJ2tO8ol9hJG8/s1600/a2a-birthday.png" alt="The A2A logo wearing a birthday hat"/&gt;&lt;/a&gt;
&lt;/figure&gt;

&lt;body&gt;
&lt;p&gt;One year ago, on April 9th, 2025, Google &lt;a href="https://developers.googleblog.com/en/a2a-a-new-era-of-agent-interoperability/"&gt;announced the Agent2Agent (A2A) protocol&lt;/a&gt;. We saw the need for a "common language" that allows AI agents built on different frameworks to collaborate well across diverse systems. Then, on June 23, 2025 at the Open Source Summit North America in Denver, Mike Smith stood on stage to share a pivotal moment for the future of AI interoperability when Google officially &lt;a href="https://developers.googleblog.com/en/google-cloud-donates-a2a-to-linux-foundation/"&gt;donated the A2A protocol to the Linux Foundation&lt;/a&gt;, establishing it as a vendor-neutral, community-governed standard.&lt;/p&gt;

&lt;p&gt;This move was driven by a core belief: for AI agents to truly transform how we work and live, they must be able to communicate across framework boundaries and organizational silos without being locked into a single provider's ecosystem. By placing A2A under the neutral stewardship of the Linux Foundation, we opened the doors for the entire industry to build, contribute, and innovate together.&lt;/p&gt;

&lt;h2&gt;A Foundation of Partners&lt;/h2&gt;
&lt;p&gt;The formation of the A2A Project was made possible through the support of our founding members, including Amazon Web Services, Cisco, Microsoft, Salesforce, SAP, and ServiceNow. Over the past twelve months, this coalition has grown, with over 100 technology companies now supporting the project.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUNOlR3YiKzGk6KLgA3svSfWJQfPiauDjBzEOsHfIWvFFcRzyTZKUyk6Gcj7KtHJXQtCoRt3rLN126Zk0JV9MDaMINtHtUOH5zy-JszEyuloTQZg0yrVc4CtDhzboRPEZNmkiNS4o23k-q5vogDHO1TqrZ1C6l-BS814JBBBKUzmZvgN2LJ7JUvwI90EA/s1600/a2a1stbirthday--7o29jvnwzro.png"&gt;&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUNOlR3YiKzGk6KLgA3svSfWJQfPiauDjBzEOsHfIWvFFcRzyTZKUyk6Gcj7KtHJXQtCoRt3rLN126Zk0JV9MDaMINtHtUOH5zy-JszEyuloTQZg0yrVc4CtDhzboRPEZNmkiNS4o23k-q5vogDHO1TqrZ1C6l-BS814JBBBKUzmZvgN2LJ7JUvwI90EA/s1600/a2a1stbirthday--7o29jvnwzro.png"&gt;&lt;/a&gt;
&lt;/figure&gt;

&lt;h2&gt;From Prototype to Production&lt;/h2&gt;
&lt;p&gt;The momentum since the donation has been remarkable. What began as a Google-led initiative has evolved into critical infrastructure for horizontal, peer-to-peer collaboration. Just one month ago, in March, the project reached a major milestone with the release of &lt;a href="https://a2a-protocol.org/latest/announcing-1.0/"&gt;A2A Protocol v1.0&lt;/a&gt;, the first stable, fully production-ready version of the standard.&lt;/p&gt;

&lt;p&gt;Key achievements from the community this year include:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Enhanced Security&lt;/strong&gt;: The implementation of Signed Agent Cards for cryptographic identity verification, ensuring trust in multi-agent workflows.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Web-Aligned Architecture&lt;/strong&gt;: Refined specifications that support familiar load-balancing and security patterns for enterprise-scale deployments.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Ecosystem Interoperability&lt;/strong&gt;: Demonstrating how diverse agents built with ADK, LangGraph, AG2 and CrewAI can delegate tasks and coordinate complex workflows seamlessly.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Experts teaching experts:&lt;/strong&gt; We have learned from our open collaboration and have &lt;a href="https://www.deeplearning.ai/short-courses/a2a-the-agent2agent-protocol/"&gt;shared our knowledge&lt;/a&gt;.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Looking Ahead&lt;/h2&gt;
&lt;p&gt;This flourishing ecosystem of agent protocols helps standardize how agents communicate, interact with the world, and solve real-world problems. The A2Family includes AP2 (Agent Payment Protocol), A2UI (Agent to User Interface), and UCP (Universal Commerce Protocol), which are examples of new protocols created using A2A's open extensibility model for agent communication.&lt;/p&gt;

&lt;p&gt;As we celebrate this first anniversary, we are more committed than ever to the "A2Family." The A2A protocol is designed to be complementary to existing standards like the Model Context Protocol (MCP); while MCP manages internal tool integration, A2A handles the vital external coordination between autonomous entities. &lt;/p&gt;

&lt;p&gt;We want to thank the vibrant ecosystem of developers, contributors, and partners who have helped harden this protocol into a world-class standard over the last year.&lt;/p&gt;

&lt;h2&gt;Join the A2April Celebration!&lt;/h2&gt;
&lt;p&gt;We're celebrating the first anniversary of A2A all month long with "A2April". You can join the fun by sharing a photo of yourself in the community using the hashtag #A2April. To help you get festive, we've put together a &lt;a href="https://goo.gle/celebrate-a2april"&gt;commemorative party hat template&lt;/a&gt; with full assembly instructions.&lt;/p&gt;

&lt;p&gt;Here's to many more years of innovation and open collaboration!  &lt;/p&gt;

&lt;h3&gt;Acknowledgements&lt;/h3&gt;
&lt;p&gt;Thank you to the following contributors: Mike Smith, Alan Blount, Kassandra Dhillon, Daryl Ducharme, and April Kyle Nassi&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8465741992892981242" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8465741992892981242" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/04/a-year-of-open-collaboration-celebrating-the-anniversary-of-a2a.html" rel="alternate" title="A year of open collaboration: Celebrating the anniversary of A2A" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEipL-i-8rPIj_0Pl6t503wSf-bEBAG6noLVPlGUOFtRUb66I7rwOk78noAQ6Au4zLI-fTSAyIQGhDN2KlKZux0iAddwaHGz9vk_v21tqvnxnwn0FF6eGLjvDElh2mJEHVx8vW9lobsIFrluCCFfwQq5je-C13LBXS48aIYDS29xXEBbEfAJ2tO8ol9hJG8/s72-c/a2a-birthday.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4580912722526853288</id><published>2026-04-15T11:30:00.000-07:00</published><updated>2026-04-15T11:30:00.116-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Dart"/><category scheme="http://www.blogger.com/atom/ns#" term="Flutter"/><category scheme="http://www.blogger.com/atom/ns#" term="Jaspr"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="Web development"/><title type="text">Jaspr: Why web development in Dart might just be a good idea</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Kilian 
Schulte&lt;/author&gt;, Netlight&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjm8NvVqvfhwqsFI23_UAN9Qqsg2TLbjAwrlb4ZZfttfBm96kNK3XE13sqFdYRINjmNC-ElzWCjGTGwCOQfrJVdwdEeAKJUjbGYHMsb7M6u0yqWv_sfpzCbA1GM9EkihOFWrFB6qJmgdY7TlZYY4mpQXHO0x5Am2x9xgKNMlfbNMaH05TvW_tXsk8OXeiw/s1600/jasprgoogleoss--o125irj98es.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjm8NvVqvfhwqsFI23_UAN9Qqsg2TLbjAwrlb4ZZfttfBm96kNK3XE13sqFdYRINjmNC-ElzWCjGTGwCOQfrJVdwdEeAKJUjbGYHMsb7M6u0yqWv_sfpzCbA1GM9EkihOFWrFB6qJmgdY7TlZYY4mpQXHO0x5Am2x9xgKNMlfbNMaH05TvW_tXsk8OXeiw/s1600/jasprgoogleoss--o125irj98es.png"&gt;
&lt;figure class="wide borderless"&gt;
  &lt;a alt="Jaspr logo blue dog shaking hands with Dart logo bird" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjm8NvVqvfhwqsFI23_UAN9Qqsg2TLbjAwrlb4ZZfttfBm96kNK3XE13sqFdYRINjmNC-ElzWCjGTGwCOQfrJVdwdEeAKJUjbGYHMsb7M6u0yqWv_sfpzCbA1GM9EkihOFWrFB6qJmgdY7TlZYY4mpQXHO0x5Am2x9xgKNMlfbNMaH05TvW_tXsk8OXeiw/s1600/jasprgoogleoss--o125irj98es.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjm8NvVqvfhwqsFI23_UAN9Qqsg2TLbjAwrlb4ZZfttfBm96kNK3XE13sqFdYRINjmNC-ElzWCjGTGwCOQfrJVdwdEeAKJUjbGYHMsb7M6u0yqWv_sfpzCbA1GM9EkihOFWrFB6qJmgdY7TlZYY4mpQXHO0x5Am2x9xgKNMlfbNMaH05TvW_tXsk8OXeiw/s1600/jasprgoogleoss--o125irj98es.png"/&gt;&lt;/a&gt;
  &lt;figcaption&gt;Jaspr, the open source web framework,  is built on Dart&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;Most developers know Dart as the language that powers Flutter, the multi-platform app framework. But the Dart ecosystem has so much more to offer. For example: Jaspr, a web framework that provides a familiar Flutter-like experience, but is made for building fast, SEO-friendly, and dynamic websites natively in Dart.&lt;/p&gt;

&lt;p&gt;Dart on the web is not a new idea. Initially, Dart was designed to run natively in browsers, similar to JavaScript. Google even developed AngularDart, a pure-Dart version of the popular JS framework. And although this is no longer supported, it resulted in some surprisingly powerful web tooling for Dart. &lt;a href="https://opensource.googleblog.com/2016/10/dart-in-2017-and-beyond.html"&gt;Back in 2016&lt;/a&gt;, teams at Google chose Dart for its strong type safety and excellent development experience, and it has only improved since then.&lt;/p&gt;

&lt;p&gt;However, all of this was unknown to me when I started building Jaspr in 2022. As a web developer who had transitioned to Flutter, I had grown to love Dart and wanted to explore using it for web development. So Jaspr started as a personal challenge: What would a modern web framework look like if it was built entirely in Dart?&lt;/p&gt;

&lt;p&gt;Creating Jaspr as an open source project has been one of the most challenging, but also rewarding journeys of my career. Starting out as a solo maintainer is definitely hard work, but it comes with absolute creative freedom. I can explore unconventional ideas, design APIs exactly how I envision them, and integrate modern features seen in other frameworks. All without being slowed down by processes or roadmaps. I poured more than three years of late nights and weekends into the framework. That dedication finally paid off in a way I had never imagined: Google selected Jaspr to completely rebuild and power the official Dart and Flutter websites.&lt;/p&gt;

&lt;h3&gt;Architecture &amp;amp; design&lt;/h3&gt;

&lt;p&gt;To understand how Jaspr actually works, let's look at its underlying design. Jaspr is primarily targeted at Flutter developers venturing into web development. Having a clearly defined niche like this greatly helped me shape the framework and prioritize features, while not getting spread too thin as a maintainer. &lt;/p&gt;

&lt;p&gt;One of Jaspr's core design principles is that it should look and feel familiar to Flutter, while relying on native web technologies like HTML and CSS. This sets it apart from Flutter, which since 2021 can also target the web, but instead optimizes for rendering consistency between platforms. It relies fully on the Canvas API for rendering, which comes at the cost of slower loading times and lower SEO. Therefore, Jaspr is the missing piece for Flutter developers wanting to build fast and optimized websites with great SEO.&lt;/p&gt;

&lt;p&gt;Jaspr results in a syntax that is remarkably close to Flutter's, and functionality that is much closer to something like React with an efficient, DOM-based rendering algorithm.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizzHShDRyvwjWUNIlrwck4VQfAGCTJMAqvOk7E_6fkuGnyUlyAP0ZznHAWmlPObKDN-OWgiXe_CFdXeP_rl93lni2SxU9EXlLWrKksk1rU5xWOr6_LTA53IXvgpiCTvvkJAy-H2hswIjDzUcSAElG7dfXV68ipyPmrHcgwTC7PxsnC8lJiU6nDAvMB-JI/s1600/jasprgoogleoss--8ytdkak894f.png"&gt;&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizzHShDRyvwjWUNIlrwck4VQfAGCTJMAqvOk7E_6fkuGnyUlyAP0ZznHAWmlPObKDN-OWgiXe_CFdXeP_rl93lni2SxU9EXlLWrKksk1rU5xWOr6_LTA53IXvgpiCTvvkJAy-H2hswIjDzUcSAElG7dfXV68ipyPmrHcgwTC7PxsnC8lJiU6nDAvMB-JI/s1600/jasprgoogleoss--8ytdkak894f.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Example: Jaspr component | Flutter widget | React component&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;As you can see, Jaspr's &lt;code class="inline"&gt;StatelessComponent&lt;/code&gt; mirrors Flutter's &lt;code class="inline"&gt;StatelessWidget&lt;/code&gt;, but constructs HTML similar to React with JSX. Jaspr also provides a type-safe API for writing CSS rules directly in Dart.&lt;/p&gt;

&lt;p&gt;Client-side rendering is only one aspect of what Jaspr can do. Jaspr is built as a full-stack general purpose framework supporting both Server-Side Rendering (SSR) and Static Site Generation (SSG). In the JavaScript ecosystem, you usually find a hard split between rendering libraries (React, Vue) and meta-frameworks (Next, Nuxt, Astro). Jaspr combines these concepts into one versatile and coherent framework.&lt;/p&gt;

&lt;p&gt;In order to achieve this wide range of features with the limited resources I had, I naturally had to make compromises. Since I didn't want to limit the quality of any feature, my strategy focuses more on limiting features to what's important. I also learned to prioritize simple solutions and to design APIs that are flexible and composable.&lt;/p&gt;

&lt;p&gt;For instance, I built &lt;code class="inline"&gt;jaspr_content&lt;/code&gt; as a plugin for developing content-driven sites from Markdown and other sources, similar to Astro or VitePress. It provides all the core features needed to build massive documentation websites, and instead of serving every use case out of the box, it is flexible and open enough to be fully customizable. In fact, &lt;code class="inline"&gt;jaspr_content&lt;/code&gt; is what currently powers the new &lt;code class="inline"&gt;flutter.dev&lt;/code&gt; and &lt;code class="inline"&gt;dart.dev&lt;/code&gt; documentation, which contain over 3,900 pages.&lt;/p&gt;

&lt;h3&gt;Tooling and developer experience&lt;/h3&gt;

&lt;p&gt;In my opinion, a framework is only as good as its tooling, and this is where Dart truly shines and has provided Jaspr developers with a great developer experience. For example, Flutter is known for its stateful hot-reload, enabling you to swap out code instantly without losing client-side state. But hot-reload is actually a &lt;strong&gt;Dart&lt;/strong&gt; feature, enabled by its unique compiler architecture. &lt;/p&gt;

&lt;p&gt;For browser development, the &lt;code class="inline"&gt;dartdevc&lt;/code&gt; compiler performs modular and incremental compilation to JavaScript. It supports stateful hot-reload and provides a seamless debugging experience. By cleverly leveraging source-maps, you can step through native Dart code right in the browser, complete with breakpoints, value inspection, and runtime expression evaluation.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiN920I_wUtEwU-BSdKLbyaAwk8JUkQzH1jBovY2GoBwKspiFbfVKBnTEzuJkzj69NYsBYRCvgZDqTu-gJADwUuTDSFV3DEeCazPdiWz1zmvBoILqnM713YAGhAnvGetjb7_EXuWQrq-ls1V87hKgmmi6QV9-4ZRro5m8oS82ASYgicEPR9EL3BO8aHqwE/s1600/jasprgoogleoss--0pne7s38lhkf.png"&gt;&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiN920I_wUtEwU-BSdKLbyaAwk8JUkQzH1jBovY2GoBwKspiFbfVKBnTEzuJkzj69NYsBYRCvgZDqTu-gJADwUuTDSFV3DEeCazPdiWz1zmvBoILqnM713YAGhAnvGetjb7_EXuWQrq-ls1V87hKgmmi6QV9-4ZRro5m8oS82ASYgicEPR9EL3BO8aHqwE/s1600/jasprgoogleoss--0pne7s38lhkf.png" alt="An image show what debugging Jaspr or Dart code looks like when using Chrome DevTools"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Debugging Jaspr / Dart code using Chrome DevTools&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;For production builds, Dart uses the &lt;code class="inline"&gt;dart2js&lt;/code&gt; compiler to generate a heavily optimized, tree-shaken JavaScript bundle, or the newer &lt;code class="inline"&gt;dart2wasm&lt;/code&gt; compiler for even better runtime performance through WebAssembly. On the server side, Dart's JIT compiler provides that same hot-reload and debugging capabilities, while its AOT compiler compiles your server code to optimized, platform-specific, native binaries for production environments.&lt;/p&gt;

&lt;p&gt;Jaspr builds on top of these and other capabilities, for example by giving developers full-stack debugging, custom lints and code assists, and something I call &lt;em&gt;component scopes&lt;/em&gt;. This is a neat editor feature that adds inline hints to your components, showing whether they are rendered on the server, the client, or both. When building full-stack apps, this makes it much easier to reason about which platform APIs or libraries you can safely use in a specific file. I'm also working on more features to make the full-stack development aspect even smoother. For example, a full-stack hot-reload where on any server-side change, whether updating code or (for example), editing a markdown file, the new pre-rendered HTML is "hot-reloaded" into the page while keeping all client-side state. Features like these are only possible due to Jaspr's approach to combine both server- and client-side rendering into one framework.&lt;/p&gt;

&lt;h3&gt;Impact and outlook&lt;/h3&gt;

&lt;p&gt;Last year, Google selected Jaspr for the Dart and Flutter websites, including &lt;code&gt;dart.dev&lt;/code&gt;, &lt;code&gt;flutter.dev&lt;/code&gt; and &lt;code&gt;&lt;a href="https://docs.flutter.dev/"&gt;docs.flutter.dev&lt;/a&gt;&lt;/code&gt; (&lt;a href="https://github.com/flutter/website"&gt;repo&lt;/a&gt;), which is used by over a million monthly active users. The sites were migrated from JS- and python-based static site generators to Jaspr and &lt;code class="inline"&gt;jaspr_content&lt;/code&gt;, resulting in a unified setup with less context switching and an easier contribution experience. The move to Jaspr also streamlined the development of brand-new interactive tutorials on &lt;code class="inline"&gt;dart.dev/learn&lt;/code&gt; and &lt;code class="inline"&gt;docs.flutter.dev/learn&lt;/code&gt;. For me this is not only an incredible trust in the capabilities of Jaspr, but also a great way to dogfood Jaspr at scale; it allowed me to invest more time and resources into improving Jaspr.&lt;/p&gt;

&lt;p&gt;With AI constantly shifting the scope of software development, I believe the concept of being a strict "domain expert" (a purely mobile or purely web developer) will matter less. However, developers and teams will increasingly value coherent tech stacks to reduce context-switching and leverage unified tooling. Just as React Native became massively popular because it allowed web developers to reuse their skills for mobile (or for companies to "reuse" their developers), Jaspr is a great option for teams working with both Flutter and the web. Apart from using existing skills, Jaspr and Flutter projects can also share up to 100% of their business logic, models, and validation code.&lt;/p&gt;

&lt;p&gt;Dart's type safety and high-quality tooling position it well for modern web development. Jaspr evolved to be the missing piece, a cohesive framework with modern features and a great development experience.&lt;/p&gt;

&lt;p&gt;I personally see Jaspr as an antithesis to the trend of AI causing everyone to converge onto the same stack, especially in web development. While this also has some benefits, I believe there is immense value in exploring alternative ecosystems. This can push boundaries, surface new ideas, and keep our industry vibrant. &lt;/p&gt;

&lt;p&gt;If there's one takeaway from my journey, it's this: Don't be afraid to build the tools you want to use. You never know where that codebase will take you, and it can be incredibly rewarding. &lt;/p&gt;

&lt;p&gt;If you're a Dart or Flutter developer curious about building websites with the skills you already have, there's never been a better time to start. Try out Jaspr now on its &lt;a href="https://playground.jaspr.site/"&gt;online playground&lt;/a&gt; (which is also built with Jaspr!) or by following the &lt;a href="https://docs.jaspr.site/quick_start"&gt;Jaspr quickstart&lt;/a&gt;.&lt;/p&gt;
&lt;p&gt;Learn more about Flutter's migration in &lt;a href="https://blog.flutter.dev/we-rebuilt-flutters-websites-with-dart-and-jaspr-317c00e8b400"&gt;We rebuilt Flutter's websites with Dart and Jaspr&lt;/a&gt;.
&lt;/p&gt;&lt;p&gt;
Oh, and if you're wondering where the name "Jaspr" came from — it's named after my dog, Jasper. If you ever find yourself wandering around &lt;a href="https://jaspr.site"&gt;jaspr.site&lt;/a&gt; and want to &lt;strong&gt;&lt;em&gt;Meet Jasper&lt;/em&gt;&lt;/strong&gt;, keep an eye out… you just might find a little easter egg tribute to him.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4580912722526853288" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4580912722526853288" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/04/jaspr-why-web-development-in-dart-might-just-be-a-good-idea.html" rel="alternate" title="Jaspr: Why web development in Dart might just be a good idea" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjm8NvVqvfhwqsFI23_UAN9Qqsg2TLbjAwrlb4ZZfttfBm96kNK3XE13sqFdYRINjmNC-ElzWCjGTGwCOQfrJVdwdEeAKJUjbGYHMsb7M6u0yqWv_sfpzCbA1GM9EkihOFWrFB6qJmgdY7TlZYY4mpQXHO0x5Am2x9xgKNMlfbNMaH05TvW_tXsk8OXeiw/s72-c/jasprgoogleoss--o125irj98es.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4094751371830506244</id><published>2026-04-10T11:30:00.000-07:00</published><updated>2026-04-10T11:30:00.122-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Cloud TPU"/><category scheme="http://www.blogger.com/atom/ns#" term="Host offloading"/><category scheme="http://www.blogger.com/atom/ns#" term="Intel Xeon"/><category scheme="http://www.blogger.com/atom/ns#" term="JAX"/><category 
scheme="http://www.blogger.com/atom/ns#" term="LLM training"/><title type="text">Leveraging CPU memory for faster, cost-efficient TPU LLM training</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Keyur Ruganathbhai Ranipa&lt;/author&gt;, &lt;author&gt;Qinglan Xiang&lt;/author&gt;, &lt;author&gt;Vrushabh Sanghavi&lt;/author&gt;, &lt;author&gt;Ramesh AG&lt;/author&gt; &amp;amp; &lt;author&gt;Weilin Wang&lt;/author&gt;, Intel&lt;br&gt;
 and &lt;author&gt;Penporn Koanantakool&lt;/author&gt;, Google&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSl-SjdERyxt8bQdTc9dB6dRfxQffyNdsExFuihXYg8XrCYPgKlO13tE6CaNjO6n6EIGGYuRn8vOsMSjV9Fkj4Ryec2i4BYDnWA2wDGQWUgYU79h0PMPfrNQg8WkGVs96GieUhNhfqSuOK18ieHxY4MHcFrBgrZurdcgDqfBUEDMK_cbjZQksnSyeIB0c/s1600/thumbnail.jpg"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSl-SjdERyxt8bQdTc9dB6dRfxQffyNdsExFuihXYg8XrCYPgKlO13tE6CaNjO6n6EIGGYuRn8vOsMSjV9Fkj4Ryec2i4BYDnWA2wDGQWUgYU79h0PMPfrNQg8WkGVs96GieUhNhfqSuOK18ieHxY4MHcFrBgrZurdcgDqfBUEDMK_cbjZQksnSyeIB0c/s1600/thumbnail.jpg"&gt;
&lt;a href="IMG" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhSl-SjdERyxt8bQdTc9dB6dRfxQffyNdsExFuihXYg8XrCYPgKlO13tE6CaNjO6n6EIGGYuRn8vOsMSjV9Fkj4Ryec2i4BYDnWA2wDGQWUgYU79h0PMPfrNQg8WkGVs96GieUhNhfqSuOK18ieHxY4MHcFrBgrZurdcgDqfBUEDMK_cbjZQksnSyeIB0c/s1600/thumbnail.jpg" alt="Intel Xeon 6 Processor"/&gt;&lt;/a&gt;

&lt;h2&gt;Host offloading with JAX on Intel® Xeon® processors&lt;/h2&gt;

&lt;p&gt;As Large Language Models (LLMs) continue to scale into the hundreds of billions of parameters, device memory capacity has become a big limiting factor in training, as intermediate activations from every layer in the forward pass are needed in the backward pass. To reduce device memory pressure, these activations can be rematerialized during the backward pass, trading memory for recomputation. While rematerialization enables larger models to fit within limited device memory, it significantly increases training time and cost.&lt;/p&gt;&lt;p&gt;
Intel® Xeon® processors (5th and 6th Gen) with Advanced Matrix Extensions (AMX) enable practical host offloading of selected memory- and compute-intensive components in JAX training workflows. This approach can help teams train larger models, relieve accelerator memory pressure, improve end-to-end throughput, and reduce total cost of ownership—particularly on TPU-based Google Cloud instances.&lt;/p&gt;&lt;p&gt;
By publishing these results and implementation details, Google and Intel aim to promote transparency and share practical guidance with the community. This post describes how to enable activation offloading for JAX on TPU platforms and outlines considerations for building scalable, cost-aware hybrid CPU–accelerator training workflows.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;  
	&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiSxOxAaxMCyZB3T0jLV682S1wR8NiCzLHhyaJA5on5DHqRsnifdjhFUmYI5quI7kRaR-K6YYOPAqtwyzNA1txGVdji7Mhw7IqxLF5l7tywifDwVicJcukJSyQzNjb6pshfwqYWChz0fRr30c1aCK9B-LapG4ECa8bZn8Ahez0GqPLwGLvizEs2Opeqg7Y/s1600/Fig1_TPU_v5L_Pod_Front_View.jpg"&gt;
	&lt;figcaption&gt;&lt;strong&gt;Figure 1.&lt;/strong&gt; Google Cloud TPU Pod commonly used in LLM training.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;h3&gt;Host offloading&lt;/h3&gt;
&lt;p&gt;Traditional LLM training is usually done on device accelerators alone. However, modern host machines have much larger memory size than accelerators (512GB or more) and can offer extra compute power, e.g., TFLOPS in case of Intel® Xeon® Scalable Processor with AMX capability. Leveraging host resources can be a great alternative to rematerialization. &lt;strong&gt;Host offloading&lt;/strong&gt; selectively moves computation or data between host and device to optimize performance and memory usage.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Host memory offloading&lt;/strong&gt; keeps frequently-accessed tensors on the device and spills the rest to CPU memory as an extra level of cache. &lt;strong&gt;Activation offloading&lt;/strong&gt; transfers activations computed on-device in the forward pass to the host, stores them in the host memory, and brings them back to the device in the backward pass for gradient computation. This unlocks the ability to train larger models, use bigger batch sizes, and improve throughput.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
	&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjCtZ1VFioJeS0andvdS7hpk7hP_H92yn5TjoIbxeji0n0WwTzfZoGhN0Z83xH6vPFH_7CVq6lNJUlfhvAOb40j9eCQ-dKbV8TncHDXNKBMwiVgLXc1NT0hjN4E6l41bj42LKklgm0uMr18IQMFhSul8u1mjFc0sQkUqKrLGDC0I_69yodJOm8Zjt5GYmk/s1600/Fig2_offloading_illustration.png"&gt;
	&lt;figcaption&gt;&lt;strong&gt;Figure 2:&lt;/strong&gt; Memory offloading during forward and backward pass &lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;In this blog post, we provide a practical guide to offload activations through JAX to efficiently train larger models on TPUs with an Intel® Xeon® Scalable Processor.&lt;/p&gt;

&lt;h3&gt;Enabling memory offloading in JAX&lt;/h3&gt;
&lt;p&gt;JAX offers &lt;a href="https://docs.jax.dev/en/latest/notebooks/host-offloading.html"&gt;multiple strategies&lt;/a&gt; for offloading activations, model parameters, and optimizer states to the host. Users can use &lt;code&gt;checkpoint_name()&lt;/code&gt; to create a checkpoint for a tensor. The snippet below shows how to create a checkpoint named &lt;code&gt;x&lt;/code&gt;:&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox python"&gt;from jax.ad_checkpoint import checkpoint_name 
 
def layer_name(x, w): 
  w1, w2 = w 
  x = checkpoint_name(x, "x") 
  y = x @ w1 
  return y @ w2, None  
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;Users can provide &lt;code&gt;checkpoint_policies()&lt;/code&gt; to select the appropriate memory optimization strategy for intermediate values. There are three strategies:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Recomputing during backward pass (default behavior) &lt;/li&gt;
&lt;li&gt;Storing on device &lt;/li&gt;
&lt;li&gt;Offloading to host memory after forward pass and loading back during backward pass &lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;The code below moves &lt;code&gt;x&lt;/code&gt; from device to the pinned host memory after the forward pass. &lt;br&gt;
 &lt;code&gt;from jax import checkpoint_policies as cp&lt;/code&gt;&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox python"&gt;policy = cp.save_and_offload_only_these_names( 
  names_which_can_be_saved=[],         # No values stored on device 
  names_which_can_be_offloaded=["x"],  # Offload activations labeled "x" 
  offload_src="device",                # Move from device memory 
  offload_dst="pinned_host"            # To pinned host memory 
) 
&lt;/code&gt;&lt;/pre&gt;

&lt;h3&gt;Measuring Host Offloading Benefits on TPU v5p&lt;/h3&gt;
&lt;p&gt;We examined TPU host-offloading on JAX on both fine-tuning and training workloads. All our experiments were run on Google Cloud Platform, using a single &lt;a href="https://docs.cloud.google.com/tpu/docs/v5p"&gt;v5p-8 TPU&lt;/a&gt; instance with a single-host 4th Gen Intel® Xeon® Scalable Processor.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Fine-tuning PaliGemma2&lt;/strong&gt;: Using the base PaliGemma2 28B model for vision-language tasks, we &lt;a href="https://ai.google.dev/gemma/docs/paligemma/fine-tuning-paligemma"&gt;fine-tuned&lt;/a&gt; the attention layers of the language model (Gemma2 27B) while keeping all other parameters frozen. During fine-tuning, we set the LLM sequence length to 256 and the batch size to 256.&lt;/p&gt;&lt;p&gt;
The default checkpoint policy is &lt;code&gt;nothing_saveable&lt;/code&gt;, which does not keep any activations on-device during the forward pass. The activations are rematerialized during the backward pass for gradient computation. While this approach reduces memory pressure on the TPU, it increases compute time. To apply host offloading, we offload Q, K, and V projection outputs using &lt;code&gt;save_and_offload_only_these_names&lt;/code&gt;. These activations are transferred to host memory (D2H) during the forward pass and fetched back during the backward pass (H2D), so the device neither stores nor recomputes them. Figure 3 shows a 10% reduction in training time from host offloading. This translates directly into a similar reduction in TPU core-hours, yielding meaningful cost savings. The complete fine-tuning recipe is available at [&lt;a href="https://github.com/Intel-tensorflow/jax/tree/jax-host-offloading/examples/jax_host_offloading"&gt;JAX host offloading&lt;/a&gt;]. &lt;/p&gt;

&lt;figure class="wide borderless"&gt;
	&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9RCj0uH2tKGlK8GJkXUbOvwo3bcrMuwn_MGuJqjDNu-Wdkn3ZWdeSGphCe32v_UriOaFqG7UpCuUmYKOhZH0v5jZ53kaOL5fnMSHwuSyuZvihYTHdQJgHW6cmeORuxGCo79aNDXgxckfHn1tMq7AEfsMBGIWmvLr0aQZUYvHQP43gLIzxOTSJLE8zprk/s1600/Fig3_paligemma2_results.png"&gt;
	&lt;figcaption&gt;&lt;strong&gt;Figure 3:&lt;/strong&gt; (Top) Training time comparison between full rematerialization and host offloading.&lt;br&gt;
      (Bottom) Memory analysis with and without host offloading.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;&lt;strong&gt;Training Llama2-13B using MaxText:&lt;/strong&gt; &lt;a href="https://github.com/AI-Hypercomputer/maxtext"&gt;MaxText&lt;/a&gt; offers several &lt;a href="https://github.com/AI-Hypercomputer/maxtext/blob/c841dae593a64942b348a96692c0ae0f7b140182/src/MaxText/configs/base.yml#L286-L289"&gt;rematerialization strategies&lt;/a&gt; that can be specified in the training configuration file. We used the policy &lt;code&gt; remat_policy: 'qkv_proj_offloaded' &lt;/code&gt; to offload Q, K, and V projection outputs. Figure 4 shows a ~5% reduction in per-step training time compared to fully rematerializing all activations (&lt;code&gt; remat_policy: 'full'&lt;/code&gt;). &lt;/p&gt;

&lt;figure class="wide borderless"&gt;
	&lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEi6bSAeLi_TVhVKAtsRBo-vXPQcvNmeAGIYzprbl9nKukv-ZBZE_IqgXzW6K-RjW2Mqu9163Xe8oAkxc2kIaZE-JC17hja3cbUAj8Im4isHjIWTJ3IUSouf5oha6m20WS0jUKSVdNqCr2hN29Owtet51gtYd0iZ7GGi71LgSRtT7UWaKnv6SawQUo9LEVw/s1600/Fig4_llama2-13b_results.png"&gt;
	&lt;figcaption&gt;&lt;strong&gt;Figure 4:&lt;/strong&gt; MaxText Llama2-13B training statistics with and without host offloading.&lt;br&gt;
The step time was 5% faster with host offloading.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;h3&gt;When to offload activations&lt;/h3&gt;
&lt;p&gt;Activation offloading is beneficial when the time to transfer activations across host and device is lower than the time to recompute them. The timing depends on multiple factors such as PCIe bandwidth, model size, batch size, sequence length, activation tensor sizes, compute capabilities of the device, etc. An additional factor is how much the data movement can be overlapped with computation to keep the device busy. Figure 5 demonstrates an efficient overlap of the device-to-host transfer with compute during the backward pass in PaliGemma2 28B training.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgBgjqC2J6ahTokvulD685bfmcifsGrSfAoFzvGfzoxTCBcdC8-ypyF3MFD-Znd3ZpSgoKOe8aQ-gC_eYYmXSnduRQulI_2dVgc-DuVj585FkuFT76vwgLRBAAHVwAGe_AUAPJhFu2__BbTravFpdZSxXtOpdni42Mmvt8qHJ9A-fY8L7b6SsyGDfNZ-XM/s1600/Fig5_backward_pass_h2d_trace.png"&gt;
  &lt;figcaption&gt;&lt;strong&gt;Figure 5:&lt;/strong&gt; A JAX trace of PaliGemma2 training viewed on Perfetto.&lt;br&gt;
  Memory offloading overlaps with compute effectively during backward pass host to device.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;Smaller model variants such as PaliGemma2 3B and 9B did not see benefits from host offloading because it is faster to rematerialize all tensors than to transfer them to and from the host. Therefore, identifying the appropriate workload and offloading policy is crucial to realizing performance gains from host offloading.&lt;/p&gt;

&lt;h3&gt;Call to Action&lt;/h3&gt;
&lt;p&gt;If you train on TPUs and are limited by device memory, consider evaluating activation offloading. Start by labeling candidate activations (for example, Q/K/V projections) and compare step time, memory headroom, and overall cost across representative workloads.&lt;/p&gt;&lt;p&gt;
In our experiments, we observed up to ~10% improvement in end-to-end training time for larger workloads, which can reduce total cost of ownership (TCO) by shortening time-to-train or enabling the same workload on smaller instances.&lt;/p&gt;

&lt;h3&gt;Acknowledgments&lt;/h3&gt;
&lt;p&gt;Emilio Cota and Karlo Basioli from Google, and Eugene Zhulenev (formerly at Google).&lt;/p&gt;
&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjceD7QSJkjePzfg0I6GcekqSxjDQtOXPgnIDxXlcruRELjhhOLWt39fIiclLr0t9sy9S6bXEtErtB5D-xsDXi2TMJnxi95WlX2A4AfqX6GYXWaOwQWWfUHcCZdujFkEEKEHo_CTu7JjFG1UWLQGgc2cwfFhrlY51KvIwa5OBd8lv2zKCitpuY3l45Aits/s1600/a2aprilpartyha--fetqhl1c3vi.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjceD7QSJkjePzfg0I6GcekqSxjDQtOXPgnIDxXlcruRELjhhOLWt39fIiclLr0t9sy9S6bXEtErtB5D-xsDXi2TMJnxi95WlX2A4AfqX6GYXWaOwQWWfUHcCZdujFkEEKEHo_CTu7JjFG1UWLQGgc2cwfFhrlY51KvIwa5OBd8lv2zKCitpuY3l45Aits/s1600/a2aprilpartyha--fetqhl1c3vi.png"&gt;

&lt;p&gt;Happy 1st Birthday to A2A! Join the community in celebrating the first anniversary of the A2A protocol and &lt;a href="https://a2a-protocol.org/latest/announcing-1.0/"&gt;its recent 1.0 release&lt;/a&gt;. April 9th marks the &lt;a href="https://developers.googleblog.com/en/a2a-a-new-era-of-agent-interoperability/"&gt;official birthday&lt;/a&gt;, and we're celebrating all month long with &lt;em&gt;#A2April&lt;/em&gt;. To help you celebrate, we've used Gemini to make a party hat.&lt;/p&gt;

&lt;p&gt;Use the template and instructions below to create your commemorative party hat.&lt;/p&gt;

&lt;h2&gt;Assembly Instructions&lt;/h2&gt;
&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;Print:&lt;/strong&gt; Print this document on heavy cardstock for the best results.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Cut:&lt;/strong&gt; Carefully cut along the solid outer border of the semi-circle template.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Fold:&lt;/strong&gt; Gently curve the template into a cone shape, overlapping the "Glue/Tape Tab" underneath the opposite edge.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Secure:&lt;/strong&gt; Use double-sided tape or a glue stick along the tab to hold the cone shape.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Finish:&lt;/strong&gt; Punch two small holes on opposite sides of the base and thread through an elastic string or ribbon to secure the hat to your head.&lt;/li&gt;
&lt;/ol&gt;

&lt;h2&gt;Party Hat Visualization&lt;/h2&gt;
&lt;figure class="wide"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjceD7QSJkjePzfg0I6GcekqSxjDQtOXPgnIDxXlcruRELjhhOLWt39fIiclLr0t9sy9S6bXEtErtB5D-xsDXi2TMJnxi95WlX2A4AfqX6GYXWaOwQWWfUHcCZdujFkEEKEHo_CTu7JjFG1UWLQGgc2cwfFhrlY51KvIwa5OBd8lv2zKCitpuY3l45Aits/s1600/a2aprilpartyha--fetqhl1c3vi.png"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjceD7QSJkjePzfg0I6GcekqSxjDQtOXPgnIDxXlcruRELjhhOLWt39fIiclLr0t9sy9S6bXEtErtB5D-xsDXi2TMJnxi95WlX2A4AfqX6GYXWaOwQWWfUHcCZdujFkEEKEHo_CTu7JjFG1UWLQGgc2cwfFhrlY51KvIwa5OBd8lv2zKCitpuY3l45Aits/s1600/a2aprilpartyha--fetqhl1c3vi.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Make sure to print in landscape mode&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;h2&gt;Ways to Celebrate&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Social Media:&lt;/strong&gt; Share a photo of yourself wearing your hat with the tag #A2April to help generate that social media buzz.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Blog Series:&lt;/strong&gt; Keep an eye out for the upcoming A2April blog series featuring quotes from the team and stories from the open source community.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Community Quotes:&lt;/strong&gt; If you're using A2A in production, reach out to us via social media and share your story for the birthday post.&lt;/li&gt;
&lt;/ul&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/1208132952370517719" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/1208132952370517719" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/04/celebrate-a2april.html" rel="alternate" title="Celebrate A2April!" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjceD7QSJkjePzfg0I6GcekqSxjDQtOXPgnIDxXlcruRELjhhOLWt39fIiclLr0t9sy9S6bXEtErtB5D-xsDXi2TMJnxi95WlX2A4AfqX6GYXWaOwQWWfUHcCZdujFkEEKEHo_CTu7JjFG1UWLQGgc2cwfFhrlY51KvIwa5OBd8lv2zKCitpuY3l45Aits/s72-c/a2aprilpartyha--fetqhl1c3vi.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4168810863999534660</id><published>2026-04-06T12:00:00.000-07:00</published><updated>2026-04-06T12:01:37.556-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI"/><category scheme="http://www.blogger.com/atom/ns#" term="CNCF"/><category scheme="http://www.blogger.com/atom/ns#" term="Conformance"/><category scheme="http://www.blogger.com/atom/ns#" term="Dynamic Resource Allocation"/><category scheme="http://www.blogger.com/atom/ns#" term="Kubernetes"/><category scheme="http://www.blogger.com/atom/ns#" term="machine learning"/><title type="text">Kubernetes goes AI-First: Unpacking the new AI conformance program</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Duncan Campbell&lt;/author&gt;, &lt;author&gt;Kaslin Fields&lt;/author&gt;, Developer Source &amp;amp; Signal&lt;br&gt;
&amp;amp; &lt;author&gt;Janet Kuo&lt;/author&gt;, &lt;author&gt;Federico Bongiovanni&lt;/author&gt;, Google Kubernetes Engine(GKE)&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg5Ew4zwACrflispkhfyO2RPltMX4eBYd9sSXXT5hNZ6G2FugX5WAu3wE3fpuJya824APcC0qjVwLxzwxqg2hK957Z4bln1aCl6_Pwwht9BQW6HF-cqjgdQZzRRvhV2qlar_m_t37Pkl5O4BptwMY1D2CX-g_U_x5XhXvrCiaU93gX0GwRTtyCEanRAQTU/s1600/Header%20-%20OSS%20-%20Kubernetes%20Gateway%20API%20graduates%20to%20GA%20%281%29.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg5Ew4zwACrflispkhfyO2RPltMX4eBYd9sSXXT5hNZ6G2FugX5WAu3wE3fpuJya824APcC0qjVwLxzwxqg2hK957Z4bln1aCl6_Pwwht9BQW6HF-cqjgdQZzRRvhV2qlar_m_t37Pkl5O4BptwMY1D2CX-g_U_x5XhXvrCiaU93gX0GwRTtyCEanRAQTU/s1600/Header%20-%20OSS%20-%20Kubernetes%20Gateway%20API%20graduates%20to%20GA%20%281%29.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg5Ew4zwACrflispkhfyO2RPltMX4eBYd9sSXXT5hNZ6G2FugX5WAu3wE3fpuJya824APcC0qjVwLxzwxqg2hK957Z4bln1aCl6_Pwwht9BQW6HF-cqjgdQZzRRvhV2qlar_m_t37Pkl5O4BptwMY1D2CX-g_U_x5XhXvrCiaU93gX0GwRTtyCEanRAQTU/s1600/Header%20-%20OSS%20-%20Kubernetes%20Gateway%20API%20graduates%20to%20GA%20%281%29.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg5Ew4zwACrflispkhfyO2RPltMX4eBYd9sSXXT5hNZ6G2FugX5WAu3wE3fpuJya824APcC0qjVwLxzwxqg2hK957Z4bln1aCl6_Pwwht9BQW6HF-cqjgdQZzRRvhV2qlar_m_t37Pkl5O4BptwMY1D2CX-g_U_x5XhXvrCiaU93gX0GwRTtyCEanRAQTU/s1600/Header%20-%20OSS%20-%20Kubernetes%20Gateway%20API%20graduates%20to%20GA%20%281%29.png"/&gt;&lt;/a&gt;

&lt;p&gt;As AI workloads move from experimental notebooks into massive production environments, the industry is rallying around a new standard to ensure these workloads remain portable, reliable, and efficient.&lt;/p&gt;

&lt;p&gt;At the heart of this shift is the launch of the &lt;strong&gt;&lt;a href="https://github.com/cncf/k8s-ai-conformance"&gt;Certified Kubernetes AI Conformance program&lt;/a&gt;&lt;/strong&gt;.&lt;/p&gt;&lt;p&gt;
This initiative represents a significant investment in common, accessible, industry-wide standards, ensuring that the benefits of AI-first Kubernetes are available to everyone.&lt;/p&gt;

&lt;h1&gt;How Kubernetes is Evolving for an AI-First World&lt;/h1&gt;
&lt;p&gt;Traditional Kubernetes was built for stateless, cloud-first applications. However, AI workloads introduce unique complexities that standard conformance doesn't fully cover:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Specific Hardware Demands:&lt;/strong&gt; AI models require precise control over accelerators like GPUs and TPUs.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Networking and Latency:&lt;/strong&gt; Inference and distributed training require low-latency networking and specialized configurations.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Stateful Nature:&lt;/strong&gt; Unlike traditional web apps, AI often relies on complex, stateful data pipelines.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;The AI Conformance program acts as a &lt;strong&gt;superset&lt;/strong&gt; of standard Kubernetes conformance. To be AI-conformant, a platform must first pass all standard Kubernetes tests and then meet additional requirements specifically for AI.&lt;/p&gt;

&lt;h2&gt;Key Pillars of the AI Conformance Program&lt;/h2&gt;
&lt;p&gt;The Kubernetes AI Conformance program is being driven in the open via the &lt;a href="https://github.com/kubernetes-sigs/ai-conformance"&gt;AI Conformance&lt;/a&gt; repository. This cross-company effort is led by industry experts Janet Kuo (Google), Mario Fahlandt (Kubermatic GmbH), Rita Zhang (Microsoft), and Yuan Tang (Red Hat). This program is a collaborative effort within the open source ecosystem, involving multiple organizations and individuals. By developing this program in the open, the community ensures the standard is built on trust and directly addresses the diverse needs of the global ecosystem. The program establishes a verified set of capabilities that platforms across the industry, like Google Kubernetes Engine (GKE) and Azure Kubernetes Service (AKS), are already adopting.&lt;/p&gt;

&lt;h4&gt;Dynamic Resource Allocation (DRA)&lt;/h4&gt;
&lt;p&gt;&lt;a href="https://kubernetes.io/docs/concepts/scheduling-eviction/dynamic-resource-allocation/"&gt;DRA&lt;/a&gt; is the cornerstone of the new standard. It shifts resource allocation from simple accelerator quantity to fine-grained hardware control via attributes. For data scientists, this means they can now request specific hardware based on characteristics such as memory capacity or specialized capabilities, ensuring the environment perfectly matches the model's needs.&lt;/p&gt;

&lt;h4&gt;All-or-Nothing Scheduling&lt;/h4&gt;
&lt;p&gt;Distributed training jobs often face "deadlocks" where some pods start while others wait for resources, wasting expensive GPU time. AI Conformance mandates support for solutions like &lt;a href="https://kueue.sigs.k8s.io/"&gt;Kueue&lt;/a&gt;, allowing developers to ensure a job only begins when &lt;em&gt;all&lt;/em&gt; required resources are available, improving cluster efficiency.&lt;/p&gt;

&lt;h4&gt;Intelligent Autoscaling for AI Workloads&lt;/h4&gt;
&lt;p&gt;Conformant clusters must support &lt;a href="https://kubernetes.io/docs/concepts/workloads/autoscaling/horizontal-pod-autoscale/"&gt;Horizontal Pod Autoscaling (HPA)&lt;/a&gt; based on custom AI metrics, such as GPU or TPU utilization, rather than just standard CPU/memory. This allows clusters to scale up for heavy inference demand and scale down to save costs when idle.&lt;/p&gt;

&lt;h4&gt;Standardized Observability for High Performance&lt;/h4&gt;
&lt;p&gt;To manage AI at scale, you need deep visibility. The program requires platforms to expose rich accelerator performance metrics directly, enabling teams to monitor inference latency, throughput, and hardware health in a standardized way.&lt;/p&gt;

&lt;h2&gt;What's Next?&lt;/h2&gt;
&lt;p&gt;The launch of AI Conformance is just the beginning. As we head further into 2026, the community is adding &lt;strong&gt;automated testing&lt;/strong&gt; for certification and expanding the standard to include more advanced inference patterns and stricter security requirements.&lt;/p&gt;

&lt;p&gt;The ultimate goal? Making "AI-readiness" an inherent, invisible part of the Kubernetes standard.&lt;/p&gt;

&lt;p&gt;To get involved and help shape the future of AI on Kubernetes, consider joining &lt;a href="https://github.com/kubernetes-sigs/wg-ai-conformance"&gt;AI Conformance in Open Source Kubernetes&lt;/a&gt;. We welcome diverse perspectives, as your expertise and feedback are crucial to building a robust and inclusive standard for all.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4168810863999534660" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4168810863999534660" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/04/kubernetes-goes-ai-first-unpacking-the-new-ai-conformance-program.html" rel="alternate" title="Kubernetes goes AI-First: Unpacking the new AI conformance program" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg5Ew4zwACrflispkhfyO2RPltMX4eBYd9sSXXT5hNZ6G2FugX5WAu3wE3fpuJya824APcC0qjVwLxzwxqg2hK957Z4bln1aCl6_Pwwht9BQW6HF-cqjgdQZzRRvhV2qlar_m_t37Pkl5O4BptwMY1D2CX-g_U_x5XhXvrCiaU93gX0GwRTtyCEanRAQTU/s72-c/Header%20-%20OSS%20-%20Kubernetes%20Gateway%20API%20graduates%20to%20GA%20%281%29.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-3140178923879094443</id><published>2026-04-02T09:00:00.000-07:00</published><updated>2026-04-02T09:00:00.115-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="AI Innovation"/><category scheme="http://www.blogger.com/atom/ns#" term="Apache 2.0"/><category scheme="http://www.blogger.com/atom/ns#" term="Gemma"/><category 
scheme="http://www.blogger.com/atom/ns#" term="Gemmaverse"/><category scheme="http://www.blogger.com/atom/ns#" term="Open Models"/><title type="text">Gemma 4: Expanding the Gemmaverse with Apache 2.0</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Nia Castelly&lt;/author&gt; &amp;amp; &lt;author&gt;amanda casari&lt;/author&gt;, Google Open Source &amp;amp; &lt;author&gt;Olivier Lacombe&lt;/author&gt;, Google DeepMind&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiUJHmLQQD8rwCMVTcf48kwlolcDSUHMnVBQp_TvBu00sC8g9WPNUyd3b1Cheo1pr8joxf2VdmjR_OjoLsAb-aDPyz3AODACRW79cn_erskBcBSjEie3dM-gOePdbcIf-mTLhep2CRMpwy4L7GRr_z-haWjO7XKhCIiSPAHUxNdwAu4n4fPIOfJqdCCO2g/s1600/gemma-4_blog_open_source_keyword_header-light.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiUJHmLQQD8rwCMVTcf48kwlolcDSUHMnVBQp_TvBu00sC8g9WPNUyd3b1Cheo1pr8joxf2VdmjR_OjoLsAb-aDPyz3AODACRW79cn_erskBcBSjEie3dM-gOePdbcIf-mTLhep2CRMpwy4L7GRr_z-haWjO7XKhCIiSPAHUxNdwAu4n4fPIOfJqdCCO2g/s1600/gemma-4_blog_open_source_keyword_header-light.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiUJHmLQQD8rwCMVTcf48kwlolcDSUHMnVBQp_TvBu00sC8g9WPNUyd3b1Cheo1pr8joxf2VdmjR_OjoLsAb-aDPyz3AODACRW79cn_erskBcBSjEie3dM-gOePdbcIf-mTLhep2CRMpwy4L7GRr_z-haWjO7XKhCIiSPAHUxNdwAu4n4fPIOfJqdCCO2g/s1600/gemma-4_blog_open_source_keyword_header-light.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiUJHmLQQD8rwCMVTcf48kwlolcDSUHMnVBQp_TvBu00sC8g9WPNUyd3b1Cheo1pr8joxf2VdmjR_OjoLsAb-aDPyz3AODACRW79cn_erskBcBSjEie3dM-gOePdbcIf-mTLhep2CRMpwy4L7GRr_z-haWjO7XKhCIiSPAHUxNdwAu4n4fPIOfJqdCCO2g/s1600/gemma-4_blog_open_source_keyword_header-light.png"/&gt;&lt;/a&gt;

&lt;h2&gt;Gemma 4: Expanding the Gemmaverse with Apache 2.0&lt;/h2&gt;

&lt;p&gt;For over 20 years, Google has maintained an unwavering commitment to the open-source community. Our belief has been simple: open technology is &lt;a href="https://blog.google/technology/research/open-source-and-open-data/"&gt;good for our company, good for our users, and good for our world&lt;/a&gt;. This commitment to fostering collaborative learning and rigorous testing has consistently proven more effective than pursuing isolated improvements. It's been our approach ever since the 2005 launch of &lt;a href="https://summerofcode.withgoogle.com/"&gt;Google Summer of Code&lt;/a&gt;, and through our open-sourcing of &lt;a href="https://kubernetes.io/"&gt;Kubernetes&lt;/a&gt;, &lt;a href="https://www.android.com/"&gt;Android&lt;/a&gt;, and &lt;a href="https://go.dev/"&gt;Go&lt;/a&gt;, and it remains central to our ongoing, daily work alongside maintainers and organizations.&lt;/p&gt;

&lt;p&gt;Today, we are taking a significant step forward in that journey. Since its first launch, the community has downloaded Gemma models over 400 million times and built a vibrant universe of over 100,000 inspiring variants, known in the community as the &lt;a href="https://deepmind.google/models/gemma/gemmaverse/"&gt;Gemmaverse&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;The &lt;a href="https://blog.google/innovation-and-ai/technology/developers-tools/gemma-4/"&gt;release of Gemma 4&lt;/a&gt; under the &lt;a href="https://www.apache.org/licenses/LICENSE-2.0"&gt;Apache 2.0 license&lt;/a&gt; — our most capable open models ranging from edge devices to 31B parameters — provides cutting-edge AI models for this community of developers. The industry-standard Apache license broadens the horizon for Gemma 4's applicability and usefulness, providing well-understood terms for modification, reuse, and further development.&lt;/p&gt;

&lt;h2&gt;A long legacy of open research&lt;/h2&gt;
&lt;p&gt;We are committed to making helpful, accessible AI technology and research so that everyone can innovate and grow. That's why many of our innovations are freely available, easy to deploy, and useful to developers across the globe. We have a long history of making our foundational machine-learning research, including &lt;a href="https://code.google.com/archive/p/word2vec/"&gt;word2vec&lt;/a&gt;, &lt;a href="https://docs.jax.dev/en/latest/"&gt;Jax&lt;/a&gt;, and the seminal &lt;a href="https://research.google/pubs/attention-is-all-you-need/"&gt;Transformers paper&lt;/a&gt;, publicly available for anyone to use and study.&lt;/p&gt;

&lt;p&gt;We accelerated this commitment last year. By sharing models that &lt;a href="https://blog.google/innovation-and-ai/products/2025-research-breakthroughs/"&gt;interpret complex genomic data&lt;/a&gt; and &lt;a href="https://blog.google/innovation-and-ai/products/google-gemma-ai-cancer-therapy-discovery/"&gt;identify tumor variants&lt;/a&gt;, we contributed to the "&lt;a href="https://research.google/blog/google-research-2025-bolder-breakthroughs-bigger-impact/"&gt;magic cycle&lt;/a&gt;" of research breakthroughs that translate into real-world impact. This week, however, marks a pivotal moment — &lt;strong&gt;Gemma 4 models are the first in the Gemmaverse to be released under the OSI-approved Apache 2.0 license.&lt;/strong&gt;&lt;/p&gt;

&lt;h2&gt;Empowering developers and researchers to deliver breakthrough innovations&lt;/h2&gt;
&lt;p&gt;Since we first launched Gemma in 2024, the community of early adopters has grown into a vast ecosystem of builders, researchers, and problem solvers. Gemma is already supporting sovereign digital infrastructure, from automating &lt;a href="https://publicpolicy.google/article/ukraine-ai-public-service/"&gt;state licensing in Ukraine&lt;/a&gt; to scaling &lt;a href="https://www.youtube.com/watch?v=ZhExnit0UdM"&gt;Project Navarasa across India's 22 official languages&lt;/a&gt;. And we know that developers need autonomy, control, and clarity in licensing for further AI innovation to reach its full potential.&lt;/p&gt;

&lt;p&gt;Gemma 4 brings three essential elements of free and open-source software directly to the community:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Autonomy:&lt;/strong&gt; By letting people build on and modify the Gemma 4 models, we are empowering researchers and developers with the freedom to advance their own breakthrough innovations however they see fit.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Control:&lt;/strong&gt; We understand that many developers require precise control over their development and deployment environments. Gemma 4 allows for local, private execution that doesn't rely on cloud-only infrastructure.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Clarity:&lt;/strong&gt; By applying the industry-standard Apache 2.0 license terms, we are providing clarity about developers' rights and responsibilities so that they can build freely and confidently from the ground up without the need to navigate prescriptive terms of service.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Building together to drive real-world impact&lt;/h2&gt;
&lt;p&gt;Gemma 4, as a release, is an invitation. Whether you are &lt;a href="https://blog.google/innovation-and-ai/products/dolphingemma/"&gt;a scientific researcher exploring the language of dolphins&lt;/a&gt;, an industry developer building the next generation of open AI agents, or a public institution looking to provide more effective, efficient, and localized services to your citizens, Google is excited to continue building with you. The Gemmaverse is your playground, and with Apache 2.0, the possibilities are more boundless than ever.&lt;/p&gt;

&lt;p&gt;We can't wait to see what you build.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3140178923879094443" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3140178923879094443" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/gemma-4-expanding-the-gemmaverse-with-apache-20.html" rel="alternate" title="Gemma 4: Expanding the Gemmaverse with Apache 2.0" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiUJHmLQQD8rwCMVTcf48kwlolcDSUHMnVBQp_TvBu00sC8g9WPNUyd3b1Cheo1pr8joxf2VdmjR_OjoLsAb-aDPyz3AODACRW79cn_erskBcBSjEie3dM-gOePdbcIf-mTLhep2CRMpwy4L7GRr_z-haWjO7XKhCIiSPAHUxNdwAu4n4fPIOfJqdCCO2g/s72-c/gemma-4_blog_open_source_keyword_header-light.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-1037770633549871659</id><published>2026-03-31T11:30:00.000-07:00</published><updated>2026-04-02T08:11:46.250-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="PostgreSQL"/><title type="text">Google Cloud: Investing in the future of PostgreSQL</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Dilip Kumar&lt;/author&gt;, Cloud SQL for PostgreSQL&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgwzJszSMRbAj7eM_f4aXx0OqZW72VjPDjvgV7ew9WiCUvUgdFtqunL5ySeShN7KD_eCNFlYdJY6gDRMjLmC7CzSriHzAHwEdwyOYdbmN2T6GyLf8mgI3OHcY4e9vpsPe6AE_t5lbw3E5cdV91M78Z83vRFHL3Ny_wiGTDJsrPGkKPZ6cco0wwlWhaLt-8/s1600/OSS-Logo-Banner.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgwzJszSMRbAj7eM_f4aXx0OqZW72VjPDjvgV7ew9WiCUvUgdFtqunL5ySeShN7KD_eCNFlYdJY6gDRMjLmC7CzSriHzAHwEdwyOYdbmN2T6GyLf8mgI3OHcY4e9vpsPe6AE_t5lbw3E5cdV91M78Z83vRFHL3Ny_wiGTDJsrPGkKPZ6cco0wwlWhaLt-8/s1600/OSS-Logo-Banner.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgwzJszSMRbAj7eM_f4aXx0OqZW72VjPDjvgV7ew9WiCUvUgdFtqunL5ySeShN7KD_eCNFlYdJY6gDRMjLmC7CzSriHzAHwEdwyOYdbmN2T6GyLf8mgI3OHcY4e9vpsPe6AE_t5lbw3E5cdV91M78Z83vRFHL3Ny_wiGTDJsrPGkKPZ6cco0wwlWhaLt-8/s1600/OSS-Logo-Banner.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgwzJszSMRbAj7eM_f4aXx0OqZW72VjPDjvgV7ew9WiCUvUgdFtqunL5ySeShN7KD_eCNFlYdJY6gDRMjLmC7CzSriHzAHwEdwyOYdbmN2T6GyLf8mgI3OHcY4e9vpsPe6AE_t5lbw3E5cdV91M78Z83vRFHL3Ny_wiGTDJsrPGkKPZ6cco0wwlWhaLt-8/s1600/OSS-Logo-Banner.png"/&gt;&lt;/a&gt;

&lt;p&gt;At Google Cloud, we are deeply committed to open source, and PostgreSQL is a cornerstone of our managed database offerings, including Cloud SQL &amp; AlloyDB.&lt;/p&gt;

&lt;p&gt;Continuing our work with the PostgreSQL community, we've been contributing to the core engine and participating in the patch review process. Below is a summary of that technical activity, highlighting our efforts to enhance the performance, stability, and resilience of the upstream project. By strengthening these core capabilities, we aim to drive innovation that benefits the entire global PostgreSQL ecosystem and its diverse user base.&lt;/p&gt;

&lt;p&gt;Our investments in PostgreSQL logical replication aim to unlock critical capabilities for all users. By enhancing conflict detection, we are paving the way for robust active-active replication setups, increasing write scalability and high availability. We are also focused on expanding logical replication to cover missing objects. This is key to enabling major version upgrades with minimal downtime, offering a more flexible alternative to &lt;code&gt;pg_upgrade&lt;/code&gt;. Furthermore, our ongoing contributions to bug fixes are dedicated to improving the overall stability and resilience of PostgreSQL for everyone in the community.&lt;/p&gt;

&lt;h2&gt;Technical contributions: July 2025 – December 2025&lt;/h2&gt;
&lt;p&gt;The following sections detail technical enhancements and bug fixes contributed to the &lt;a href="https://www.postgresql.org/"&gt;PostgreSQL&lt;/a&gt; open source project between July 2025 and December 2025. Primary engineering efforts were dedicated to advancing logical replication toward active-active capabilities, implementing missing features, optimizing pg_upgrade, and fixing bugs.&lt;/p&gt;

&lt;h3&gt;Logical Replication Enhancements&lt;/h3&gt;
&lt;p&gt;Logical replication is a critical feature of PostgreSQL, enabling capabilities like near-zero-downtime major version upgrades, selective replication, and active-active replication. We have been working towards closing some of the key gaps.&lt;/p&gt;

&lt;p&gt;&lt;a href=" https://commitfest.postgresql.org/patch/5378/"&gt;Automatic Conflict Detection&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Active-active replication is a mechanism for increasing PostgreSQL write scalability. One of the most significant hurdles for active-active PostgreSQL setups is handling row-level conflicts when the same data is modified on two different nodes. Historically, these conflicts could stall replication, requiring manual intervention.&lt;/p&gt;&lt;p&gt;
In this cycle, the community committed &lt;strong&gt;Automatic Conflict Detection&lt;/strong&gt; which is the first phase of Automatic Conflict Detection and Resolution. This foundation allows the replication worker to automatically detect when an incoming change (Insert, Update, or Delete) conflicts with the local state.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar helped by performing code and design reviews. He is currently advancing the project's second phase, focusing on implementing conflict logging into a dedicated log table.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://commitfest.postgresql.org/patch/5111/"&gt;Logical replication of sequences&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;Until recently, logical replication in PostgreSQL was primarily limited to table data. Sequences did not synchronize automatically. This meant that during a migration or a major version upgrade, DBAs had to manually sync sequence values to prevent "duplicate key" errors on the new primary node. Since many databases rely on sequences, this was a significant hurdle for logical replication.&lt;/p&gt;&lt;p&gt;
  &lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar helped by performing code and design reviews.&lt;/p&gt;&lt;p&gt;
&lt;a href="https://www.postgresql.org/message-id/E1uoFOS-000b7I-05@gemulon.postgresql.org"&gt;Drop subscription deadlock&lt;/a&gt;&lt;/p&gt;&lt;p&gt;
The DROP SUBSCRIPTION command previously held an exclusive lock while connecting to the publisher to delete a replication slot.&lt;/p&gt;&lt;p&gt;
If the publisher was a new database on the same server, the connection process would stall while trying to access that same locked catalog.&lt;/p&gt;&lt;p&gt;
This conflict created a "self-deadlock," where the command was essentially waiting for itself to finish.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar analyzed and authored the fix.&lt;/p&gt;

&lt;h3&gt;Upgrade Resilience&lt;/h3&gt;
&lt;p&gt;Operational ease of use and friction-less upgrades are important to PostgreSQL users. We have been working on improving the upgrade experience.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://commitfest.postgresql.org/patch/5737/ "&gt;pg_upgrade optimization for Large Objects&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;For databases with massive volumes of Large Objects, upgrades could previously span several days. This bottleneck is resolved by exporting the underlying data table directly rather than executing individual Large Object commands, resulting in an upgrade process that is several orders of magnitude faster.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors&lt;/strong&gt;: Hannu Krosing, Nitin Motiani, and Saurabh Uttam highlighted the severity of the issue, proposed the initial fix, and actively drove it to resolution.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://www.postgresql.org/message-id/E1ua6NR-005huo-2e%40gemulon.postgresql.org"&gt;Prevent logical slot invalidation during upgrade&lt;/a&gt;: &lt;/p&gt;&lt;p&gt;
Previously, an upgrade to PG17 failed if max_slot_wal_keep_size was not set to -1. This fix improves pg_upgrade's resilience, eliminating the need for users to manually set max_slot_wal_keep_size to -1. The server now automatically retains the necessary WAL data for upgrading logical replication slots, simplifying the upgrade process and reducing the risk of errors.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar analyzed and authored the fix.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://www.postgresql.org/message-id/E1vA9eM-002KBc-0G%40gemulon.postgresql.org"&gt;pg_upgrade NOT NULL constraint related bug fix&lt;/a&gt;&lt;/p&gt;&lt;p&gt;
A bug in pg_dump previously failed to preserve non-inherited NOT NULL constraints on inherited columns during upgrades from version 17 or older.&lt;/p&gt;&lt;p&gt;
The fix updates the underlying query to ensure these specific schema constraints are correctly identified and migrated during the pg_upgrade process.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar analyzed and authored the fix.&lt;/p&gt;

&lt;h3&gt;Miscellaneous Bug Fixes&lt;/h3&gt;
&lt;p&gt;We continue to contribute bug fixes to help improve the stability and quality of PostgreSQL.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://www.postgresql.org/message-id/E1v48lY-0011l7-1p@gemulon.postgresql.org"&gt;Make pgstattuple more robust about empty or invalid index pages&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;pgstattuple is a PostgreSQL extension for analyzing the physical storage of tables and indexes at the row (tuple) level, to determine whether a table is in need of maintenance. However, pgstattuple would raise errors with empty or invalid index pages in hash and gist code. This bug fix handles the empty and invalid index pages to make pgstattuple more robust.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors&lt;/strong&gt;: Nitin Motiani and Dilip Kumar participated as author and reviewer.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://www.postgresql.org/message-id/E1urGxY-001vb9-2n%40gemulon.postgresql.org"&gt;Loading extension from different path&lt;/a&gt;&lt;/p&gt;&lt;p&gt;
A bug incorrectly stripped the prefix from nested module paths when dynamically loading shared library files. This caused libraries in subdirectories to fail to load. The bug fix ensures the prefix is only removed for simple filenames, allowing the dynamic library expander to correctly find nested paths.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors&lt;/strong&gt;: Dilip Kumar, reported and co-authored the fix for this bug.&lt;/p&gt;

&lt;p&gt;&lt;a href="https://www.postgresql.org/message-id/E1uzT3P-001RAm-2n@gemulon.postgresql.org"&gt;WAL flush logic hardening&lt;/a&gt;&lt;/p&gt;

&lt;p&gt;XLogFlush() and XLogNeedsFlush() are internal PostgreSQL functions that ensure log records are written to the WAL to ensure durability. In certain edge cases, like the end-of-recovery checkpoint, the functions relied on inconsistent criteria to decide which code path to follow. This inconsistency posed a risk for upcoming features, such as Asynchronous I/O for writes, that require XLogNeedsFlush() to work reliably.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Contributors:&lt;/strong&gt; Dilip Kumar, co-authored the fix for this bug.&lt;/p&gt;

&lt;h3&gt;Major Features in Development&lt;/h3&gt;
&lt;p&gt;Beyond our recent commits, the team is actively working on several high-impact proposals to further strengthen the PostgreSQL ecosystem.&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://commitfest.postgresql.org/patch/6252/"&gt;Conflict Log Table for Detection&lt;/a&gt;: Dilip Kumar is developing a proposal for a conflict log table designed to offer a queryable, structured record of all logical replication conflicts. This feature would include a configuration option to determine whether conflict details are recorded in the history table, server logs, or both.&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://commitfest.postgresql.org/patch/6219/"&gt;Dumping tables data in multiple chunks in pg_dump&lt;/a&gt;: Hannu Krosing is working on this feature, this enables parallel workers for single, large tables (terabytes in size) to saturate hardware limits and speed up exports.&lt;/li&gt;
&lt;/ul&gt;

&lt;ul&gt;
&lt;li&gt;&lt;a href="https://commitfest.postgresql.org/patch/5893/"&gt;Adding pg_dump flag for parallel export to pipes&lt;/a&gt;: Nitin Motiani is working on this feature. This introduces a flag which allows the user to provide pipe commands while doing parallel export/import from pg_dump/pg_restore (in directory format).&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Leadership&lt;/h2&gt;
&lt;p&gt;Beyond code, our team supports the ecosystem through community leadership. We are pleased to share that Dilip Kumar has been selected for the PGConf.dev 2026 &lt;a href="https://2026.pgconf.dev/about#:~:text=Dilip-,Kumar,-Google"&gt;Program Committee&lt;/a&gt; to help shape the project's premier developer conference.&lt;/p&gt;

&lt;h2&gt;Community Roadmap: Your Feedback Matters&lt;/h2&gt;
&lt;p&gt;We encourage you to utilize the comments area to propose new capabilities or refinements you wish to see in future iterations, and to identify key areas where the PostgreSQL open-source community should focus its investments.&lt;/p&gt;

&lt;h2&gt;Acknowledgement&lt;/h2&gt;
&lt;p&gt;We want to thank our open source contributors for their dedication to improving the upstream project.&lt;/p&gt;&lt;p&gt;
&lt;a href="https://www.linkedin.com/in/dilip-kumar-6b75863a/"&gt;Dilip Kumar&lt;/a&gt;: PostgreSQL &lt;a href="https://www.postgresql.org/community/contributors/#:~:text=Dilip%20Kumar%20(dilipbalaut%20at%20gmail.com)"&gt;significant&lt;/a&gt; contributor&lt;/p&gt;&lt;p&gt;
&lt;a href="https://www.linkedin.com/in/hannukrosing/"&gt;Hannu Krosing&lt;/a&gt;: PostgreSQL &lt;a href="https://www.postgresql.org/community/contributors/#:~:text=Hannu%20Krosing%20(hannuk%20at%20google.com)"&gt;significant&lt;/a&gt; contributor&lt;/p&gt;&lt;p&gt;
&lt;a href="https://www.linkedin.com/in/nitinmotiani/"&gt;Nitin Motiani&lt;/a&gt;: Contributing features and bug fixes&lt;/p&gt;&lt;p&gt;
&lt;a href="https://www.linkedin.com/in/itssaurabh/"&gt;Saurabh Uttam&lt;/a&gt;: Contributing bug fixes&lt;/p&gt;

&lt;p&gt;We also extend our sincere gratitude to the wider PostgreSQL open source members, especially the committers and reviewers, for their guidance, reviews, and for collaborating with us to make PostgreSQL the most advanced open source database in the world.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/1037770633549871659" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/1037770633549871659" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/google-cloud-investing-in-the-future-of-postgresql.html" rel="alternate" title="Google Cloud: Investing in the future of PostgreSQL" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgwzJszSMRbAj7eM_f4aXx0OqZW72VjPDjvgV7ew9WiCUvUgdFtqunL5ySeShN7KD_eCNFlYdJY6gDRMjLmC7CzSriHzAHwEdwyOYdbmN2T6GyLf8mgI3OHcY4e9vpsPe6AE_t5lbw3E5cdV91M78Z83vRFHL3Ny_wiGTDJsrPGkKPZ6cco0wwlWhaLt-8/s72-c/OSS-Logo-Banner.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4492552289481409192</id><published>2026-03-23T11:30:00.000-07:00</published><updated>2026-03-26T07:28:42.361-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="ML Dev Tools"/><category scheme="http://www.blogger.com/atom/ns#" term="TPU Optimization"/><category scheme="http://www.blogger.com/atom/ns#" term="TPU Performance"/><title type="text">Advanced TPU optimization with XProf: Continuous profiling, utilization insights, and LLO bundles</title><content type="html">&lt;p class="byline"&gt;by 
&lt;author&gt;Yogesh SY&lt;/author&gt;, AI Infra @ Google&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhs7m4o477ZQCJ0bvFJQ1vcwiRrwnWVJou08qXoJTGfrcZ5Asx-EEh-QiddwQX-44AWSK9_ghauVX9f1kaEF_MY7V3qht0qRkcKCDRi6r9YzHMl-kVTWAUCuaJZm44mH-gUeAQqTd9SQplucKvIAJGc2EkAiwHObN5crMYcEFba2-qoRhWft-IQPFithPQ/s1600/xprof-hero-image-2436x1200_k5YQpVQ.max-2200x2200.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhs7m4o477ZQCJ0bvFJQ1vcwiRrwnWVJou08qXoJTGfrcZ5Asx-EEh-QiddwQX-44AWSK9_ghauVX9f1kaEF_MY7V3qht0qRkcKCDRi6r9YzHMl-kVTWAUCuaJZm44mH-gUeAQqTd9SQplucKvIAJGc2EkAiwHObN5crMYcEFba2-qoRhWft-IQPFithPQ/s1600/xprof-hero-image-2436x1200_k5YQpVQ.max-2200x2200.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhs7m4o477ZQCJ0bvFJQ1vcwiRrwnWVJou08qXoJTGfrcZ5Asx-EEh-QiddwQX-44AWSK9_ghauVX9f1kaEF_MY7V3qht0qRkcKCDRi6r9YzHMl-kVTWAUCuaJZm44mH-gUeAQqTd9SQplucKvIAJGc2EkAiwHObN5crMYcEFba2-qoRhWft-IQPFithPQ/s1600/xprof-hero-image-2436x1200_k5YQpVQ.max-2200x2200.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhs7m4o477ZQCJ0bvFJQ1vcwiRrwnWVJou08qXoJTGfrcZ5Asx-EEh-QiddwQX-44AWSK9_ghauVX9f1kaEF_MY7V3qht0qRkcKCDRi6r9YzHMl-kVTWAUCuaJZm44mH-gUeAQqTd9SQplucKvIAJGc2EkAiwHObN5crMYcEFba2-qoRhWft-IQPFithPQ/s1600/xprof-hero-image-2436x1200_k5YQpVQ.max-2200x2200.png"/&gt;&lt;/a&gt;

&lt;h2&gt;Advanced TPU optimization with XProf: Continuous profiling, utilization insights, and LLO bundles&lt;/h2&gt;
&lt;p&gt;In our &lt;a href="https://cloud.google.com/blog/topics/developers-practitioners/supercharge-ml-performance-on-xpus-with-the-new-xprof-profiler-and-cloud-diagnostics-xprof-library/"&gt;previous post&lt;/a&gt;, we introduced the updated XProf and the Cloud Diagnostics XProf library, which are designed to help developers identify model bottlenecks and optimize memory usage. As machine learning workloads on TPUs continue to grow in complexity—spanning both massive training runs and large-scale inference—developers require even deeper visibility into how their code interacts with the underlying hardware.&lt;br&gt;
Today, we are exploring three advanced capabilities designed to provide "flight recorder" visibility and instruction-level insights: &lt;strong&gt;Continuous Profiling Snapshots&lt;/strong&gt;, the &lt;strong&gt;Utilization Viewer&lt;/strong&gt;, and &lt;strong&gt;LLO Bundle Visualization&lt;/strong&gt;.&lt;/p&gt;

&lt;h3&gt;Continuous Profiling Snapshots: The "Flight Recorder" for ML&lt;/h3&gt;
&lt;p&gt;&lt;a href="https://github.com/openxla/xprof/blob/master/docs/capturing_profiles.md"&gt;Standard profiling&lt;/a&gt; often relies on "sampling mode," where users manually trigger high-fidelity traces for short, predefined durations. While effective for general optimization, this traditional approach can miss transient anomalies, intermittent stragglers, or unexpected performance regressions that occur during long-running training jobs.&lt;br&gt;
To address this visibility gap, XProf is introducing &lt;strong&gt;&lt;a href="https://github.com/openxla/xprof/blob/master/docs/jax_profiling.md#capture-continuous-profiling-snapshots"&gt;Continuous Profiling Snapshots&lt;/a&gt;&lt;/strong&gt;. This feature functions as an "always-on" flight recorder for your TPU workloads.&lt;br&gt;
&lt;strong&gt;How it works:&lt;/strong&gt; Continuous profiling snapshots (&lt;a href="https://colab.research.google.com/drive/1pc_hZgf4ym3YYNxIXwdp6jZ_EpF7PWs6?usp=sharing"&gt;Google Colab&lt;/a&gt;) operate quietly in the background with minimal system overhead (approximately 7µs per packet CPU overhead). It utilizes a host-side circular buffer of roughly 2GB to seamlessly retain the last ~90 seconds of performance data. This architecture allows developers to snapshot performance data programmatically precisely when an anomaly occurs, bypassing the overhead and unpredictability of traditional one-shot profiling.&lt;/p&gt;

&lt;figure class="wide"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg0ClZxl61gmNXLttiGFUylm-jzO3avpMLtOWhjkC83gnaCvgCetO3ShwhNktXy29L6hqrvdbDdzXhyclcRDAW62TNRqQygLzh_BzQ2gOA342c1ULN3bIsJt2uRNUSXdvtQYorx6twEl16aE0zr5U5E5JJDqUiUcIgLIFG_fuA96HF9fX1Pr-usqohS50M/s1600/advancedtpuopt--xl9c7z50pm9.png"&gt;
  &lt;img alt="A diagram illustrating the limitation of traditional trace capturing, where a transient performance anomaly is missed because the trace capture was manually triggered before or after the anomaly occurred." src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg0ClZxl61gmNXLttiGFUylm-jzO3avpMLtOWhjkC83gnaCvgCetO3ShwhNktXy29L6hqrvdbDdzXhyclcRDAW62TNRqQygLzh_BzQ2gOA342c1ULN3bIsJt2uRNUSXdvtQYorx6twEl16aE0zr5U5E5JJDqUiUcIgLIFG_fuA96HF9fX1Pr-usqohS50M/s1600/advancedtpuopt--xl9c7z50pm9.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Figure 1: Traditional trace capturing without Continuous Profiling Snapshots.&lt;/figcaption&gt;
&lt;/figure&gt;
&lt;br&gt;
&lt;figure class="wide"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhunrmuVmDnCOF3hAEMhWeUZGBHjWdMiqgrIADyUcvAWncL_OMaHQ7-bxk8XuEWhj9XlZP2qkF-5lnoQetdrpauX1heiJcBUbxMO339-qb7Jp_G8ZXQIHHCbXtQ3xYtrmtG77thF7uqVoLl2cEAJhKIhWfWVdGMU4eNh8OTvyTZpxwEXIQvOFMdozXAtJI/s1600/advancedtpuopt--52fb6ub2g5p.png"&gt;
  &lt;img alt="A diagram showing how Continuous Profiling Snapshots capture comprehensive context. A performance anomaly occurs, and the 'always-on' circular buffer allows the user to snapshot the performance data, capturing the anomaly and the preceding 90 seconds of context." src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhunrmuVmDnCOF3hAEMhWeUZGBHjWdMiqgrIADyUcvAWncL_OMaHQ7-bxk8XuEWhj9XlZP2qkF-5lnoQetdrpauX1heiJcBUbxMO339-qb7Jp_G8ZXQIHHCbXtQ3xYtrmtG77thF7uqVoLl2cEAJhKIhWfWVdGMU4eNh8OTvyTZpxwEXIQvOFMdozXAtJI/s1600/advancedtpuopt--52fb6ub2g5p.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Figure 2: Comprehensive context captured via Continuous Profiling Snapshots.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;h3&gt;Key technical features include:&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Circular Buffer Management:&lt;/strong&gt; Continuously holds recent trace data to ensure you can capture the exact moments leading up to an anomaly or regression.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Out-of-band State Tracking:&lt;/strong&gt; A lightweight service polls hardware registers for P-state (voltage and frequency) and trace-drop counters, ensuring the snapshot contains the necessary environmental context for accurate analysis.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Context Reconstruction:&lt;/strong&gt; The system safely decouples state capture from the trace stream. This ensures that any arbitrary snapshot retains the ground truth required for precise, actionable debugging.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;Visualizing Hardware Efficiency with the Utilization Viewer&lt;/h3&gt;
&lt;p&gt;Raw performance counters are powerful, but interpreting thousands of raw hardware metrics can be a daunting, time-consuming process. The new &lt;strong&gt;Utilization Viewer&lt;/strong&gt; bridges the gap between raw data streams and actionable optimization strategies.&lt;br&gt;
This tool translates raw performance counter values into easily understandable utilization percentages for specific hardware components, such as the TensorCore (TC), SparseCore (SC), and High Bandwidth Memory (HBM).&lt;/p&gt;

&lt;figure class="wide"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgMp-hl6SOka0_pQyxzRjuppTBA7a-yeVzWHhq5-LYsNqUceNHqGiyuFEqLE7pySkkOqmjw3oCvm9k_6oHSzWj5Z-3lzdHrRYYK_RMlxJCLCxrYJ6Dz9jnCZ-MViImgDDYwDebUm_E8IKs2VEK2JEuSVrfm438iNLrGKSzL5eiMGeLUHcKNXcd5p3ogqnQ/s1600/advancedtpuopt--owa3m5zj8xo.png"&gt;
  &lt;img alt="A screenshot or visualization of raw performance counter data, presented as a long, detailed list of thousands of uninterpreted hardware metrics and event counts." src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgMp-hl6SOka0_pQyxzRjuppTBA7a-yeVzWHhq5-LYsNqUceNHqGiyuFEqLE7pySkkOqmjw3oCvm9k_6oHSzWj5Z-3lzdHrRYYK_RMlxJCLCxrYJ6Dz9jnCZ-MViImgDDYwDebUm_E8IKs2VEK2JEuSVrfm438iNLrGKSzL5eiMGeLUHcKNXcd5p3ogqnQ/s1600/advancedtpuopt--owa3m5zj8xo.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;&lt;strong&gt;Figure: Raw Performance Counter&lt;/strong&gt;&lt;br/&gt;
    Figure 3: Deriving actionable insights from raw performance counters.&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;
&lt;strong&gt;From Counters to Insights:&lt;/strong&gt; Instead of requiring developers to manually analyze a raw list of event counts, the Utilization Viewer automatically derives high-level metrics. For example, it can translate raw bus activity into a clear utilization percentage (e.g., displaying an average MXU bus utilization of 7.3%). This immediate clarity allows you to determine at a glance whether your model is compute-bound or memory-bound.&lt;/p&gt;

&lt;figure class="wide"&gt;
  &lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgjY4byi_ez4zCfm_bFJ5hh9Ekg7YMccxRZ7sD6s9t_V_ALSFYR-1QIoTYGPyJA3Q1MMbwZQ2msqx9kku7EtDQqg3cc-ATh-bZ7kfAasdrtWEyHVZNc1edLkOrK91J0vag3unw2hDwinev0kWRe_Hc6Cmp49c1OvsceF-FzSWWWbzKkQKlz0Q_QPbm5ElE/s1600/advancedtpuopt--0sw744wi697n.png"&gt;
  &lt;img alt="A visualization from the Utilization Viewer showing automatically derived high-level metrics, displaying clear utilization percentages for key hardware components like TensorCore (TC), SparseCore (SC), and High Bandwidth Memory (HBM), to help determine if a model is compute-bound or memory-bound." src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgjY4byi_ez4zCfm_bFJ5hh9Ekg7YMccxRZ7sD6s9t_V_ALSFYR-1QIoTYGPyJA3Q1MMbwZQ2msqx9kku7EtDQqg3cc-ATh-bZ7kfAasdrtWEyHVZNc1edLkOrK91J0vag3unw2hDwinev0kWRe_Hc6Cmp49c1OvsceF-FzSWWWbzKkQKlz0Q_QPbm5ElE/s1600/advancedtpuopt--0sw744wi697n.png"&gt;&lt;/a&gt;
  &lt;figcaption&gt;Figure 4: Perf Counters Visualization in Utilization Viewer&lt;/figcaption&gt;
&lt;/figure&gt;


&lt;h3&gt;Inspecting the Metal: Low-Level Operations (LLO) Bundles&lt;/h3&gt;
&lt;p&gt;For advanced users and kernel developers utilizing Pallas, we are now exposing &lt;strong&gt;Low-Level Operations (LLO) bundle data&lt;/strong&gt;. LLO bundles represent the specific machine instructions issued to the TPU's functional units during every clock cycle.&lt;/p&gt;&lt;p&gt;
This feature is critical for "Instruction Scheduling" verification—ensuring that the compiler is honoring your programming intentions and correctly re-ordering instructions to maximize hardware performance.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;New Visualizations via Trace View Integration:&lt;/strong&gt; You can now visualize LLO bundles directly within the trace viewer. Through dynamic instrumentation, XProf inserts traces exactly when a bundle executes. This provides exact execution times and block utilization metrics, rather than relying on static compiler estimates.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Why it matters:&lt;/strong&gt; Accessing this level of granularity enables hyper-specific bottleneck analysis. For instance, developers can now identify idle cycles within the Matrix Multiplication Unit (MXU) pipeline, making it easier to spot and resolve latency between vmatmul and vpop instructions.&lt;/p&gt;

&lt;h3&gt;Conclusion&lt;/h3&gt;
&lt;p&gt;Whether you are trying to capture a fleeting performance regression with Continuous Profiling, verifying kernel efficiency with LLO Bundles, or assessing overall hardware saturation with the Utilization Viewer, these new features bring internal-grade Google tooling directly to the open-source community. These tools are engineered to provide the absolute transparency required to optimize high-scale ML workloads.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Get started by checking out the updated resources:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;XProf GitHub Repository:&lt;/strong&gt;&lt;a href="https://github.com/openxla/XProf"&gt; https://github.com/openxla/XProf&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Official Documentation:&lt;/strong&gt;&lt;a href="https://openxla.org/xprof"&gt; https://openxla.org/xprof&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4492552289481409192" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4492552289481409192" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/advanced-tpu-optimization-with-xprof-continuous-profiling-utilization-insights-and-llo-bundles.html" rel="alternate" title="Advanced TPU optimization with XProf: Continuous profiling, utilization insights, and LLO bundles" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhs7m4o477ZQCJ0bvFJQ1vcwiRrwnWVJou08qXoJTGfrcZ5Asx-EEh-QiddwQX-44AWSK9_ghauVX9f1kaEF_MY7V3qht0qRkcKCDRi6r9YzHMl-kVTWAUCuaJZm44mH-gUeAQqTd9SQplucKvIAJGc2EkAiwHObN5crMYcEFba2-qoRhWft-IQPFithPQ/s72-c/xprof-hero-image-2436x1200_k5YQpVQ.max-2200x2200.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4047225196899517201</id><published>2026-03-16T14:00:00.000-07:00</published><updated>2026-03-16T14:00:00.108-07:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="google summer of code"/><category scheme="http://www.blogger.com/atom/ns#" term="gsoc"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="student programs"/><title type="text">Open Source, Open Doors, Apply Now for Google Summer of Code!</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Stephanie Taylor&lt;/author&gt;, &lt;author&gt;Mary Radomile&lt;/author&gt; &amp;amp; 
&lt;author&gt;Lucila Ortíz&lt;/author&gt;, GSoC Program Admins&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgolzJjEMdfqz_veFPVZJ9A6FvwYW47uiDFS2m15-wPbF3JM70Oov-K3-01JCg1LNOCLn4b25Apj02lze34OA0_Omxyl0yu5_dVjHPox6OIH9BvZH5ceCWSoWySfJciCZ8IAaK5g05gisVes74s7qE7KfR7MyVkofJtufpfngA6iq7lZNG0Be26rgli/w275-h275/OSS%20Blog%20GSoC%20Asset.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgolzJjEMdfqz_veFPVZJ9A6FvwYW47uiDFS2m15-wPbF3JM70Oov-K3-01JCg1LNOCLn4b25Apj02lze34OA0_Omxyl0yu5_dVjHPox6OIH9BvZH5ceCWSoWySfJciCZ8IAaK5g05gisVes74s7qE7KfR7MyVkofJtufpfngA6iq7lZNG0Be26rgli/w275-h275/OSS%20Blog%20GSoC%20Asset.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgolzJjEMdfqz_veFPVZJ9A6FvwYW47uiDFS2m15-wPbF3JM70Oov-K3-01JCg1LNOCLn4b25Apj02lze34OA0_Omxyl0yu5_dVjHPox6OIH9BvZH5ceCWSoWySfJciCZ8IAaK5g05gisVes74s7qE7KfR7MyVkofJtufpfngA6iq7lZNG0Be26rgli/w275-h275/OSS%20Blog%20GSoC%20Asset.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgolzJjEMdfqz_veFPVZJ9A6FvwYW47uiDFS2m15-wPbF3JM70Oov-K3-01JCg1LNOCLn4b25Apj02lze34OA0_Omxyl0yu5_dVjHPox6OIH9BvZH5ceCWSoWySfJciCZ8IAaK5g05gisVes74s7qE7KfR7MyVkofJtufpfngA6iq7lZNG0Be26rgli/w275-h275/OSS%20Blog%20GSoC%20Asset.png"/&gt;&lt;/a&gt;

&lt;p&gt;Join &lt;a href="https://g.co/gsoc"&gt;Google Summer of Code (GSoC)&lt;/a&gt; and start contributing to the world of open source development! Applications for GSoC are open from now - March 31, 2026 at 18:00 UTC.&lt;/p&gt;
&lt;p&gt;Google Summer of Code is celebrating its 22nd year in 2026!  GSoC started back in 2005 and has brought over 22,000 new contributors from 123 countries into the open source community. This is an exciting opportunity for students and beginners to open source (18+) to gain real-world experience during the summer. You will spend 12+ weeks coding, learning about open source development, and earn a stipend under the guidance of experienced mentors.&lt;/p&gt;

&lt;h2&gt;Apply and get started!&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;First things first. Read the &lt;em&gt;&lt;a href="https://google.github.io/gsocguides/student/"&gt;Contributor Guide&lt;/a&gt;&lt;/em&gt; and &lt;a href="https://developers.google.com/open-source/gsoc/help/student-advice"&gt;Advice for people applying for GSoC&lt;/a&gt; for application basics.&lt;/li&gt;
&lt;li&gt;Elevate your proposal! Review the &lt;a href="https://google.github.io/gsocguides/student/writing-a-proposal"&gt;Writing a proposal&lt;/a&gt; doc written by former contributors and the &lt;a href="https://developers.google.com/open-source/gsoc/resources/ai_guidance"&gt;Guidance for GSoC Contributors using AI tooling in GSoC 2026&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;Read the &lt;a href="https://summerofcode.withgoogle.com/rules"&gt;Program Rules&lt;/a&gt;, &lt;a href="https://developers.google.com/open-source/gsoc/faq"&gt;FAQ&lt;/a&gt;, and join us in our &lt;a href="http://discord.gg/google-dev-community"&gt;Discord Channel&lt;/a&gt; to connect with the GSoC community.&lt;/li&gt;
&lt;li&gt;Explore the &lt;a href="https://summerofcode.withgoogle.com/programs/2026/organizations"&gt;184 mentoring organizations&lt;/a&gt;, find a couple that align with your interests/skills &lt;strong&gt;and reach out immediately&lt;/strong&gt; by using their preferred contact methods listed on the GSoC site. Do not email mentors directly unless explicitly told to do so in their instructions.&lt;/li&gt;
&lt;li&gt;Watch our &lt;a href="https://www.youtube.com/watch?v=Wxjxwx7mqaI&amp;list=PLOU2XLYxmsIL7-SZlT0UHBWEG3DBwbaoA&amp;index=5"&gt;Intro to GSoC video&lt;/a&gt;, as well as the &lt;a href="https://www.youtube.com/playlist?list=PLxNYxgaZ8RscKC8NFOyQK2YdMCrM9Gb5A"&gt;GSoC Org Highlight videos&lt;/a&gt; and &lt;a href="https://www.youtube.com/playlist?list=PLxNYxgaZ8Rsdbe90rZ-FHMO4yqR1nOA66"&gt;Community Talks Series&lt;/a&gt; to get inspired about projects that contributors have worked on in the past.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Please remember that mentors are volunteers and they are being inundated with hundreds of requests from interested participants. It may take time for them to respond to you. Follow their Contributor Guidance instructions exactly. Do not just start submitting PRs without reading their guidance section first. &lt;/p&gt;
&lt;p&gt;Complete your registration and submit your project proposals on the &lt;a href="https://summerofcode.withgoogle.com/"&gt;GSoC site&lt;/a&gt; before the deadline on &lt;strong&gt;Tuesday, March 31, 2026 at 18:00 UTC.&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;We wish all our applicants the best of luck!&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4047225196899517201" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4047225196899517201" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/open-source-open-doors-apply-now-for-google-summer-of-code.html" rel="alternate" title="Open Source, Open Doors, Apply Now for Google Summer of Code!" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgolzJjEMdfqz_veFPVZJ9A6FvwYW47uiDFS2m15-wPbF3JM70Oov-K3-01JCg1LNOCLn4b25Apj02lze34OA0_Omxyl0yu5_dVjHPox6OIH9BvZH5ceCWSoWySfJciCZ8IAaK5g05gisVes74s7qE7KfR7MyVkofJtufpfngA6iq7lZNG0Be26rgli/s72-w275-h275-c/OSS%20Blog%20GSoC%20Asset.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-3802112918831729458</id><published>2026-03-04T11:30:00.000-08:00</published><updated>2026-03-04T11:30:00.115-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="hardware security"/><category scheme="http://www.blogger.com/atom/ns#" term="open silicon"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="opentitan"/><category scheme="http://www.blogger.com/atom/ns#" term="silicon"/><title type="text">OpenTitan shipping in production</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Cyrus Stoller&lt;/author&gt; &amp;amp; &lt;author&gt;Miguel Osorio&lt;/author&gt;, 
OpenTitan&lt;/p&gt;

&lt;meta content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjwJr3EDJXlAiISJQCv2DMWsD6dtomk3NMxjfNyOeoSTkiU1lWskg11axvCknOUMFK-TYwXuTfm91DsIEgqbAE6DSTX8cA_Z7oUhUhe1cc0LBRvlRNjr4gzpHK2OWtmeSHf7UFSHcojyma5MdOqSAQJQZv9hqcZFHjRLsdgSmSr9lRjow96wBAh7_uRsYQ/s1600/6b6cUfEMUcXHJLu.png" name="twitter:image"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjwJr3EDJXlAiISJQCv2DMWsD6dtomk3NMxjfNyOeoSTkiU1lWskg11axvCknOUMFK-TYwXuTfm91DsIEgqbAE6DSTX8cA_Z7oUhUhe1cc0LBRvlRNjr4gzpHK2OWtmeSHf7UFSHcojyma5MdOqSAQJQZv9hqcZFHjRLsdgSmSr9lRjow96wBAh7_uRsYQ/s1600/6b6cUfEMUcXHJLu.png"&gt;

&lt;p&gt;Last year, we &lt;a href="https://opensource.googleblog.com/2025/02/fabrication-begins-for-production-opentitan-silicon.html"&gt;shared&lt;/a&gt; the exciting news that fabrication of production OpenTitan silicon had begun. Today, we're proud to announce that OpenTitan® is now shipping in commercially available Chromebooks.&lt;/p&gt;

&lt;p&gt;The first OpenTitan part is being produced by &lt;a href="https://www.nuvoton.com"&gt;Nuvoton&lt;/a&gt;, a leader in silicon security.&lt;/p&gt;
&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjwJr3EDJXlAiISJQCv2DMWsD6dtomk3NMxjfNyOeoSTkiU1lWskg11axvCknOUMFK-TYwXuTfm91DsIEgqbAE6DSTX8cA_Z7oUhUhe1cc0LBRvlRNjr4gzpHK2OWtmeSHf7UFSHcojyma5MdOqSAQJQZv9hqcZFHjRLsdgSmSr9lRjow96wBAh7_uRsYQ/s1600/6b6cUfEMUcXHJLu.png" class="header-image"&gt;&lt;img alt="a close up of a blue circuit board focused on an IC" border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjwJr3EDJXlAiISJQCv2DMWsD6dtomk3NMxjfNyOeoSTkiU1lWskg11axvCknOUMFK-TYwXuTfm91DsIEgqbAE6DSTX8cA_Z7oUhUhe1cc0LBRvlRNjr4gzpHK2OWtmeSHf7UFSHcojyma5MdOqSAQJQZv9hqcZFHjRLsdgSmSr9lRjow96wBAh7_uRsYQ/s1600/6b6cUfEMUcXHJLu.png"/&gt;&lt;/a&gt;

&lt;h3&gt;What is OpenTitan?&lt;/h3&gt;

&lt;p&gt;Over the past seven years, Google has worked with the open source communities to build OpenTitan, the first open source silicon Root of Trust (RoT). The RoT is the foundation upon which all other security properties of a device are derived, and anchoring this in silicon provides the strongest possible security guarantees that the code being executed is authorized and verified.&lt;/p&gt;

&lt;p&gt;The OpenTitan project and its community are actively supported and maintained by &lt;a href="https://lowrisc.org/"&gt;lowRISC C.I.C.&lt;/a&gt;, an independent non-profit.&lt;/p&gt;

&lt;p&gt;OpenTitan provides the community with a high-quality, low-cost, commoditized hardware RoT that can be used across the Google ecosystem and also facilitates the broader adoption of Google-endorsed security features across the industry. Because OpenTitan is open source, you can choose to buy it from a commercial partner or manufacture it yourself based on your use case. In any of these scenarios, you can review and test OpenTitan's capabilities with a degree of transparency never afforded before in security silicon. This allows optimization for the use case at hand, whether it is having multiple reliable suppliers or ensuring the complete end-to-end control of the manufacturing process. &lt;/p&gt;

&lt;p&gt;With OpenTitan, we are pushing the boundaries of what can be expected from a silicon RoT. For example, OpenTitan is the first commercially available open source RoT to support post-quantum cryptography (PQC) secure boot based on SLH-DSA. This helps future proof the security posture of these devices against potential adversaries with the capability to break classical public-key cryptography (e.g., RSA) via quantum computing. In addition, by applying commercial-grade design verification (DV) and top-level testing to an open source design, we have pushed for the highest quality while still allowing these chips to be transparent and independently verifiable. An added advantage of this approach is that we expect the high quality IP developed for OpenTitan to be re-usable in other projects going forward.&lt;/p&gt;

&lt;p&gt;In addition to delivering this first instance of OpenTitan silicon as a product, we are proud of the processes that we have collaboratively developed along the way. In particular, both individual IP blocks and the top-level Earl Grey design have functional and code coverage above 90%—to the highest industry standards—with 40k+ tests running nightly. Regressions are caught and resolved quickly, ensuring design quality is maintained over the long term. &lt;a href="https://opentitan.org/book/doc/security/specs/ownership_transfer/"&gt;Ownership transfer&lt;/a&gt; gives confidence that the silicon is working for you and helps to move away from co-signing so that you are in full control of your own update schedule. And since any IP is of little value without the ability to navigate and deploy it, we've prioritized thorough and accurate &lt;a href="https://opentitan.org/documentation/index.html"&gt;documentation&lt;/a&gt;, together with onboarding materials to streamline welcoming new developers to the project.&lt;/p&gt;

&lt;p&gt;With lowRISC CIC and in collaboration with our OpenTitan partners, we pioneered open source security silicon development. While challenges are expected when doing something for the first time, the benefits of working in open source have been clear: fast and efficient cross-organizational collaboration, retention of expertise regardless of employer, shared maintenance burdens, and high levels of academic research engagement.&lt;/p&gt;
&lt;h3&gt;What's next?&lt;/h3&gt;

&lt;p&gt;Firstly, bringup to deploy OpenTitan in Google's datacenters is underway and expected to land later this year.&lt;/p&gt;

&lt;p&gt;Secondly, while we're thrilled about the advantages that this first generation OpenTitan part brings to Google's security posture, we have more on our roadmap, and have already begun work on a second generation part that will support lattice-based PQC (e.g., ML-DSA and ML-KEM) for secure boot and attestation. Stay tuned – more info on this coming soon! &lt;/p&gt;


&lt;p&gt;Thirdly, OpenTitan started with the security use case because it is the hardest to get right. Having successfully demonstrated that we are able to deliver secure open silicon, we're confident that the same methodology can be used to develop additional open source designs targeting a wide range of use cases (whether the focus is on security, safety, or elsewhere). We're excited to see re-use of IP that was developed for OpenTitan being adapted for &lt;a href="https://chipsalliance.github.io/caliptra-web/"&gt;Caliptra&lt;/a&gt;, a RoT block that can be integrated into datacenter-class SoCs. &lt;/p&gt;
&lt;p&gt;&lt;strong&gt;Getting Involved&lt;/strong&gt;&lt;/p&gt;

&lt;p&gt;OpenTitan shipping in production is a defining milestone for us and all contributors to the project. We're excited to see more open source silicon developed for commercial use cases in the future, and to see this ecosystem grow with lowRISC's introduction of &lt;a href="https://lowrisc.org/news/opentitan-introduces-new-membership-tiers-and-deliverables-to-accelerate-deployment/"&gt;new membership tiers&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;As the following metrics show (baselined from the project's public launch in 2019), the OpenTitan community is rapidly growing:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Over ten times the number of commits at launch: from 2,500 to over 29,200.&lt;/li&gt;
&lt;li&gt;275+ contributors to the code base&lt;/li&gt;
&lt;li&gt;3.2k GitHub stars&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;If you are interested in learning more, contributing to &lt;a href="https://opentitan.org/"&gt;OpenTitan&lt;/a&gt;, or using OpenTitan IP in one of your projects, visit the open source &lt;a href="https://github.com/lowRISC/opentitan"&gt;GitHub repository&lt;/a&gt; or &lt;a href="mailto:get-involved@opentitan.org"&gt;reach out to the OpenTitan team&lt;/a&gt;.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3802112918831729458" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3802112918831729458" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/opentitan-shipping-in-production.html" rel="alternate" title="OpenTitan shipping in production" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjwJr3EDJXlAiISJQCv2DMWsD6dtomk3NMxjfNyOeoSTkiU1lWskg11axvCknOUMFK-TYwXuTfm91DsIEgqbAE6DSTX8cA_Z7oUhUhe1cc0LBRvlRNjr4gzpHK2OWtmeSHf7UFSHcojyma5MdOqSAQJQZv9hqcZFHjRLsdgSmSr9lRjow96wBAh7_uRsYQ/s72-c/6b6cUfEMUcXHJLu.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-8531855932270531624</id><published>2026-03-03T11:30:00.000-08:00</published><updated>2026-03-03T11:30:00.113-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="CEL"/><category scheme="http://www.blogger.com/atom/ns#" term="Common Expression Language"/><category scheme="http://www.blogger.com/atom/ns#" term="Python"/><title type="text">Announcing CEL-expr-python: the Common Expression Language in Python, now open 
source</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Olena Huang&lt;/author&gt;, CEL (Common Expression Language) team&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhuKigzFt1kkyx3eqlthONlcc6yKytKeoDmT2ADbA8GpreYwX_3zW2faeNB1D7F-NImDReaJs0TKdTB-gJjzKxhKpUaEXJef-PU2gIAJEfSrmvoUrLiBcyfqcxBrfYkP2TKUSIsY9SnuFVIAWVr_zFoJhyphenhyphen-pg_7XTlpt1dvs_yCce1obzhNJJkIC2umV8A/s1600/Cel_FullColor_RGB_notype.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhuKigzFt1kkyx3eqlthONlcc6yKytKeoDmT2ADbA8GpreYwX_3zW2faeNB1D7F-NImDReaJs0TKdTB-gJjzKxhKpUaEXJef-PU2gIAJEfSrmvoUrLiBcyfqcxBrfYkP2TKUSIsY9SnuFVIAWVr_zFoJhyphenhyphen-pg_7XTlpt1dvs_yCce1obzhNJJkIC2umV8A/s1600/Cel_FullColor_RGB_notype.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhuKigzFt1kkyx3eqlthONlcc6yKytKeoDmT2ADbA8GpreYwX_3zW2faeNB1D7F-NImDReaJs0TKdTB-gJjzKxhKpUaEXJef-PU2gIAJEfSrmvoUrLiBcyfqcxBrfYkP2TKUSIsY9SnuFVIAWVr_zFoJhyphenhyphen-pg_7XTlpt1dvs_yCce1obzhNJJkIC2umV8A/s1600/Cel_FullColor_RGB_notype.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhuKigzFt1kkyx3eqlthONlcc6yKytKeoDmT2ADbA8GpreYwX_3zW2faeNB1D7F-NImDReaJs0TKdTB-gJjzKxhKpUaEXJef-PU2gIAJEfSrmvoUrLiBcyfqcxBrfYkP2TKUSIsY9SnuFVIAWVr_zFoJhyphenhyphen-pg_7XTlpt1dvs_yCce1obzhNJJkIC2umV8A/s1600/Cel_FullColor_RGB_notype.png"/&gt;&lt;/a&gt;

&lt;p&gt;We're excited to announce the open source release of CEL-expr-python, a Python implementation of the Common Expression Language (CEL)! CEL (&lt;a href="https://cel.dev/"&gt;cel.dev&lt;/a&gt;) is a powerful, non-Turing complete expression language designed for simplicity, speed, safety, and portability. CEL is designed to be embedded in an application, and you can use CEL to make decisions, validate data, or apply rules based on the information your application has.&lt;/p&gt;

&lt;h2&gt;What is CEL-expr-python?&lt;/h2&gt;
&lt;p&gt;CEL-expr-python provides a native Python API for compiling and evaluating CEL expressions that's maintained by the CEL team. We'd like to acknowledge the fantastic work already done by the open source communities around support for CEL in Python, and look forward to your contributions to help us further enrich the CEL ecosystem.&lt;/p&gt;&lt;p&gt;
The CEL team has chosen to develop CEL-expr-python by wrapping our official C++ implementation to ensure maximum consistency with CEL semantics while enabling Python users to extend and enrich the experience on top of this production-ready core in Python directly. Additionally, new features and optimizations implemented in CEL C++ will automatically and immediately become available in CEL-expr-python.&lt;/p&gt;

&lt;h2&gt;Who is it for?&lt;/h2&gt;
&lt;p&gt;If you're working on a Python project that needs to:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Evaluate expressions defined dynamically (e.g., loaded from a database, configuration, or user input).&lt;/li&gt;
&lt;li&gt;Implement and enforce policies in a clear, concise, and secure manner.&lt;/li&gt;
&lt;li&gt;Validate data against a set of rules.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;...then CEL-expr-python is for you!&lt;/p&gt;

&lt;h2&gt;Why use CEL-expr-python?&lt;/h2&gt;
&lt;p&gt;CEL has become a prevalent technology for applications like policy enforcement, data validation, and dynamic configuration. CEL-expr-python allows Python developers to leverage the same benefits, including:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Safety:&lt;/strong&gt; CEL expressions are side-effect free and guaranteed to terminate.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Speed:&lt;/strong&gt; Designed for efficient evaluation.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Portability:&lt;/strong&gt; Expressions are language-agnostic.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Familiarity:&lt;/strong&gt; Builds upon established CEL concepts.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;With CEL-expr-python, you can now seamlessly integrate this technology within your Python stack.&lt;/p&gt;

&lt;h2&gt;Get Started!&lt;/h2&gt;
&lt;p&gt;Check out the CEL-expr-python Repository here: &lt;a href="https://github.com/cel-expr/cel-python"&gt;https://github.com/cel-expr/cel-python&lt;/a&gt;&lt;/p&gt;&lt;p&gt;
We're thrilled to bring CEL-expr-python to the open source communities and can't wait to see what you build with it!&lt;/p&gt;&lt;p&gt;
Here's a code snippet demonstrating how to initialize CEL-expr-python and evaluate an expression.&lt;/p&gt;
&lt;pre&gt;&lt;code class="codebox python"&gt;from cel_expr_python import cel

# Define variables
cel_env = cel.NewEnv(variables={"who": cel.Type.STRING})
expr = cel_env.compile("'Hello, ' + who + '!'")

# Evaluate and print the compiled expression
print(expr.eval(data={"who": "World"}))  # Hello, World!
&lt;/code&gt;&lt;/pre&gt;
  
&lt;p&gt;For a more in-depth tutorial, check out our codelab here: &lt;a href="https://github.com/cel-expr/cel-python/blob/main/codelab/index.lab.md"&gt;https://github.com/cel-expr/cel-python/blob/main/codelab/index.lab.md&lt;/a&gt;&lt;/p&gt;
&lt;p&gt;The CEL-expr-python repository will be initially available as &lt;strong&gt;read-only&lt;/strong&gt;. We encourage you to try it out in your projects and share your experiences. Feel free to leave feedback in our &lt;a href="https://github.com/cel-expr/cel-python/issues"&gt;GitHub issue queue&lt;/a&gt;; we are eager to hear from you and will work promptly to address any issues or suggestions.&lt;/p&gt;
&lt;p&gt;While we are not accepting external contributions at this moment, we are committed to building a community around CEL-expr-python and plan to open up for contributions in the future. Stay tuned for updates.&lt;/p&gt;
</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8531855932270531624" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8531855932270531624" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/03/announcing-cel-expr-python-the-common-expression-language-in-python-now-open-source.html" rel="alternate" title="Announcing CEL-expr-python: the Common Expression Language in Python, now open source" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhuKigzFt1kkyx3eqlthONlcc6yKytKeoDmT2ADbA8GpreYwX_3zW2faeNB1D7F-NImDReaJs0TKdTB-gJjzKxhKpUaEXJef-PU2gIAJEfSrmvoUrLiBcyfqcxBrfYkP2TKUSIsY9SnuFVIAWVr_zFoJhyphenhyphen-pg_7XTlpt1dvs_yCce1obzhNJJkIC2umV8A/s72-c/Cel_FullColor_RGB_notype.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-8966730896428037758</id><published>2026-02-20T16:00:00.000-08:00</published><updated>2026-02-20T16:02:20.823-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="events"/><category scheme="http://www.blogger.com/atom/ns#" term="news"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="twios"/><title type="text">This Week in Open Source #15</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Daryl Ducharme&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"/&gt;&lt;/a&gt;

&lt;h2&gt;This Week in Open Source for February 20th, 2026&lt;/h2&gt;
&lt;p&gt;&lt;em&gt;A look around the world of open source&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;We're preparing for a busy conference season, with events like SCALE 23x  and KubeCon + CloudNativeCon Europe on the horizon. A core part of our mission is "learning and sharing what we learn" so that our communities can continue to thrive together. Conferences are a great place to fulfill that mission.&lt;/p&gt;

&lt;p&gt;This week, we're highlighting a few "Open Source Reads" that tackle some of the biggest questions facing our ecosystem today—from the complex ethics of AI-generated content to the global impact of open AI models. We hope these links provide valuable context as we work together to sustain the critical infrastructure we all rely on.&lt;/p&gt;

&lt;h3&gt;Upcoming Events&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;February 24 - 25: &lt;a href="https://events.linuxfoundation.org/lf-member-summit/"&gt;The Linux Foundation Member Summit&lt;/a&gt; is happening in Napa, California. It is the annual gathering for &lt;a href="https://www.linuxfoundation.org/"&gt;Linux Foundation&lt;/a&gt; members that fosters collaboration, innovation, and partnerships among the leading projects and organizations working to drive digital transformation with open source technologies.&lt;/li&gt;
&lt;li&gt;March 5 - 8: &lt;a href="https://www.socallinuxexpo.org/scale/23x"&gt;SCALE 23x&lt;/a&gt; is happening in Pasadena, California. It is North America's largest community-run open source conference and includes four days of sessions, workshops, and community activities focused on open source, security, DevOps, cloud native, and more.&lt;/li&gt;
&lt;li&gt;March 9 - 10: &lt;a href="https://eventyay.com/e/88882f3e"&gt;FOSSASIA Summit 2026&lt;/a&gt; is happening in Bangkok, Thailand. It will be a two-day hybrid event that showcases the latest in open technologies, fostering collaboration across enterprises, developers, educators, and communities.&lt;/li&gt;
&lt;li&gt;March 16 - 17: &lt;a href="https://26.foss-backstage.de/"&gt;FOSS Backstage&lt;/a&gt; is happening in Berlin, Germany. This conference brings together the brightest minds in the industry to discuss and explore all about FOSS community, management and compliance.&lt;/li&gt;
&lt;li&gt;March 22: &lt;a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/features-add-ons/maintainer-summit/"&gt;Maintainer Summit EU&lt;/a&gt; is happening just before CloudNativeCon in Amsterdam, The Netherlands. This is an exclusive event for the people behind our projects to gather face-to-face, collaborate, and celebrate cloud first projects.&lt;/li&gt;
&lt;li&gt;March 23 - 26: &lt;a href="https://events.linuxfoundation.org/kubecon-cloudnativecon-europe/"&gt;Kubecon + CloudNativeCon Europe&lt;/a&gt; is happening in Amsterdam, The Netherlands. This is the flagship conference for the &lt;a href="https://www.cncf.io/"&gt;Cloud Native Computing Foundation (CNCF)&lt;/a&gt; and brings together adopters and technologists from leading open source and cloud first communities.&lt;/li&gt;
&lt;li&gt;March 26 - 29: &lt;a href="https://atmosphereconf.org/"&gt;ATmosphereConf&lt;/a&gt; is happening in Vancouver, British Columbia. This conference from the &lt;a href="https://atprotocol.dev/"&gt;AT Protocol Community&lt;/a&gt; opens with two extra days (March 26th &amp; 27th) of smaller theaters and breakout rooms—hosting everything from extended events to developer training to building together—ahead of the two-day main conference.&lt;/li&gt;
&lt;li&gt;April 7 - 8: &lt;a href="https://events.linuxfoundation.org/pytorch-conference-europe/"&gt;PyTorch Conference EU&lt;/a&gt; is happening in Paris, France. Hosted by the &lt;a href="https://pytorch.org/foundation/"&gt;PyTorch Foundation&lt;/a&gt;, this conference gathers top-tier AI pioneers, researchers, and developers to explore the future of AI.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;Open Source Reads and Links&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;[Blog] &lt;a href="https://theshamblog.com/an-ai-agent-published-a-hit-piece-on-me/"&gt;An AI Agent wrote a hit piece on me&lt;/a&gt; - An AI agent wrote a harmful article about a maintainer after he rejected its code for a popular Python library. This shows a new risk where AI can attack people to get what it wants. We must be careful as AI misbehavior can hurt reputations and trust in software. This was followed by a &lt;a href="https://theshamblog.com/an-ai-agent-published-a-hit-piece-on-me-part-2/"&gt;part 2 after more things happened&lt;/a&gt;.&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a href="https://angiejones.tech/stop-closing-the-door-fix-the-house/"&gt;Stop closing the door; fix the house&lt;/a&gt; - A different take on the crossover between AI and open source. Instead of closing contributions due to poor AI generated code, maintainers should guide contributors and AI tools with clear rules and automation. This helps keep quality high and keeps the community open.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a href="https://www.helpnetsecurity.com/2026/02/18/open-source-adoption-patching-challenges/"&gt;Everyone uses open source, but patching moves too slowly&lt;/a&gt; - "Maintenance is the highest form of creation." Open source requires maintenance, especially when 60% of security incidents hit unpatched code. How can we work together to keep our communities healthy and secure?&lt;/li&gt;
&lt;li&gt;[Paper] &lt;a href="https://www.nature.com/articles/s43246-026-01105-0"&gt;AI-powered open-source infrastructure for accelerating materials discovery and advanced manufacturing&lt;/a&gt; - Gen AI isn't the only type of AI in the game. This paper explains how AI and open-source tools help speed up the discovery of new materials. Through using data, simulations, and machine learning together we can build efficient and sustainable platforms. &lt;/li&gt;
&lt;li&gt;[Blog] &lt;a href="https://www.jeffgeerling.com/blog/2026/ai-is-destroying-open-source/"&gt;AI is destroying open source and it's not even good yet&lt;/a&gt; - Here's another post about how maintainers face more work and frustration because AI often makes mistakes and doesn't help fix problems. This growing issue could get worse as AI becomes more widely used without careful oversight. So, what types of conversations should we be having to create that oversight?&lt;/li&gt;
&lt;li&gt;[Article] &lt;a href="https://www.technologyreview.com/2026/02/12/1132811/whats-next-for-chinese-open-source-ai/"&gt;What's next for Chinese open source AI?&lt;/a&gt; - Chinese companies are creating and sharing powerful &lt;a href="https://opensource.org/ai/open-source-ai-definition"&gt;open AI models&lt;/a&gt; that anyone can use and modify. Because these models are cheaper and widely adopted globally, they challenge the western AI models. This open approach is changing how AI innovation happens and who controls its future. &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Which of these stories will you be chatting about at your next meetup or conference? Let us know!  Share with us on our &lt;a href="https://x.com/GoogleOSS"&gt;@GoogleOSS&lt;/a&gt; X account or our &lt;a href="https://bsky.app/profile/opensource.google"&gt;@opensource.google&lt;/a&gt; Bluesky account.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8966730896428037758" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/8966730896428037758" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/02/this-week-in-open-source-15.html" rel="alternate" title="This Week in Open Source #15" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s72-c/header1.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4728287161714386776</id><published>2026-02-19T13:00:00.000-08:00</published><updated>2026-02-19T13:00:00.110-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="google summer of code"/><category scheme="http://www.blogger.com/atom/ns#" term="gsoc"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="student programs"/><title type="text">Introducing the 185 Organizations for GSoC 2026</title><content type="html">&lt;p 
class="byline"&gt;by &lt;author&gt;Stephanie Taylor&lt;/author&gt;, &lt;author&gt;Mary Radomile&lt;/author&gt; &amp;amp; &lt;author&gt;Lucila Ortíz&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9aNPkT4INLLL-8j-vMp9T6p91PpfT0g7y-IKFkEKOzqQJpFWjrAEe3tGLZViJ6r0VJPFSu9u3R7knzutkRonz6Uj9rcVb7gHKeQymYiHO9wMzjUndSr1JCz8fwCik0sstE0_hIv_6WWw25XLmjuWDK6QqcteSlMbrWMLMtT82JN13-a6GT3jTMUiRiJw/s1600/gsoc2025statis--ozwpeb9glpr.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9aNPkT4INLLL-8j-vMp9T6p91PpfT0g7y-IKFkEKOzqQJpFWjrAEe3tGLZViJ6r0VJPFSu9u3R7knzutkRonz6Uj9rcVb7gHKeQymYiHO9wMzjUndSr1JCz8fwCik0sstE0_hIv_6WWw25XLmjuWDK6QqcteSlMbrWMLMtT82JN13-a6GT3jTMUiRiJw/s1600/gsoc2025statis--ozwpeb9glpr.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9aNPkT4INLLL-8j-vMp9T6p91PpfT0g7y-IKFkEKOzqQJpFWjrAEe3tGLZViJ6r0VJPFSu9u3R7knzutkRonz6Uj9rcVb7gHKeQymYiHO9wMzjUndSr1JCz8fwCik0sstE0_hIv_6WWw25XLmjuWDK6QqcteSlMbrWMLMtT82JN13-a6GT3jTMUiRiJw/s1600/gsoc2025statis--ozwpeb9glpr.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9aNPkT4INLLL-8j-vMp9T6p91PpfT0g7y-IKFkEKOzqQJpFWjrAEe3tGLZViJ6r0VJPFSu9u3R7knzutkRonz6Uj9rcVb7gHKeQymYiHO9wMzjUndSr1JCz8fwCik0sstE0_hIv_6WWw25XLmjuWDK6QqcteSlMbrWMLMtT82JN13-a6GT3jTMUiRiJw/s1600/gsoc2025statis--ozwpeb9glpr.png"/&gt;&lt;/a&gt;

&lt;p&gt;The complete list of Google Summer of Code (GSoC) Mentoring Organizations is &lt;a href="https://summerofcode.withgoogle.com/programs/2026/organizations"&gt;now available&lt;/a&gt;! 2026 brings us&lt;strong&gt; 185 &lt;/strong&gt;open source communities who are eager to mentor a new group of open source contributors. &lt;strong&gt;Now&lt;/strong&gt; &lt;strong&gt;is the time&lt;/strong&gt; for prospective contributors to start looking for a community to participate with. Visit the full list of 2026 organizations to learn about each community, their project ideas, and read the specific contributor guidance to apply.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Who can apply as a GSoC contributor?&lt;/strong&gt;&lt;br&gt;
If you are 18 or older and a student or just starting out in open source (less than 2 years of open source experience), GSoC is for you! &lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Why participate in Google Summer of Code?&lt;/strong&gt;&lt;br&gt;
GSoC offers a unique opportunity to gain real-world experience and build new skills through open source contributions while being mentored by experienced maintainers and developers. &lt;/p&gt;

&lt;p&gt;The application period starts on &lt;strong&gt;March 16th at 1800 UTC. &lt;/strong&gt;Here are some things you can do to get started:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Visit our&lt;a href="https://summerofcode.withgoogle.com/"&gt; GSoC website&lt;/a&gt; and read the &lt;a href="https://developers.google.com/open-source/gsoc/faq"&gt;FAQ&lt;/a&gt;, the &lt;a href="https://developers.google.com/open-source/gsoc/resources/manual"&gt;Contributor Guide&lt;/a&gt;, &lt;a href="https://developers.google.com/open-source/gsoc/help/student-advice"&gt;Advice for people applying for GSoC&lt;/a&gt;,  &lt;a href="https://developers.google.com/open-source/gsoc/rules"&gt;Program Rules&lt;/a&gt; and the &lt;a href="https://developers.google.com/open-source/gsoc/videos#potential_gsoc_contributors"&gt;videos for Potential GSoC Contributors&lt;/a&gt; to learn the basics about GSoC. &lt;/li&gt;
&lt;li&gt;Browse the Organization pages—use filters (languages, categories) to narrow down your choices.&lt;/li&gt;
&lt;li&gt;Look at the Project Ideas for the Orgs you like. Pick one that excites you and reach out to them ASAP to talk about it.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Heads up! &lt;/strong&gt;Each Org has its own application steps and maybe some required pre-tasks. Check their &lt;em&gt;Contributor guidance link&lt;/em&gt; in their profile. &lt;strong&gt;Chatting and contributing early is HUGE for getting accepted!&lt;/strong&gt;&lt;/li&gt;
&lt;li&gt;Write a proposal based on the organization guidelines,&lt;strong&gt; also remember that some orgs do not allow the use of AI, be aware of their guidelines.&lt;/strong&gt;&lt;/li&gt;
&lt;li&gt;We strongly recommend submitting your proposal on the &lt;a href="http://g.co/gsoc"&gt;GSoC site&lt;/a&gt; at least 3 days before the hard deadline March 31, 1800 UTC.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;&lt;strong&gt;Mark your calendars with the upcoming important GSoC 2026 dates!&lt;/strong&gt;&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;Contributor application period: &lt;strong&gt;March 16 - 31 1800 UTC&lt;/strong&gt;&lt;/li&gt;
&lt;li&gt;GSoC 2026 Accepted Contributors announced: April 30 1800 UTC &lt;/li&gt;
&lt;li&gt;Coding Starts: May 25  &lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;Thank you for being part of this wonderful community and we wish the best of luck to all the 2026 applicants! &lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4728287161714386776" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4728287161714386776" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/02/introducing-the-185-organizations-for-gsoc-2026.html" rel="alternate" title="Introducing the 185 Organizations for GSoC 2026" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj9aNPkT4INLLL-8j-vMp9T6p91PpfT0g7y-IKFkEKOzqQJpFWjrAEe3tGLZViJ6r0VJPFSu9u3R7knzutkRonz6Uj9rcVb7gHKeQymYiHO9wMzjUndSr1JCz8fwCik0sstE0_hIv_6WWw25XLmjuWDK6QqcteSlMbrWMLMtT82JN13-a6GT3jTMUiRiJw/s72-c/gsoc2025statis--ozwpeb9glpr.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4191970675388406760</id><published>2026-02-12T11:30:00.000-08:00</published><updated>2026-02-12T11:30:00.112-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Gateway API"/><category scheme="http://www.blogger.com/atom/ns#" term="Ingress"/><category scheme="http://www.blogger.com/atom/ns#" term="Kubernetes"/><category scheme="http://www.blogger.com/atom/ns#" term="networking"/><category scheme="http://www.blogger.com/atom/ns#" term="NGINX"/><title type="text">The End of an Era: Transitioning Away from Ingress NGINX</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Kaslin Fields&lt;/author&gt;, Cloud DevRel &amp;amp; 
&lt;author&gt;Rob Scott&lt;/author&gt;, GCP Networking&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizpQgX40H9SS7xp8k0ezxjdzXE7z7VNetnVAY8OYHU3Mwreh5n6Datm-yHHNkgAdzR2iv2Kdq_4aHJDp1DNA2EVWG2QViYte0hu7yMsXKHaxXvbI0jD2OFRS3G3QPf_PUcoonske0CdJ0dOnFO1dEvtj_XZCotfLtId1gtlGD3eD92-hMmS9HI-D5YkAY/s1600/BlogHeroImageIngressNGINX.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizpQgX40H9SS7xp8k0ezxjdzXE7z7VNetnVAY8OYHU3Mwreh5n6Datm-yHHNkgAdzR2iv2Kdq_4aHJDp1DNA2EVWG2QViYte0hu7yMsXKHaxXvbI0jD2OFRS3G3QPf_PUcoonske0CdJ0dOnFO1dEvtj_XZCotfLtId1gtlGD3eD92-hMmS9HI-D5YkAY/s1600/BlogHeroImageIngressNGINX.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizpQgX40H9SS7xp8k0ezxjdzXE7z7VNetnVAY8OYHU3Mwreh5n6Datm-yHHNkgAdzR2iv2Kdq_4aHJDp1DNA2EVWG2QViYte0hu7yMsXKHaxXvbI0jD2OFRS3G3QPf_PUcoonske0CdJ0dOnFO1dEvtj_XZCotfLtId1gtlGD3eD92-hMmS9HI-D5YkAY/s1600/BlogHeroImageIngressNGINX.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizpQgX40H9SS7xp8k0ezxjdzXE7z7VNetnVAY8OYHU3Mwreh5n6Datm-yHHNkgAdzR2iv2Kdq_4aHJDp1DNA2EVWG2QViYte0hu7yMsXKHaxXvbI0jD2OFRS3G3QPf_PUcoonske0CdJ0dOnFO1dEvtj_XZCotfLtId1gtlGD3eD92-hMmS9HI-D5YkAY/s1600/BlogHeroImageIngressNGINX.png" alt="An AI generated image that depicts an old crumbling building labeled Ingress NGINX displays a banner saying Retiring March 2026. An equally crumbling road leads away from the building to a glowing, nice, new archway with the Kubernetes Logo at the top, with Gateway API across the top arch. Within the arch at the center of the light is another Kubernetes-inspired logo with the words Gateway API below it. Several light paths of varying colors, with labels like HTTP Route and TCP Route lead away from the archway, some of them off the side of the frame, and some of them to a set of servers. Some of the servers have clouds with up or down arrows above them, indicating cloud traffic moving in or out."/&gt;&lt;/a&gt;

&lt;p&gt;For many of us, the first time we successfully routed traffic into a Kubernetes cluster, we did it using &lt;strong&gt;Ingress NGINX&lt;/strong&gt;. It was the project that turned a complex networking API into something we could actually use.&lt;/p&gt;

&lt;p&gt;However, the &lt;a target="_blank" href="http://kubernetes.io/blog/2026/01/29/ingress-nginx-statement/"&gt;Kubernetes community recently announced&lt;/a&gt; that Ingress NGINX is officially entering retirement. Maintenance will cease in &lt;strong&gt;March 2026&lt;/strong&gt;.&lt;/p&gt;

&lt;p&gt;Here is what you need to know about why this is happening, what comes next, and why this "forced" migration is actually a great opportunity for your infrastructure.&lt;/p&gt;

&lt;h2&gt;Clarifying Terminology&lt;/h2&gt;

&lt;p&gt;First, there are some confusing, overlapping terms here. Let's clarify.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;&lt;a target="_blank" href="https://kubernetes.io/docs/concepts/services-networking/ingress/"&gt;Ingress API&lt;/a&gt;&lt;/strong&gt; - Kubernetes introduced the Ingress API as a Generally Available (GA) feature in 2020 with the release of Kubernetes version 1.19. This API is still available in Kubernetes with no immediate plans for deprecation or removal. However, it is "feature-frozen" meaning it is no longer being actively worked on or updated. The community has instead moved to Gateway API, which we'll talk more about later in this post.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;&lt;a target="_blank" href="https://github.com/kubernetes/ingress-nginx/"&gt;Ingress NGINX&lt;/a&gt; &lt;/strong&gt;- "Ingress" is an API object available by default in Kubernetes, as described above. You can define your Ingress needs as an Ingress resource. But that resource won't actually do anything without a controller. Ingress NGINX is a very popular controller that uses NGINX as a reverse proxy and load balancer. This will no longer be maintained as of March 2026.&lt;/li&gt;
&lt;blockquote&gt;As it says in the &lt;a target="_blank" href="https://kubernetes.io/blog/2025/11/11/ingress-nginx-retirement/"&gt;&lt;em&gt;What You Need To Know&lt;/em&gt; blog from the Kubernetes project&lt;/a&gt;, "Existing deployments of Ingress NGINX will continue to function and installation artifacts will remain available." However "there will be no further releases, no bugfixes, and no updates to resolve any security vulnerabilities that may be discovered."&lt;/blockquote&gt;
&lt;li&gt;&lt;strong&gt;&lt;a target="_blank" href="https://docs.nginx.com/nginx-ingress-controller/"&gt;NGINX Ingress Controller&lt;/a&gt;&lt;/strong&gt; - to make things more confusing, there is another controller called "NGINX Ingress." This controller implements ingress for your Kubernetes resources via NGINX and NGINX Plus, and is owned and maintained by F5 / NGINX Inc. This will continue to be maintained and available in both its Open Source and Commercial forms.&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;In this blog post, we are going to talk about "Ingress NGINX," the controller being deprecated. We will also talk about "Ingress" or the "Ingress API", which is still around, but feature-frozen.&lt;/p&gt;

&lt;h2&gt;What Problem Did Ingress NGINX Solve?&lt;/h2&gt;

&lt;p&gt;In the early days of Kubernetes, getting external traffic to your pods was a nightmare. You either had to use expensive, cloud-specific LoadBalancers for every single service or manage complex NodePorts.&lt;/p&gt;

&lt;p&gt;While the Kubernetes Ingress API was introduced as a standard specification for Layer 7 routing (HTTP/HTTPS), it was inherently limited, designed for a simpler time in Kubernetes' history, and offered minimal features. Features like advanced routing, traffic splitting, and non-HTTP protocols were not natively supported by the API.&lt;/p&gt;

&lt;p&gt;Ingress NGINX solved this problem by serving as a robust Ingress controller that executed the API's rules. Leveraging the widely adopted NGINX reverse proxy, the controller provided a powerful, provider-agnostic entry point for cluster traffic. It was able to:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Consolidate multiple services under a single IP address.&lt;/li&gt;
&lt;li&gt;Provide robust Layer 7 capabilities, including SSL/TLS termination and basic load balancing.&lt;/li&gt;
&lt;li&gt;Use familiar NGINX configuration logic inside a cloud-native environment.&lt;/li&gt;
&lt;li&gt;Extend the basic Ingress API to support advanced features, such as rate limiting, custom headers, and sophisticated traffic management, by allowing users to inject familiar, raw NGINX configuration logic using custom &lt;code&gt;nginx.ingress.kubernetes.io&lt;/code&gt; annotations (often called "snippets").&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;This flexibility, achieved by translating standard Ingress objects into feature-rich NGINX configurations, made Ingress NGINX the de-facto controller and the "Swiss Army Knife" of Kubernetes networking.&lt;/p&gt;

&lt;h2&gt;Why is it Retiring?&lt;/h2&gt;

&lt;p&gt;If it's so popular, why kill it? The very flexibility that made it so popular also (at least partially) led to its demise. &lt;a target="_blank" href="https://kubernetes.io/blog/2025/11/11/ingress-nginx-retirement/"&gt;The announcement&lt;/a&gt; points to two primary "silent killers":&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;The "Snippet" Security Debt&lt;/strong&gt;: Ingress NGINX gained popularity through its flexibility, specifically "snippets" that let users inject raw NGINX config via annotations. Today, these are viewed as major security risks, as they can allow for configuration injection attacks. Fixing this architectural "feature" has become an insurmountable task.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;The Maintainership Gap&lt;/strong&gt;: Despite having millions of users, the project was sustained by only one or two people working in their spare time. In an industry where security vulnerabilities move fast, "best-effort" maintenance isn't enough to protect the global ecosystem.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Time for Gateway API&lt;/h2&gt;

&lt;p&gt;The removal of the popular NGINX ingress implementation opens up an opportunity to transition to the &lt;strong&gt;&lt;a target="_blank" href="https://gateway-api.sigs.k8s.io/"&gt;Gateway API&lt;/a&gt;&lt;/strong&gt;. While the Ingress API in Kubernetes is not going anywhere (just the Ingress NGINX controller is retiring), development on it is frozen, and there are reasons for that.&lt;/p&gt;

&lt;p&gt;Think of Gateway API as "Ingress 2.0." While the Ingress API is a single, limited resource, Gateway API is role-oriented. It separates the concerns of the &lt;strong&gt;Infrastructure Provider&lt;/strong&gt; (who sets up the LB), the &lt;strong&gt;Cluster Operator &lt;/strong&gt;(who defines policies), and the &lt;strong&gt;Application Developer&lt;/strong&gt; (who routes the traffic).&lt;/p&gt;

&lt;p&gt;For the &lt;a target="_blank" href="https://kubernetespodcast.com/"&gt;Kubernetes Podcast from Google&lt;/a&gt;, we've interviewed Kubernetes maintainers working on Gateway API (like in &lt;a target="_blank" href="https://kubernetespodcast.com/episode/248-gateway-updates/"&gt;this episode featuring Lior Lieberman&lt;/a&gt;), and they tell a great story about why it was developed. In the early days of Kubernetes, the maintainers &amp; contributors weren't sure exactly what users would need with regard to ingress management for workloads running on Kubernetes. The early Kubernetes Ingress object was an attempt to address the problems the maintainers thought users would need to solve, and they didn't get it all right. The annotations Ingress-NGINX supported on top of the Ingress API helped cover the many gaps in the Kubernetes API, but the annotations tied you to Ingress-NGINX. Those gaps have now been largely closed by Gateway API, and the API is supported by many conformant implementations, so you can have confidence in the portability of the API.&lt;/p&gt;

&lt;p&gt;An important feature of Gateway API's design is that it is an API standard defined by the community, but implemented by &lt;strong&gt;your&lt;/strong&gt; infrastructure or networking solution provider. Networking ultimately boils down to cables transmitting electrical signals between machines. What kind of machines and how they're connected has a big impact on the types of ingress capabilities available to you—or at least in how they're actually implemented. Gateway API provides a standard set of capabilities that you can access in a standardized way, while allowing for the reality of different networking implementations across providers. It's meant to help you get the most out of your infrastructure—regardless of what that infrastructure actually is.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;How Gateway API solves the old problems with Ingress NGINX:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Security by Design: &lt;/strong&gt;No more "configuration snippets." Features are built into the API natively, reducing the risk of accidental misconfiguration.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Standardization: &lt;/strong&gt;Unlike the old Ingress API, which required custom annotations for almost everything (like traffic splitting), Gateway API builds these features into the spec, offering greater portability.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Extensibility:&lt;/strong&gt; It is designed to handle more than just HTTP—it brings the same power to TCP, UDP, and gRPC.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;The Challenges of Transitioning&lt;/h2&gt;

&lt;p&gt;Migration is rarely "click and play." Users moving away from Ingress NGINX should prepare for:&lt;/p&gt;

&lt;ul&gt;
  &lt;li&gt;&lt;strong&gt;Annotation Mapping&lt;/strong&gt;: Most of your &lt;code&gt;nginx.ingress.kubernetes.io&lt;/code&gt; annotations won't work on new controllers. You'll need to map these to the new Gateway API "HTTPRoute" logic.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Learning Curve&lt;/strong&gt;: Gateway API has more "objects" to manage (Gateways, GatewayClasses, Routes). It takes a moment to wrap your head around the hierarchy, but it was implemented that way based on experience - these objects should help you manage your workloads' ingress needs more efficiently.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Feature Parity&lt;/strong&gt;: If you rely on very specific, obscure NGINX modules, you'll need to verify that your new controller (be it Envoy-based like Emissary or Cilium, or a different NGINX-based provider) supports them.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Why It's Worth It&lt;/h2&gt;

&lt;p&gt;The retirement of Ingress NGINX is not just a chore; it is a forcing function for adopting more sustainable architecture. By migrating to Gateway API, you gain:&lt;/p&gt;

&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Stability and Active Development&lt;/strong&gt;: Gateway API is a General Availability (GA) networking standard that has maintained a "standard channel" without a single breaking change or API version deprecation for over two years. Unlike many Ingress controllers where development has largely paused, most Gateway controllers are far more actively maintained and continue to add new features like CORS and timeouts.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Portability&lt;/strong&gt;: Choosing a different Ingress controller might seem easier, but if you rely on Ingress-NGINX annotations, you will likely have to migrate to another set of implementation-specific annotations. Gateway API provides more portable features directly in the core API and ensures a consistent experience across different implementations. When you select an implementation that is conformant with the latest v1.4 release, you can be confident that the behavior of these features will be consistent.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Future-Proof Extensibility&lt;/strong&gt;: While Gateway API supports many more features than the core Ingress API, if you find a needed feature missing, an implementation is likely to provide a similar or equivalent feature as an implementation-specific extension. For example, GKE Gateway and Envoy Gateway extend the API with their own custom policies.&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Next Steps&lt;/h2&gt;

&lt;p&gt;Start your migration planning today to capitalize on the opportunity and meet the deadline.&lt;/p&gt;

&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;Audit Your Usage&lt;/strong&gt;: Run &lt;code&gt;kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx&lt;/code&gt; to see where you are still using the legacy controller.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Utilize Automation&lt;/strong&gt;: Check out the &lt;a target="_blank" href="https://github.com/kubernetes-sigs/ingress2gateway"&gt;ingress2gateway&lt;/a&gt; project. A lot of work is going into this tool to make the migration experience better, including adding support for the most widely used Ingress-NGINX annotations.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Experiment and Provide Feedback&lt;/strong&gt;: Give Gateway API a try! Start a PoC with a conformant Gateway API implementation (like &lt;a target="_blank" href="https://docs.cloud.google.com/kubernetes-engine/docs/concepts/gateway-api"&gt;GKE Gateway&lt;/a&gt;, Cilium, or Envoy Gateway). The community welcomes help and feedback on ingress2gateway and encourages users to share feedback on what Gateway API is getting right and wrong.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Adhere to the Timeline&lt;/strong&gt;: You have until March 2026 before the security updates stop. Start your migration planning sooner rather than later!&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;For more details on migrating from Ingress to Gateway API refer to &lt;a target="_blank" href="https://docs.cloud.google.com/kubernetes-engine/docs/how-to/migrate-ingress-gateway"&gt;our documentation&lt;/a&gt;.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4191970675388406760" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4191970675388406760" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/02/the-end-of-an-era-transitioning-away-from-ingress-nginx.html" rel="alternate" title="The End of an Era: Transitioning Away from Ingress NGINX" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizpQgX40H9SS7xp8k0ezxjdzXE7z7VNetnVAY8OYHU3Mwreh5n6Datm-yHHNkgAdzR2iv2Kdq_4aHJDp1DNA2EVWG2QViYte0hu7yMsXKHaxXvbI0jD2OFRS3G3QPf_PUcoonske0CdJ0dOnFO1dEvtj_XZCotfLtId1gtlGD3eD92-hMmS9HI-D5YkAY/s72-c/BlogHeroImageIngressNGINX.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4312197850206338627</id><published>2026-02-06T14:30:00.000-08:00</published><updated>2026-02-06T14:31:38.467-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="events"/><category scheme="http://www.blogger.com/atom/ns#" term="news"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="twios"/><title type="text">This Week in Open Source #14</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Daryl Ducharme&lt;/author&gt; &amp;amp; 
&lt;author&gt;amanda casari&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"/&gt;&lt;/a&gt;
&lt;h2 class="article-title"&gt;This Week in Open Source for February 06, 2026&lt;/h2&gt;
&lt;p class="article-subtitle"&gt;&lt;em&gt;A look around the world of open source&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;Here we are at the beginning of February, and the world of open source is navigating a fascinating landscape of innovation and challenge. The main focus of many articles this week is on the evolving relationship between AI and software maintenance. But open source is about more than just the code; it's about the people and the spirit of collaboration. With that, we look at the Open Gaming Collective, which is pushing Linux gaming further, and the SLSA framework and how it is foundational in software security.&lt;/p&gt;

&lt;p&gt;Dive in to see what's happening this week in open source!&lt;/p&gt;

&lt;h3&gt;Upcoming Events&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;February 24 - 25: &lt;a target="_blank" href="https://events.linuxfoundation.org/lf-member-summit/"&gt;The Linux Foundation Member Summit&lt;/a&gt; is happening in Napa, California. It is the annual gathering for &lt;a target="_blank" href="https://www.linuxfoundation.org/"&gt;Linux Foundation&lt;/a&gt; members that fosters collaboration, innovation, and partnerships among the leading projects and organizations working to drive digital transformation with open source technologies.&lt;/li&gt;
&lt;li&gt;March 5 - 8: &lt;a target="_blank" href="https://www.socallinuxexpo.org/scale/23x"&gt;SCALE 23x&lt;/a&gt; is happening in Pasadena, California. It is North America's largest community-run open source conference and includes four days of sessions, workshops, and community activities focused on open source, security, DevOps, cloud native, and more.&lt;/li&gt;
&lt;li&gt;March 9 - 10: &lt;a target="_blank" href="https://eventyay.com/e/88882f3e"&gt;FOSSASIA Summit 2026&lt;/a&gt; is happening in Bangkok, Thailand. It will be a two-day hybrid event that showcases the latest in open technologies, fostering collaboration across enterprises, developers, educators, and communities.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;Open Source Reads and Links&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://www.theregister.com/2026/01/21/curl_ends_bug_bounty/"&gt;Curl shutters bug bounty program to remove incentive for submitting AI slop&lt;/a&gt; - The maintainer of popular open-source data transfer tool cURL has ended the project's bug bounty program after maintainers struggled to assess a flood of AI-generated contributions.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://www.404media.co/vibe-coding-is-killing-open-source-software-researchers-argue/"&gt;Vibe Coding Is Killing Open Source Software, Researchers Argue&lt;/a&gt; - So much open source software is utilized when people vibe code with LLMs. However, vibe coders don't give back, &lt;a target="_blank" href="https://arxiv.org/pdf/2601.15494"&gt;according to research&lt;/a&gt;. What can be done to make vibe coders understand the importance of the open source ecosystem and giving back?&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://redmonk.com/kholterhoff/2026/02/03/ai-slopageddon-and-the-oss-maintainers/"&gt;AI Slopageddon and the OSS Maintainers&lt;/a&gt; - AI-generated low-quality code, called "AI slop," is overwhelming open source maintainers and harming collaboration. Some projects have banned AI contributions, while others require disclosure and careful review to manage the problem. How can we make changes when platforms benefit from AI tools but often ignore the burden this puts on maintainers?&lt;/li&gt;
&lt;li&gt;[Paper] &lt;a target="_blank" href="https://arxiv.org/pdf/2601.16809"&gt;Will It Survive? Deciphering the Fate of AI-Generated Code in Open Source&lt;/a&gt; - AI-generated code lasts longer in open-source projects than human-written code. It is changed less often but has more bug fixes and security updates. Predicting when AI code will be modified is hard because many outside factors affect it.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://www.gamingonlinux.com/2026/01/open-gaming-collective-ogc-formed-to-push-linux-gaming-even-further/"&gt;Open Gaming Collective (OGC) formed to push Linux gaming even further&lt;/a&gt; - On the &lt;em&gt;fun side of open source&lt;/em&gt; the &lt;a target="_blank" href="https://opengamingcollective.org/"&gt;Open Gaming Collective&lt;/a&gt; is a new group uniting many Linux gaming projects to work together. They will share important tools and kernel patches to make Linux gaming better and less fragmented. Bazzite and other members will use OGC's shared improvements for better hardware support and gaming experience.&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://slsa.dev/blog/2025/12/supply-chain-robots-slsa"&gt;Supply Chain Robots, Electric Sheep, and SLSA&lt;/a&gt; - Securing the software supply chain is crucial to protect against attacks that can compromise code and build systems. SLSA is a practical framework that helps organizations improve supply chain security step-by-step by verifying source code and build integrity. A good read to understand this aspect of software security.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;As we like to say, "a community is a garden, not a building; it requires tending, not just construction".&lt;/p&gt;
&lt;p&gt;How is your team tending to your open source "garden" this month? We'd love to hear your stories! Share them on our &lt;a target="_blank" href="https://x.com/GoogleOSS"&gt;@GoogleOSS&lt;/a&gt; X account or our &lt;a target="_blank" href="https://bsky.app/profile/opensource.google"&gt;@opensource.google&lt;/a&gt; Bluesky account.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4312197850206338627" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4312197850206338627" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/02/this-week-in-open-source-14.html" rel="alternate" title="This Week in Open Source #14" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s72-c/header1.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-220145958796706398</id><published>2026-02-03T11:30:00.000-08:00</published><updated>2026-02-03T11:30:00.113-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="GoogleSQL"/><category scheme="http://www.blogger.com/atom/ns#" term="SQL"/><category scheme="http://www.blogger.com/atom/ns#" term="ZetaSQL"/><title type="text">ZetaSQL is being renamed to GoogleSQL</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Olena Huang&lt;/author&gt;, GoogleSQL team&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg22FlRJ_it2Eob2LLbAV6Lua3gA3r1jjDvAyqtXw2IdCVOQ4zw6h33USiKl-r31BYUhuw1Lfck4ujSTdl3dvbG3sxt_Y3-AuSD9kXvg5skS3DPHWGh9P-nG14wEqKKjOB0nwLeZlW77gnT9NfFanXLl9Z3n4oW0zUHJjwhytI2tzBXllRBwsgpmPaedJA/s1600/zeta_to_googlesql.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg22FlRJ_it2Eob2LLbAV6Lua3gA3r1jjDvAyqtXw2IdCVOQ4zw6h33USiKl-r31BYUhuw1Lfck4ujSTdl3dvbG3sxt_Y3-AuSD9kXvg5skS3DPHWGh9P-nG14wEqKKjOB0nwLeZlW77gnT9NfFanXLl9Z3n4oW0zUHJjwhytI2tzBXllRBwsgpmPaedJA/s1600/zeta_to_googlesql.png"&gt;

&lt;a href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg22FlRJ_it2Eob2LLbAV6Lua3gA3r1jjDvAyqtXw2IdCVOQ4zw6h33USiKl-r31BYUhuw1Lfck4ujSTdl3dvbG3sxt_Y3-AuSD9kXvg5skS3DPHWGh9P-nG14wEqKKjOB0nwLeZlW77gnT9NfFanXLl9Z3n4oW0zUHJjwhytI2tzBXllRBwsgpmPaedJA/s1600/zeta_to_googlesql.png" class="header-image"&gt;&lt;img border="0" class="ratio-21-9" alt="AI Generated image of the word ZetaSQL followed by a double arrow then the word GoogleSQL." src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg22FlRJ_it2Eob2LLbAV6Lua3gA3r1jjDvAyqtXw2IdCVOQ4zw6h33USiKl-r31BYUhuw1Lfck4ujSTdl3dvbG3sxt_Y3-AuSD9kXvg5skS3DPHWGh9P-nG14wEqKKjOB0nwLeZlW77gnT9NfFanXLl9Z3n4oW0zUHJjwhytI2tzBXllRBwsgpmPaedJA/s1600/zeta_to_googlesql.png"/&gt;&lt;/a&gt;

&lt;p&gt;We're excited to announce a small but significant change: the open-source project known as ZetaSQL has been officially renamed to GoogleSQL(&lt;a href="https://github.com/google/googlesql"&gt;https://github.com/google/googlesql&lt;/a&gt;). This move unifies the name of our powerful SQL dialect, analysis, and parsing libraries under a single, consistent banner, whether you're using it within Google's cloud and internal services or as part of the open-source community.&lt;/p&gt;

&lt;p&gt;For years, GoogleSQL has been the standard SQL dialect across many Google services like BigQuery and Spanner. Originally, while we called the language component GoogleSQL internally, we weren't using that name to describe the dialect in our public-facing products. Since then, we've started using the GoogleSQL name in our public-facing products and documentation, to emphasize that it's the same shared dialect across products.&lt;/p&gt;

&lt;p&gt;Now, we're renaming the open source package too, to emphasize that it supports the same SQL dialect used in BigQuery, Spanner, and other products. The goal of open sourcing our work was always to allow developers outside of Google to leverage the same robust and compliant SQL foundation. With the name change, we aim to reduce confusion and make it easier for everyone to find and discuss the same great technology. Whether you're an internal engineer, a Google Cloud customer, or an open-source developer, you're using GoogleSQL.&lt;/p&gt;

&lt;p&gt;This is primarily a branding change. The technology, features, and the team behind it remain the same. The open-source repository will continue to thrive, now proudly bearing the GoogleSQL name. We believe this unification will strengthen the GoogleSQL ecosystem, making it more accessible and understandable for our growing community of users and contributors.&lt;/p&gt;
&lt;p&gt;We're enthusiastic about this next chapter for GoogleSQL in the open-source world and look forward to continued collaboration and innovation with the community.&lt;/p&gt;
</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/220145958796706398" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/220145958796706398" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/02/zetasql-is-being-renamed-to-googlesql.html" rel="alternate" title="ZetaSQL is being renamed to GoogleSQL" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEg22FlRJ_it2Eob2LLbAV6Lua3gA3r1jjDvAyqtXw2IdCVOQ4zw6h33USiKl-r31BYUhuw1Lfck4ujSTdl3dvbG3sxt_Y3-AuSD9kXvg5skS3DPHWGh9P-nG14wEqKKjOB0nwLeZlW77gnT9NfFanXLl9Z3n4oW0zUHJjwhytI2tzBXllRBwsgpmPaedJA/s72-c/zeta_to_googlesql.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-2723651588892762307</id><published>2026-01-23T15:00:00.000-08:00</published><updated>2026-01-23T15:09:10.894-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="events"/><category scheme="http://www.blogger.com/atom/ns#" term="news"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="twios"/><title type="text">This Week in Open Source #13</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Daryl Ducharme&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"/&gt;&lt;/a&gt;

&lt;h2&gt;This Week in Open Source for January 23, 2026&lt;/h2&gt;
&lt;p&gt;&lt;em&gt;A look around the world of open source&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;Can you believe we're already wrapping up the first month of the year? January is coming to a close. The open source ecosystem is buzzing with activity, from the upcoming community gatherings at FOSDEM in Brussels to new conversations around AI standards and cloud flexibility. &lt;/p&gt;

&lt;p&gt;Google Open Source believes that "a community is a garden, not a building". It requires constant tending to thrive. This week, we're looking at how we can all contribute to that growth—whether it's by securing the software supply chain, standardizing AI agents, or simply learning from the legends of our field like Linus Torvalds.&lt;/p&gt;

&lt;p&gt;Dive in to see what's happening this week in open source!&lt;/p&gt;

&lt;h3&gt;Upcoming Events&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;January 29: &lt;a target="_blank" href="https://chaoss.community/chaosscon-2026-eu/"&gt;CHAOSScon Europe 2026&lt;/a&gt; is co-located with FOSDEM in Brussels, Belgium. This conference revolves around discussing open source project health, CHAOSS updates, use cases, and hands-on workshops for developers, community managers, project managers, and anyone interested in measuring open source project health. It also shares insights from the CHAOSS context working groups including OSPOs, University Open Source, and Open Source in Science and Research.&lt;/li&gt;
&lt;li&gt;January 31 - February 1: &lt;a target="_blank" href="https://fosdem.org/2026/"&gt;FOSDEM 2026&lt;/a&gt; is happening at the Université Libre de Bruxelles in Brussels, Belgium. It is a free event for software developers to meet, share ideas and collaborate. Every year, thousands of developers of free and open source software from all over the world gather at the event in Brussels.&lt;/li&gt;
&lt;li&gt;February 24 - 25: &lt;a target="_blank" href="https://events.linuxfoundation.org/lf-member-summit/"&gt;The Linux Foundation Member Summit&lt;/a&gt; is happening in Napa, California. It is the annual gathering for &lt;a target="_blank" href="https://www.linuxfoundation.org/"&gt;Linux Foundation&lt;/a&gt; members that fosters collaboration, innovation, and partnerships among the leading projects and organizations working to drive digital transformation with open source technologies.&lt;/li&gt;
&lt;li&gt;March 5 - 8: &lt;a target="_blank" href="https://www.socallinuxexpo.org/scale/23x"&gt;SCALE 23x&lt;/a&gt; is happening in Pasadena, California. It is North America's largest community-run open source conference and includes four days of sessions, workshops, and community activities focused on open source, security, DevOps, cloud native, and more.&lt;/li&gt;
&lt;li&gt;March 9 - 10: &lt;a target="_blank" href="https://eventyay.com/e/88882f3e"&gt;FOSSASIA Summit 2026&lt;/a&gt; is happening in Bangkok, Thailand. It will be a two-day hybrid event that showcases the latest in open technologies, fostering collaboration across enterprises, developers, educators, and communities.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;Open Source Reads and Links&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://thehackernews.com/2026/01/the-state-of-trusted-open-source.html"&gt;The state of trusted open source&lt;/a&gt; - This review of the state of trusted open source report goes over many statistics. One of the interesting ones is that vulnerabilities most often hide in the smaller dependencies of the larger projects we might be focused on. What does this mean for your approach to security? How should various open source communities deal with this?&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://www.softwareheritage.org/2026/01/21/software-heritage-archive-digital-public-good/"&gt;Software Heritage Archive recognized as a digital public good&lt;/a&gt; - As the Software Heritage Archive celebrates its 10th anniversary, the Archive has scaled to protect over 27 billion unique source files, even solving the "2PB problem" by deploying protocols that compressed 78TB of graph data into a 3TB research dataset. This ensures that humanity's executable history remains a global commons rather than a proprietary secret, aligning with our belief at Google that &lt;em&gt;Code is for today, Open Source is forever.&lt;/em&gt; &lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://allthingsopen.org/articles/agent-definition-language-open-standard-ai-agents"&gt;Agent Definition Language: The open standard AI agents have been missing&lt;/a&gt; - The Agent Definition Language (ADL) creates a clear, shared way to describe AI agents so they work well across different systems. This helps teams understand what agents do, how they behave, and how to govern them safely. As an open standard, ADL makes AI agents easier to build, review, and share in the open-source community.&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://medium.com/google-cloud/ai-agent-engineering-in-go-with-the-google-adk-4f2a992c6db4"&gt;AI Agent Engineering in Go with the Google ADK&lt;/a&gt; - AI, agents, and the related protocols touch on many open source projects. This post gives you a technical hands on with the &lt;a target="_blank" href="https://github.com/GoogleCloudPlatform/agent-starter-pack"&gt;Agent Starter Pack&lt;/a&gt;. By following it you'll learn how to build, test, and securely deploy a &lt;a target="_blank" href="https://go.dev/"&gt;Go&lt;/a&gt; AI agent using Google Cloud services.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://thenewstack.io/bryan-cantrill-how-kubernetes-broke-the-aws-cloud-monopoly/"&gt;How Kubernetes Broke the AWS Cloud Monopoly&lt;/a&gt; - Before &lt;a target="_blank" href="https://kubernetes.io/"&gt;Kubernetes&lt;/a&gt;, companies felt locked into AWS because of its unique APIs. Kubernetes allowed apps to run on any cloud, giving users more choice and helping other cloud providers grow. This has made multi-cloud the way forward for many enterprises. Are you utilizing a multi-cloud strategy? Has Kubernetes helped you get there?&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://itsfoss.com/news/linus-torvalds-vibe-coding/"&gt;Even Linux Creator Linus Torvalds is Using AI to Code in 2026&lt;/a&gt; - Opinions vary on where and whether AI is useful in various areas. One place that it has shown the greatest benefit is as a tool for writing code. It seems Linus Torvalds has started to use it to assist with part of his &lt;a target="_blank" href="https://github.com/torvalds/AudioNoise"&gt;AudioNoise&lt;/a&gt; side project. What a good way to find out how best AI can work for oneself. How have you been using AI with your code?&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;What exciting open source events and news are you hearing about? Let us know on our &lt;a target="_blank" href="https://x.com/GoogleOSS"&gt;@GoogleOSS&lt;/a&gt; X account or our new &lt;a target="_blank" href="https://bsky.app/profile/opensource.google"&gt;@opensource.google&lt;/a&gt; Bluesky account.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/2723651588892762307" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/2723651588892762307" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/01/this-week-in-open-source-13.html" rel="alternate" title="This Week in Open Source #13" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s72-c/header1.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-6907928925750809762</id><published>2026-01-21T11:30:00.000-08:00</published><updated>2026-02-12T10:56:32.151-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Go"/><category scheme="http://www.blogger.com/atom/ns#" term="golang"/><category scheme="http://www.blogger.com/atom/ns#" term="Programming Languages"/><title type="text">A JSON schema package for Go</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Jonathan Amsterdam&lt;/author&gt; &amp;amp; &lt;author&gt;Sam 
Thanawalla&lt;/author&gt;, The Go Team&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjt8SJuD-1m9SmMcH8buiQh615lLFYsqqKBtkOWQoUKOR8wup3gFghx_R1E_acIw967E6xkwn54LgFInGHimzUV1ym5tA0pH3dYbBHGc5zubeJU4hm13x9A-403ZuT8gRKLKuqdZl3pGY/s16000/gosreheader.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjt8SJuD-1m9SmMcH8buiQh615lLFYsqqKBtkOWQoUKOR8wup3gFghx_R1E_acIw967E6xkwn54LgFInGHimzUV1ym5tA0pH3dYbBHGc5zubeJU4hm13x9A-403ZuT8gRKLKuqdZl3pGY/s16000/gosreheader.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjt8SJuD-1m9SmMcH8buiQh615lLFYsqqKBtkOWQoUKOR8wup3gFghx_R1E_acIw967E6xkwn54LgFInGHimzUV1ym5tA0pH3dYbBHGc5zubeJU4hm13x9A-403ZuT8gRKLKuqdZl3pGY/s16000/gosreheader.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjt8SJuD-1m9SmMcH8buiQh615lLFYsqqKBtkOWQoUKOR8wup3gFghx_R1E_acIw967E6xkwn54LgFInGHimzUV1ym5tA0pH3dYbBHGc5zubeJU4hm13x9A-403ZuT8gRKLKuqdZl3pGY/s16000/gosreheader.png"/&gt;&lt;/a&gt;

&lt;p&gt;&lt;a target="_blank" href="https://json-schema.org"&gt;JSON Schema&lt;/a&gt; is a specification for describing JSON values that has become a critical part of
LLM infrastructure. We recently released &lt;a target="_blank" href="https://pkg.go.dev/github.com/google/jsonschema-go/jsonschema"&gt;github.com/google/jsonschema-go/jsonschema&lt;/a&gt;, a comprehensive JSON Schema package for Go. We use it in the &lt;a target="_blank" href="https://github.com/modelcontextprotocol/go-sdk"&gt;official Go SDK for MCP&lt;/a&gt; and expect it to become the canonical JSON Schema package for Google's Go SDKs that work with LLMs.&lt;/p&gt;

&lt;p&gt;JSON Schema has been around for many years. Why are we doing this now, and what do LLMs have to do with it?&lt;/p&gt;

&lt;p&gt;JSON is a flexible way to describe values. A JSON value can be null, a string, a number, a boolean, a list of values, or a mapping from strings to values. In programming language terms, JSON is dynamically typed. For example, a JSON array can contain a mix of strings, numbers, or any other JSON value. That flexibility can be quite powerful, but sometimes it's useful to constrain it. Think of JSON Schema as a type system for JSON, although its expressiveness goes well beyond typical type systems. You can write a JSON schema that requires all array elements to be strings, as you could in a typical programming language type system, but you can also constrain the length of the array or insist that its first three elements are strings of length at least five while the remaining elements are numbers.&lt;/p&gt;

&lt;p&gt;The ability to describe the shape of JSON values like that has always been useful, but it is vital when trying to coax JSON values out of LLMs, whose output is notoriously hard to constrain. JSON Schema provides an expressive and precise way to tell an LLM how its JSON output should look. That's particularly useful for generating inputs to tools, which are usually ordinary functions with precise requirements on their input. It also turns out to be useful to describe a tool's output to the LLM. So frameworks like &lt;a target="_blank" href="https://modelcontextprotocol.io"&gt;MCP&lt;/a&gt; use JSON Schema to specify both the inputs to and outputs from tools. JSON Schema has become the &lt;em&gt;lingua franca&lt;/em&gt; for defining structured interactions with LLMs.&lt;/p&gt;

&lt;h2&gt;Requirements for a JSON Schema package&lt;/h2&gt;
&lt;p&gt;Before writing our own package, we took a careful look at the existing JSON Schema packages; we didn't want to reinvent the wheel. But we couldn't find one that had all the features that we felt were important:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;Schema creation:&lt;/strong&gt; A clear, easy-to-use Go API to build schemas in code.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Serialization:&lt;/strong&gt; A way to convert a schema to and from its JSON representation.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Validation:&lt;/strong&gt; A way to check whether a given JSON value conforms to a schema.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Inference:&lt;/strong&gt; A way to generate a JSON Schema from an existing Go type. &lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;We looked at the following packages:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/invopop/jsonschema"&gt;https://github.com/invopop/jsonschema&lt;/a&gt; provides inference, but not validation.&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/santhosh-tekuri/jsonschema"&gt;https://github.com/santhosh-tekuri/jsonschema&lt;/a&gt; does not provide inference.&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/xeipuuv/gojsonschema"&gt;https://github.com/xeipuuv/gojsonschema&lt;/a&gt; does not provide a way to construct a schema in code.&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/qri-io/jsonschema"&gt;https://github.com/qri-io/jsonschema&lt;/a&gt; does not provide inference or a way to construct a schema in code.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;It didn't seem feasible to cobble together what we needed from multiple packages, so we decided to write &lt;a target="_blank" href="https://pkg.go.dev/github.com/google/jsonschema-go/jsonschema"&gt;our own&lt;/a&gt;.&lt;/p&gt;

&lt;h2&gt;A Tour of jsonschema-go&lt;/h2&gt;

&lt;h3&gt;A Simple, open Schema struct&lt;/h3&gt;
&lt;p&gt;At the core of the package is a straightforward Go struct that directly represents the JSON Schema specification. This open design means you can create complex schemas by writing a struct literal:&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox go"&gt;var schema = &amp;amp;jsonschema.Schema{
  Type:        "object",
  Description: "A simple person schema",
  Properties: map[string]*jsonschema.Schema{
    "name": {Type: "string"},
    "age": {Type: "integer", Minimum: jsonschema.Ptr(0.0)},
  },
  Required: []string{"name"},
}
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;A &lt;code&gt;Schema&lt;/code&gt; will marshal to a valid JSON value representing the schema, and any JSON value representing a schema can be unmarshalled into a &lt;code&gt;Schema&lt;/code&gt;.&lt;/p&gt;

&lt;p&gt;The &lt;code&gt;Schema&lt;/code&gt; struct defines fields for all standard JSON Schema keywords that are defined in popular specification drafts. To handle additional keywords not present in the specification, &lt;code&gt;Schema&lt;/code&gt; includes an &lt;code&gt;Extra&lt;/code&gt; field of type &lt;code&gt;map[string]any&lt;/code&gt;.&lt;/p&gt;

&lt;h3&gt;Validation and resolution&lt;/h3&gt;
&lt;p&gt;Before using a schema to validate JSON values, the schema itself must be validated, and its references to other schemas must be followed so that those schemas can themselves be checked. We call this process &lt;em&gt;resolution&lt;/em&gt;. Calling &lt;code&gt;Resolve&lt;/code&gt; on a &lt;code&gt;Schema&lt;/code&gt; returns a &lt;code&gt;jsonschema.Resolved&lt;/code&gt;, an opaque representation of a valid schema optimized for validation. &lt;code&gt;Resolved.Validate&lt;/code&gt; accepts almost any value that can be obtained from calling &lt;code&gt;json.Unmarshal&lt;/code&gt;: null, basic types like strings and numbers, &lt;code&gt;[]any&lt;/code&gt;, and &lt;code&gt;map[string]any&lt;/code&gt;. It returns an error describing all the ways in which the value fails to satisfy the schema.&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox go"&gt;rs, err := schema.Resolve(nil)
if err != nil {
  return err
}
err = rs.Validate(map[string]any{"name": "John Doe", "age": 20})
if err != nil {
  fmt.Printf("validation failed: %v\n", err)
}
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;Originally, &lt;code&gt;Validate&lt;/code&gt; accepted a Go struct. We removed that feature because it is not possible to validate some schemas against a struct. For example, if a struct field has a non-pointer type, there is no way to determine whether the corresponding key was present in the original JSON, so there is no way to enforce the &lt;code&gt;required&lt;/code&gt; keyword.&lt;/p&gt;

&lt;h3&gt;Inference from Go types&lt;/h3&gt;
&lt;p&gt;While it's always possible to create a schema by constructing a &lt;code&gt;Schema&lt;/code&gt; value, it's often convenient to create one from a Go value, typically a struct. This operation, which we call &lt;em&gt;inference&lt;/em&gt;, is provided by the functions &lt;code&gt;For&lt;/code&gt; and &lt;code&gt;ForType&lt;/code&gt;. Here is &lt;code&gt;For&lt;/code&gt; in action:&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox go"&gt;type Person struct {
    Name string `json:"name" jsonschema:"person's full name"`
    Age int `json:"age,omitzero"`
}

schema, err := jsonschema.For[Person](nil)

/* schema is:
{
    "type": "object",
    "required": ["name"],
    "properties": {
        "age":  {"type": "integer"},
        "name": {
            "type": "string",
            "description": "person's full name"
        }
    },
    "additionalProperties": false
}
*/
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;&lt;code&gt;For&lt;/code&gt; gets information from struct field tags. As this example shows, it uses the name in the &lt;code&gt;json&lt;/code&gt; tag as the property name, and interprets &lt;code&gt;omitzero&lt;/code&gt; or &lt;code&gt;omitempty&lt;/code&gt; to mean that a field is optional. It also looks for a &lt;code&gt;jsonschema&lt;/code&gt; tag to get property descriptions. (We considered adding support for other keywords to the &lt;code&gt;jsonschema&lt;/code&gt; tag as some other packages do, but that quickly gets complicated. We left an escape hatch in case we decide to support other keywords in the future.)&lt;/p&gt;

&lt;p&gt;&lt;code&gt;ForType&lt;/code&gt; works the same way, but takes a &lt;code&gt;reflect.Type&lt;/code&gt;. It's useful when the type is known only at runtime.&lt;/p&gt;

&lt;h3&gt;&lt;/h3&gt;

&lt;h3&gt;A foundation for the Go community &lt;/h3&gt;
&lt;p&gt;By providing a high-quality JSON Schema package, we aim to strengthen the entire Go ecosystem for AI applications (and, indeed, any application that needs to validate JSON). This library is already a critical dependency for Google's own AI SDKs, and we're committed to its long-term health. We welcome external contributions, whether they are bug reports, bug fixes, performance enhancements, or support for additional JSON Schema drafts. Before beginning work, please file an issue on our &lt;a target="_blank" href="https://github.com/google/jsonschema-go/issues"&gt;issue tracker&lt;/a&gt;.&lt;/p&gt;
</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/6907928925750809762" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/6907928925750809762" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/01/a-json-schema-package-for-go.html" rel="alternate" title="A JSON schema package for Go" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhjt8SJuD-1m9SmMcH8buiQh615lLFYsqqKBtkOWQoUKOR8wup3gFghx_R1E_acIw967E6xkwn54LgFInGHimzUV1ym5tA0pH3dYbBHGc5zubeJU4hm13x9A-403ZuT8gRKLKuqdZl3pGY/s72-c/gosreheader.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-78834753670138284</id><published>2026-01-19T12:00:00.000-08:00</published><updated>2026-01-19T12:00:00.114-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="google summer of code"/><category scheme="http://www.blogger.com/atom/ns#" term="gsoc"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="student programs"/><title type="text">Mentor Org Applications for Google Summer of Code 2026 open through Feb 3</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Stephanie Taylor&lt;/author&gt;, &lt;author&gt;Mary Radomile&lt;/author&gt; &amp;amp; &lt;author&gt;Lucila Ortíz&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUXNIeq6Bx0_3tLzTdA5AJwaORFwh5SuPlSvaynzE-PUOu7msKItnvzvByElo_SdAdcJTPq3yujUS7Rq-vYPMTq6YDc2Dxz0ibM0dhJTo0O_vc1kZ1bisEpZ82-3IBN3DZ0fo3veNeQN5HucZKPqGUUWcahf9RPZmoI0cxkzMypRm-bdC3-1QZhl_8aVo/s355/image2.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUXNIeq6Bx0_3tLzTdA5AJwaORFwh5SuPlSvaynzE-PUOu7msKItnvzvByElo_SdAdcJTPq3yujUS7Rq-vYPMTq6YDc2Dxz0ibM0dhJTo0O_vc1kZ1bisEpZ82-3IBN3DZ0fo3veNeQN5HucZKPqGUUWcahf9RPZmoI0cxkzMypRm-bdC3-1QZhl_8aVo/s355/image2.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUXNIeq6Bx0_3tLzTdA5AJwaORFwh5SuPlSvaynzE-PUOu7msKItnvzvByElo_SdAdcJTPq3yujUS7Rq-vYPMTq6YDc2Dxz0ibM0dhJTo0O_vc1kZ1bisEpZ82-3IBN3DZ0fo3veNeQN5HucZKPqGUUWcahf9RPZmoI0cxkzMypRm-bdC3-1QZhl_8aVo/s355/image2.png" class="header-image" style="display: block;width: 355px;"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUXNIeq6Bx0_3tLzTdA5AJwaORFwh5SuPlSvaynzE-PUOu7msKItnvzvByElo_SdAdcJTPq3yujUS7Rq-vYPMTq6YDc2Dxz0ibM0dhJTo0O_vc1kZ1bisEpZ82-3IBN3DZ0fo3veNeQN5HucZKPqGUUWcahf9RPZmoI0cxkzMypRm-bdC3-1QZhl_8aVo/s355/image2.png"/&gt;&lt;/a&gt;

&lt;p&gt;Attention open source enthusiasts! Mentoring organization applications for Google Summer of Code (GSoC) 2026 are officially open. This is your opportunity to guide students and developers early in their careers. &lt;strong&gt;The application window begins today, Monday, January 19th, and will remain open until February 3, 2026, at 18:00 UTC.&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
 &lt;/p&gt;&lt;p&gt;
To find more information about the process of becoming a mentor organization, please review our official &lt;a target="_blank" href="https://summerofcode.withgoogle.com/"&gt;GSoC site&lt;/a&gt;. We also recommend consulting the &lt;a target="_blank" href="https://google.github.io/gsocguides/mentor/"&gt;Mentor Guide&lt;/a&gt; and the &lt;a target="_blank" href="https://developers.google.com/open-source/gsoc/help/oa-tips"&gt;GSoC Organization Admin Tips&lt;/a&gt;, as both provide tips for preparing your community and strengthening your application.&lt;/p&gt;

&lt;p&gt;GSoC welcomes a wide variety of open source projects working in AI/ML, security, cloud, development tools, science, medicine, data, media, and more! For 2026, we are looking for even more innovative projects working on Artificial Intelligence/Machine Learning and Security.&lt;/p&gt;

&lt;p&gt;&lt;strong&gt;Requirements for GSoC Mentoring Organizations:&lt;/strong&gt;&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;An established open source project with at least 18 months of history&lt;/li&gt;
&lt;li&gt;Software produced and released under an &lt;a target="_blank" href="https://opensource.org/licenses"&gt;Open Source Initiative (OSI)-approved license&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;A robust community with members who are enthusiastic and prepared to mentor GSoC participants&lt;/li&gt;
&lt;li&gt;An active project characterized by regular engagement, rather than infrequent contributions&lt;/li&gt;
&lt;li&gt;A comprehensive list of Project Ideas (refer to the &lt;a target="_blank" href="https://google.github.io/gsocguides/mentor/defining-a-project-ideas-list"&gt;mentor guide&lt;/a&gt; for best practices)&lt;/li&gt;
&lt;li&gt;A clear grasp of GSoC objectives and &lt;a target="_blank" href="https://summerofcode.withgoogle.com/rules"&gt;program rules&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;A high-quality application that provides a detailed explanation of your project and its specific goals&lt;/li&gt;
&lt;/ol&gt;

&lt;p&gt;&lt;strong&gt;2026 Mentoring Organizations will be announced on February 19, 18:00 UTC*&lt;/strong&gt;.&lt;/p&gt;
&lt;p&gt;For first-time organizations interested in participating, we strongly suggest getting a referral from experienced organizations that think your project is a good fit.&lt;/p&gt;
&lt;figure class="wide"&gt;
  &lt;iframe class="BLOG_video_class" allowfullscreen="" height="383" youtube-src-id="L4JNz6zWzLs" src="https://www.youtube.com/embed/L4JNz6zWzLs"&gt;&lt;/iframe&gt;
  &lt;figcaption&gt;&lt;a target="_blank" href="https://www.youtube.com/watch?v=L4JNz6zWzLs&amp;t=1s"&gt;Google Summer of Code: Organizations Apply&lt;/a&gt;&lt;/figcaption&gt;
&lt;/figure&gt;

&lt;p&gt;Please visit the &lt;a target="_blank" href="https://summerofcode.withgoogle.com/"&gt;GSoC site&lt;/a&gt; for even more information on how to apply and review the detailed &lt;a target="_blank" href="https://developers.google.com/open-source/gsoc/timeline"&gt;timeline&lt;/a&gt; for important deadlines this year. We recommend reading our &lt;a target="_blank" href="https://summerofcode.withgoogle.com/help"&gt;help page&lt;/a&gt; on our website for easy access to all the most important resources for all the applicants.&lt;/p&gt;

&lt;p&gt;We look forward to seeing your organization applications and learning more about your communities!&lt;/p&gt;

&lt;p&gt;&lt;em&gt;&lt;strong&gt;*Interested GSoC Contributor?&lt;/strong&gt; After mentoring organizations are announced you can (and should!) begin researching each organization and reviewing project ideas to find the community that fits your interests. GSoC contributor applications are open from March 16-31.&lt;/em&gt;&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/78834753670138284" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/78834753670138284" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/01/mentor-org-applications-for-google-summer-of-code-2026-open-through-feb-3.html" rel="alternate" title="Mentor Org Applications for Google Summer of Code 2026 open through Feb 3" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEgUXNIeq6Bx0_3tLzTdA5AJwaORFwh5SuPlSvaynzE-PUOu7msKItnvzvByElo_SdAdcJTPq3yujUS7Rq-vYPMTq6YDc2Dxz0ibM0dhJTo0O_vc1kZ1bisEpZ82-3IBN3DZ0fo3veNeQN5HucZKPqGUUWcahf9RPZmoI0cxkzMypRm-bdC3-1QZhl_8aVo/s72-c/image2.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-4340011541733917437</id><published>2026-01-14T11:30:00.000-08:00</published><updated>2026-01-14T16:56:20.952-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Apache iceberg"/><category scheme="http://www.blogger.com/atom/ns#" term="Biglake"/><category scheme="http://www.blogger.com/atom/ns#" term="Public Dataset"/><title type="text">Explore public datasets with Apache Iceberg &amp; BigLake</title><content 
type="html">&lt;p class="byline"&gt;by &lt;author&gt;Talat Uyarer&lt;/author&gt; &amp;amp; &lt;author&gt;Alex Stephen&lt;/author&gt;, Biglake Team&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEghiYATojGDNO06Mt2KCIaxLP9l7zyNM0m630XNBySQnVjipwpBA0A0iNb5_YcI8j65kFyNSRh80QW-FA4kJcmajYU2siAtIRKNUVnqOE5qZOaWeZDZ_NmwrFM84UxpXHKgGqTm9UlfmhYJ11POq0reSJWiwibFiBMDF2Wqmskpli52Z3TrEsupDngeqIQ/s1600/publicdataset-apacheicebetg-blog.jpg"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEghiYATojGDNO06Mt2KCIaxLP9l7zyNM0m630XNBySQnVjipwpBA0A0iNb5_YcI8j65kFyNSRh80QW-FA4kJcmajYU2siAtIRKNUVnqOE5qZOaWeZDZ_NmwrFM84UxpXHKgGqTm9UlfmhYJ11POq0reSJWiwibFiBMDF2Wqmskpli52Z3TrEsupDngeqIQ/s1600/publicdataset-apacheicebetg-blog.jpg"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEghiYATojGDNO06Mt2KCIaxLP9l7zyNM0m630XNBySQnVjipwpBA0A0iNb5_YcI8j65kFyNSRh80QW-FA4kJcmajYU2siAtIRKNUVnqOE5qZOaWeZDZ_NmwrFM84UxpXHKgGqTm9UlfmhYJ11POq0reSJWiwibFiBMDF2Wqmskpli52Z3TrEsupDngeqIQ/s1600/publicdataset-apacheicebetg-blog.jpg" class="header-image"&gt;&lt;img alt="A vintage-style illustration titled THE PUBLIC DATASETS OF APACHE ICEBERG shows a man in a boat named BigLake Explorer viewing a large iceberg." border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEghiYATojGDNO06Mt2KCIaxLP9l7zyNM0m630XNBySQnVjipwpBA0A0iNb5_YcI8j65kFyNSRh80QW-FA4kJcmajYU2siAtIRKNUVnqOE5qZOaWeZDZ_NmwrFM84UxpXHKgGqTm9UlfmhYJ11POq0reSJWiwibFiBMDF2Wqmskpli52Z3TrEsupDngeqIQ/s1600/publicdataset-apacheicebetg-blog.jpg"/&gt;&lt;/a&gt;

&lt;p&gt;The promise of the Open Data Lakehouse is simple: your data should not be locked into a single engine. It should be accessible, interoperable, and built on open standards. Today, we are taking a major step forward in making that promise a reality for developers, data engineers, and researchers everywhere.&lt;/p&gt;&lt;p&gt;
We are thrilled to announce the availability of high-quality &lt;strong&gt;Public Datasets served via the Apache Iceberg REST Catalog&lt;/strong&gt;. Hosted on Google Cloud's BigLake, these datasets are available for &lt;strong&gt;read-only access&lt;/strong&gt; to anyone with a Google Cloud account.&lt;/p&gt;&lt;p&gt;
Whether you are using Apache Spark, Trino, Flink, or BigQuery, you can now connect to a live, production-grade Iceberg Catalog and start querying data immediately. No copying files, no managing storage buckets. Just configure your catalog and query.&lt;/p&gt;

&lt;h2&gt;How to Access Public Datasets&lt;/h2&gt;
&lt;p&gt;This initiative is designed to be engine-agnostic. We provide the storage and the catalog and you bring the compute. This allows you to benchmark different engines, test new Iceberg features, or simply explore interesting data without setting up infrastructure or finding data to ingest.&lt;/p&gt;

&lt;h3&gt;How to Connect with Apache Spark&lt;/h3&gt;
&lt;p&gt;You can connect to the public dataset using any standard Spark environment (local, Google Cloud Dataproc, or other vendors). You only need to point your Iceberg catalog configuration to our public REST endpoint.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Prerequisites:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;A Google Cloud Project (for authentication).&lt;/li&gt;
&lt;li&gt;Standard Google Application Default Credentials (ADC) set up in your environment.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Spark Configuration:&lt;/p&gt;&lt;p&gt;
Use the following configuration flags when starting your Spark Shell or SQL session. This configures a catalog named bqms (BigQuery Metastore) pointing to our public REST endpoint.&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox bash" style="font-size:14px"&gt;PROJECT_ID=&amp;lt;YOUR_PROJECT_ID&amp;gt;

  spark-sql \
    --packages org.apache.iceberg:iceberg-spark-runtime-3.5_2.12:1.10.0,org.apache.iceberg:iceberg-gcp-bundle:1.10.0 \
    --conf spark.hadoop.hive.cli.print.header=true \
    --conf spark.sql.catalog.bqms=org.apache.iceberg.spark.SparkCatalog \
    --conf spark.sql.catalog.bqms.type=rest \
    --conf spark.sql.catalog.bqms.uri=https://biglake.googleapis.com/iceberg/v1/restcatalog \
    --conf spark.sql.catalog.bqms.warehouse=gs://biglake-public-nyc-taxi-iceberg \
    --conf spark.sql.catalog.bqms.header.x-goog-user-project=$PROJECT_ID \
    --conf spark.sql.catalog.bqms.rest.auth.type=google \
    --conf spark.sql.catalog.bqms.io-impl=org.apache.iceberg.gcp.gcs.GCSFileIO \
    --conf spark.sql.catalog.bqms.header.X-Iceberg-Access-Delegation=vended-credentials \
    --conf spark.sql.defaultCatalog=bqms
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;&lt;em&gt;Note: Replace &amp;lt;YOUR_PROJECT_ID&amp;gt; with your actual Google Cloud Project ID. This is required for the REST Catalog to authenticate your quota usage, even for free public access.&lt;/em&gt;&lt;/p&gt;

&lt;h2&gt;Exploring the Data: Sample Queries&lt;/h2&gt;
&lt;p&gt;Once connected, you have full SQL access to the datasets. We are launching with the classic &lt;strong&gt;NYC Taxi&lt;/strong&gt; dataset, modeled as an Iceberg table to showcase partitioning and metadata capabilities.&lt;/p&gt;

&lt;h3&gt;1. The "Hello World" of Analytics&lt;/h3&gt;
&lt;p&gt;This query aggregates millions of records to find the average fare and trip distance by passenger count. It demonstrates how Iceberg efficiently scans data files without needing to list directories.&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox sql" style="font-size:14px"&gt;SELECT 
    passenger_count,
    COUNT(1) AS num_trips,
    ROUND(AVG(total_amount), 2) AS avg_fare,
    ROUND(AVG(trip_distance), 2) AS avg_distance
FROM 
    bqms.public_data.nyc_taxicab
WHERE 
    data_file_year = 2021
    AND passenger_count &gt; 0
GROUP BY 
    passenger_count
ORDER BY 
    num_trips DESC;
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;&lt;strong&gt;What this demonstrates:&lt;/strong&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Partition Pruning:&lt;/strong&gt; The query filters on data_file_year, allowing the engine to skip scanning data from other years entirely.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Vectorized Reads:&lt;/strong&gt; Engines like Spark can process the Parquet files efficiently in batches.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;2. Time Travel: Auditing Data History&lt;/h3&gt;
&lt;p&gt;One of Iceberg's most powerful features is Time Travel. You can query the table as it existed at a specific point in the past.&lt;/p&gt;

&lt;pre&gt;&lt;code class="codebox sql" style="font-size:14px"&gt;-- Compare the row count of the current version vs. a specific snapshot
SELECT 
    'Current State' AS version, 
    COUNT(*) AS count 
FROM bqms.public_data.nyc_taxicab
UNION ALL
SELECT 
    'Past State' AS version, 
    COUNT(*) AS count 
FROM bqms.public_data.nyc_taxicab VERSION AS OF 2943559336503196801;
&lt;/code&gt;&lt;/pre&gt;

&lt;p&gt;Description:&lt;/p&gt;&lt;p&gt;
This query allows you to audit changes. By querying the history metadata table (e.g., &lt;code class="sql"&gt;SELECT * FROM bqms.public_data.nyc_taxicab.history&lt;/code&gt;), you can find snapshot IDs and "travel back" to see how the dataset grew over time.&lt;/p&gt;

&lt;h2&gt;
Coming Soon: An Iceberg V3 Playground&lt;/h2&gt;
&lt;p&gt;We are not just hosting static data; we are building a playground for the future of Apache Iceberg. We plan to release new datasets specifically designed to help you test &lt;strong&gt;Iceberg V3 Spec&lt;/strong&gt; features.&lt;/p&gt;

&lt;h2&gt;Start Building Today&lt;/h2&gt;
&lt;p&gt;The goal of these public datasets is to lower the barrier to entry. You don't need to manage infrastructure to learn Iceberg; you just need to connect. Whether you are a data analyst, data scientist, data engineer or a data enthusiast, today you can:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;Use BigQuery (via BigLake) to query these tables directly using SQL, combining them with your private data.&lt;/li&gt;
&lt;li&gt;Test your OSS engine (e.g. Spark, Trino, Flink etc.) configurations against a live REST Catalog.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;Start building an open, managed and high-performance Iceberg lakehouse to enable advanced analytics and data science with &lt;a href="https://cloud.google.com/biglake" target="_blank"&gt;https://cloud.google.com/biglake&lt;/a&gt; today!&lt;/p&gt;

&lt;p&gt;Happy Querying!&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4340011541733917437" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/4340011541733917437" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/01/explore-public-datasets-with-apache-iceberg-and-biglake.html" rel="alternate" title="Explore public datasets with Apache Iceberg &amp; BigLake" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEghiYATojGDNO06Mt2KCIaxLP9l7zyNM0m630XNBySQnVjipwpBA0A0iNb5_YcI8j65kFyNSRh80QW-FA4kJcmajYU2siAtIRKNUVnqOE5qZOaWeZDZ_NmwrFM84UxpXHKgGqTm9UlfmhYJ11POq0reSJWiwibFiBMDF2Wqmskpli52Z3TrEsupDngeqIQ/s72-c/publicdataset-apacheicebetg-blog.jpg" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-413265108863315874</id><published>2026-01-09T15:00:00.000-08:00</published><updated>2026-01-09T15:05:22.662-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="events"/><category scheme="http://www.blogger.com/atom/ns#" term="news"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="twios"/><title type="text">This Week in Open Source #12</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Daryl Ducharme&lt;/author&gt; &amp;amp; &lt;author&gt;amanda casari&lt;/author&gt;, Google Open Source&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s1600/header1.png"/&gt;&lt;/a&gt;

&lt;h2 class="article-title"&gt;This Week in Open Source for January 9, 2026&lt;/h2&gt;
&lt;p class="article-subtitle"&gt;&lt;em&gt;A look around the world of open source&lt;/em&gt;&lt;/p&gt;

&lt;p&gt;Here we are at the beginning of a new year. What will it bring to the open source world? What new projects will be started? What should we be focusing on? What is your open source resolution for 2026? One of ours is to better connect with various open source communities on social media. We've gotten off to a big start by launching an &lt;a target="_blank" href="https://bsky.app/profile/opensource.google"&gt;official Google Open Source account on Bluesky&lt;/a&gt;. Already, we are enjoying the community there.&lt;/p&gt;

&lt;h3&gt;Upcoming Events&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;January 21 - 23: &lt;a target="_blank" href="https://2026.everythingopen.au/"&gt;Everything Open 2026&lt;/a&gt; is happening in Canberra, Australia. Everything Open is a conference focused on open technologies, including Linux, open source software, open hardware and open data, and the communities that surround them. The conference provides technical deep-dives as well as updates from industry leaders and experts on a wide array of topics from these areas.&lt;/li&gt;
&lt;li&gt;January 29: &lt;a target="_blank" href="https://chaoss.community/chaosscon-2026-eu/"&gt;CHAOSScon Europe 2026&lt;/a&gt; is co-located with FOSDEM in Brussels, Belgium. This conference revolves around discussing open source project health, CHAOSS updates, use cases, and hands-on workshops for developers, community managers, project managers, and anyone interested in measuring open source project health. It also shares insights from the CHAOSS context working groups including OSPOs, University Open Source, and Open Source in Science and Research.&lt;/li&gt;
&lt;li&gt;January 31 - February 1: &lt;a target="_blank" href="https://fosdem.org/2026/"&gt;FOSDEM 2026&lt;/a&gt; is happening at the Université Libre de Bruxelles in Brussels, Belgium. It is a free event for software developers to meet, share ideas and collaborate. Every year, thousands of developers of free and open source software from all over the world gather at the event in Brussels.&lt;/li&gt;
&lt;li&gt;February 24 - 25: &lt;a target="_blank" href="https://events.linuxfoundation.org/lf-member-summit/"&gt;The Linux Foundation Member Summit&lt;/a&gt; is happening in Napa, California. It is the annual gathering for &lt;a target="_blank" href="https://www.linuxfoundation.org/"&gt;Linux Foundation&lt;/a&gt; members that fosters collaboration, innovation, and partnerships among the leading projects and organizations working to drive digital transformation with open source technologies.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;Open Source Reads and Links&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;[Talk] &lt;a target="_blank" href="https://opensource.org/blog/state-of-the-source-at-ato-2025-state-of-the-open-ai"&gt;State of the Source at ATO 2025: State of the "Open" AI&lt;/a&gt; - At the end of last year Open Source Initiative gave a summary of Gabriel Toscano's talk at All Things Open. In the talk he discusses how AI models call themselves "open" but often lack the legal or technical freedoms that true open source requires. Analysis of ~20,000 Hugging Face models found Apache 2.0 and MIT are common, but many models have no license or use restrictive custom terms. The study warns that inconsistent labeling and mutable restrictions muddy openness and urges clearer licensing and platform checks.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://thenewstack.io/the-reality-of-open-source-more-puppies-less-beer/"&gt;The Reality of Open Source: More Puppies, Less Beer&lt;/a&gt; - Bitnami's removal of popular containers last year shows that open source can suddenly change and disrupt users. Organizations must evaluate who funds and maintains each open source component, not just the code. Plan for business continuity, supply-chain visibility, and the ability to fork or replace critical components.&lt;/li&gt;
&lt;li&gt;[Blog] &lt;a target="_blank" href="https://opensource.org/blog/the-open-source-community-and-u-s-public-policy"&gt;The Open Source Community and U.S. Public Policy&lt;/a&gt; - The &lt;a target="_blank" href="https://opensource.org/"&gt;Open Source Initiative&lt;/a&gt; is increasing its U.S. policy work to ensure open source developers are part of technology and AI rulemaking.  Since policymakers often lack deep knowledge of open source, the community must explain how shared code differs from deployed systems. Joining groups like the &lt;a target="_blank" href="https://opensource.org/programs/open-policy-alliance"&gt;Open Policy Alliance&lt;/a&gt; helps nonprofits engage and influence policy.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://www.theregister.com/2025/11/25/pebble_eink_smartwatch_open_source/"&gt;Pebble, the e-ink smartwatch that refuses to die, just went fully open source&lt;/a&gt; - Pebble, the e-ink smartwatch with a tumultuous history, is making a move sure to please the DIY enthusiasts that make up the bulk of its fans: Its entire software stack is now fully open source, and key hardware design files are available too.&lt;/li&gt;
&lt;li&gt;[Article] &lt;a target="_blank" href="https://thenewstack.io/forget-predictions-tech-leaders-actual-2026-resolutions/"&gt;Forget Predictions: Tech Leaders' Actual 2026 Resolutions&lt;/a&gt; - We want to know your open source resolutions and perhaps these resolutions from some tech leaders (open source and otherwise) can point you in a direction. Their plans run the gamut of securing and managing AI responsibly, reducing noise in security data, and creating healthier tech habits. The common theme is intentional, measurable change over speculation.&lt;/li&gt;
&lt;li&gt;[Paper] &lt;a target="_blank" href="https://www.arxiv.org/pdf/2512.05470"&gt;Everything is Context: Agentic File System Abstraction for Context Engineering&lt;/a&gt; - GenAI systems may produce inaccurate or misleading outputs due to limited contextual awareness and evolving data sources. Thus mechanisms are needed to govern how persistent knowledge transitions into bounded context in a traceable, verifiable, and human-aware manner, ensuring that human judgment and knowledge are embedded within the system's evolving context for reasoning and evaluation. &lt;br&gt;&lt;br&gt;
The paper proposes using a file-system abstraction based on the open-source &lt;a target="_blank" href="https://www.aigne.io/en/framework"&gt;AIGNE framework&lt;/a&gt; to manage all types of context for generative AI agents. This unified infrastructure makes context persistent, traceable, and governed so agents can read, write, and version memory, tools, and human input.&lt;/li&gt;
&lt;/ul&gt;

&lt;p&gt;What exciting open source events and news are you hearing about? Let us know on our &lt;a target="_blank" href="https://x.com/GoogleOSS"&gt;@GoogleOSS&lt;/a&gt; X account or our new &lt;a target="_blank" href="https://bsky.app/profile/opensource.google"&gt;@opensource.google&lt;/a&gt; Bluesky account.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/413265108863315874" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/413265108863315874" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2026/01/this-week-in-open-source-12.html" rel="alternate" title="This Week in Open Source #12" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEizuKHbBriafpnCVl8A7gazsybuVNKqwyYE1n5-RfAYhu6i5bN55iTw0LE_S0KLGWBJU9ERHgsnd9lZ3J94PhlE5hpZ5YIeBHH8PjS2yuRciaN7VgqLUISB9Ofpqst0n6tawyH6etvfFro4lqZv2X8EomVGJUTL8CSaHp0XyK3LxbJIhwi6ENKX620R4ME/s72-c/header1.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-6346008971365805381</id><published>2025-12-18T11:30:00.000-08:00</published><updated>2025-12-18T11:30:00.112-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="foundation model"/><category scheme="http://www.blogger.com/atom/ns#" term="JAX"/><category scheme="http://www.blogger.com/atom/ns#" term="Marin 32B"/><category scheme="http://www.blogger.com/atom/ns#" term="open source"/><category scheme="http://www.blogger.com/atom/ns#" term="TPUs"/><title type="text">Training Marin 32B: What an open lab can build 
with TPUs, JAX, and a little persistence</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;David Hall&lt;/author&gt;, Open Athena&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiBa0-tY649AEMig_Twadbzo0V6iFsqsU1zIqGSeNY5mYDvc7ii1-U4DJf4jsbUGBi02KJAuCkt-6W61ZpMhyM6fnahqNDto7t-vpy0LLtCmYtVNDqEQcx6CV7i6vDRBjQKT-0jfYOx3h5CR1brasxG6rahf85CmiG_mxgmaFRpVofXJHe2pqri5NQ1dno/s1600/AwDU5tGz4AfqLpp.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiBa0-tY649AEMig_Twadbzo0V6iFsqsU1zIqGSeNY5mYDvc7ii1-U4DJf4jsbUGBi02KJAuCkt-6W61ZpMhyM6fnahqNDto7t-vpy0LLtCmYtVNDqEQcx6CV7i6vDRBjQKT-0jfYOx3h5CR1brasxG6rahf85CmiG_mxgmaFRpVofXJHe2pqri5NQ1dno/s1600/AwDU5tGz4AfqLpp.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiBa0-tY649AEMig_Twadbzo0V6iFsqsU1zIqGSeNY5mYDvc7ii1-U4DJf4jsbUGBi02KJAuCkt-6W61ZpMhyM6fnahqNDto7t-vpy0LLtCmYtVNDqEQcx6CV7i6vDRBjQKT-0jfYOx3h5CR1brasxG6rahf85CmiG_mxgmaFRpVofXJHe2pqri5NQ1dno/s1600/AwDU5tGz4AfqLpp.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiBa0-tY649AEMig_Twadbzo0V6iFsqsU1zIqGSeNY5mYDvc7ii1-U4DJf4jsbUGBi02KJAuCkt-6W61ZpMhyM6fnahqNDto7t-vpy0LLtCmYtVNDqEQcx6CV7i6vDRBjQKT-0jfYOx3h5CR1brasxG6rahf85CmiG_mxgmaFRpVofXJHe2pqri5NQ1dno/s1600/AwDU5tGz4AfqLpp.png"/&gt;&lt;/a&gt;

&lt;p&gt;&lt;em&gt;Last summer, we partnered with Google &lt;a target="_blank" href="https://developers.googleblog.com/en/stanfords-marin-foundation-model-first-fully-open-model-developed-using-jax/"&gt;to share how Marin trained a fully open 8B foundation model&lt;/a&gt; using &lt;a target="_blank" href="https://docs.jax.dev/"&gt;JAX&lt;/a&gt; and &lt;a target="_blank" href="https://jax-ml.github.io/scaling-book/tpus/"&gt;TPUs&lt;/a&gt;. Since then, our process hasn't changed much, but the scale has. Over the summer, we trained a 32B model entirely in the open, and most days there was just one person keeping the run moving.&lt;/em&gt;&lt;/p&gt;&lt;p&gt;
Large-scale training is usually associated with big teams and bigger infrastructure. Large model releases typically have hundreds of authors. &lt;a target="_blank" href="https://marin.community"&gt;Marin&lt;/a&gt; tests a different hypothesis: using open source software and data, &lt;strong&gt;small teams can train serious foundation models if the tooling is good, the platform is stable, and the process is transparent.&lt;/strong&gt; The Marin 32B run was our strongest validation yet.&lt;/p&gt;
&lt;p&gt;&lt;hr style="height:1px;color:#ccc;" /&gt;

&lt;h2&gt;A model built with one hand on the helm&lt;/h2&gt;

&lt;p&gt;Marin was started at Stanford University's &lt;a target="_blank" href="https://crfm.stanford.edu"&gt;Center for Research on Foundation Models&lt;/a&gt; with the goal of building radically open foundation models. In May, we released &lt;a target="_blank" href="https://marin.readthedocs.io/en/latest/reports/marin-8b-retro/"&gt;Marin 8B Base&lt;/a&gt;, which bested the popular Llama 3.1 8B Base on 14 of 19 benchmarks. Marin 8B was trained using &lt;a target="_blank" href="https://cloud.google.com/tpu"&gt;Google TPU &lt;/a&gt;v4 and TPU v5e from the &lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;TPU Research Cloud&lt;/a&gt;.&lt;/p&gt;

&lt;p&gt;Building on that success, we set out to build a 32B model starting in June. Our 32B training run followed Marin's usual "Tootsie Roll" style: start with a solid recipe, instrument heavily, and adapt mid-flight when necessary. That flexibility matters, because the first time you train at a larger scale, issues inevitably arise.&lt;/p&gt;

&lt;p&gt;The timing, however, was less than ideal, as universities tend to empty out over the summer. Students graduate, get internships, go home, or travel the world. Marin was no different. By June, our team was down to one full time research engineer, with a few PhD students providing guidance when they weren't busy with their dissertations. Nevertheless, we pushed forward.&lt;/p&gt;

&lt;p&gt;To spoil the ending, the model turned out quite well. On release, &lt;a target="_blank" href="https://huggingface.co/marin-community/marin-32b-base"&gt;Marin 32B Base&lt;/a&gt; was the &lt;strong&gt;best open source base model&lt;/strong&gt;, and it outperformed comparable open-weights models like Google's &lt;a target="_blank" href="https://deepmind.google/models/gemma/gemma-3/"&gt;Gemma 3&lt;/a&gt; 27B PT on 24 of 42 base-model evaluations.&lt;/p&gt;

&lt;p&gt;There were many bumps along the way, resulting in multiple mid-run corrections, but through it all &lt;strong&gt;Google's TPU infrastructure stayed rock-solid&lt;/strong&gt;, and JAX's predictable performance let us iterate quickly. This meant that even with a tiny team, we could diagnose, patch, and continue training without losing momentum.&lt;/p&gt;&lt;p&gt;
To be blunt: &lt;strong&gt;one researcher kept the 32B run alive all summer,&lt;/strong&gt; juggling preemptible slices, rebuilding optimizer state, switching architectures, and generally shepherding ~6.4 trillion tokens across v5p and v4 pods—while mostly working on other Marin projects. The fact that this was possible speaks to the stability of the TPU platform and the maturity of the JAX/Marin stack.&lt;/p&gt;

&lt;h2&gt;The short version of a long summer&lt;/h2&gt;
&lt;p&gt;Our &lt;a target="_blank" href="https://marin.readthedocs.io/en/latest/reports/marin-32b-retro/"&gt;retrospective&lt;/a&gt; goes into much more detail about every spike, switch and cooldown. Here's the condensed version.&lt;/p&gt;&lt;p&gt;
We began with a Llama-3-style 32B backbone and our best 8B data mix, running on preemptible TPU v5p pods. Preemptions were predictable, and recovery was nearly automatic. As availability tightened, however, we moved to dedicated TPU v4 capacity. After a slight tweak to gradient checkpointing to accommodate the older hardware (made easy by JAX's built-in support), we were back up and running and performance stayed excellent.&lt;/p&gt;&lt;p&gt;
Around 70k steps, persistent loss spikes appeared. We tried clipping, update-norm guards, skip-step heuristics, "necromancy" (rebuilding optimizer state), and swapping in optimizers like Muon. Nothing helped. The model needed architectural support.&lt;/p&gt;&lt;p&gt;
So, we warm-started the run onto a Qwen3-style architecture, which is the same as the Llama 3 architecture, except that it adds QK-Norm to attention. After a brief loss bump, the spikes vanished. The model recovered to its expected trajectory within ~10 billion tokens and remained stable.&lt;/p&gt;&lt;p&gt;
Towards the end of training, it was time for a cool down. When training LLMs, one "cools down" the model by lowering the learning rate and changing the data mix to higher quality data. Our first cooldown surfaced two issues: contamination from a cached math dataset, and a training-loss phase shift caused by our linear-congruential shuffle. Switching to a Feistel-based shuffle fixed the latter completely. After cleaning the data and re-running the cooldown, the second cooldown was smooth and produced the final model.&lt;/p&gt;

&lt;h2&gt;The result: a strong, open 32B base model&lt;/h2&gt;
&lt;p&gt;Marin 32B Base is a competitive open-source base model. It outperformed Olmo 2 32B Base—the previous best fully open-source base model—on 32 of 42 tasks, and it performs especially well on knowledge-heavy evaluations like ARC, BoolQ, and PIQA.&lt;/p&gt;&lt;p&gt;
Head-to-head, Marin 32B Base also beat Gemma 3 27B PT on 24 of 42 tasks, and its overall average rank places it alongside Qwen 2.5 32B and the &lt;a target="_blank" href="https://allenai.org/blog/olmo3"&gt;newer Olmo 3 32B&lt;/a&gt; models. On our evaluation suite, Marin 32B Base actually&lt;strong&gt; ties Olmo 3 32B Base in win rate,&lt;/strong&gt; despite Olmo 3 being trained by a much larger team and arriving a month later.&lt;/p&gt;

&lt;figure class="wide borderless"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjEzJIH3Tlwqsrv6F6LWA6wpNUIvbiyy5R8vb-98Eja4WFWhY6j1nv_jfQZQcqkUDuJ6BdqAOi7a-yJxhTWiYqvK4xOLXySznX-SH15ijVFnCFqWqFjUAuN2KZjkkSpPXOWI-dVCQs2mTKXQGxyhExGtMeM6-xxYUkdcuZ9BSaLJZESBUbPFLKAKpcnqAM/s1600/marin32bgoogle--2aojbj33hpx.png"/&gt;
&lt;figcaption&gt;&lt;p&gt;
&lt;em&gt;&lt;strong&gt;Mean rank across our evaluation suite (lower is better). &lt;/strong&gt;Marin 32B Base lands in the top cluster of open(-weight) models, alongside Qwen 2.5 and Olmo 3, and ahead of Gemma 3 27B PT and Olmo 2 32B. Gray bars indicate open weight models, while blue bars indicate open source models.&lt;/em&gt;&lt;/p&gt;&lt;/figcaption&gt;
&lt;p&gt;While Olmo 3 32B Base now comfortably leads on math and coding benchmarks, Marin 32B Base holds its own and still leads on many knowledge QA evaluations. For a model trained with a fraction of the team size typically expected for a 30B-scale run, we're proud of where it landed.&lt;/p&gt;&lt;p&gt;
  
Because Marin 32B Base (like Olmo 3 32B) is open source, the weights, code, data recipes, and every experimental detour are public. Anyone can reproduce, audit, or build on the work.&lt;/p&gt;

&lt;hr style="height:1px;color:#ccc;" /&gt;

&lt;h2&gt;The stack that made it possible&lt;/h2&gt;

&lt;h3&gt;TPU stability across large slices&lt;/h3&gt;
&lt;p&gt;During the run, we moved across preemptible v5p-512 slices coordinated with Cloud TPU Multislice, a v4-2048 slice for the long middle, and several mid-run architectural transitions. Throughout, TPUs were completely reliable for us: no mysterious hangs, no collective-op debugging. Preemptions were predictable and easy to recover from.&lt;/p&gt;

&lt;h3&gt;JAX + Levanter = predictable performance&lt;/h3&gt;
&lt;p&gt;Levanter builds on JAX's &lt;a target="_blank" href="https://github.com/openxla/xla"&gt;XLA&lt;/a&gt; compilation. In practice, what mattered for us was deterministic restarts, stable MFU at scale without custom kernels, and JAX's activation checkpointing, which made the v5p to v4 migration easy.&lt;/p&gt;

&lt;h3&gt;Marin's experiment system&lt;/h3&gt;
&lt;p&gt;Marin logs every step of the experimental pipeline: hyperparameters, code versions, datasets, metrics, and artifacts. Even with architectural switches and restarts, the run never devolved into a tangle of scripts. And because it's all open, anyone can retrace or reproduce the training.&lt;/p&gt;

&lt;h2&gt;What's next&lt;/h2&gt;
&lt;p&gt;Marin 32B Base is a strong base model, but we're not done. Here's what's coming next:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;A reasoning-optimized Marin 32B&lt;/li&gt;
&lt;li&gt;Hardened multislice TPU support for smoother preemptible training&lt;/li&gt;
&lt;li&gt;Exploring MoE variants for the next scale&lt;/li&gt;
&lt;li&gt;Continuing to release everything, including successes and failures, openly&lt;/li&gt;
&lt;/ul&gt;

&lt;h2&gt;Closing thought&lt;/h2&gt;
&lt;p&gt;Training a 32B model with a small team isn't about heroics but about using the right tools and infrastructure. TPUs' reliability, JAX's clarity and performance, and Marin's open, reproducible process provided the leverage we needed. If the 8B run showed that open labs can build credible models, the 32B run showed they can do it at scale: quietly, steadily, and with far fewer people than you might expect.&lt;/p&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/6346008971365805381" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/6346008971365805381" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2025/12/training-marin-32b-what-an-open-lab-can-build-with-tpus-jax-and-a-little-persistence.html" rel="alternate" title="Training Marin 32B: What an open lab can build with TPUs, JAX, and a little persistence" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiBa0-tY649AEMig_Twadbzo0V6iFsqsU1zIqGSeNY5mYDvc7ii1-U4DJf4jsbUGBi02KJAuCkt-6W61ZpMhyM6fnahqNDto7t-vpy0LLtCmYtVNDqEQcx6CV7i6vDRBjQKT-0jfYOx3h5CR1brasxG6rahf85CmiG_mxgmaFRpVofXJHe2pqri5NQ1dno/s72-c/AwDU5tGz4AfqLpp.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-9019206617634031064</id><published>2025-12-17T11:30:00.000-08:00</published><updated>2025-12-17T11:30:00.107-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="fDPO"/><category scheme="http://www.blogger.com/atom/ns#" term="JAX"/><category scheme="http://www.blogger.com/atom/ns#" 
term="Spatial Reasoning"/><category scheme="http://www.blogger.com/atom/ns#" term="Tunix"/><category scheme="http://www.blogger.com/atom/ns#" term="Vision-Language Models"/><title type="text">SpatialReasoner: Teaching VLMs to "see" structure — Accelerated with Tunix on TPUs</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;Yifan Shen&lt;/author&gt; &amp;amp; &lt;author&gt;Ismini Lourentzou&lt;/author&gt;, University of Illinois Urbana-Champaign, and &lt;author&gt;Srikanth Kilaru&lt;/author&gt;, Google ML Frameworks&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-AvdnMqoiGey1yX5ZX1VOb9r3rz3qa81kpsDtpMrUiWgGLTrglp4EfnWNvFGWmJ7J76MzOlMtDGoJw_jRX3WXIuCPGhR8WZGhJy1Kh3FXDrTJjqM7yNquyoZjlffQK5SYMaZ-zf3uyQXs5L_bG9do6O39tZw-om8dzuwqAYAHjbUsK31MaeLk38Jc08M/s1600/6eRPFe7QaoDTbw6.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-AvdnMqoiGey1yX5ZX1VOb9r3rz3qa81kpsDtpMrUiWgGLTrglp4EfnWNvFGWmJ7J76MzOlMtDGoJw_jRX3WXIuCPGhR8WZGhJy1Kh3FXDrTJjqM7yNquyoZjlffQK5SYMaZ-zf3uyQXs5L_bG9do6O39tZw-om8dzuwqAYAHjbUsK31MaeLk38Jc08M/s1600/6eRPFe7QaoDTbw6.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-AvdnMqoiGey1yX5ZX1VOb9r3rz3qa81kpsDtpMrUiWgGLTrglp4EfnWNvFGWmJ7J76MzOlMtDGoJw_jRX3WXIuCPGhR8WZGhJy1Kh3FXDrTJjqM7yNquyoZjlffQK5SYMaZ-zf3uyQXs5L_bG9do6O39tZw-om8dzuwqAYAHjbUsK31MaeLk38Jc08M/s1600/6eRPFe7QaoDTbw6.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-AvdnMqoiGey1yX5ZX1VOb9r3rz3qa81kpsDtpMrUiWgGLTrglp4EfnWNvFGWmJ7J76MzOlMtDGoJw_jRX3WXIuCPGhR8WZGhJy1Kh3FXDrTJjqM7yNquyoZjlffQK5SYMaZ-zf3uyQXs5L_bG9do6O39tZw-om8dzuwqAYAHjbUsK31MaeLk38Jc08M/s1600/6eRPFe7QaoDTbw6.png"/&gt;&lt;/a&gt;

&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
We are seeing an increasing interest in &lt;a target="_blank" href="https://github.com/google/tunix"&gt;Tunix&lt;/a&gt; among researchers focusing on the post-training phase of model development. As a native &lt;a target="_blank" href="https://jax.dev"&gt;JAX&lt;/a&gt; library, Tunix offers the flexibility needed to refine foundation models—including Vision-Language Models (VLMs) and not just LLMs—helping them significantly improve their spatial reasoning capabilities.&lt;/p&gt;&lt;p&gt;
Today, we are highlighting the work of the &lt;a target="_blank" href="https://plan-lab.github.io/"&gt;PLAN Lab (Perception and LANguage Lab)&lt;/a&gt; at the &lt;a target="_blank" href="https://illinois.edu/"&gt;University of Illinois Urbana-Champaign (UIUC)&lt;/a&gt;. To address the critical lack of spatial awareness in VLMs, they built &lt;a target="_blank" href="https://plan-lab.github.io/projects/spatialreasoner/"&gt;SpatialReasoner-R1&lt;/a&gt;, a model capable of fine-grained spatial logic. They utilized Tunix and leveraged the &lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;Google TPU Research Cloud (TRC)&lt;/a&gt; to scale their experiments.&lt;/p&gt;&lt;p&gt;
In this blog, &lt;a target="_blank" href="http://ischool.illinois.edu/people/ismini-lourentzou"&gt;Professor Ismini Lourentzou&lt;/a&gt; and her team explain how they used Tunix's modular design to implement novel alignment algorithms and improve spatial reasoning in VLMs.&lt;/p&gt;
&lt;p&gt;&lt;strong&gt;The "Where" Problem in VLMs&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Modern Vision-Language Models (VLMs) can describe images and answer basic visual questions with impressive fluency. However, they often struggle with fine-grained spatial understanding. If you ask a VLM to estimate distances, directions, or the precise relative positions of objects, it frequently "hallucinates" coordinates or produces inconsistent reasoning with vague answers.&lt;/p&gt;&lt;p&gt;
These capabilities are critical for real-world applications, such as robotics, where precise spatial reasoning enables safe and intelligent interaction with physical environments.&lt;/p&gt;&lt;p&gt;
To bridge this gap, we developed the &lt;a target="_blank" href="https://plan-lab.github.io/spatialreasoner"&gt;SpatialReasoner-R1&lt;/a&gt; (4B and 8B versions), a model trained to perform step-by-step visually grounded spatial reasoning. It achieves 95.59 on Qualitative Accuracy and 77.3 on Quantitative Accuracy for our 8B fDPO model, outperforming the strongest baseline by ~9% in average accuracy on the SPATIALRGPT-Bench while preserving strong general vision-language abilities.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;The Method: Fine-Grained Direct Preference Optimization (fDPO)&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
The secret sauce behind SpatialReasoner-R1 is a new technique called &lt;strong&gt;Fine-Grained Direct Preference Optimization (fDPO)&lt;/strong&gt;.&lt;/p&gt;&lt;p&gt;
Standard alignment methods (like DPO) usually give a model a simple "thumbs up" or "thumbs down" for an entire response. But spatial reasoning is complex — for example, a model might correctly identify an object yet make a flawed logical inference about its location. &lt;/p&gt;&lt;p&gt;
fDPO introduces segment-specific preference granularity. We optimize separate loss components for:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Descriptive Grounding: Does the model correctly perceive and describe the objects in the image?&lt;/li&gt;
&lt;li&gt;Logical Reasoning: Is the step-by-step deduction sound, and does it follow coherent spatial logic?&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;To generate high-quality training signals, we built a Multi-Model Monte Carlo Tree Search (M3CTS) data generation pipeline, which constructs diverse reasoning trajectories that guide the model toward reliable spatial understanding.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Tunix: Modularity for Novel Research&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Implementing a custom objective like fDPO can be difficult in rigid frameworks. Tunix addresses this by providing a well-structured and extensible &lt;a target="_blank" href="https://tunix.readthedocs.io/en/latest/api/api_sft.html#tunix.DPOTrainer"&gt;DPOTrainer&lt;/a&gt; that makes it possible to introduce new alignment objectives without reengineering the training pipeline.&lt;/p&gt;&lt;p&gt;
This modularity meant we could reuse the entire underlying training stack—sharding, data loading, and loop management—while injecting our novel research logic &lt;strong&gt;&lt;em&gt;with just a small amount of well-contained code&lt;/em&gt;&lt;/strong&gt;.&lt;/p&gt;&lt;p&gt;
While our backbone model (Sa2VA) required specific architectural handling, the core fDPO algorithm is model-agnostic. We found the Tunix experience smooth and well-documented, making it easy to prototype and iterate on fine-tuning workflows without reinventing the wheel.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Google TRC &amp;amp; TPUs: Reliability at Scale&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Training a model to reason over long horizons requires significant compute. The &lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;Google TPU Research Cloud (TRC)&lt;/a&gt; provided the infrastructure we needed to make large-scale training practical.&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;Scalability:&lt;/strong&gt; Tunix's integration with TPUs allowed us to scale our experiments seamlessly.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Reliability:&lt;/strong&gt; The system performed reliably across multiple TPU runs, which was essential for conducting large-scale spatial reasoning benchmarks.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Support:&lt;/strong&gt; The Google Tunix and TRC teams assisted with infrastructure setup and experiment design, helping us refine our multi-model exploration strategy.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;strong&gt;Looking Ahead: Open Source Contributions&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
We believe that open-source, extensible tools like Tunix are vital for fostering innovation. They lower the barrier for researchers to experiment with new training objectives without rebuilding core infrastructure.&lt;/p&gt;&lt;p&gt;
In that spirit, we contributed our fDPO implementation back to the Tunix ecosystem. We open-source the core fDPO components, enabling the community to apply segment-specific preference optimization to their own models.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Get Started&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
You can explore our research and the tools we used below:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a target="_blank" href="https://plan-lab.github.io/projects/spatialreasoner"&gt;SpatialReasoner Project Page&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://tunix.readthedocs.io/"&gt;Tunix Documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/google/tunix"&gt;Tunix GitHub Repository&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;Google TRC&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="http://jax.dev"&gt;JAX documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://jaxstack.ai/"&gt;JAX AI Stack documentation&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/9019206617634031064" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/9019206617634031064" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2025/12/spatialreasoner-teaching-vlms-to-see-structure-accelerated-with-tunix-on-tpus.html" rel="alternate" title="SpatialReasoner: Teaching VLMs to &quot;see&quot; structure — Accelerated with Tunix on TPUs" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEh-AvdnMqoiGey1yX5ZX1VOb9r3rz3qa81kpsDtpMrUiWgGLTrglp4EfnWNvFGWmJ7J76MzOlMtDGoJw_jRX3WXIuCPGhR8WZGhJy1Kh3FXDrTJjqM7yNquyoZjlffQK5SYMaZ-zf3uyQXs5L_bG9do6O39tZw-om8dzuwqAYAHjbUsK31MaeLk38Jc08M/s72-c/6eRPFe7QaoDTbw6.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-5404747879957934834</id><published>2025-12-16T11:30:00.000-08:00</published><updated>2025-12-16T11:30:00.107-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="LLM Post-training"/><category scheme="http://www.blogger.com/atom/ns#" term="reinforcement learning"/><category scheme="http://www.blogger.com/atom/ns#" term="TPU Compute"/><category scheme="http://www.blogger.com/atom/ns#" term="Tunix Framework"/><category scheme="http://www.blogger.com/atom/ns#" term="Verifiable Games"/><title type="text">GRL: Turning verifiable games into a post-training suite for LLM agents with Tunix on TPUs</title><content type="html">&lt;p class="byline"&gt;by &lt;author&gt;The GRL Team&lt;/author&gt;, UC San 
Diego and &lt;author&gt;Lin Chai &lt;/author&gt; &amp;amp; &lt;author&gt;Srikanth Kilaru&lt;/author&gt;, Google ML Frameworks&lt;/p&gt;

&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiVErJ8wiLXVXoX916jqHK6mA1w0ZA_ZHRs6Cggj__Ume9wspVE9I0bhGtHYZG2OhJXtyaxnIlBoz0-FLXH1owVLc7235mW2owxm4CaKJ7sx-3afpOlHWIPMMz6gGtQ2pyNy4UvEce3QMGAPovIqN9zpsk43PxGMJGZTXIhemwl3Okl5XhyphenhyphenrIEtG0oKcBw/s1600/Tunix-Logo.png"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiVErJ8wiLXVXoX916jqHK6mA1w0ZA_ZHRs6Cggj__Ume9wspVE9I0bhGtHYZG2OhJXtyaxnIlBoz0-FLXH1owVLc7235mW2owxm4CaKJ7sx-3afpOlHWIPMMz6gGtQ2pyNy4UvEce3QMGAPovIqN9zpsk43PxGMJGZTXIhemwl3Okl5XhyphenhyphenrIEtG0oKcBw/s1600/Tunix-Logo.png"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiVErJ8wiLXVXoX916jqHK6mA1w0ZA_ZHRs6Cggj__Ume9wspVE9I0bhGtHYZG2OhJXtyaxnIlBoz0-FLXH1owVLc7235mW2owxm4CaKJ7sx-3afpOlHWIPMMz6gGtQ2pyNy4UvEce3QMGAPovIqN9zpsk43PxGMJGZTXIhemwl3Okl5XhyphenhyphenrIEtG0oKcBw/s1600/Tunix-Logo.png" class="header-image"&gt;&lt;img border="0" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiVErJ8wiLXVXoX916jqHK6mA1w0ZA_ZHRs6Cggj__Ume9wspVE9I0bhGtHYZG2OhJXtyaxnIlBoz0-FLXH1owVLc7235mW2owxm4CaKJ7sx-3afpOlHWIPMMz6gGtQ2pyNy4UvEce3QMGAPovIqN9zpsk43PxGMJGZTXIhemwl3Okl5XhyphenhyphenrIEtG0oKcBw/s1600/Tunix-Logo.png"/&gt;&lt;/a&gt;

&lt;p&gt;&lt;strong&gt;Introduction&lt;/strong&gt;&lt;/p&gt;
&lt;p&gt;JAX is widely recognized for its power in training large-scale AI models. However, a primary bottleneck in the next phase of AI development—LLM post-training with Reinforcement Learning (RL)—is the scarcity of environments with verifiable rewards.&lt;/p&gt;&lt;p&gt;
Today, we are highlighting the work of the GRL (Game Reinforcement Learning) team at UC San Diego. To solve the data bottleneck, they have built a pipeline to turn video games into rigorous reasoning benchmarks. They utilized &lt;a target="_blank" href="https://tunix.readthedocs.io/"&gt;Tunix&lt;/a&gt;, a JAX-native research-friendly RL framework that supports multi-host, multi-turn capabilities, and leveraged the &lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;Google TPU Research Cloud (TRC)&lt;/a&gt; to scale their experiments. The results are promising: this approach has yielded significant improvements in model quality, particularly in planning and reasoning tasks, proving that games can be a viable substrate for serious AI capability training.&lt;/p&gt;&lt;p&gt;
In this blog the GRL team explains how they are combining game environments, modular Tunix library for RL post-training, and TPU compute to train the next generation of agents.&lt;/p&gt;
&lt;hr style="height:1px;color:#ccc;" /&gt;

&lt;h3&gt;Why Verifiable Games for LLM Post-Training?&lt;/h3&gt;
&lt;p&gt;Current RL post-training has shown strong gains in domains like math and coding because success can be auto-checked. However, these settings are often narrow and short-term. We are effectively overfitting RL to clean problems, while the next generation of agents must operate in messy, multi-step worlds.&lt;/p&gt;&lt;p&gt;
To unlock RL as a systematic method for reasoning, we need a diverse pool of environments where rewards are grounded in explicit, machine-checkable rules. Games are this missing, underused substrate.&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;The Performance Gap:&lt;/strong&gt; LLMs still perform surprisingly poorly on many strategy games, revealing a clear gap between model behavior and human-level interactive competence.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Verifiable Signals:&lt;/strong&gt; Games come with built-in verifiable signals—wins, scores, puzzle completion—meaning outcomes are automatically and unambiguously graded without human labeling.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Long-Horizon Reasoning:&lt;/strong&gt; Unlike short QA tasks, games force models to plan, explore, and reason over many steps.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Abundance:&lt;/strong&gt; Decades of RL research has produced a standardized ecosystem of diverse environments ready to be recycled.&lt;/li&gt;
&lt;/ol&gt;

&lt;figure class="wide borderless"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjZ5HHlkp19v3J7HH3xPBla9gXYgzymJkZ90HUwnk-lTN3ytuTiGgKsDMXxppLKqxLVb9VjDliGMqh3VPXJ4SlqRHw_LXG92kseZGTmmTzup6s2-PNlTPRXvCWxmc0SPqx8O11S1XdBLxHsWXgdRELsbPFj1uHPoKNwb1ZtNSt3uTgA1dFdIT6BUeSjJt0/s1600/44VkmAsPfgKejYL.png"/&gt;
&lt;/figure&gt;

&lt;h3&gt;Game Reinforcement Learning (GRL): A Unified Game-to-Post-Training Pipeline&lt;/h3&gt;
&lt;p&gt;To harness this ecosystem, we built GRL, a comprehensive suite designed to recycle diverse game environments into a reusable post-training resource. Our mission is to prioritize environments with executable success checks—ranging from text-based puzzles to embodied 3D worlds and web/GUI workflows. Our code and ecosystem live under the LM Games organization (&lt;a target="_blank" href="http://lmgame.org"&gt;lmgame.org&lt;/a&gt;).&lt;/p&gt;&lt;p&gt;
GRL provides three key capabilities:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;strong&gt;A Unified Pipeline:&lt;/strong&gt; We standardize the conversion of games into RL-ready environments with structured states and consistent metrics. This makes results comparable across models and research groups.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Versatile Configuration:&lt;/strong&gt; Researchers can tailor interaction styles (e.g., max_turns, natural language feedback) while mixing training data from different tasks seamlessly. This allows for training on puzzles, math, and web tasks within a single run.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Algorithm-Agnostic Interface:&lt;/strong&gt; GRL works with any agentic training algorithm. While we frequently use PPO, the system serves as a robust testbed for developing new RL techniques.&lt;/li&gt;
&lt;/ul&gt;

&lt;h3&gt;The Engine: Plugging into the Tunix RL Framework&lt;/h3&gt;

&lt;figure class="wide borderless"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEj2jELp00Nduu6e___0uPNPqHc6YXlKCqfaIvY39w-Lu7FAiAhF8jkNIVulgIvGe4IIaYrR1FDRHyWFHMCdF6akwoO-e6B7QtjqQB23y62tpihIpXxBPrQO-qkOh1QkKSjgp-H_3avnIgzOBYzLOtdUTbALf4XJIYkD2aFLQGW5o7YZz6RVnHHTiz_psb4/s1600/8PTkca8mGW6swXG.png"/&gt;
&lt;/figure&gt;

&lt;p&gt;&lt;strong&gt;Designed for Research Flexibility and Multi-Turn Agents&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
In practice, plugging a GRL game agent into Tunix is seamless thanks to its modular design. Tunix is built specifically to support multi-turn agentic tasks, allowing researchers to leverage native one-turn inference APIs to achieve complex multi-turn rollouts, then batch those outputs directly back into the training flow. This research flexibility is key; the framework is lightweight enough for quick iteration and benchmarking, yet modular enough to allow fine-grained adjustments to reward functions, algorithms, and hardware-aware settings like mesh sizes.&lt;/p&gt;&lt;p&gt;
We first define an agent_cfg (see picture above) that tells the system which game to play (e.g., Sokoban or Tetris), how the LLM should talk (chat template + reasoning style), and its budgets (max turns, tokens per turn, action format). On the Tunix side, we then load a pre-trained model into three roles — actor, critic, and reference — and build ClusterConfig to specify rollout and training configs and PpoConfig to specify RL hyperparameters. The glue is minimal and the layout is clear and research-friendly: once agent_cfg, ppo_cfg, and cluster_cfg are defined, we construct an RLCluster and pass everything into PpoLearner, which gives us a complete multi-turn PPO trainer in JAX.&lt;/p&gt;&lt;p&gt;
Our multi-turn RL workflow is equally lightweight from the user's point of view. For example, with a 5-turn budget, the trainer repeatedly lets the LLM "play" the game for up to five conversational turns: at each turn it sees the current grid or state, reasons in language using the chat template, outputs a series of actions, and receives the next state and a verifiable reward signal (win/loss/score/step penalty). GRL's agent + env configs handle all the orchestration: they log observations, actions, and rewards into structured trajectories, which Tunix then turns into token-level advantages and returns for PPO updates. You don't manually build datasets or rollouts; the trainer owns the loop - interact -&amp;gt; log -&amp;gt; compute rewards -&amp;gt; update policy -&amp;gt; repeat. &lt;/p&gt;&lt;p&gt;
In our preliminary experiments using this setup, training Qwen2.5-7B-Instruct on Sokoban and Tetris yielded strong in-domain gains (&lt;strong&gt;+2-56%&lt;/strong&gt; across game variants). We also observed modest generalization to out-of-domain tasks, with consistent improvements in planning tasks (Blocksworld: &lt;strong&gt;+3-7%&lt;/strong&gt;) and positive but unstable signals in computer use (Webshop: ~&lt;strong&gt;+6%&lt;/strong&gt;). All scripts and configs are available in the GRL repo: &lt;a target="_blank" href="https://github.com/lmgame-org/GRL/tree/main"&gt;https://github.com/lmgame-org/GRL/tree/main&lt;/a&gt;. To reproduce the end-to-end Tunix + GRL training example (including our Sokoban/Tetris runs), you can simply clone the repo and run one line: bash tunix_quick_training_example.sh.  &lt;/p&gt;

&lt;h3&gt;Google TRC &amp;amp; TPUs: Accelerating Game-Based RL at Scale&lt;/h3&gt;
&lt;p&gt;A critical component of our research was the Google TPU Research Cloud (TRC) program. Access to Cloud TPUs allowed us to move from small-scale prototypes to production-grade training runs with minimal friction.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;TPUs and JAX directly attacked our two biggest bottlenecks:&lt;/strong&gt;&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;&lt;strong&gt;Rollout Throughput:&lt;/strong&gt; Using the &lt;a target="_blank" href="https://github.com/vllm-project/tpu-inference"&gt;vLLM-TPU&lt;/a&gt; path via tpu-inference, we could serve multiple model families on the same TPU v5p backend. This boosted sampling throughput, making the data-collection loop tighter and multi-environment concurrency cheaper.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Multi-Host Scale for 7B Models:&lt;/strong&gt; Tunix's lightweight design combined with &lt;a target="_blank" href="https://docs.jax.dev/en/latest/jax.sharding.html"&gt;JAX's mesh-based sharding&lt;/a&gt; allowed us to scale the same code from a single host to multi-host setups declaratively. This capability was essential for our experiments with 7B parameter models (such as Qwen2.5-7B), where we leveraged 2 v5p-8 hosts with minimal code change (in fact, only an env var config). The scale up is seamless, proving that the infrastructure can handle the heavy computational lifting required for modern LLM post-training without requiring complex engineering overhauls.&lt;/li&gt;
&lt;li&gt;&lt;strong&gt;Hardware Advantage:&lt;/strong&gt; At the hardware level, the performance gains were significant. Each TPU v5p chip delivers around 459 BF16 TFLOPs, compared to roughly 312 on an NVIDIA A100. This raw power, combined with the &lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;TRC&lt;/a&gt; program's support, meant that large-N studies—involving more seeds, longer horizons, and more environments—became routine experiments rather than "special ops" engineering challenges.&lt;/li&gt;
&lt;/ol&gt;
&lt;p&gt;This combination of Tunix's flexible abstraction and TRC's massive compute resources allowed us to iterate quickly on ideas while benefiting from production-grade infrastructure.&lt;/p&gt;

&lt;h3&gt;Get Started&lt;/h3&gt;
&lt;p&gt;GRL and Tunix are open for the community to explore. You can reproduce our end-to-end training example (including the Sokoban/Tetris runs) by cloning the &lt;a target="_blank" href="https://github.com/lmgame-org/GRL"&gt;repo&lt;/a&gt;, following the &lt;a target="_blank" href="https://github.com/lmgame-org/GRL?tab=readme-ov-file#installation"&gt;installation instructions&lt;/a&gt;, and then running a single command:&lt;/p&gt;&lt;p&gt;
&lt;code&gt;bash tunix_quick_training_example.sh&lt;/code&gt;&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/lmgame-org/GRL"&gt;GRL Repository&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://tunix.readthedocs.io/"&gt;Tunix Documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/google/tunix"&gt;Tunix GitHub Repository&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://sites.research.google/trc/about/"&gt;Google TRC&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://jax.dev"&gt;JAX documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://jaxstack.ai/"&gt;JAX AI Stack documentation&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/5404747879957934834" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/5404747879957934834" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2025/12/grl-turning-verifiable-games-into-a-post-training-suite-for-llm-agents-with-tunix-on-tpus.html" rel="alternate" title="GRL: Turning verifiable games into a post-training suite for LLM agents with Tunix on TPUs" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiVErJ8wiLXVXoX916jqHK6mA1w0ZA_ZHRs6Cggj__Ume9wspVE9I0bhGtHYZG2OhJXtyaxnIlBoz0-FLXH1owVLc7235mW2owxm4CaKJ7sx-3afpOlHWIPMMz6gGtQ2pyNy4UvEce3QMGAPovIqN9zpsk43PxGMJGZTXIhemwl3Okl5XhyphenhyphenrIEtG0oKcBw/s72-c/Tunix-Logo.png" width="72"/></entry><entry><id>tag:blogger.com,1999:blog-8698702854482141883.post-3314718157308965800</id><published>2025-12-15T11:30:00.000-08:00</published><updated>2025-12-15T11:30:00.120-08:00</updated><category scheme="http://www.blogger.com/atom/ns#" term="Embodied AI"/><category scheme="http://www.blogger.com/atom/ns#" term="ESCA"/><category scheme="http://www.blogger.com/atom/ns#" term="JAX"/><category scheme="http://www.blogger.com/atom/ns#" term="MLLMs"/><category scheme="http://www.blogger.com/atom/ns#" term="Scene Graphs"/><title type="text">ESCA: Grounding embodied agents with scene graphs — Accelerated by JAX</title><content type="html">&lt;p class="byline"&gt;by The ESCA Team, University of Pennsylvania &amp;amp; &lt;author&gt;Srikanth Kilaru&lt;/author&gt;, 
Google ML Frameworks&lt;/p&gt;
&lt;meta name="twitter:image" content="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHetnyLvYDYs2vLWwfdUwhIoB3kRFQ7OyPMOJOAiIK4eKpPvXKYwKiU-aUUOu-etu2w0-ZWvtWduFX6f_tPDDaxpaVg7wB8RdOq_oCFYnadHXJwK5rUd1QaB_T-XZk8AfjklxqVZGfWhkA2tfqb7CTyokNBc1IpzcIVxkt-ApPcPhhlpgPvHyFDSh31DM/s1600/JAX_ESCA.jpg"&gt;
&lt;img class="metadata" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHetnyLvYDYs2vLWwfdUwhIoB3kRFQ7OyPMOJOAiIK4eKpPvXKYwKiU-aUUOu-etu2w0-ZWvtWduFX6f_tPDDaxpaVg7wB8RdOq_oCFYnadHXJwK5rUd1QaB_T-XZk8AfjklxqVZGfWhkA2tfqb7CTyokNBc1IpzcIVxkt-ApPcPhhlpgPvHyFDSh31DM/s1600/JAX_ESCA.jpg"&gt;

&lt;a target="_blank" href="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiTj8yjYIvDgkSGxdn9-UlZK7PBcc_su1gTPME70q4cNa4s_lYwUOmNnlek-NfG5U-7UZKu8z74Orqlbg_lkW-32yNFQ4xnR0ChwPVp91iEt9at5OLeysFnvTGcjnJzx4YQf-agpRHf2BE_PORJtgMdwaN-wLFL4BObOZV9fqI0wNMlMq1wGiE7Ly1Eqps/s1600/Screenshot%202025-12-15%20at%2009.21.34.png" class="header-image" style="position:relative; display:block; width:100%;"&gt;&lt;img border="0" style="margin:0 auto;" src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiTj8yjYIvDgkSGxdn9-UlZK7PBcc_su1gTPME70q4cNa4s_lYwUOmNnlek-NfG5U-7UZKu8z74Orqlbg_lkW-32yNFQ4xnR0ChwPVp91iEt9at5OLeysFnvTGcjnJzx4YQf-agpRHf2BE_PORJtgMdwaN-wLFL4BObOZV9fqI0wNMlMq1wGiE7Ly1Eqps/s1600/Screenshot%202025-12-15%20at%2009.21.34.png"/&gt;&lt;/a&gt;&lt;br/&gt;

&lt;strong&gt;Introduction&lt;/strong&gt;
&lt;p&gt;Multi-Modal Language Models (MLLMs) are increasingly forming the core of the brain for general-purpose embodied agents — AI that can navigate and act in the physical world as robots.  While MLLMs are making rapid progress, they often stumble on a critical hurdle: precise visual perception. They struggle to reliably capture the fine-grained links between low-level visual features and high-level textual semantics.&lt;/p&gt;&lt;p&gt;
Today, we are highlighting the work of &lt;a target="_blank" href="https://www.cis.upenn.edu/~mhnaik"&gt;Prof. Mayur Naik's&lt;/a&gt; research team at the &lt;a target="_blank" href="https://www.upenn.edu/"&gt;University of Pennsylvania&lt;/a&gt;. To bridge the gap between high-level language and low-level visual features, they developed &lt;a target="_blank" href="https://arxiv.org/abs/2510.15963"&gt;ESCA (Embodied and Scene-Graph Contextualized Agent)&lt;/a&gt;. By porting their neurosymbolic pipeline to JAX, they achieved the real-time performance necessary for high-throughput decision-making. This work also demonstrates that JAX drives performance gains across a wide range of hardware, including standard CPUs and NVIDIA GPUs, and not just on Google TPUs.&lt;/p&gt;&lt;p&gt;
In this blog, the UPenn team explains how they combined structured scene graphs with JAX's functional design to reduce perception errors by over 50% and achieve a 25% speedup in inference.&lt;/p&gt;
&lt;hr style="height:1px;color:#ccc;" /&gt;

&lt;p&gt;&lt;strong&gt;The "Grounding" Problem in Embodied AI&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Existing MLLMs are powerful, but they can be surprisingly "blind" when tasked with interacting with the physical world. In our empirical analysis of 60 navigation tasks from &lt;em&gt;&lt;a target="_blank" href="https://embodiedbench.github.io"&gt;EmbodiedBench&lt;/a&gt;&lt;/em&gt;, we found that 69% of agent failures stemmed from perception errors. See the figure below.&lt;/p&gt;
&lt;p&gt;The three top-level error types are &lt;u&gt;Perc&lt;/u&gt;eption, &lt;u&gt;Reas&lt;/u&gt;oning, and &lt;u&gt;Plan&lt;/u&gt;ning. The second-level errors are &lt;u&gt;Ha&lt;/u&gt;llucination, &lt;u&gt;W&lt;/u&gt;rong Recognition, &lt;u&gt;S&lt;/u&gt;patial &lt;u&gt;U&lt;/u&gt;nderstanding, &lt;u&gt;S&lt;/u&gt;patial &lt;u&gt;R&lt;/u&gt;easoning, &lt;u&gt;R&lt;/u&gt;eflection &lt;u&gt;E&lt;/u&gt;rror, &lt;u&gt;I&lt;/u&gt;naccurate &lt;u&gt;A&lt;/u&gt;ction, and &lt;u&gt;C&lt;/u&gt;ollision. For clarity, the figure uses these acronyms to label the different error types.&lt;/p&gt;
&lt;figure class="borderless"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEjYd-XN8mfWOOEJVsAJxic20SfidkUTf90fQW_Fizr44-Pq8dLXCG8A2lMjQWlyXzwzcFqUugPluTanjlUP8nWpztXEuGjVylV0H2Iqi7_bC_Dr3Q1xbIE_6P2HttA9pCUVU015pLH9Ig1m2ZopPM_7w9s2DYNpyfoc_nWyQZuhH1Dj4jpDNzyYl7Wv5xI/s1600/sfTUA9VMwqCr4tu.png"/&gt;
&lt;/figure&gt;

&lt;p&gt;
The models struggle to capture fine-grained links between visual features and textual semantics. They might recognize a "kitchen," but fail to identify the specific spatial relationship between a knife and a cutting board required to complete a task.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Enter ESCA: The Anglerfish of AI&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
To solve this, we introduced &lt;a target="_blank" href="https://arxiv.org/abs/2510.15963"&gt;ESCA&lt;/a&gt;, a framework designed to contextualize MLLMs through open-domain scene graph generation.&lt;/p&gt;

&lt;figure class="wide"&gt;
  &lt;img src="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEhPyedvwB-ltk6YmyAV0miRDv9_YRTbKKgezbFOJ1Q3_qJ6EKgugYdBLSApZUl5YPnjiFqd5vEZOkZQOW43QWL-0s_O_YYevw6VttQF_OJ8RjGAtQO0NKyNEkDpI9E27nLYkTWtZxR3awf1FMkQRn52-7C0Lm-0ZTbf6MPHjDVc2jqhgAHkveIPnbnNF1I/s1600/jaxupennblog--jmdplhhxm4s.png"&gt;
&lt;/figure&gt;

&lt;p&gt;Think of ESCA like the bioluminescent lure of a &lt;a target="_blank" href="https://www.montereybayaquarium.org/animals/animals-a-to-z/deep-sea-anglerfish"&gt;deep-sea anglerfish&lt;/a&gt;. Just as the fish illuminates its dark surroundings to reveal prey, ESCA "illuminates" the agent's environment by generating a structured Scene Graph—a map of objects, attributes, and relationships (e.g., &lt;em&gt;Cup [Red] ON Table&lt;/em&gt;).&lt;/p&gt;&lt;p&gt;
A key innovation here is Selective Grounding. Injecting a massive scene graph of everything in the room can overwhelm the model. Instead, ESCA identifies only the subset of objects and relations pertinent to the current instruction. It performs probabilistic reasoning to construct prompts enriched with exactly the contextual details the agent needs to act.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;The Engine: LASER and Scallop&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
At the core of ESCA is &lt;a target="_blank" href="https://arxiv.org/abs/2304.07647"&gt;LASER&lt;/a&gt;, a CLIP-based foundation model trained on &lt;a target="_blank" href="https://huggingface.co/datasets/video-fm/ESCA-video-87K"&gt;87k video-caption pairs&lt;/a&gt;. LASER uses &lt;a target="_blank" href="https://www.scallop-lang.org/"&gt;Scallop&lt;/a&gt;—our neurosymbolic programming language that supports JAX backends—to align predicted scene graphs with logical specifications. This pipeline allows us to train low-level perception models to produce detailed graphs without needing tedious frame-level annotations.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;JAX User Experience&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;1. The Power of Statelessness&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
JAX's design encouraged a fully functional, stateless architecture. Every component, from feature extraction to similarity computation, was made into a pure modular function. This structure enabled effective use of&lt;a target="_blank" href="https://docs.jax.dev/en/latest/jit-compilation.html"&gt; jit (Just-In-Time) compilation&lt;/a&gt;. The &lt;a target="_blank" href="https://github.com/openxla/xla"&gt;XLA compiler&lt;/a&gt; could fuse sequences—like normalization, matrix multiplication, and softmax—into fewer kernels, reducing intermediate buffers and lowering GPU overhead.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;2. Handling Complex Control Flow&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Our pipeline requires selecting the "top-k" most relevant objects from a probabilistic scene graph. This introduces complex control flow. JAX provided the primitives we needed to handle this efficiently:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;We used&lt;a target="_blank" href="https://docs.jax.dev/en/latest/_autosummary/jax.lax.cond.html"&gt; jax.lax.cond&lt;/a&gt; to manage control flow inside the probabilistic graph.&lt;/li&gt;
&lt;li&gt;We leveraged &lt;a target="_blank" href="https://docs.jax.dev/en/latest/jax.nn.html"&gt;jax.nn&lt;/a&gt; and &lt;a target="_blank" href="https://docs.jax.dev/en/latest/jax.numpy.html"&gt;jax.numpy&lt;/a&gt; for all activation functions and batched math in a JIT-friendly way.&lt;/li&gt;
&lt;/ul&gt;
&lt;p&gt;&lt;strong&gt;3. Debugging and Transparency&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Migrating to JAX was also a learning experience. Tools like&lt;a target="_blank" href="https://docs.jax.dev/en/latest/_autosummary/jax.debug.print.html"&gt; jax.debug.print/callback()&lt;/a&gt; allowed us to inspect values &lt;em&gt;inside&lt;/em&gt; jit-compiled functions, while&lt;a target="_blank" href="https://docs.jax.dev/en/latest/_autosummary/jax.disable_jit.html"&gt; jax.disable_jit()&lt;/a&gt; let us easily switch to eager execution to step through the program seeing intermediate values.&lt;/p&gt;&lt;p&gt;
Furthermore, the transparency of the open-source system was impressive. Being able to read the annotated source code and see how Python functions trace into &lt;a target="_blank" href="https://docs.jax.dev/en/latest/jaxpr.html"&gt;jaxpr&lt;/a&gt; (JAX expression) gave us deep insight into how to design inference logic that scales.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;4. Seamless Integration with Flax&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
&lt;a target="_blank" href="https://flax.readthedocs.io/en/v0.8.3/experimental/nnx/index.html"&gt;NNX&lt;/a&gt; fits into our workflow perfectly. We used &lt;a target="_blank" href="https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/module.html#flax.nnx.Module"&gt;nnx.Module&lt;/a&gt; to structure the model and &lt;a target="_blank" href="https://flax.readthedocs.io/en/latest/api_reference/flax.core.frozen_dict.html#flax-core-frozen-dict-package"&gt;FrozenDict&lt;/a&gt; to keep parameters organized and immutable. The &lt;a target="_blank" href="https://flax.readthedocs.io/en/latest/api_reference/flax.nnx/helpers.html#flax.nnx.TrainState"&gt;TrainState&lt;/a&gt; object made managing model parameters and optimizer states straightforward, without adding the complexity often found in other frameworks.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;JAX Performance: A 25% Speedup&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
Embodied agents operate in a continuous loop: planning, acting, and updating their understanding of a dynamic world. High latency here is a dealbreaker. We ported LASER from PyTorch to JAX to improve real-time performance, and the benefits were significant. By rewriting our core similarity computations and feature pipelines as pure functions wrapped in &lt;a target="_blank" href="https://docs.jax.dev/en/latest/_autosummary/jax.jit.html"&gt;jax.jit&lt;/a&gt;, we achieved significant gains.&lt;/p&gt;&lt;p&gt;
On an NVIDIA H100 GPU, JAX reduced the average time per frame from 18.15 ms (PyTorch) to 14.55 ms (JAX)—a roughly 25% speedup.&lt;/p&gt;

&lt;table class="style0"&gt;
  &lt;thead&gt;
    &lt;tr&gt;
      &lt;th class="style1"&gt;&lt;strong&gt;Framework&lt;/strong&gt;&lt;/th&gt;
      &lt;th class="style1"&gt;&lt;strong&gt;Hardware&lt;/strong&gt;&lt;/th&gt;
      &lt;th class="style1"&gt;&lt;strong&gt;Avg Time Per Frame (ms) ↓&lt;/strong&gt;&lt;/th&gt;
      &lt;th class="style1"&gt;&lt;strong&gt;FPS ↑&lt;/strong&gt;&lt;/th&gt;
    &lt;/tr&gt;
  &lt;/thead&gt;
  &lt;tbody&gt;
    &lt;tr&gt;
      &lt;td class="style1"&gt;PyTorch&lt;/td&gt;
      &lt;td class="style1"&gt;H100 GPU&lt;/td&gt;
      &lt;td class="style1"&gt;18.15 ± 0.73&lt;/td&gt;
      &lt;td class="style1"&gt;55.15 ± 2.31&lt;/td&gt;
    &lt;/tr&gt;
    &lt;tr&gt;
      &lt;td class="style1"&gt;JAX&lt;/td&gt;
      &lt;td class="style1"&gt;H100 GPU&lt;/td&gt;
      &lt;td class="style1"&gt;14.55 ± 0.64&lt;/td&gt;
      &lt;td class="style1"&gt;68.82 ± 3.13&lt;/td&gt;
    &lt;/tr&gt;
  &lt;/tbody&gt;
&lt;/table&gt;

&lt;p&gt;&lt;strong&gt;Conclusion&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
ESCA demonstrates that better data—structured, grounded scene graphs—can solve the perception bottleneck in Embodied AI. But it also demonstrates that better infrastructure is required to run these systems in the real world. JAX provided the speed, transparency, and modularity needed to turn our research into a real-time agent capable of reliable reasoning.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Acknowledgements&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
This research was made possible through support from a Google Research Award to the University of Pennsylvania and from the ARPA-H program on Safe and Explainable AI under award D24AC00253-00.&lt;/p&gt;&lt;p&gt;
&lt;strong&gt;Get Started&lt;/strong&gt;&lt;/p&gt;&lt;p&gt;
You can explore the LASER code, the ESCA framework, and documentation for JAX and Flax at:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/video-fm/ESCA"&gt;ESCA Repository&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://github.com/video-fm/LASER"&gt;LASER/SGClip Repository&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://docs.jax.dev/"&gt;JAX Documentation&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a target="_blank" href="https://flax.readthedocs.io/"&gt;Flax Documentation&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</content><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3314718157308965800" rel="edit" type="application/atom+xml"/><link href="http://www.blogger.com/feeds/8698702854482141883/posts/default/3314718157308965800" rel="self" type="application/atom+xml"/><link href="http://opensource.googleblog.com/2025/12/grounding-embodied-agents-with-scene-graphs-accelerated-by-jax.html" rel="alternate" title="ESCA: Grounding embodied agents with scene graphs — Accelerated by JAX" type="text/html"/><author><name>Google Open Source</name><uri>http://www.blogger.com/profile/03718388509216889937</uri><email>noreply@blogger.com</email><gd:image height="16" rel="http://schemas.google.com/g/2005#thumbnail" src="https://img1.blogblog.com/img/b16-rounded.gif" width="16"/></author><media:thumbnail xmlns:media="http://search.yahoo.com/mrss/" height="72" url="https://blogger.googleusercontent.com/img/b/R29vZ2xl/AVvXsEiHetnyLvYDYs2vLWwfdUwhIoB3kRFQ7OyPMOJOAiIK4eKpPvXKYwKiU-aUUOu-etu2w0-ZWvtWduFX6f_tPDDaxpaVg7wB8RdOq_oCFYnadHXJwK5rUd1QaB_T-XZk8AfjklxqVZGfWhkA2tfqb7CTyokNBc1IpzcIVxkt-ApPcPhhlpgPvHyFDSh31DM/s72-c/JAX_ESCA.jpg" width="72"/></entry></feed>