[{"data":1,"prerenderedAt":570},["ShallowReactive",2],{"/en-us/the-source/authors/lee-faus/":3,"footer-en-us":33,"the-source-navigation-en-us":342,"the-source-newsletter-en-us":369,"lee-faus-articles-list-authors-en-us":381,"lee-faus-articles-list-en-us":411,"lee-faus-page-categories-en-us":569},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"config":8,"seo":10,"content":12,"type":25,"slug":26,"_id":27,"_type":28,"title":11,"_source":29,"_file":30,"_stem":31,"_extension":32},"/en-us/the-source/authors/lee-faus","authors",false,"",{"layout":9},"the-source",{"title":11},"Lee Faus",[13,23],{"componentName":14,"type":14,"componentContent":15},"TheSourceAuthorHero",{"config":16,"name":11,"role":18,"bio":19,"headshot":20},{"gitlabHandle":17},"lfaus","Global Field CTO","Lee Faus is a Global Field CTO at GitLab. Lee has been a software architect, teacher, professor, and educator for over 25 years. He leverages his experience as an educator to bring complex technology concepts into a business forum where executives gain valuable advice to positively impact their business.",{"altText":11,"config":21},{"src":22},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463398/vivhlomglvnstamj54bo.jpg",{"componentName":24,"type":24},"TheSourceArticlesList","author","lee-faus","content:en-us:the-source:authors:lee-faus.yml","yaml","content","en-us/the-source/authors/lee-faus.yml","en-us/the-source/authors/lee-faus","yml",{"_path":34,"_dir":35,"_draft":6,"_partial":6,"_locale":7,"data":36,"_id":338,"_type":28,"title":339,"_source":29,"_file":340,"_stem":341,"_extension":32},"/shared/en-us/main-footer","en-us",{"text":37,"source":38,"edit":44,"contribute":49,"config":54,"items":59,"minimal":330},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":39,"config":40},"View page source",{"href":41,"dataGaName":42,"dataGaLocation":43},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page 
source","footer",{"text":45,"config":46},"Edit this page",{"href":47,"dataGaName":48,"dataGaLocation":43},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":50,"config":51},"Please contribute",{"href":52,"dataGaName":53,"dataGaLocation":43},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":55,"facebook":56,"youtube":57,"linkedin":58},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[60,87,160,228,291],{"title":61,"links":62,"subMenu":68},"Platform",[63],{"text":64,"config":65},"DevSecOps platform",{"href":66,"dataGaName":67,"dataGaLocation":43},"/platform/","devsecops platform",[69],{"title":70,"links":71},"Pricing",[72,77,82],{"text":73,"config":74},"View plans",{"href":75,"dataGaName":76,"dataGaLocation":43},"/pricing/","view plans",{"text":78,"config":79},"Why Premium?",{"href":80,"dataGaName":81,"dataGaLocation":43},"/pricing/premium/","why premium",{"text":83,"config":84},"Why Ultimate?",{"href":85,"dataGaName":86,"dataGaLocation":43},"/pricing/ultimate/","why ultimate",{"title":88,"links":89},"Solutions",[90,95,100,105,110,115,120,125,130,135,140,145,150,155],{"text":91,"config":92},"Digital transformation",{"href":93,"dataGaName":94,"dataGaLocation":43},"/solutions/digital-transformation/","digital transformation",{"text":96,"config":97},"Security & Compliance",{"href":98,"dataGaName":99,"dataGaLocation":43},"/solutions/security-compliance/","security & compliance",{"text":101,"config":102},"Automated software delivery",{"href":103,"dataGaName":104,"dataGaLocation":43},"/solutions/delivery-automation/","automated software delivery",{"text":106,"config":107},"Agile development",{"href":108,"dataGaName":109,"dataGaLocation":43},"/solutions/agile-delivery/","agile 
delivery",{"text":111,"config":112},"Cloud transformation",{"href":113,"dataGaName":114,"dataGaLocation":43},"/topics/cloud-native/","cloud transformation",{"text":116,"config":117},"SCM",{"href":118,"dataGaName":119,"dataGaLocation":43},"/solutions/source-code-management/","source code management",{"text":121,"config":122},"CI/CD",{"href":123,"dataGaName":124,"dataGaLocation":43},"/solutions/continuous-integration/","continuous integration & delivery",{"text":126,"config":127},"Value stream management",{"href":128,"dataGaName":129,"dataGaLocation":43},"/solutions/value-stream-management/","value stream management",{"text":131,"config":132},"GitOps",{"href":133,"dataGaName":134,"dataGaLocation":43},"/solutions/gitops/","gitops",{"text":136,"config":137},"Enterprise",{"href":138,"dataGaName":139,"dataGaLocation":43},"/enterprise/","enterprise",{"text":141,"config":142},"Small business",{"href":143,"dataGaName":144,"dataGaLocation":43},"/small-business/","small business",{"text":146,"config":147},"Public sector",{"href":148,"dataGaName":149,"dataGaLocation":43},"/solutions/public-sector/","public sector",{"text":151,"config":152},"Education",{"href":153,"dataGaName":154,"dataGaLocation":43},"/solutions/education/","education",{"text":156,"config":157},"Financial services",{"href":158,"dataGaName":159,"dataGaLocation":43},"/solutions/finance/","financial services",{"title":161,"links":162},"Resources",[163,168,173,178,183,188,193,198,203,208,213,218,223],{"text":164,"config":165},"Install",{"href":166,"dataGaName":167,"dataGaLocation":43},"/install/","install",{"text":169,"config":170},"Quick start guides",{"href":171,"dataGaName":172,"dataGaLocation":43},"/get-started/","quick setup checklists",{"text":174,"config":175},"Learn",{"href":176,"dataGaName":177,"dataGaLocation":43},"https://university.gitlab.com/","learn",{"text":179,"config":180},"Product 
documentation",{"href":181,"dataGaName":182,"dataGaLocation":43},"https://docs.gitlab.com/","docs",{"text":184,"config":185},"Blog",{"href":186,"dataGaName":187,"dataGaLocation":43},"/blog/","blog",{"text":189,"config":190},"Customer success stories",{"href":191,"dataGaName":192,"dataGaLocation":43},"/customers/","customer success stories",{"text":194,"config":195},"Remote",{"href":196,"dataGaName":197,"dataGaLocation":43},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":199,"config":200},"GitLab Services",{"href":201,"dataGaName":202,"dataGaLocation":43},"/services/","services",{"text":204,"config":205},"TeamOps",{"href":206,"dataGaName":207,"dataGaLocation":43},"/teamops/","teamops",{"text":209,"config":210},"Community",{"href":211,"dataGaName":212,"dataGaLocation":43},"/community/","community",{"text":214,"config":215},"Forum",{"href":216,"dataGaName":217,"dataGaLocation":43},"https://forum.gitlab.com/","forum",{"text":219,"config":220},"Events",{"href":221,"dataGaName":222,"dataGaLocation":43},"/events/","events",{"text":224,"config":225},"Partners",{"href":226,"dataGaName":227,"dataGaLocation":43},"/partners/","partners",{"title":229,"links":230},"Company",[231,236,241,246,251,256,261,266,271,276,281,286],{"text":232,"config":233},"About",{"href":234,"dataGaName":235,"dataGaLocation":43},"/company/","company",{"text":237,"config":238},"Jobs",{"href":239,"dataGaName":240,"dataGaLocation":43},"/jobs/","jobs",{"text":242,"config":243},"Leadership",{"href":244,"dataGaName":245,"dataGaLocation":43},"/company/team/e-group/","leadership",{"text":247,"config":248},"Team",{"href":249,"dataGaName":250,"dataGaLocation":43},"/company/team/","team",{"text":252,"config":253},"Handbook",{"href":254,"dataGaName":255,"dataGaLocation":43},"https://handbook.gitlab.com/","handbook",{"text":257,"config":258},"Investor relations",{"href":259,"dataGaName":260,"dataGaLocation":43},"https://ir.gitlab.com/","investor 
relations",{"text":262,"config":263},"Environmental, social and governance (ESG)",{"href":264,"dataGaName":265,"dataGaLocation":43},"/environmental-social-governance/","environmental, social and governance",{"text":267,"config":268},"Diversity, inclusion and belonging (DIB)",{"href":269,"dataGaName":270,"dataGaLocation":43},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":272,"config":273},"Trust Center",{"href":274,"dataGaName":275,"dataGaLocation":43},"/security/","trust center",{"text":277,"config":278},"Newsletter",{"href":279,"dataGaName":280,"dataGaLocation":43},"/company/contact/","newsletter",{"text":282,"config":283},"Press",{"href":284,"dataGaName":285,"dataGaLocation":43},"/press/","press",{"text":287,"config":288},"Modern Slavery Transparency Statement",{"href":289,"dataGaName":290,"dataGaLocation":43},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":292,"links":293},"Contact Us",[294,299,304,309,314,319,324],{"text":295,"config":296},"Contact an expert",{"href":297,"dataGaName":298,"dataGaLocation":43},"/sales/","sales",{"text":300,"config":301},"Get help",{"href":302,"dataGaName":303,"dataGaLocation":43},"/support/","get help",{"text":305,"config":306},"Customer portal",{"href":307,"dataGaName":308,"dataGaLocation":43},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"text":310,"config":311},"Status",{"href":312,"dataGaName":313,"dataGaLocation":43},"https://status.gitlab.com/","status",{"text":315,"config":316},"Terms of use",{"href":317,"dataGaName":318,"dataGaLocation":43},"/terms/","terms of use",{"text":320,"config":321},"Privacy statement",{"href":322,"dataGaName":323,"dataGaLocation":43},"/privacy/","privacy statement",{"text":325,"config":326},"Cookie preferences",{"dataGaName":327,"dataGaLocation":43,"id":328,"isOneTrustButton":329},"cookie 
preferences","ot-sdk-btn",true,{"items":331},[332,334,336],{"text":315,"config":333},{"href":317,"dataGaName":318,"dataGaLocation":43},{"text":320,"config":335},{"href":322,"dataGaName":323,"dataGaLocation":43},{"text":325,"config":337},{"dataGaName":327,"dataGaLocation":43,"id":328,"isOneTrustButton":329},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"_path":343,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"logo":344,"subscribeLink":349,"navItems":353,"_id":365,"_type":28,"title":366,"_source":29,"_file":367,"_stem":368,"_extension":32},"/shared/en-us/the-source/navigation",{"altText":345,"config":346},"the source logo",{"src":347,"href":348},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750191004/t7wz1klfb2kxkezksv9t.svg","/the-source/",{"text":350,"config":351},"Subscribe",{"href":352},"#subscribe",[354,358,361],{"text":355,"config":356},"Artificial Intelligence",{"href":357},"/the-source/ai/",{"text":96,"config":359},{"href":360},"/the-source/security/",{"text":362,"config":363},"Platform & Infrastructure",{"href":364},"/the-source/platform/","content:shared:en-us:the-source:navigation.yml","Navigation","shared/en-us/the-source/navigation.yml","shared/en-us/the-source/navigation",{"_path":370,"_dir":9,"_draft":6,"_partial":6,"_locale":7,"title":371,"description":372,"submitMessage":373,"formData":374,"_id":378,"_type":28,"_source":29,"_file":379,"_stem":380,"_extension":32},"/shared/en-us/the-source/newsletter","The Source Newsletter","Stay updated with insights for the future of software development.","You have successfully signed up for The Source’s 
newsletter.",{"config":375},{"formId":376,"formName":377,"hideRequiredLabel":329},1077,"thesourcenewsletter","content:shared:en-us:the-source:newsletter.yml","shared/en-us/the-source/newsletter.yml","shared/en-us/the-source/newsletter",{"amanda-rueda":382,"andre-michael-braun":383,"andrew-haschka":384,"ayoub-fandi":385,"brian-wald":386,"bryan-ross":387,"chandler-gibbons":388,"dave-steer":389,"ddesanto":390,"derek-debellis":391,"emilio-salvador":392,"erika-feldman":393,"george-kichukov":394,"gitlab":395,"grant-hickman":396,"haim-snir":397,"iganbaruch":398,"jlongo":399,"joel-krooswyk":400,"josh-lemos":401,"julie-griffin":402,"kristina-weis":403,"lee-faus":11,"ncregan":404,"rschulman":405,"sabrina-farmer":406,"sandra-gittlen":407,"sharon-gaudin":408,"stephen-walters":409,"taylor-mccaslin":410},"Amanda Rueda","Andre Michael Braun","Andrew Haschka","Ayoub Fandi","Brian Wald","Bryan Ross","Chandler Gibbons","Dave Steer","David DeSanto","Derek DeBellis","Emilio Salvador","Erika Feldman","George Kichukov","GitLab","Grant Hickman","Haim Snir","Itzik Gan Baruch","Joseph Longo","Joel Krooswyk","Josh Lemos","Julie Griffin","Kristina Weis","Niall Cregan","Robin Schulman","Sabrina Farmer","Sandra Gittlen","Sharon Gaudin","Stephen Walters","Taylor 
McCaslin",{"allArticles":412,"visibleArticles":568,"showAllBtn":329},[413,453,493,533],{"_path":414,"_dir":415,"_draft":6,"_partial":6,"_locale":7,"config":416,"seo":420,"content":424,"type":448,"slug":449,"category":415,"_id":450,"_type":28,"title":421,"_source":29,"_file":451,"_stem":452,"_extension":32,"date":425,"description":422,"timeToRead":426,"heroImage":423,"keyTakeaways":427,"articleBody":431,"faq":432},"/en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai","ai",{"layout":9,"template":417,"articleType":418,"author":26,"featured":329,"gatedAsset":419,"isHighlighted":6,"authorName":11},"TheSourceArticle","Regular","source-lp-transform-automotive-devops-secure-fast-future-ready",{"title":421,"description":422,"ogImage":423},"Why automotive software development needs human-centered AI","Learn why balancing AI assistance with human expertise is crucial for automotive embedded systems development and creating competitive advantages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463704/u3dshy4qn6rtrklfalx7.png",{"title":421,"date":425,"description":422,"timeToRead":426,"heroImage":423,"keyTakeaways":427,"articleBody":431,"faq":432},"2025-06-02","6 min read",[428,429,430],"AI in automotive embedded software development works best as a Level 2 assistant, meaning human expertise remains essential for effective embedded development in vehicles.","The right human-AI balance varies across different automotive software domains; teams that find the right balance between AI assistance and human expertise will gain competitive advantages.","Creating effective human-AI partnerships requires intentional processes such as mandatory human review checkpoints for safety-critical systems.","Software is an essential part of modern automobiles. 
This year, the lines of code in the average car are expected to reach [650 million](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/), an increase from 200 million in 2020. What’s more, we’re seeing a shift from distributed architectures for vehicle firmware toward zonal architectures with central high-performance computers (HPCs). All of this creates complexity and novel software challenges.\n\nEmbedded systems developers are trying to adapt to this complexity. At the same, market pressures are forcing them to accelerate their development processes and ship innovation faster.\n\nArtificial intelligence (AI) can help address these challenges, but its implementation raises important questions. To what degree should AI tools autonomously generate and review code in automotive embedded systems? How much human oversight is advisable? Drawing from the automotive industry's vocabulary, I propose that embedded development requires Level 2 AI assistance - at least right now.\n\n## Understanding Level 2 automation for AI in embedded development\nIn automotive driving automation, [Level 2 systems](https://www.sae.org/blog/sae-j3016-update) represent partial automation: a carefully balanced human-machine collaboration. These systems can help control steering, acceleration, and braking in specific scenarios, but the driver must stay engaged. They must monitor the environment and be ready to take control at any moment. The human remains legally responsible for the vehicle's operation and must supervise the automation continually. In contrast, Level 4-5 systems aim to operate with minimal or no human oversight in defined conditions.\n\nThis framework provides a useful analogy for AI in embedded development. Current AI tools excel at providing suggestions and automating routine tasks, much like Level 2 driver assistance. They can suggest code, help with testing, and identify potential issues. 
However, their contextual understanding has limitations. Given the high stakes of automotive embedded systems, combining AI's capabilities with human wisdom and oversight is best.\n\n## Why AI excels as a development assistant\nAI shows remarkable capabilities across numerous areas of embedded development. Here are just a few examples from the growing list of applications:\n\nFirst, AI can [generate and complete code](https://docs.gitlab.com/user/project/repository/code_suggestions/) for common patterns in C/C++, reducing developers' time spent on routine programming tasks. And if prompted correctly, AI can respect embedded-specific constraints like memory limitations and hardware interfaces.\n\nSecond, AI can [generate tests](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#write-tests-in-the-ide) that you can run on cloud-based ARM CPUs or virtual hardware. This helps teams \"shift left\" in testing their firmware and catch issues earlier in development when they're less expensive to fix. It also helps identify edge cases you might have otherwise overlooked.\n\nThird, AI can help [accelerate the remediation of security vulnerabilities](https://docs.gitlab.com/user/application_security/vulnerabilities/#explaining-a-vulnerability) in your code. AI tools can help interpret security findings from your security scanners. They can even suggest potential approaches to address issues, supporting development teams as they work to meet cybersecurity requirements in this highly regulated space. 
AI thus helps expedite remediation.\n\nBeyond these examples, AI is increasingly valuable for [root cause analysis](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) of complex issues, comprehensive [code reviews](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/#have-gitlab-duo-review-your-code), automated [code refactoring](https://about.gitlab.com/blog/2024/08/26/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/) for optimization, [explaining](https://docs.gitlab.com/user/project/merge_requests/changes/#explain-code-in-a-merge-request) complex legacy code, and providing conversational assistance through [AI chat capabilities](https://docs.gitlab.com/user/gitlab_duo_chat/). As AI evolves, so will the ways in which it assists embedded development teams.\n\n## The essential human element\nThough these AI capabilities are quite powerful, they cannot - and should not - replace human expertise. Embedded developers bring domain knowledge that spans both software and hardware domains, understanding not just how to code, but how that code interacts with physical components under varying conditions.\n\nMoreover, embedded developers understand the intricate relationships between different vehicle subsystems. Far from replacing such expertise, AI must integrate with human beings' contextual knowledge.\n\nHumans also bring creativity and innovation to solving unique automotive challenges. When faced with conflicting requirements or novel problems, human engineers draw on experience and intuition that AI simply doesn't possess.\n\nThe human-centered approach is critical in automotive development, where safety and reliability cannot be compromised. Just as a driver must remain alert and ready to take control of a Level 2 automated vehicle, developers must maintain ultimate responsibility for AI-generated code. While valuable, AI suggestions require expert validation. 
Developers must review and verify that proposed solutions solve the problem correctly within the specific automotive context.\n\nThis human oversight becomes even more critical when considering the consequences of errors. In enterprise software, a bug might cause inconvenience; in automotive systems, it could potentially impact passenger safety. Developers bring ethical judgment and a holistic understanding of the operating environment that AI currently lacks. They can anticipate edge cases based on real-world driving conditions and evaluate AI recommendations against their practical experience with actual vehicle systems.\n\n## Creating an effective human-AI partnership\nBelow are some initial approaches to consider as you begin building productive partnerships between developers and AI.\n\nStart by identifying specific high-volume, low-risk tasks where AI can provide immediate value: unit test generation for non-safety-critical components, documentation updates, and routine code standardization are excellent entry points.\n\nImplement a tiered approach to AI integration based on system criticality. For infotainment or connectivity systems, teams might leverage more autonomous AI assistance. For safety-related systems, establish mandatory human review checkpoints with structured approval workflows. Create clear guidelines on which code components require senior engineer review versus those where junior developers can approve AI suggestions with minimal oversight.\n\nReview processes also need adaptation. Rather than having humans review AI-generated code in isolation, teams should implement collaborative workflows where AI assists with the review itself, highlighting potential issues for human evaluation. Consider adopting structured prompting techniques. 
For example, have developers specify constraints like memory requirements, coding standards, or performance parameters before generating AI suggestions.\n\nThese examples represent starting points for effective human-AI collaboration in embedded development.\n\n## Looking to the future\nThe human-AI partnership will evolve across different automotive domains as AI capabilities advance. Teams should prepare by focusing on higher-value skills that complement AI capabilities, such as systems architecture, integration expertise, and hardware-software design.\n\nThe teams that succeed will find the right balance, leveraging AI to handle routine tasks while keeping humans at the center of the development process. This is the path to realizing AI's productivity promise.\n\n_I'll be discussing topics like this and more with Dr. Felix Kortmann of Ignite by FORVIA HELLA in a webinar on June 11. The webinar will be on “Building the Future of Automotive Software.” Join us to learn how to effectively balance AI assistance with human expertise in your embedded development teams. [Register here](https://page.gitlab.com/webcasts-jun11-gitlab-ignite-by-foriva-hella-emea-amer.html?utm_medium=referral&utm_source=gmail&utm_campaign=20250611_global_cmp_webcast_speedsecurity_en_&utm_content=salespromo_x_auto)._",[433,436,439,442,445],{"header":434,"content":435},"What is Level 2 AI assistance in automotive software development?","Level 2 AI refers to a collaborative human-AI model where AI supports tasks like code generation and testing, but developers retain oversight and responsibility. Like Level 2 driving automation, the human stays in control, ensuring contextual accuracy and safety.",{"header":437,"content":438},"How does the role of AI differ across various automotive software domains?","AI adds value across all domains, but oversight levels vary. Safety-critical systems require stricter human validation, while infotainment systems allow more autonomous AI use. 
Teams should tailor AI workflows based on system risk and regulatory requirements.",{"header":440,"content":441},"How can teams establish effective AI review processes for embedded code?","Teams should use a tiered review structure. AI can perform initial quality checks — flagging syntax issues or common errors — while human experts review critical code sections and system interfaces. Clear guidelines should define when AI-generated suggestions require additional human verification or senior engineer approval to ensure safe integration within embedded systems.",{"header":443,"content":444},"What skills should embedded developers focus on as AI capabilities expand?","Embedded developers should deepen their understanding of systems architecture, hardware-software integration, and domain-specific safety requirements. Skills in prompt engineering and AI collaboration, such as framing effective prompts and interpreting model outputs, are also increasingly important. These competencies help developers remain effective evaluators and collaborators alongside AI systems.",{"header":446,"content":447},"How can AI help address the shortage of embedded software expertise in the automotive industry?","AI reduces the burden on experienced engineers by automating routine development tasks like boilerplate coding, unit testing, and documentation. This allows senior engineers to focus on high-impact projects and mentoring. 
At the same time, AI tools help junior developers ramp up faster by guiding them through embedded-specific best practices, accelerating onboarding and reducing skill barriers.","article","why-automotive-software-development-needs-human-centered-ai","content:en-us:the-source:ai:why-automotive-software-development-needs-human-centered-ai:index.yml","en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai/index.yml","en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai/index",{"_path":454,"_dir":455,"_draft":6,"_partial":6,"_locale":7,"config":456,"seo":458,"content":462,"type":448,"slug":489,"category":455,"_id":490,"_type":28,"title":459,"_source":29,"_file":491,"_stem":492,"_extension":32,"date":463,"description":460,"timeToRead":464,"heroImage":461,"keyTakeaways":465,"articleBody":469,"faq":470},"/en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain","security",{"layout":9,"template":417,"articleType":418,"author":26,"featured":329,"gatedAsset":457,"isHighlighted":6,"authorName":11},"source-lp-devsecops-the-key-to-modern-security-resilience",{"title":459,"description":460,"ogImage":461},"Embedding risk intelligence into your software supply chain","Transform your security strategy by embedding risk assessment into development workflows instead of treating it as a final checkpoint.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463994/rexeefvqpj1xs8vq7ugl.jpg",{"title":459,"date":463,"description":460,"timeToRead":464,"heroImage":461,"keyTakeaways":465,"articleBody":469,"faq":470},"2025-04-22","5 min read",[466,467,468],"Focus on business impact instead of vulnerability counts by targeting security threats that pose actual danger to your business rather than trying to fix every potential issue.","Embed risk checks throughout development by adding quality metrics and automated testing early in your software pipeline to catch issues when they’re easier to 
fix.","Create audit trails for security decisions through “breadcrumbed” processes that document who approved changes and why, creating accountability and improving future decisions.","It’s a nightmare scenario for any business: Hackers have exposed the personal information of millions of your users. What if this wasn’t due to critical vulnerabilities in your application but simply poorly configured API endpoints that hackers could abuse to farm user data? That’s precisely what happened to a popular tech company in 2023, and it’s more common than you might think.\n\nSecurity resources are finite, and [not all threats pose equal business risk](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/). Organizations that are laser-focused on technical severity ratings rather than actual business impact could be leaving themselves open to unanticipated risks.\n\nMeanwhile, the urgency for better risk quantification has never been greater. Threat actors can now leverage multiple AI systems to execute sophisticated, multi-pronged attacks targeting exploitable vulnerabilities. These AI-accelerated campaigns can quickly identify and exploit business-critical weaknesses that traditional security approaches might overlook or deprioritize - turning yesterday's “medium-risk” vulnerability into today’s multimillion-dollar breach.\n\nTo counter these evolving threats and navigate this growing complexity, leading organizations are fundamentally reimagining their approach. Instead of treating security as a separate function that happens after development, they’re embedding **risk intelligence** throughout their software supply chain. 
This approach allows them to focus resources where they matter most, reduce time-to-market for secure products, and demonstrate due diligence to regulators and customers.\n\nThe key is distinguishing between vulnerabilities that might cause harm and those that will cause damage in your specific business context. Companies can achieve stronger security and faster innovation by rethinking how risk is evaluated and managed across development and operations.\n\n## Limiting risk through data-driven change management\n**Risk intelligence helps you focus on threats that matter. It’s the difference between knowing you have 3,000 vulnerabilities and understanding which 50 could harm your business.**\n\nKey elements of risk intelligence include:\n\n**Exploitability assessment (reachability)**: Not all vulnerabilities can be weaponized. Risk intelligence evaluates which security findings have actual attack paths versus those that exist in code but cannot be reached by malicious actors.\n\n**Dependency context**: Risk-based security recognizes that a vulnerable package doesn’t just affect one application - it can impact dozens or hundreds across your organization. Modern approaches map dependencies across projects, enabling teams to understand the cascading impact of vulnerabilities throughout the organization. This ecosystem view provides critical context for prioritization decisions.\n\n**Continuous risk monitoring**: Instead of point-in-time assessments, risk intelligence requires ongoing monitoring that adjusts as threat landscapes evolve. A vulnerability that was low risk yesterday may become critical today based on emerging exploit techniques.\n\nSo how can you move from reactive security scanning to proactive risk intelligence? The journey begins where your software does - in the software factory itself.\n\n## The software factory: Quality gates and risk signals\nThe software factory is where code transforms from an idea to a deployable package. 
This phase encompasses everything from initial code commits to unit testing to packaging, creating the foundation for your entire software supply chain. By adding risk checks early, teams can find and fix issues before they spread. Just as critical is establishing clear attribution for every code change, knowing exactly who made each change (contractor, consultant, or employee), why, and when - creating an audit trail providing crucial risk assessment context.\n\nThe software factory offers three key opportunities to embed risk assessment into your development process:\n\n### Collaboration through quality intelligence\nEstablishing cross-functional quality metrics can help organizations create a shared understanding of risk across teams. Potential metrics include code coverage trends, security vulnerability density, technical debt accumulation, performance regression patterns, API compatibility scores, and documentation completeness.\n\n### Transparency through correlated data\nRisk intelligence requires connecting disparate data points into a comprehensive view. Quality intelligence dashboards with real-time metrics and trend visualization help teams spot emerging risk patterns, while documentation traceability creates auditable trails linking requirements, changes, and security findings. Automated data collection enables cross-system correlation between code changes and security findings, with pattern recognition algorithms identifying unusual behaviors that manual review might miss. This democratized intelligence empowers all stakeholders to make risk-informed decisions instead of siloing information within security teams.\n\n### Automation for quality assurance\nManual risk assessment can’t scale to modern development speeds. Continuous testing pipelines with automated security scans and performance tests provide early feedback on potential risks without slowing velocity. 
Automated quality gates enforce minimum standards throughout development, and risk threshold monitoring flags concerning trends before they become critical. These automated guardrails maintain consistent risk assessment while allowing development teams to maintain productivity and improve safety without sacrificing speed.\n\n## Software logistics: Risk management through team-based scorecards\nAfter code is packaged, it enters the logistics phase - provisioning, deployment, configuration, monitoring, and maintenance. Here, potential bugs meet real-world exposure. This makes assessing risk in actual operating conditions vital. However, traditional approaches to risk assessment at this stage are often inflexible and inefficient.\n\n> [Learn how effective software logistics can enable operations teams to efficiently support developers and accelerate delivery](https://about.gitlab.com/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation/).\n\nEffective risk intelligence means helping teams focus on why they should deploy instead of why they shouldn’t - replacing the binary, inflexible assessment methods of the past with an automated, metrics-driven approach. Here are three critical aspects to keep in mind:\n\n### Collaborative assessment model\nModern risk approaches replace binary go/no-go decisions with multi-stakeholder evaluations, sometimes called a Change Advisory Board (CAB), incorporating diverse perspectives. Security teams evaluate vulnerability context and exploitability, operations teams assess deployment impact and rollback capabilities, and business stakeholders weigh customer impact against needs. This team-based approach builds consensus around acceptable risk rather than imposing rigid standards, allowing for nuanced decisions that balance security with business objectives.\n\n### Scorecard transparency\nEffective risk evaluation requires visible criteria that consider multiple dimensions of impact. 
Comprehensive scorecards include security risk factors that assess severity and real-world exploitability, operational metrics that evaluate system stability implications, compliance requirements for relevant regulations, and business impact on customers and revenue. This transparent approach creates a holistic risk profile that provides the context necessary for informed deployment decisions while ensuring all stakeholders understand the basis for security choices.\n\n### Automated scorecard processing\nManual risk assessment creates bottlenecks that slow deployment cycles. Modern approaches use real-time processing with automated score calculation and threshold monitoring to evaluate changes continuously. Integration with CI/CD pipelines, security tools, and compliance systems ensures risk data flows automatically between systems without manual intervention. This automation maintains consistent evaluation standards while eliminating the delays typically associated with security reviews.\n\n## From vulnerability counts to business impact: The future of software security\nThe future of application security isn’t about finding more vulnerabilities - it’s about understanding the risk those vulnerabilities pose. By embedding risk intelligence throughout your software supply chain, you can drive team collaboration to help you create secure software faster.\n\nEstablishing this risk assessment process across both your software factory and logistics phases has an added benefit: You’ll create an auditable trail that documents who made security decisions, what evidence they considered, when changes were approved, and why specific actions were taken. This transparency provides accountability across the entire software supply chain, builds institutional memory of risk management approaches, and creates data to inform future decisions. 
The resulting traceability transforms security from a point-in-time assessment to an ongoing, verifiable process demonstrating due diligence to auditors, regulators, and customers.",[471,474,477,480,483,486],{"header":472,"content":473},"What is risk intelligence in software development?","Risk intelligence is the practice of evaluating security threats based on their real-world business impact rather than just technical severity. It helps teams focus on exploitable and high-priority vulnerabilities, streamlining security efforts.",{"header":475,"content":476},"How does embedding risk checks early improve software security?","Introducing risk assessments during early development phases allows teams to catch and resolve issues sooner, reducing costs and complexity. This shift from reactive to proactive security enhances both speed and safety.",{"header":478,"content":479},"Why should organizations move beyond vulnerability counts?","Counting vulnerabilities doesn't reflect the true risk landscape. Many may be unreachable or irrelevant. Prioritizing based on exploitability and business context ensures limited security resources are used effectively.",{"header":481,"content":482},"How do audit trails contribute to better risk management?","Audit trails document who made a change, why, and when. These records provide accountability, aid compliance, and offer valuable insight for improving future decision-making and demonstrating due diligence.",{"header":484,"content":485},"What role does automation play in risk intelligence?","Automation enables consistent, scalable risk evaluation across CI/CD pipelines. It helps enforce security standards, reduces manual bottlenecks, and ensures timely responses to emerging risks without slowing development.",{"header":487,"content":488},"What’s the benefit of team-based scorecards for deployment decisions?","Team-based scorecards bring together inputs from security, operations, and business teams. 
This collaborative model replaces rigid go/no-go decisions with nuanced assessments that balance innovation and acceptable risk.","embedding-risk-intelligence-into-your-software-supply-chain","content:en-us:the-source:security:embedding-risk-intelligence-into-your-software-supply-chain:index.yml","en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain/index.yml","en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain/index",{"_path":494,"_dir":495,"_draft":6,"_partial":6,"_locale":7,"config":496,"seo":498,"content":502,"type":448,"slug":529,"category":495,"_id":530,"_type":28,"title":499,"_source":29,"_file":531,"_stem":532,"_extension":32,"date":503,"description":500,"timeToRead":504,"heroImage":501,"keyTakeaways":505,"articleBody":509,"faq":510},"/en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation","platform",{"layout":9,"template":417,"articleType":418,"author":26,"featured":329,"gatedAsset":497,"isHighlighted":6,"authorName":11},"source-lp-building-a-resilient-software-development-practice",{"title":499,"description":500,"ogImage":501},"Why software logistics is key to accelerating innovation","Transform deployment processes with software logistics, enabling your operations team to efficiently support developers and accelerate delivery.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463545/nomdlhvlawqmncg0g1p8.png",{"title":499,"date":503,"description":500,"timeToRead":504,"heroImage":501,"keyTakeaways":505,"articleBody":509,"faq":510},"2025-04-15","4 min read",[506,507,508],"Software logistics focuses on what happens after code is packaged — provisioning, deployment, configuration, monitoring, and maintenance — optimizing the crucial second half of the software supply chain.","With typically only 1% of technical staff in operations roles, organizations need a “logistics mindset” to automate deployment processes and create better developer 
experiences.","Implementing a platform-as-a-product approach to software logistics enables standardization while maintaining flexibility, reducing security risks and accelerating deployment cycles.","Software isn’t just powering your business - it is your business. However, while organizations invest heavily in development capabilities, they often overlook a critical component: software logistics.\n\nSoftware logistics encompasses everything that happens after code is packaged for delivery: provisioning, deployment, configuration, monitoring, and maintenance. Think of it as the crucial second half of the software supply chain, where even the most brilliantly engineered solutions can falter without proper execution.\n\nThe challenge is clear: For every 100 developers in your organization, statistics show you likely have just one operations person. Those resources typically focus on network engineering, database administration, platform engineering, and site reliability. As generative AI is poised to dramatically increase the amount of code developers produce, this creates an unsustainable bottleneck in your software delivery execution.\n\n## Why traditional approaches fall short\n**Traditional approaches to this imbalance typically fall into two categories: overburdening operations teams or forcing developers to become operations experts. Neither works well.**\n\nWhen operations teams become overwhelmed, they create restrictive processes that slow down delivery. When developers are forced to handle operations, they spend less time on their core strength - solving business problems through code. Our [research shows](https://about.gitlab.com/developer-survey/2024/ai/) developers typically spend only 21% of their time writing new code, with the rest consumed by meetings, maintenance, and administrative tasks.\n\nThis inefficiency is frustrating and expensive. 
Every day, your innovations sit waiting for deployment, which is lost business value.\n\n## The premium delivery model for software\nWhat if you could bring guaranteed reliability and predictability to your software delivery? That’s the promise of effective software logistics.\n\nJust as modern logistics companies revolutionized retail by streamlining the supply chain - getting products from warehouses to customers efficiently - organizations must move software from package registries to production environments smoothly.\n\nMore and more organizations are investing in [platform engineering](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/) to accelerate software development by standardizing best practices and components for development teams. However, if your platform engineering initiatives are focused only on developer experience, you’re missing a critical piece of the puzzle. While improving developer experience is important, efficiency gains in code creation are meaningless if your organization lacks the operational maturity to deploy, configure, monitor, and maintain that code effectively.\n\nThat’s where software logistics comes in: ensuring that increased code velocity translates to actual business value rather than creating deployment bottlenecks or operational chaos.\n\n## The competitive advantage of software logistics\nAn effective software logistics strategy offers several key advantages:\n- **Accelerated delivery cycles**: Reduce the time from code completion to production deployment from weeks to days or even hours.\n- **Enhanced security posture**: Build security into development pipelines rather than treating it as a final gate, reducing vulnerabilities while maintaining velocity.\n- **Improved operational efficiency**: Enable your limited operations staff to support more developers through automation and self-service capabilities.\n- **Better resource utilization**: Focus your expensive development 
talent on creating business value rather than wrestling with deployment complexities.\n\n## Optimizing for effective software logistics\nIn conversations with technical leaders at organizations of all sizes, I’ve observed several consistent patterns that distinguish successful software logistics implementations. Here are three steps you can take to optimize your software logistics:\n\n### Build an enterprise application delivery framework\nModern software delivery requires sophisticated orchestration across diverse environments, deployment strategies, and operational concerns. An effective framework should include aspects such as **release orchestration** to coordinate the deployment of interdependent services across environments; **progressive delivery strategies** such as canary releases and feature flags that allow controlled rollouts with automated verification; and **provisioning automation** that creates the underlying infrastructure through policy-controlled interfaces while enforcing security guardrails and compliance requirements. By generating attestations at each stage, this framework creates a verifiable record of the entire delivery process and enables real-time risk assessment and compliance validation.\n\n### Adopt a platform with a unified data store\nTop-performing organizations need comprehensive metrics across their entire delivery pipeline, from code commit to production performance. You can’t manage what you don’t measure - and the best teams measure everything from development velocity to operational stability to security posture. 
A unified data fabric serves as the nervous system for effective software logistics, connecting previously siloed information across the entire software delivery lifecycle and enabling intelligent decision-making and automation.\n\n### Boost developer autonomy through golden pipelines\nIntuitive interfaces that allow developers to initiate deployments without understanding underlying complexity, with appropriate guardrails built in, reduce the burden on operations teams while accelerating delivery cycles. As one platform engineering leader told me, “Our job is to make the platform so easy to use that teams can run themselves.”\n\n## Software logistics: The competitive differentiator for digital-first organizations\nAs competitive pressures accelerate, the ability to efficiently move software from testing to production becomes a critical competitive differentiator. Adopting a software logistics mindset can help your limited operations staff effectively support your development organization, accelerating innovation while maintaining security and reliability.",[511,514,517,520,523,526],{"header":512,"content":513},"What is software logistics in the context of software development?","Software logistics refers to the processes that occur after code is packaged, including provisioning, deployment, configuration, monitoring, and maintenance. It represents the second half of the software supply chain, ensuring reliable, secure, and efficient delivery to production.",{"header":515,"content":516},"Why is software logistics becoming more important now?","As generative AI accelerates the rate of code creation, organizations face growing pressure to deploy and maintain this code efficiently. 
With limited operations resources, effective software logistics is essential to prevent bottlenecks and turn development speed into business value.",{"header":518,"content":519},"How does poor software logistics affect delivery cycles?","Without optimized logistics, organizations experience deployment delays, inconsistent operations, and over-reliance on either stretched operations teams or developers doing ops work. This undermines innovation velocity and increases operational risk.",{"header":521,"content":522},"What role do “golden pipelines” play in software logistics?","Golden pipelines offer pre-configured, automated deployment workflows that developers can use independently. These pipelines enhance developer autonomy while embedding security and compliance guardrails, reducing reliance on operations teams.",{"header":524,"content":525},"How can a unified data store improve software logistics?","A unified data store connects metrics across the software delivery lifecycle, from code commit to production. This enables real-time insights, performance tracking, and automation, allowing organizations to manage delivery risk and optimize outcomes.",{"header":527,"content":528},"Why should platform engineering include a logistics focus?","While many platform engineering efforts focus on improving developer experience, the logistics side ensures that increased coding velocity translates into actual deployment efficiency. 
Without logistics capabilities, development speed does not equal business agility.","why-software-logistics-is-key-to-accelerating-innovation","content:en-us:the-source:platform:why-software-logistics-is-key-to-accelerating-innovation:index.yml","en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation/index.yml","en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation/index",{"_path":534,"_dir":495,"_draft":6,"_partial":6,"_locale":7,"config":535,"seo":537,"content":541,"type":448,"slug":564,"category":495,"_id":565,"_type":28,"title":538,"_source":29,"_file":566,"_stem":567,"_extension":32,"date":542,"description":539,"timeToRead":464,"heroImage":540,"keyTakeaways":543,"articleBody":547,"faq":548},"/en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation",{"layout":9,"template":417,"articleType":418,"author":26,"featured":6,"gatedAsset":536,"isHighlighted":6,"authorName":11},"transform-your-software-development",{"title":538,"description":539,"ogImage":540},"FinOps: Balancing financial responsibility and innovation","Explore how FinOps harmonizes financial accountability with business objectives, promoting cost-effective innovation in modern enterprises.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463866/i27a3wecdhplvd9wbxqr.png",{"title":538,"date":542,"description":539,"timeToRead":464,"heroImage":540,"keyTakeaways":543,"articleBody":547,"faq":548},"2024-11-26",[544,545,546],"FinOps fosters collaboration between finance, engineering, and business teams, aligning cloud investments with strategic business goals for maximum value.","By enhancing financial transparency, FinOps empowers organizations to make swift, data-driven decisions that reduce cloud costs.","Implementing FinOps creates a balance between innovation and cloud cost management, easing tension between product development and operations teams.","When cloud spending grows alongside your engineering 
teams, a painful trade-off often emerges: push developers to ship faster, or rein in escalating costs. But imagine boosting developer productivity by 30% while slashing project costs by 25%. That might sound impossible, yet it’s a realistic goal for organizations that adopt FinOps (Financial Operations) - a data-driven approach that applies DevOps principles and practices to optimize the costs associated with people, process, and technology across the software development lifecycle.\n\nI’ve seen firsthand how FinOps transforms organizations by bringing financial clarity to every aspect of software development. I recently met with the DevOps team at an insurance company that is beginning its FinOps journey. Early discussions included determining basic measurements such as cloud spend and identifying other revenue-driving, cost-reducing metrics through value stream management. These conversations are critical for FinOps practitioners to evaluate how teams and resources are organized and allocated, and which processes and tools may be implemented to drive change.\n\nFrom team structures to development processes to technology choices, this visibility helps leaders optimize investments across their entire operation. By bringing together finance, product, and engineering teams, FinOps enables CFOs, CPOs, and CTOs to make informed decisions - improving efficiency across the business.\n\nImproving efficiency and optimizing costs isn’t just a technical challenge - it’s also a strategic business imperative, especially as organizations invest more money in the cloud. FinOps brings necessary financial accountability to the cloud’s variable spend model. Let’s walk through the benefits of FinOps frameworks and how you can begin incorporating FinOps methodologies into your operational workflows.\n\n## What is FinOps?\nFinOps, short for Financial Operations, succeeds by transforming how finance, engineering, technology, and business teams operate. 
Through real-time data and analytics, teams gain immediate visibility into how they are using resources (such as cloud resources) and can take action before costs escalate. This proactive approach to financial accountability enables quick, informed decisions about resource allocation, leading to measurable cost savings.\n\nAt its core, FinOps is a cultural practice that makes this transformation sustainable. By establishing clear processes and shared metrics, teams ensure their daily technology decisions support broader business objectives.\n\n## Why is FinOps so popular right now?\nAs many companies focus on generative AI and developer productivity, they need guardrails, such as automated workflows and reusable templates, on the delivery side of the equation to ensure [paved pathways](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/) are adopted. This becomes essential for organizations that are modernizing their applications and scaling cloud architectures in production.\n\nThe challenge intensifies when managing non-production costs, such as continuous integration (CI) costs. Companies that have adopted a data-driven approach can gain deep visibility into their CI costs. They can see the financial implications of scaling CI horizontally or vertically across different processor architectures. By implementing standards like ephemeral testing environments, they ensure code quality and security while optimizing spending.\n\nProduct owners, who normally own the budget for a product line, can also work with IT teams and engineering leads to provide transparency metrics. This collaboration allows leaders to roll up budget projections across multiple services and ensure that infrastructure resources are being leveraged at their optimal capacity. 
The result: finance teams can finally see which applications generate the most investment return.\n\n## Bridging technical and financial domains\nCreating a FinOps model can involve both a carrot and stick approach. The carrot approach fosters a more collaborative and transparent environment. The stick approach, on the other hand - for example, reprimanding development teams for overspending - most often leads to a breakdown in processes. You want FinOps to account for what developers need to do their work and how it impacts the company’s bottom line, not just monitor their use of cloud resources.\n\nI recently met with a major airline that was spending close to $5 million a year on CI runner fleets. Security scans, dependency scans, and token scanning all ran inside these runner fleets. They could have skipped the security step to reduce their spend, but the [potential for security issues](https://about.gitlab.com/the-source/security/how-to-strengthen-security-by-applying-devsecops-principles/) was a much bigger concern than spending the money on the runner fleets. Instead of skipping the security step, the company needed to identify ways to make their runner fleets as a whole more efficient to reduce their spend _and_ encourage developers to experiment and innovate.\n\nA successful FinOps program does not require a centralized team of full-time FinOps professionals. FinOps serves as a strategic liaison among cross-functional teams such as finance, product, and engineering. A typical FinOps program includes various job roles and functions, such as a CTO or VP of Engineering, a finance leader, and one or more engineering leaders who regularly collaborate to evaluate issues, identify new efficiency opportunities, and build remediation plans.\n\nAligning technical operations with financial objectives helps ensure that cloud infrastructure and software development investments yield the highest possible return. 
This can demonstrate to DevSecOps teams how their work contributes directly to increasing revenue, how they may be able to reduce costs, or both.\n\n## Smart financial control in developer workflows\nFinOps monitors resource consumption from both a user and operational standpoint to help optimize developer workflows. One way to accomplish this is to analyze CI jobs and identify which ones cost more than their value justifies. Every software development pipeline contains multiple jobs, each requiring an execution resource like a virtual machine (VM) or container. The longer each job takes to execute, the higher the cost. FinOps helps developers understand which jobs are performing poorly so they know which ones they need to refactor.\n\nThis creates a self-service model that frees technology teams to work within clear guidelines. For example, a policy might prohibit someone from provisioning $100,000 worth of resources on AWS, but they can spin up an EC2 image to conduct testing. However, if they can justify why they need to provision $100,000 worth of resources, they can submit a request explaining how the project will potentially generate revenue for the company. If approved, they can begin their work.\n\nHowever, I want to reassure DevSecOps professionals that FinOps isn’t about restricting innovation through monitoring. Instead, it provides full visibility into your organization’s cloud usage and spending, helping teams identify opportunities to improve cloud productivity. In addition to fostering collaboration among finance, technology, and business teams, FinOps analyzes usage patterns and forecasts demand to anticipate whether resources need to be scaled up or down to meet future needs before overspending occurs.\n\n## Easing the tension\nThere’s a constant tug-of-war between engineering and operations teams. Engineering’s mission is to drive innovation that generates new revenue streams while creating great customer experiences. 
The operations team focuses on maximizing productivity while saving money. FinOps eases the tension between these groups by increasing developer productivity while reducing wasteful spending - aligning technical efficiency with financial prudence.\n\nFinOps helps business leaders think in precise numbers, not subjective costs. It’s imperative to approach software development with a clear understanding of its financial impact on the organization to make informed decisions on project continuance based on two key criteria: will the project increase revenue or reduce costs?\n\nAt its core, FinOps isn’t just about cutting cloud costs; it’s also about optimizing the entire software development lifecycle and making continuous improvements. The goal is to help engineers and operations consider financial effectiveness alongside technical innovation so they understand how their work maps to boosting the organization’s bottom line.\n\n_Read more about FinOps on the [FinOps Foundation website](https://www.finops.org/introduction/what-is-finops/)._",[549,552,555,558,561],{"header":550,"content":551},"What are the key benefits of adopting FinOps?","Adopting FinOps leads to cost savings, improved resource allocation, and better cross-team collaboration. Organizations gain financial transparency, allowing them to scale cloud services efficiently, reduce waste, and ensure that every technology investment supports business growth and profitability.",{"header":553,"content":554},"How does FinOps align financial objectives with DevSecOps?","FinOps helps DevSecOps teams balance security and efficiency by ensuring that cost-saving measures do not compromise essential security processes. 
Instead of cutting security scans to save money, organizations use FinOps to optimize infrastructure, making security operations more cost-effective while maintaining compliance.",{"header":556,"content":557},"Why is FinOps important?","FinOps, short for Financial Operations, is a framework that helps organizations balance financial accountability with innovation by optimizing cloud spending and software development costs. It enables teams to make data-driven financial decisions while maintaining agility, improving collaboration between finance, engineering, and product teams.",{"header":559,"content":560},"What role do engineering teams play in a FinOps strategy?","Engineering teams are crucial in FinOps, as they make decisions that directly impact cloud costs. FinOps encourages developers to take ownership of cost efficiency by monitoring resource consumption, refactoring inefficient CI jobs, and leveraging automation to optimize deployments. The goal is to foster a culture where financial awareness becomes a natural part of development.",{"header":562,"content":563},"How does FinOps improve cloud cost management?","FinOps provides real-time visibility into cloud spending, allowing organizations to track resource utilization and prevent unnecessary costs. By integrating financial insights into DevOps workflows, companies can identify inefficiencies, optimize continuous integration (CI) costs, and ensure that infrastructure investments deliver maximum value.","finops-balancing-financial-responsibility-and-innovation","content:en-us:the-source:platform:finops-balancing-financial-responsibility-and-innovation:index.yml","en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation/index.yml","en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation/index",[413,453,493,533],{"ai":355,"platform":362,"security":96},1752588124707]