[{"data":1,"prerenderedAt":1130},["ShallowReactive",2],{"/en-us/the-source/ai/":3,"footer-en-us":35,"the-source-navigation-en-us":344,"the-source-newsletter-en-us":370,"the-source-resources-en-us":382,"ai-articles-list-authors-en-us":425,"ai-articles-list-en-us":456,"ai-page-categories-en-us":1129},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"type":8,"config":9,"seo":10,"content":14,"slug":28,"_id":29,"_type":30,"title":7,"_source":31,"_file":32,"_stem":33,"_extension":34},"/en-us/the-source/ai","the-source",false,"","category",{"layout":5},{"title":11,"description":12,"ogImage":13},"Artificial Intelligence","Explore expert insights on how AI is transforming software development, and how organizations can get the most out of their AI investments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463300/eoudcbj5aoucl0spsp0c.png",[15,20],{"componentName":16,"type":16,"componentContent":17},"TheSourceCategoryHero",{"title":11,"description":12,"image":18},{"config":19},{"src":13},{"componentName":21,"type":21,"componentContent":22},"TheSourceCategoryMainSection",{"config":23},{"gatedAssets":24},[25,26,27],"source-lp-how-to-get-started-using-ai-in-software-development","navigating-ai-maturity-in-devsecops","source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach","ai","content:en-us:the-source:ai:index.yml","yaml","content","en-us/the-source/ai/index.yml","en-us/the-source/ai/index","yml",{"_path":36,"_dir":37,"_draft":6,"_partial":6,"_locale":7,"data":38,"_id":340,"_type":30,"title":341,"_source":31,"_file":342,"_stem":343,"_extension":34},"/shared/en-us/main-footer","en-us",{"text":39,"source":40,"edit":46,"contribute":51,"config":56,"items":61,"minimal":332},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":41,"config":42},"View page source",{"href":43,"dataGaName":44,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page 
source","footer",{"text":47,"config":48},"Edit this page",{"href":49,"dataGaName":50,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":52,"config":53},"Please contribute",{"href":54,"dataGaName":55,"dataGaLocation":45},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":57,"facebook":58,"youtube":59,"linkedin":60},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[62,89,162,230,293],{"title":63,"links":64,"subMenu":70},"Platform",[65],{"text":66,"config":67},"DevSecOps platform",{"href":68,"dataGaName":69,"dataGaLocation":45},"/platform/","devsecops platform",[71],{"title":72,"links":73},"Pricing",[74,79,84],{"text":75,"config":76},"View plans",{"href":77,"dataGaName":78,"dataGaLocation":45},"/pricing/","view plans",{"text":80,"config":81},"Why Premium?",{"href":82,"dataGaName":83,"dataGaLocation":45},"/pricing/premium/","why premium",{"text":85,"config":86},"Why Ultimate?",{"href":87,"dataGaName":88,"dataGaLocation":45},"/pricing/ultimate/","why ultimate",{"title":90,"links":91},"Solutions",[92,97,102,107,112,117,122,127,132,137,142,147,152,157],{"text":93,"config":94},"Digital transformation",{"href":95,"dataGaName":96,"dataGaLocation":45},"/solutions/digital-transformation/","digital transformation",{"text":98,"config":99},"Security & Compliance",{"href":100,"dataGaName":101,"dataGaLocation":45},"/solutions/security-compliance/","security & compliance",{"text":103,"config":104},"Automated software delivery",{"href":105,"dataGaName":106,"dataGaLocation":45},"/solutions/delivery-automation/","automated software delivery",{"text":108,"config":109},"Agile development",{"href":110,"dataGaName":111,"dataGaLocation":45},"/solutions/agile-delivery/","agile 
delivery",{"text":113,"config":114},"Cloud transformation",{"href":115,"dataGaName":116,"dataGaLocation":45},"/solutions/cloud-native/","cloud transformation",{"text":118,"config":119},"SCM",{"href":120,"dataGaName":121,"dataGaLocation":45},"/solutions/source-code-management/","source code management",{"text":123,"config":124},"CI/CD",{"href":125,"dataGaName":126,"dataGaLocation":45},"/solutions/continuous-integration/","continuous integration & delivery",{"text":128,"config":129},"Value stream management",{"href":130,"dataGaName":131,"dataGaLocation":45},"/solutions/value-stream-management/","value stream management",{"text":133,"config":134},"GitOps",{"href":135,"dataGaName":136,"dataGaLocation":45},"/solutions/gitops/","gitops",{"text":138,"config":139},"Enterprise",{"href":140,"dataGaName":141,"dataGaLocation":45},"/enterprise/","enterprise",{"text":143,"config":144},"Small business",{"href":145,"dataGaName":146,"dataGaLocation":45},"/small-business/","small business",{"text":148,"config":149},"Public sector",{"href":150,"dataGaName":151,"dataGaLocation":45},"/solutions/public-sector/","public sector",{"text":153,"config":154},"Education",{"href":155,"dataGaName":156,"dataGaLocation":45},"/solutions/education/","education",{"text":158,"config":159},"Financial services",{"href":160,"dataGaName":161,"dataGaLocation":45},"/solutions/finance/","financial services",{"title":163,"links":164},"Resources",[165,170,175,180,185,190,195,200,205,210,215,220,225],{"text":166,"config":167},"Install",{"href":168,"dataGaName":169,"dataGaLocation":45},"/install/","install",{"text":171,"config":172},"Quick start guides",{"href":173,"dataGaName":174,"dataGaLocation":45},"/get-started/","quick setup checklists",{"text":176,"config":177},"Learn",{"href":178,"dataGaName":179,"dataGaLocation":45},"https://university.gitlab.com/","learn",{"text":181,"config":182},"Product 
documentation",{"href":183,"dataGaName":184,"dataGaLocation":45},"https://docs.gitlab.com/","docs",{"text":186,"config":187},"Blog",{"href":188,"dataGaName":189,"dataGaLocation":45},"/blog/","blog",{"text":191,"config":192},"Customer success stories",{"href":193,"dataGaName":194,"dataGaLocation":45},"/customers/","customer success stories",{"text":196,"config":197},"Remote",{"href":198,"dataGaName":199,"dataGaLocation":45},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":201,"config":202},"GitLab Services",{"href":203,"dataGaName":204,"dataGaLocation":45},"/services/","services",{"text":206,"config":207},"TeamOps",{"href":208,"dataGaName":209,"dataGaLocation":45},"/teamops/","teamops",{"text":211,"config":212},"Community",{"href":213,"dataGaName":214,"dataGaLocation":45},"/community/","community",{"text":216,"config":217},"Forum",{"href":218,"dataGaName":219,"dataGaLocation":45},"https://forum.gitlab.com/","forum",{"text":221,"config":222},"Events",{"href":223,"dataGaName":224,"dataGaLocation":45},"/events/","events",{"text":226,"config":227},"Partners",{"href":228,"dataGaName":229,"dataGaLocation":45},"/partners/","partners",{"title":231,"links":232},"Company",[233,238,243,248,253,258,263,268,273,278,283,288],{"text":234,"config":235},"About",{"href":236,"dataGaName":237,"dataGaLocation":45},"/company/","company",{"text":239,"config":240},"Jobs",{"href":241,"dataGaName":242,"dataGaLocation":45},"/jobs/","jobs",{"text":244,"config":245},"Leadership",{"href":246,"dataGaName":247,"dataGaLocation":45},"/company/team/e-group/","leadership",{"text":249,"config":250},"Team",{"href":251,"dataGaName":252,"dataGaLocation":45},"/company/team/","team",{"text":254,"config":255},"Handbook",{"href":256,"dataGaName":257,"dataGaLocation":45},"https://handbook.gitlab.com/","handbook",{"text":259,"config":260},"Investor relations",{"href":261,"dataGaName":262,"dataGaLocation":45},"https://ir.gitlab.com/","investor 
relations",{"text":264,"config":265},"Environmental, social and governance (ESG)",{"href":266,"dataGaName":267,"dataGaLocation":45},"/environmental-social-governance/","environmental, social and governance",{"text":269,"config":270},"Diversity, inclusion and belonging (DIB)",{"href":271,"dataGaName":272,"dataGaLocation":45},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":274,"config":275},"Trust Center",{"href":276,"dataGaName":277,"dataGaLocation":45},"/security/","trust center",{"text":279,"config":280},"Newsletter",{"href":281,"dataGaName":282,"dataGaLocation":45},"/company/contact/","newsletter",{"text":284,"config":285},"Press",{"href":286,"dataGaName":287,"dataGaLocation":45},"/press/","press",{"text":289,"config":290},"Modern Slavery Transparency Statement",{"href":291,"dataGaName":292,"dataGaLocation":45},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":294,"links":295},"Contact Us",[296,301,306,311,316,321,326],{"text":297,"config":298},"Contact an expert",{"href":299,"dataGaName":300,"dataGaLocation":45},"/sales/","sales",{"text":302,"config":303},"Get help",{"href":304,"dataGaName":305,"dataGaLocation":45},"/support/","get help",{"text":307,"config":308},"Customer portal",{"href":309,"dataGaName":310,"dataGaLocation":45},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"text":312,"config":313},"Status",{"href":314,"dataGaName":315,"dataGaLocation":45},"https://status.gitlab.com/","status",{"text":317,"config":318},"Terms of use",{"href":319,"dataGaName":320,"dataGaLocation":45},"/terms/","terms of use",{"text":322,"config":323},"Privacy statement",{"href":324,"dataGaName":325,"dataGaLocation":45},"/privacy/","privacy statement",{"text":327,"config":328},"Cookie preferences",{"dataGaName":329,"dataGaLocation":45,"id":330,"isOneTrustButton":331},"cookie 
preferences","ot-sdk-btn",true,{"items":333},[334,336,338],{"text":317,"config":335},{"href":319,"dataGaName":320,"dataGaLocation":45},{"text":322,"config":337},{"href":324,"dataGaName":325,"dataGaLocation":45},{"text":327,"config":339},{"dataGaName":329,"dataGaLocation":45,"id":330,"isOneTrustButton":331},"content:shared:en-us:main-footer.yml","Main Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"_path":345,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"logo":346,"subscribeLink":351,"navItems":355,"_id":366,"_type":30,"title":367,"_source":31,"_file":368,"_stem":369,"_extension":34},"/shared/en-us/the-source/navigation",{"altText":347,"config":348},"the source logo",{"src":349,"href":350},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750191004/t7wz1klfb2kxkezksv9t.svg","/the-source/",{"text":352,"config":353},"Subscribe",{"href":354},"#subscribe",[356,359,362],{"text":11,"config":357},{"href":358},"/the-source/ai/",{"text":98,"config":360},{"href":361},"/the-source/security/",{"text":363,"config":364},"Platform & Infrastructure",{"href":365},"/the-source/platform/","content:shared:en-us:the-source:navigation.yml","Navigation","shared/en-us/the-source/navigation.yml","shared/en-us/the-source/navigation",{"_path":371,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"title":372,"description":373,"submitMessage":374,"formData":375,"_id":379,"_type":30,"_source":31,"_file":380,"_stem":381,"_extension":34},"/shared/en-us/the-source/newsletter","The Source Newsletter","Stay updated with insights for the future of software development.","You have successfully signed up for The Source’s 
newsletter.",{"config":376},{"formId":377,"formName":378,"hideRequiredLabel":331},1077,"thesourcenewsletter","content:shared:en-us:the-source:newsletter.yml","shared/en-us/the-source/newsletter.yml","shared/en-us/the-source/newsletter",[383,399,412],{"_path":384,"_dir":385,"_draft":6,"_partial":6,"_locale":7,"config":386,"title":388,"description":389,"link":390,"_id":396,"_type":30,"_source":31,"_file":397,"_stem":398,"_extension":34},"/shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops","gated-assets",{"id":26,"formId":387},1002,"Navigating AI maturity in DevSecOps","Read our survey findings from more than 5,000 DevSecOps professionals worldwide for insights on how organizations are incorporating AI into the software development lifecycle.",{"text":391,"config":392},"Read the report",{"href":393,"dataGaName":394,"dataGaLocation":395},"https://about.gitlab.com/developer-survey/2024/ai/","Navigating AI Maturity in DevSecOps","thesource","content:shared:en-us:the-source:gated-assets:navigating-ai-maturity-in-devsecops.yml","shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops.yml","shared/en-us/the-source/gated-assets/navigating-ai-maturity-in-devsecops",{"_path":400,"_dir":385,"_draft":6,"_partial":6,"_locale":7,"config":401,"title":402,"description":403,"link":404,"_id":409,"_type":30,"_source":31,"_file":410,"_stem":411,"_extension":34},"/shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach",{"id":27},"AI guide for enterprise leaders: Building the right approach","Download our guide for enterprise leaders to learn how to prepare your C-suite, executive leadership, and development teams for what AI can do today — and will do in the near future — to accelerate software development.",{"text":405,"config":406},"Read the 
guide",{"href":407,"dataGaName":408,"dataGaLocation":395},"https://about.gitlab.com/the-source/ai/ai-guide-for-enterprise-leaders-building-the-right-approach","AI Guide For Enterprise Leaders: Building the Right Approach","content:shared:en-us:the-source:gated-assets:source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach.yml","shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach.yml","shared/en-us/the-source/gated-assets/source-lp-ai-guide-for-enterprise-leaders-building-the-right-approach",{"_path":413,"_dir":385,"_draft":6,"_partial":6,"_locale":7,"config":414,"title":415,"description":416,"link":417,"_id":422,"_type":30,"_source":31,"_file":423,"_stem":424,"_extension":34},"/shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development",{"id":25,"formId":387},"How to get started using AI in software development","Learn how to strategically implement AI to boost efficiency, security, and reduce context switching. 
Empower every member of your team with AI capabilities.",{"text":418,"config":419},"Download the guide",{"href":420,"dataGaName":421,"dataGaLocation":395},"https://about.gitlab.com/the-source/ai/getting-started-with-ai-in-software-development-a-guide-for-leaders","How to Get Started Using AI in Software Development","content:shared:en-us:the-source:gated-assets:source-lp-how-to-get-started-using-ai-in-software-development.yml","shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development.yml","shared/en-us/the-source/gated-assets/source-lp-how-to-get-started-using-ai-in-software-development",{"amanda-rueda":426,"andre-michael-braun":427,"andrew-haschka":428,"ayoub-fandi":429,"brian-wald":430,"bryan-ross":431,"chandler-gibbons":432,"dave-steer":433,"ddesanto":434,"derek-debellis":435,"emilio-salvador":436,"erika-feldman":437,"george-kichukov":438,"gitlab":439,"grant-hickman":440,"haim-snir":441,"iganbaruch":442,"jlongo":443,"joel-krooswyk":444,"josh-lemos":445,"julie-griffin":446,"kristina-weis":447,"lee-faus":448,"ncregan":449,"rschulman":450,"sabrina-farmer":451,"sandra-gittlen":452,"sharon-gaudin":453,"stephen-walters":454,"taylor-mccaslin":455},"Amanda Rueda","Andre Michael Braun","Andrew Haschka","Ayoub Fandi","Brian Wald","Bryan Ross","Chandler Gibbons","Dave Steer","David DeSanto","Derek DeBellis","Emilio Salvador","Erika Feldman","George Kichukov","GitLab","Grant Hickman","Haim Snir","Itzik Gan Baruch","Joseph Longo","Joel Krooswyk","Josh Lemos","Julie Griffin","Kristina Weis","Lee Faus","Niall Cregan","Robin Schulman","Sabrina Farmer","Sandra Gittlen","Sharon Gaudin","Stephen Walters","Taylor McCaslin",{"allArticles":457,"visibleArticles":1128,"showAllBtn":6},[458,489,519,551,583,615,643,676,707,734,759,776,803,832,845,857,868,896,920,932,943,970,992,1004,1032,1046,1073,1100],{"title":459,"date":460,"description":461,"timeToRead":462,"heroImage":463,"keyTakeaways":464,"articleBody":468,"faq":469,"config":485},"From vibe 
coding to agentic AI: A roadmap for technical leaders","2025-06-12","Discover how to implement vibe coding and agentic AI in your development process to increase productivity while maintaining code quality and security.","5 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463655/i6g9scccza0l35n6i1bf.png",[465,466,467],"AI-assisted development is transforming software creation, enabling teams to focus on business logic and user experience rather than syntax details, but requires proper governance to ensure quality.","Organizations should adopt an evolutionary approach to AI implementation — starting with basic assistance, then expanding across the development lifecycle, establishing governance frameworks, and gradually introducing autonomous agents.","The engineering landscape is shifting as AI handles routine coding tasks, creating demand for new specialized roles and requiring developers to focus on strategic thinking, architecture design, and effective AI collaboration.","A new wave of generative artificial intelligence (AI) tools is redefining how we build software and who can participate in the process. At the forefront of this revolution is \"vibe coding\" - using natural language prompts to generate functional code without having to fully understand how the code works.\n\n[According to GitLab research](https://about.gitlab.com/developer-survey/), 78% of teams have already integrated AI-assisted coding tools into software development workflows, and AI is demonstrating measurable efficiency improvements. Vibe coding lowers the barriers to entry for development. However, when software engineers use AI-generated code without critical evaluation or deep comprehension, that might also lead to lower quality and increased security vulnerabilities.\n\nTraditional development approaches rely heavily on specific programming languages and syntax rules. 
Vibe coding lowers the need to fully comprehend the nuances of every language and development pattern, but it does not eliminate that need. This tension between accessibility and quality reflects a broader transformation in software creation.\n\nAI is fundamentally shifting what development means. Team members can focus on desired outcomes rather than implementation details. Logic, business requirements, and user experience precede syntax correctness and language expertise. Organizations increasingly value professionals who can effectively bridge product vision with technical execution - often without writing traditional code.\n\nWhile vibe coding offers tremendous potential to accelerate development and democratize software creation, it must be implemented thoughtfully with proper governance to ensure that speed doesn't come at the expense of quality and maintainability.\n\n## Agentic AI and vibe coding\nVibe coding is about getting something to appear to work quickly rather than building a robust, efficient, and maintainable solution based on solid knowledge. This is where [agentic AI](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) can help. Agents can take abstract instructions like \"build a customer database\" and autonomously handle all the technical implementation details, bridging the gap between quick prototypes and properly engineered solutions.\n\nWhile vibe coding primarily focuses on code generation through natural language prompts, agentic AI expands these capabilities into an autonomous development ecosystem. 
Vibe coding involves a human developer using AI without requiring deep understanding, while agentic AI takes on a more proactive, autonomous role in building software based on a given goal.\n\nThe two approaches complement each other perfectly: vibe coding provides a solid foundation for human-AI interaction through natural language, while agentic systems build upon this foundation to create self-directed development partners that handle complex tasks by making independent decisions and taking action with minimal supervision.\n\nAgentic AI systems enhance vibe coding by integrating deeply into development workflows, conducting sophisticated code reviews, recommending infrastructure optimizations, and adapting to changing requirements. [Industry research from Deloitte](https://www2.deloitte.com/us/en/insights/industry/technology/technology-media-and-telecom-predictions/2025/autonomous-generative-ai-agents-still-under-development.html) indicates that 25% of companies using generative AI will implement agentic AI pilots in 2025, which is expected to double by 2027.\n\nSuccessfully implementing vibe coding and agentic AI together requires careful planning. Organizations must establish robust security protocols, ensure regulatory compliance, and create clear communication channels between AI systems and existing tools. Despite these challenges, the combined power of vibe coding and agentic AI delivers significant benefits in development speed, code quality, and resource optimization.\n\n## Implementation strategy for teams and leadership: An evolutionary approach\nDevelopment teams and technical leaders can follow this evolutionary path to effectively implement vibe coding and agentic AI:\n1. **Begin with AI assistance**: Introduce developers to AI tools that improve productivity for routine tasks. Focus on building familiarity, comfort, and confidence with AI assistance for coding, documentation, and simple problem-solving.\n1. 
**Expand AI assistance across the software development lifecycle**: Move beyond code generation tools to integrate AI into testing, debugging, code review, and documentation. Identify repetitive, time-intensive workflows where AI can create immediate value with minimal disruption.\n1. **Establish governance frameworks & interoperability standards**: Create clear policies for AI tool usage, including data access permissions, security protocols, and quality standards for AI-generated code. Define protocols for how AI systems will share information and collaborate across platforms, as well as the level of human input required when using AI tools.\n1. **Introduce autonomous AI agents for specific tasks**: Deploy agents to handle self-contained development tasks with a degree of autonomy. These agents take abstract goals like \"optimize this database query\" and handle the implementation details independently while maintaining code quality.\n1. **Scale agent implementation across the organization**: Expand the scope of tasks handled by agents and introduce multiple agents working together on complex projects. Integrate agents deeply into the end-to-end software development lifecycle and redesign team structures to create cross-functional groups combining technical expertise and domain knowledge.\n1. **Continuously improve through feedback and education**: Implement systems to monitor agent performance with clear metrics and correction protocols. 
Invest in organization-wide AI literacy through training programs for prompt engineering, AI collaboration techniques, and effective system oversight.\n\nThis evolutionary approach ensures technical implementation and organizational leadership progress together in the AI transformation journey, maximizing the benefits of vibe coding while building robust, efficient solutions.\n\n## The changing developer landscape\nThe engineering role is evolving as vibe coding and [agentic AI](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/) handle more of the heavy lifting in software development. Less experienced developers face a steeper learning curve with fewer straightforward tasks available for initial skill-building. Simultaneously, senior engineers must adapt as AI takes over more complex tasks and traditional oversight responsibilities.\n\nBeyond the changing dynamics for existing roles, we’re seeing the emergence of entirely new positions like prompt engineers who guide and refine AI outputs. The most valuable engineering skills have shifted toward architecture design, strategic thinking, and effective AI collaboration.\n\nWhile this disruption creates uncertainty for some traditional roles and compensation models, it also opens doors for those who position themselves at the intersection of human creativity and machine efficiency. The most successful engineers will be those who strategically delegate routine work to AI while applying their uniquely human expertise to innovation and complex problem-solving.\n\nFor technical leaders, the strategic implications are clear: organizations that embrace vibe coding and agentic AI gain decisive competitive advantages through accelerated development cycles, improved code quality, and more efficient resource allocation. However, organizations will need to adopt AI responsibly, with governance frameworks to ensure that efficiency doesn’t come at the expense of security. 
Those who fail to do so may find themselves multiple innovation cycles behind in an increasingly AI-powered development landscape.",[470,473,476,479,482],{"header":471,"content":472},"What is vibe coding and how does it change the development process?","Vibe coding refers to the use of natural language prompts to generate functional code without needing deep knowledge of programming syntax. It shifts the focus from how code is written to what it achieves, allowing developers to prioritize business logic and user experience.",{"header":474,"content":475},"How does agentic AI complement vibe coding?","While vibe coding enables quick code generation, agentic AI takes it further by autonomously executing tasks based on goals. It bridges the gap between rapid prototyping and robust implementation, acting as a self-directed development partner that enhances productivity and quality.",{"header":477,"content":478},"What are the first steps organizations should take to implement AI in development workflows?","Start with AI-assisted tools that handle routine tasks like documentation, test creation, and debugging. As teams grow more confident, expand AI's role across the development lifecycle and introduce governance policies to manage its use responsibly.",{"header":480,"content":481},"How does the rise of AI change the skills developers need?","AI is reshaping engineering roles. Developers now need stronger architectural thinking, system design, and AI collaboration skills. Routine coding tasks are increasingly delegated to AI, so human expertise will focus more on oversight, creativity, and strategic problem-solving.",{"header":483,"content":484},"What are the risks of using AI without governance in software development?","Without governance, AI-generated code may introduce security vulnerabilities, lack maintainability, or violate compliance standards. 
Organizations must establish security protocols, clear usage policies, and quality checks to ensure responsible AI adoption.",{"layout":5,"template":486,"articleType":487,"author":488,"featured":331,"gatedAsset":27,"isHighlighted":6,"authorName":436},"TheSourceArticle","Regular","emilio-salvador",{"title":490,"date":491,"description":492,"timeToRead":493,"heroImage":494,"keyTakeaways":495,"articleBody":499,"faq":500,"config":516},"Why automotive software development needs human-centered AI","2025-06-02","Learn why balancing AI assistance with human expertise is crucial for automotive embedded systems development and creating competitive advantages.","6 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463704/u3dshy4qn6rtrklfalx7.png",[496,497,498],"AI in automotive embedded software development works best as a Level 2 assistant, meaning human expertise remains essential for effective embedded development in vehicles.","The right human-AI balance varies across different automotive software domains; teams that find the right balance between AI assistance and human expertise will gain competitive advantages.","Creating effective human-AI partnerships requires intentional processes such as mandatory human review checkpoints for safety-critical systems.","Software is an essential part of modern automobiles. This year, the lines of code in the average car are expected to reach [650 million](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/), an increase from 200 million in 2020. What’s more, we’re seeing a shift from distributed architectures for vehicle firmware toward zonal architectures with central high-performance computers (HPCs). All of this creates complexity and novel software challenges.\n\nEmbedded systems developers are trying to adapt to this complexity. 
At the same, market pressures are forcing them to accelerate their development processes and ship innovation faster.\n\nArtificial intelligence (AI) can help address these challenges, but its implementation raises important questions. To what degree should AI tools autonomously generate and review code in automotive embedded systems? How much human oversight is advisable? Drawing from the automotive industry's vocabulary, I propose that embedded development requires Level 2 AI assistance - at least right now.\n\n## Understanding Level 2 automation for AI in embedded development\nIn automotive driving automation, [Level 2 systems](https://www.sae.org/blog/sae-j3016-update) represent partial automation: a carefully balanced human-machine collaboration. These systems can help control steering, acceleration, and braking in specific scenarios, but the driver must stay engaged. They must monitor the environment and be ready to take control at any moment. The human remains legally responsible for the vehicle's operation and must supervise the automation continually. In contrast, Level 4-5 systems aim to operate with minimal or no human oversight in defined conditions.\n\nThis framework provides a useful analogy for AI in embedded development. Current AI tools excel at providing suggestions and automating routine tasks, much like Level 2 driver assistance. They can suggest code, help with testing, and identify potential issues. However, their contextual understanding has limitations. Given the high stakes of automotive embedded systems, combining AI's capabilities with human wisdom and oversight is best.\n\n## Why AI excels as a development assistant\nAI shows remarkable capabilities across numerous areas of embedded development. 
Here are just a few examples from the growing list of applications:\n\nFirst, AI can [generate and complete code](https://docs.gitlab.com/user/project/repository/code_suggestions/) for common patterns in C/C++, reducing developers' time spent on routine programming tasks. And if prompted correctly, AI can respect embedded-specific constraints like memory limitations and hardware interfaces.\n\nSecond, AI can [generate tests](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#write-tests-in-the-ide) that you can run on cloud-based ARM CPUs or virtual hardware. This helps teams \"shift left\" in testing their firmware and catch issues earlier in development when they're less expensive to fix. It also helps identify edge cases you might have otherwise overlooked.\n\nThird, AI can help [accelerate the remediation of security vulnerabilities](https://docs.gitlab.com/user/application_security/vulnerabilities/#explaining-a-vulnerability) in your code. AI tools can help interpret security findings from your security scanners. They can even suggest potential approaches to address issues, supporting development teams as they work to meet cybersecurity requirements in this highly regulated space. AI thus helps expedite remediation.\n\nBeyond these examples, AI is increasingly valuable for [root cause analysis](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) of complex issues, comprehensive [code reviews](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/#have-gitlab-duo-review-your-code), automated [code refactoring](https://about.gitlab.com/blog/2024/08/26/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/) for optimization, [explaining](https://docs.gitlab.com/user/project/merge_requests/changes/#explain-code-in-a-merge-request) complex legacy code, and providing conversational assistance through [AI chat capabilities](https://docs.gitlab.com/user/gitlab_duo_chat/). 
As AI evolves, so will the ways in which it assists embedded development teams.\n\n## The essential human element\nThough these AI capabilities are quite powerful, they cannot - and should not - replace human expertise. Embedded developers bring domain knowledge that spans both software and hardware domains, understanding not just how to code, but how that code interacts with physical components under varying conditions.\n\nMoreover, embedded developers understand the intricate relationships between different vehicle subsystems. Far from replacing such expertise, AI must integrate with human beings' contextual knowledge.\n\nHumans also bring creativity and innovation to solving unique automotive challenges. When faced with conflicting requirements or novel problems, human engineers draw on experience and intuition that AI simply doesn't possess.\n\nThe human-centered approach is critical in automotive development, where safety and reliability cannot be compromised. Just as a driver must remain alert and ready to take control of a Level 2 automated vehicle, developers must maintain ultimate responsibility for AI-generated code. While valuable, AI suggestions require expert validation. Developers must review and verify that proposed solutions solve the problem correctly within the specific automotive context.\n\nThis human oversight becomes even more critical when considering the consequences of errors. In enterprise software, a bug might cause inconvenience; in automotive systems, it could potentially impact passenger safety. Developers bring ethical judgment and a holistic understanding of the operating environment that AI currently lacks. 
They can anticipate edge cases based on real-world driving conditions and evaluate AI recommendations against their practical experience with actual vehicle systems.\n\n## Creating an effective human-AI partnership\nBelow are some initial approaches to consider as you begin building productive partnerships between developers and AI.\n\nStart by identifying specific high-volume, low-risk tasks where AI can provide immediate value: unit test generation for non-safety-critical components, documentation updates, and routine code standardization are excellent entry points.\n\nImplement a tiered approach to AI integration based on system criticality. For infotainment or connectivity systems, teams might leverage more autonomous AI assistance. For safety-related systems, establish mandatory human review checkpoints with structured approval workflows. Create clear guidelines on which code components require senior engineer review versus those where junior developers can approve AI suggestions with minimal oversight.\n\nReview processes also need adaptation. Rather than having humans review AI-generated code in isolation, teams should implement collaborative workflows where AI assists with the review itself, highlighting potential issues for human evaluation. Consider adopting structured prompting techniques. For example, have developers specify constraints like memory requirements, coding standards, or performance parameters before generating AI suggestions.\n\nThese examples represent starting points for effective human-AI collaboration in embedded development.\n\n## Looking to the future\nThe human-AI partnership will evolve across different automotive domains as AI capabilities advance. 
Teams should prepare by focusing on higher-value skills that complement AI capabilities, such as systems architecture, integration expertise, and hardware-software design.\n\nThe teams that succeed will find the right balance, leveraging AI to handle routine tasks while keeping humans at the center of the development process. This is the path to realizing AI's productivity promise.\n\n_I'll be discussing topics like this and more with Dr. Felix Kortmann of Ignite by FORVIA HELLA in a webinar on June 11. The webinar will be on “Building the Future of Automotive Software.” Join us to learn how to effectively balance AI assistance with human expertise in your embedded development teams. [Register here](https://page.gitlab.com/webcasts-jun11-gitlab-ignite-by-foriva-hella-emea-amer.html?utm_medium=referral&utm_source=gmail&utm_campaign=20250611_global_cmp_webcast_speedsecurity_en_&utm_content=salespromo_x_auto)._",[501,504,507,510,513],{"header":502,"content":503},"What is Level 2 AI assistance in automotive software development?","Level 2 AI refers to a collaborative human-AI model where AI supports tasks like code generation and testing, but developers retain oversight and responsibility. Like Level 2 driving automation, the human stays in control, ensuring contextual accuracy and safety.",{"header":505,"content":506},"How does the role of AI differ across various automotive software domains?","AI adds value across all domains, but oversight levels vary. Safety-critical systems require stricter human validation, while infotainment systems allow more autonomous AI use. Teams should tailor AI workflows based on system risk and regulatory requirements.",{"header":508,"content":509},"How can teams establish effective AI review processes for embedded code?","Teams should use a tiered review structure. AI can perform initial quality checks — flagging syntax issues or common errors — while human experts review critical code sections and system interfaces. 
Clear guidelines should define when AI-generated suggestions require additional human verification or senior engineer approval to ensure safe integration within embedded systems.",{"header":511,"content":512},"What skills should embedded developers focus on as AI capabilities expand?","Embedded developers should deepen their understanding of systems architecture, hardware-software integration, and domain-specific safety requirements. Skills in prompt engineering and AI collaboration, such as framing effective prompts and interpreting model outputs, are also increasingly important. These competencies help developers remain effective evaluators and collaborators alongside AI systems.",{"header":514,"content":515},"How can AI help address the shortage of embedded software expertise in the automotive industry?","AI reduces the burden on experienced engineers by automating routine development tasks like boilerplate coding, unit testing, and documentation. This allows senior engineers to focus on high-impact projects and mentoring. 
At the same time, AI tools help junior developers ramp up faster by guiding them through embedded-specific best practices, accelerating onboarding and reducing skill barriers.",{"layout":5,"template":486,"articleType":487,"author":517,"featured":331,"gatedAsset":518,"isHighlighted":6,"authorName":448},"lee-faus","source-lp-transform-automotive-devops-secure-fast-future-ready",{"title":520,"date":521,"description":522,"timeToRead":493,"heroImage":523,"keyTakeaways":524,"articleBody":528,"faq":529,"config":548},"How agentic AI unlocks platform engineering potential","2025-05-12","Explore how agentic AI elevates platform engineering by automating complex workflows and scaling standardization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463908/zz7ifau6dya2fn2eyuij.png",[525,526,527],"Agentic AI systems go beyond code assistance, helping organizations overcome platform engineering challenges by automating complex workflows and scaling standardization efforts across development teams.","Technical leaders must consider interoperability, security governance, and workflow integration when implementing autonomous agents to ensure they work seamlessly within existing enterprise systems.","Organizations can leverage agentic systems to address “tech mandatory” initiatives like reducing technical debt and security vulnerabilities, where dense contexts have traditionally posed barriers to automation.","Many organizations have embraced artificial intelligence (AI) tools to boost developer productivity, typically in the form of code assistants that help individual developers write code faster. While these tools deliver welcome efficiency gains, they only scratch the surface of AI’s potential to transform your development organization.\n\nAt the same time, organizations are investing in platform engineering - building integrated toolchains, establishing workflows, and creating standardized processes for software development. 
However, platform engineering teams will inevitably run into issues with scale: traditional approaches to platform engineering are fundamentally limited by the capacity of human engineers to maintain the platform.\n\nThis is where [agentic AI](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) becomes a game-changer for your platform engineering investments.\n\n## Why your current platform engineering investment isn’t delivering its full potential\nImagine this: You’ve invested in a [platform engineering approach](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/) with tightly integrated tools and processes. However, you’re not yet seeing the expected return on this investment due to challenges such as:\n- Manual processes that can’t scale across the organization\n- Difficulty maintaining and updating platform components\n- Inconsistent adoption of standards across different teams\n- Valuable engineering time spent on routine maintenance instead of innovation\n\nThese challenges exist because traditional approaches to platform engineering are fundamentally limited by human capacity. No matter how well-designed your platform is, you need human engineers to implement, maintain, and scale it.\n\nCode development in enterprise software is also highly context-dependent. Languages can have vastly different performance challenges. Junior developers may not have enough context to write prompts effectively. Security and compliance policies may also create unknown restrictions. No single platform engineer can fully grasp every security, network, and application-layer concern across all these scenarios.\n\n## How agentic AI transforms platform engineering\nAgentic AI systems fundamentally change the equation. Unlike traditional AI assistants that respond only to direct prompts, AI agents have full context into a team’s software development infrastructure. 
They can initiate actions based on triggers and states, making them the perfect complement to platform engineering frameworks. These autonomous systems can work with minimal human intervention to automate repetitive tasks while still allowing for human oversight on more critical issues.\n\nAI agents can:\n- **Automatically identify and implement standardization opportunities** across your entire organization, ensuring consistency without manual intervention\n- **Maintain and update platform components** by monitoring for security vulnerabilities, performance issues, or outdated dependencies\n- **Create issue descriptions, reusable templates, and implementation plans** based on your existing codebase\n- **Support developers in generating and updating documentation**, including README files, code flow diagrams, and architecture documentation\n\n> Learn how [agentic AI built on top of a comprehensive DevSecOps platform](https://about.gitlab.com/blog/2025/02/24/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) can help development teams navigate everything from project bootstrapping and deployment to debugging and cross-team coordination.\n\n## What to keep in mind when implementing AI agents\nAsk yourself these questions when thinking about how to incorporate agentic AI into platform engineering workflows:\n\n### Interoperability, scalability, and reliability\n- How will AI agents communicate with other agents, including across third-party products?\n- How will agents self-correct when they produce unexpected or unwanted results?\n- Will the agents be capable of concurrency (working on multiple tasks at the same time)?\n\n### Security, governance, and observability\n- How will AI agents interact with existing network policies to define what they can and cannot access?\n- How will agents interact with multiple data sources?\n- Will the use of data by AI agents comply with existing data governance, security, and privacy policies?\n- How will agents’ 
telemetry data be collected, how will their performance be measured, and what will remediation look like when they behave incorrectly?\n\n### Developer workflows\n- How will developers adapt their processes to integrate with agent-based systems?\n- What human supervision mechanisms need to be in place for critical business processes?\n\n## Enabling team velocity with AI agents and platform engineering\nAs AI-powered agents mature, organizations can proactively use these tools to identify patterns, standardize practices, and share knowledge across different development teams without manual coordination.\n\nAfter participating in a recent early access program for [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/2025/04/17/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) - an agentic AI solution for the entire software development lifecycle - Osmar Alonso, DevOps Engineer at Volkswagen Digital Solutions, shared how AI agents have transformative potential for development workflows. “Even in its early stages, we saw how the deeper integration with autonomous agents could streamline our process, from code commit to production,” said Alonso. “We're excited to see how this technology empowers our team to focus on innovation and accelerate our digital transformation.”\n\nAgentic systems show particular promise in “tech mandatory” budget areas that most teams are committed to today, such as reducing technical debt, fixing security vulnerabilities, refactoring automation or infrastructure, and re-platforming legacy apps. These areas are filled with context-rich data and pose barriers to automation that agentic AI can help remove.\n\nFor example, platform engineering teams often create templates to standardize and automate processes such as CI pipelines. This traditionally involves significant manual work to identify the right processes to target - those that are widely used, have repeatable steps, and will have the most significant impact across teams. 
Agentic AI reduces those manual steps.\n\nRather than relying on human effort to identify processes for standardization, an agentic system can identify all Java-based projects from the past year, analyze the build processes across each, and identify the best candidates for AI-based automation. The system can then create draft templates the team can customize and build on.\n\nAgentic AI is also poised to transform how developers access organizational knowledge and tools. Many companies have invested in internal developer portals (IDPs) as abstraction layers to provide insights and portfolio visibility. However, IDPs are generally static artifacts - someone needs to maintain them and notify developers when new versions of reusable components are available. It's possible that AI agents, as part of a DevSecOps platform, will make IDPs increasingly irrelevant by proactively analyzing context across multiple data sources and tools to provide real-time insights directly within the platform. Agents will also be able to pair with the generative AI capabilities within the platform to deliver personalized, contextual information - such as vulnerability explanations or code suggestions - to each developer based on their role, project, and needs.\n\nPlatform engineering has delivered significant value, but many organizations have hit a ceiling in realizing its full potential. Agentic AI plays a crucial role in upgrading platform engineering efforts by automating complex processes, applying contextual understanding at scale, and enabling true team velocity rather than just individual productivity.",[530,533,536,539,542,545],{"header":531,"content":532},"What is agentic AI, and how does it differ from traditional AI assistants?","Agentic AI refers to autonomous systems that can make decisions, initiate actions, and adapt to context with minimal human input. 
Unlike traditional AI assistants that respond to prompts, agentic AI agents operate proactively within development workflows, enhancing efficiency and scalability across engineering teams.",{"header":534,"content":535},"Why is agentic AI important for platform engineering?","Agentic AI enhances platform engineering by automating routine and complex tasks such as template creation, vulnerability monitoring, and standardization across teams. This allows organizations to scale their DevSecOps efforts while reducing manual workload and accelerating delivery cycles.",{"header":537,"content":538},"How can agentic AI support standardization across development teams?","Agentic AI can analyze organizational codebases, identify repeatable patterns, and automatically generate templates or process improvements. This helps enforce consistency in CI pipelines, security practices, and documentation without requiring manual coordination across teams.",{"header":540,"content":541},"What should platform teams consider when implementing AI agents?","Successful implementation of agentic AI depends on factors such as interoperability across tools, secure data access, compliance with governance policies, and visibility into agent behavior. Teams must also ensure human oversight is embedded into workflows for critical processes.",{"header":543,"content":544},"Can agentic AI help reduce technical debt?","Yes, agentic AI is particularly effective in “tech mandatory” areas like refactoring outdated code, identifying security vulnerabilities, and automating infrastructure updates. By handling these context-heavy tasks, AI agents free up teams to focus on innovation and strategic initiatives.",{"header":546,"content":547},"Will agentic AI replace internal developer portals (IDPs)?","Agentic AI has the potential to augment or even replace static IDPs by delivering real-time, personalized insights directly within a DevSecOps platform. 
These agents can proactively provide relevant code suggestions, vulnerability analysis, and workflow guidance based on developer context.",{"layout":5,"template":486,"articleType":487,"author":549,"featured":331,"gatedAsset":550,"isHighlighted":6,"authorName":430},"brian-wald","source-lp-getting-started-with-ai-in-software-development-a-guide-for-leaders",{"title":552,"date":553,"description":554,"timeToRead":555,"heroImage":556,"keyTakeaways":557,"articleBody":561,"faq":562,"config":581},"How the insurance industry's data-rich ecosystem powers AI success","2025-04-24","Learn how insurers can build successful AI foundations that turn legacy challenges into operational efficiency and enhanced customer experiences.","3 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463750/s7wlgtnijdqmlqrsjwx7.png",[558,559,560],"Insurance companies face unique challenges with AI implementation due to complex system landscapes, strict regulatory requirements, and data silos. Successful adopters take a domain- and use case-specific approach.","When implemented effectively, AI delivers transformative benefits across the insurance value chain: operational efficiency, accelerated innovation in product development, and enhanced personalized customer experiences.","Successful AI implementation requires more than technology — it demands process simplification, strategic system consolidation, and embedded compliance controls to bridge complex workflows with modern customer expectations.","Insurance companies have an incredible opportunity for AI transformation. Few industries combine such extensive repositories of customer data, complex actuarial models, intricate claims workflows, and stringent regulatory requirements. 
This unique combination creates the perfect environment for intelligent automation and advanced decision-support systems.\n\nThe most successful insurance transformations I have witnessed share a common catalyst: the strategic implementation of AI built on a strong foundation. This approach revolutionizes how industry leaders bridge complex technology ecosystems with evolving customer expectations.\n\nWhen implemented effectively, AI delivers [transformative benefits](https://about.gitlab.com/the-source/ai/reducing-software-development-complexity-with-ai/) across the insurance value chain:\n- **Operational efficiency**: Automating complex underwriting decisions that traditionally required multiple manual reviews, and significantly reducing claims processing times through intelligent document analysis and automatic fraud detection\n- **Accelerated innovation**: Enabling insurance products with real-time risk modeling, and developing usage-based policies that adjust premiums dynamically based on behavioral data\n- **Enhanced customer experiences**: Transforming high-friction moments like FNOL (First Notice of Loss) into seamless digital experiences with predictive damage assessment and transparent claims tracking\n\nPerhaps most critically, AI can bridge the gap between legacy systems and modern cloud-native applications, preserving valuable business logic while enabling future innovation.\n\n## Where most insurers stumble\nDespite these benefits, implementing AI in insurance operations isn't straightforward. The primary obstacle isn't the technology but the fragmented technology ecosystem within which it must operate.\n\nContext fragmentation is particularly severe in the insurance industry, where critical data is typically stored across more than ten different systems, ranging from legacy policy administration platforms to modern CRM systems, rating engines, claims management software, and third-party data providers.\n\nProcess complexity compounds this challenge. 
Consider a typical policy renewal workflow that involves quoting systems, underwriting platforms, document management tools, payment processors, and customer communications systems. Each transition between these systems represents a potential point of failure or loss of context, making it impossible for AI to deliver on its promise.\n\nThe heavily regulated nature of insurance adds yet another layer of complexity. Strict requirements around [data privacy](https://content.naic.org/insurance-topics/data-privacy-and-insurance), model explainability, and [anti-discrimination laws](https://consumerfed.org/press_release/important-insurance-anti-discrimination-bill-becomes-law/) governing insurance rating factors all impact how [organizations can deploy AI](https://content.naic.org/insurance-topics/artificial-intelligence). Meanwhile, many insurers continue to operate with legacy systems that are decades old, creating significant barriers to data integration and the implementation of modern AI.\n\n## Addressing the fundamentals\nThe path to successful AI implementation is not solely in deploying new technology. It requires strengthening fundamental elements throughout the organization. Let’s take software development as an example, highlighting a domain-centric strategy:\n\n### Unified platform approach\n[Tool consolidation throughout the software development lifecycle](https://about.gitlab.com/the-source/platform/from-toolchain-chaos-to-business-roi-a-5-step-roadmap/) creates an ideal environment for AI implementation in insurance. When technology and business teams collaborate on a unified platform, AI assistants can access code, requirements, security scanning, software build, environment deployment, and testing data across traditionally siloed tools. This cross-functional visibility enables models to benefit from additional context, which isn’t possible in fragmented environments. 
In addition, security and release teams can benefit from [AI-powered vulnerability explanation and remediation](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/), and root cause analysis, all within the same interface.\n\n### Common data foundation\nA common data model is the critical backbone for effective AI. In addition to standardizing processes, insurance carriers must unify how data is structured, stored, and accessed across policy administration, claims, and customer systems. This consolidated data foundation enables AI tools to work with consistent information, providing meaningful insights at every stage of the software development lifecycle, from requirements gathering through deployment and monitoring. When [all applications share standardized data definitions and relationships](https://about.gitlab.com/blog/2025/02/24/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/), AI can make connections across traditionally siloed systems, identify patterns, and deliver analytics that would be impossible with fragmented data architectures. This approach ensures that AI enhancements aren't just technical novelties, but deliver measurable business value while maintaining regulatory compliance.\n\n### Guardrails through collaboration\nThe collaborative aspects of modern software delivery provide natural insertion points for controls and [guardrails across the software development pipeline](https://about.gitlab.com/the-source/ai/4-ways-ai-can-help-devops-teams-improve-security/). When enforcing manual or AI-supported review processes, such as code review, AI tools can supplement human expertise by automatically validating that code changes conform to technical standards. From requirements gathering to deployment, these guardrails verify that workflows maintain required separation of duties - all while accelerating the development process rather than creating bottlenecks. 
This collaborative approach ensures AI becomes a trusted partner in each development phase while maintaining the human oversight essential in regulated environments.\n\nAs you embark on your AI journey, ensure you address these fundamentals alongside your technology implementation. Insurance carriers that approach AI strategically, focusing on specific high-value domains while simultaneously strengthening their operational foundations, will realize the greatest competitive advantages in the years ahead.",[563,566,569,572,575,578],{"header":564,"content":565},"Why is the insurance industry well-suited for AI transformation? ","The insurance industry combines vast amounts of structured data, complex workflows, actuarial modeling, and strict regulatory requirements, making it an ideal environment for AI to drive operational efficiency, decision support, and customer experience enhancements.",{"header":567,"content":568},"What are the main challenges insurers face when implementing AI? ","Insurers often struggle with fragmented systems, disconnected workflows, legacy technologies, and strict compliance requirements that make it difficult to integrate AI effectively across the value chain.",{"header":570,"content":571},"How can a unified platform approach improve AI outcomes in insurance?","A unified platform consolidates tools across the development lifecycle, giving AI systems the full context they need to analyze code, monitor workflows, identify vulnerabilities, and suggest improvements without silo-induced blind spots.",{"header":573,"content":574},"Why is a common data foundation important for AI in insurance? 
","A standardized data model ensures AI tools can access consistent, clean data across systems like policy admin, claims, and CRM, enabling pattern recognition, analytics, and compliance without being hindered by fragmented data architectures.",{"header":576,"content":577},"How do AI guardrails enhance trust and compliance in insurance development?","Guardrails built into collaborative software development workflows help ensure that AI tools validate code changes, enforce security and compliance standards, and support human oversight, crucial in regulated environments like insurance.",{"header":579,"content":580},"What’s the most effective strategy for adopting AI in insurance? ","Successful insurers focus on high-value, domain-specific AI applications while modernizing foundational systems and processes, enabling scalable innovation and measurable business value without compromising compliance or stability.",{"layout":5,"template":486,"articleType":487,"author":582,"featured":6,"gatedAsset":27,"isHighlighted":6,"authorName":438},"george-kichukov",{"title":584,"date":585,"description":586,"timeToRead":587,"heroImage":588,"keyTakeaways":589,"articleBody":593,"faq":594,"config":613},"Implementing effective guardrails for AI agents","2025-04-15","Discover essential security guardrails for AI agents in DevSecOps, from compliance controls and infrastructure protection to user access management.","4 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464067/ptrew3tgqdij7tf3e9dt.jpg",[590,591,592],"AI agents require comprehensive security guardrails that go beyond traditional controls, encompassing audit trails, infrastructure protection, and code compliance while maintaining operational efficiency in DevSecOps environments.","Effective AI guardrails must balance security with productivity through layered controls: robust authentication, manual review requirements, customizable access levels, and comprehensive logging systems.","Organizations 
implementing AI guardrails today should focus on four key areas: user roles and access, limits and controls, customization options, and transparent logging — all while avoiding unnecessary friction.","As artificial intelligence (AI) continues to reshape software development, AI agents are emerging as powerful tools that can work alongside development teams to automate complex tasks, generate code, and streamline development workflows. AI agents promise [unprecedented efficiency gains](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) but also introduce new security risks that organizations must consider. From autonomous code generation to automated infrastructure management, AI agents handle increasingly sensitive operations that traditionally require human oversight. The shift to AI agents raises important questions about security, compliance, and risk management in modern development environments.\n\nEstablishing robust guardrails is a business imperative for technology leaders planning to incorporate AI agents into their development processes. Leaders need to ensure that AI systems operate within defined boundaries while maintaining the agility that makes them valuable.\n\n## Why AI guardrails matter\nThe concept of guardrails in AI systems extends beyond traditional security controls. These guardrails are a comprehensive framework of policies, controls, and monitoring mechanisms that govern how AI agents interact with your development environment. They ensure that AI systems operate safely and effectively while complying with organizational policies and regulatory requirements. As AI agents become more embedded in DevSecOps workflows, these protective measures will be crucial for maintaining security, compliance, and operational stability.\n\nWhy are guardrails so critical when it comes to AI agents? 
Here are a few of the challenges we expect DevSecOps teams to encounter with the deeper integration of AI agents into their workflows:\n\n**Audit and compliance requirements**: Organizations operating in regulated industries face strict requirements for tracking and justifying system changes. Our research shows that DevSecOps teams need comprehensive audit trails that capture when AI systems make changes and the human oversight involved. This dual-layer tracking is particularly crucial when AI agents and human operators work in tandem, as both the automated actions and human approvals must be documented. For regulated industries, this creates a clear chain of accountability that demonstrates who initiated changes, which AI agents were involved, and the reasoning behind each decision.\n\n**Infrastructure protection**: Protecting critical infrastructure from unintended changes has emerged as a primary concern among DevOps leaders integrating AI systems. Unintended modifications to critical infrastructure components present a significant risk that must be carefully managed. Our research uncovered scenarios where automated systems could inadvertently alter crucial configurations for load balancers or databases. Organizations can prevent these potentially disruptive changes by implementing multiple review requirements and forbidden command controls while maintaining the benefits of AI automation.\n\n**License and code compliance**: With the rise of AI-generated code, the challenge of managing code provenance has become increasingly complex. The security teams we interviewed emphasized the growing difficulty of maintaining clean intellectual property rights and ensuring compliance with open source licensing obligations. This is particularly crucial for organizations that must maintain strict control over their intellectual property or adhere to specific licensing requirements. 
Effective guardrails must include mechanisms for tracking and verifying the origin of AI-generated code while ensuring compliance with licensing obligations.\n\n**Production data security**: Enterprise security leaders consistently emphasize the critical importance of maintaining existing data access controls when implementing AI systems. This is especially relevant when dealing with customer data or regulated information that requires special handling. Our research shows that granular access controls are essential for ensuring AI agents operate within established security boundaries, preventing unauthorized access to sensitive data while enabling productive automation.\n\n> Learn how [agentic AI built on top of a comprehensive DevSecOps platform](https://about.gitlab.com/blog/2025/02/24/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) can help teams adopt AI agents in a way that empowers developers while preserving security, compliance, and governance.\n\n## Key guardrails for AI agents\nBased on our comprehensive interviews with 54 DevSecOps practitioners and leaders - including developers, DevOps teams, SecOps, InfraOps, and CIOs - from organizations of all sizes, we’ve identified several critical types of guardrails:\n\n### User roles and access\nSecurity begins with robust authentication and access control. Organizations should implement two-factor authentication or single sign-on (SSO) before granting AI tools access to any systems. This ensures proper user attribution and maintains security standards. Additionally, role-based access control (RBAC) is crucial for AI operations involving sensitive resources such as secrets, credentials, and protected branches.\n\n### Limits and controls\nTo maintain operational safety, organizations need clear boundaries around AI agent actions. 
This includes preventing direct production deployments without manual review and ensuring all AI-generated changes go through established merge request and review processes. Cost control measures are equally important, with manual approval requirements for actions that exceed defined thresholds. Organizations should also implement multiple review requirements for infrastructure or resource deletion and maintain robust rollback capabilities for all AI agent actions.\n\n### Customization\nEvery organization has unique security requirements and operational procedures. Effective guardrails must be customizable to accommodate these differences. This includes admin controls for forbidden commands (e.g., erasing Terraform state, changing domain names), configurable human touchpoints within workflows based on customer impact, and adjustable automation levels for different user roles. Integrating existing change management processes ensures AI agents work within established operational frameworks.\n\n### Logging, tracking, and transparency\nMaintaining visibility into AI agent actions is crucial for security and compliance. Organizations need comprehensive SecOps logging for all AI-initiated changes, clear explanations for AI decisions (especially regarding role-based trade-offs), and robust licensing compliance checks for AI-generated and third-party code. Granular production data access controls based on compliance requirements protect sensitive information.\n\n## Learning and iterating together\nOur research has revealed a crucial insight: Security measures should protect organizations without creating unnecessary friction. This ensures organizations can confidently adopt AI capabilities while maintaining robust security and compliance standards.\n\nAI guardrails will need to adapt and grow as technology continues to evolve. 
Organizations implementing these protective measures today will be better positioned to leverage AI agents while maintaining security and compliance. The key is finding the right balance between enabling innovation and maintaining control - a balance that well-designed guardrails help achieve.\n",[595,598,601,604,607,610],{"header":596,"content":597},"Why are security guardrails important for AI agents in software development?","AI agents often perform tasks with significant access and autonomy across development environments. Guardrails help ensure these agents operate safely by enforcing security, compliance, and governance standards while maintaining efficiency.",{"header":599,"content":600},"What risks do AI agents pose without proper guardrails?","Without protective measures, AI agents could unintentionally alter critical infrastructure, expose sensitive data, or introduce license compliance issues. The lack of oversight can also make audit trails and regulatory compliance difficult to maintain.",{"header":602,"content":603},"What are the most essential guardrails for enterprise AI adoption?","Critical guardrails include role-based access control, manual review checkpoints for high-impact actions, command restrictions (e.g., infrastructure deletion), licensing validation for AI-generated code, and centralized logging for all AI activity.",{"header":605,"content":606},"How can organizations balance security with developer productivity when using AI agents?","By implementing layered controls, like customizable automation levels, configurable human touchpoints, and flexible review policies, teams can benefit from AI without introducing friction into workflows or slowing down innovation.",{"header":608,"content":609},"How do AI agents affect regulatory compliance in DevSecOps environments?","AI agents handling code, infrastructure, or sensitive data must operate within clearly defined compliance boundaries. 
Guardrails provide audit trails and role-based visibility, ensuring that all actions meet regulatory and governance standards.",{"header":611,"content":612},"What role does logging play in securing AI agent activity?","Comprehensive logging ensures transparency by capturing what AI agents do, when, and why. It supports both security monitoring and compliance audits by documenting changes, decisions, and human interactions throughout the workflow.",{"layout":5,"template":486,"articleType":487,"author":614,"featured":6,"gatedAsset":27,"isHighlighted":6,"authorName":437},"erika-feldman",{"title":616,"date":617,"description":618,"timeToRead":587,"heroImage":619,"keyTakeaways":620,"articleBody":624,"faq":625,"config":641},"Emerging agentic AI trends reshaping software development","2025-04-14","Discover how agentic AI transforms development from isolated coding to intelligent workflows that enhance productivity while maintaining security.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464050/us6fmbypxtbjxrzxsbqy.png",[621,622,623],"Agentic AI systems are evolving from simple code completion tools to autonomous agents that understand entire codebases, orchestrate complex workflows, and take proactive actions across the development lifecycle with minimal human input.","Enterprise adoption of agentic AI requires robust security guardrails, including audit trails, infrastructure protection, and access controls, allowing organizations to gain efficiency without compromising governance or compliance standards.","Successful implementation of agentic AI tools depends on establishing clear human-AI partnership models that maintain human oversight for critical decisions while leveraging automation to address technical debt and modernize legacy codebases at scale.","The future of software development lies in the balance between human innovation and artificial intelligence (AI) capabilities. 
Simple AI-powered code completion tools are evolving into [sophisticated agentic AI systems](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) capable of understanding entire codebases and streamlining complex workflows across the development lifecycle. Unlike AI assistants that simply respond to commands, modern autonomous agents can proactively identify problems, suggest solutions, and implement changes with minimal human input - [freeing developers to focus on higher-level problem-solving](https://about.gitlab.com/blog/2025/02/24/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/), innovation, and the creative aspects of software development.\n\nHowever, as these changes accelerate, business leaders face important strategic choices about implementing agentic AI tools within their organizations while staying ahead of potential risks. Let’s explore a few major agentic AI trends that will reshape how enterprises develop software.\n\n## AI agents will complement, not replace, other AI tools\nThe first wave of AI development tools focused primarily on isolated tasks like code completion. The shift to AI agents isn’t about replacing these valuable tools, but rather complementing them with agentic AI systems that coordinate across the entire development ecosystem.\n\n[GitLab research](https://about.gitlab.com/developer-survey/) found that 42% of respondents use 6-10 tools in their development tech stack, and 20% use over 11 tools. What’s more, organizations using AI for software development were more likely to want to consolidate their toolchain than those not using AI. It’s clear that AI-powered development tools have the potential to boost developer productivity and efficiency, but that can’t happen if AI adds to the context-switching problem.\n\nAI agents offer a solution. 
They provide a single entry point for everyone building software to write code, check for vulnerabilities, update documentation, and more. AI agents can actively orchestrate the entire software lifecycle, managing complex tasks that previously required a developer to interact with three or four different tools, including AI code assistants and chat interfaces. This represents a significant advancement in how developers interact with AI systems, dramatically reducing the productivity drain caused by [context switching between tasks and tools](https://about.gitlab.com/the-source/ai/devops-leaders-fix-this-productivity-blocker-before-adding-ai/).\n\nLooking ahead, we’ll continue to see the convergence of AI agents and other AI tools as they evolve. AI agents will essentially become an orchestration layer that manages specialized systems, such as AI code assistants and security scanners, throughout the development process. At the same time, code assistants will evolve to incorporate more advanced agent-like capabilities, such as increased autonomy and more proactive problem-solving.\n\n## Enterprise-grade, secure AI agents will be mandatory\nAs agentic AI becomes more integrated into development processes, robust guardrails - comprehensive frameworks of policies, controls, and monitoring mechanisms - are emerging as a business imperative. These guardrails should govern how autonomous agents interact with development environments while maintaining the agility that makes them valuable.\n\nIn regulated industries, comprehensive audit trails must capture both AI-initiated changes and human intervention points, creating clear accountability chains that satisfy compliance requirements. 
For example, financial institutions and healthcare organizations will need granular access controls to ensure AI-powered agents comply with data protection regulations when handling sensitive customer or patient data.\n\nWith the rise of AI-generated code, determining the origin of specific pieces of code will also become more difficult, creating challenges in maintaining clean intellectual property rights and regulatory compliance. Organizations will need guardrails to track and verify the origin of AI-generated code while ensuring compliance with open source licensing obligations. These safeguards will allow organizations to gain the efficiency benefits of AI agents without compromising governance, security, or compliance standards.\n\n> Learn more about [security guardrails for AI agents in DevSecOps](https://about.gitlab.com/the-source/ai/implementing-effective-guardrails-for-ai-agents/), from compliance controls and infrastructure protection to user access management.\n\n## Agents will be critical for addressing technical debt at scale\nMany organizations struggle with [legacy codebases](https://about.gitlab.com/the-source/security/why-legacy-code-is-a-security-risk-and-how-ai-can-help/) that constrain innovation and consume maintenance resources. [GitLab research](https://about.gitlab.com/developer-survey/2024/ai/) found that 34% of all respondents using AI across the software development lifecycle already use AI to modernize legacy code. This is even higher in the financial services industry (46%). However, even with support from AI, code modernization is a complex process: in addition to updating the code itself, it requires testing, root cause analysis, vulnerability scanning, and documentation updates.\n\nAgentic AI is poised to help organizations make the code modernization process more efficient and reduce technical debt at scale. 
Agents can automatically refactor code to improve quality and reduce complexity while simultaneously migrating legacy languages like COBOL to modern alternatives. Throughout this modernization process, intelligent agents can also automatically manage all downstream dependencies, such as testing, CI/CD, and documentation.\n\nThe business impact of modernization through AI-powered agents goes far beyond cleaner code. It’s about freeing resources for innovation and accelerating response to market changes, which can provide significant competitive advantages for organizations undergoing digital transformation.\n\n## Human-AI partnership will be key\nAs autonomous agents become more capable, the relationship between humans and AI is also evolving. Successful organizations are developing clear models for human-AI collaboration that maintain appropriate human intervention while maximizing the benefits of automation.\n\nEffective partnerships include:\n- Clear delineation of AI versus human responsibilities\n- Transparent processes for reviewing AI-generated work\n- Training programs that help teams adapt to new workflows\n- [Metrics](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/) that capture the combined impact of human-AI collaboration\n\nSuccessful agentic AI implementations maintain humans at the center of critical decisions while leveraging automation for speed and consistency. Organizations that establish this balanced partnership approach - acknowledging both the capabilities of autonomous systems and the essential value of human judgment - will likely experience better adoption rates and greater satisfaction with their AI investments.\n\n## Preparing for an agentic future\nEmbracing agentic AI represents a strategic inflection point for software development. 
Forward-thinking organizations are already considering how to use AI agents to orchestrate complex workflows, tackle technical debt at scale, and establish productive human-AI partnerships. As these technologies mature, companies that thoughtfully integrate autonomous agents into their development ecosystems - maintaining human judgment for critical decisions while automating routine tasks - will dramatically accelerate innovation cycles, enhance code quality, and gain significant competitive advantages in an increasingly AI-powered world.",[626,629,632,635,638],{"header":627,"content":628},"What are the key differences between agentic AI and generative AI?","While both agentic AI and generative AI leverage large language models, their purposes and behaviors differ significantly. Generative AI tools are typically reactive, producing text or code in response to prompts. Agentic AI, on the other hand, is proactive, it understands broader contexts, can maintain memory across tasks, make decisions, and take autonomous actions across the software development lifecycle. Agentic AI acts more like a collaborator than a tool, working in tandem with other systems and agents to orchestrate entire workflows.",{"header":630,"content":631},"How can agentic AI reduce developer context switching?","Agentic AI helps reduce context switching by serving as a central orchestration layer across development tasks. Instead of jumping between multiple tools for coding, testing, documentation, and vulnerability checks, developers can rely on an AI agent to manage and integrate these workflows. This creates a smoother, more focused experience, improving productivity and reducing fatigue.",{"header":633,"content":634},"What security measures are required when using agentic AI in enterprise environments?","Enterprise-grade agentic AI must be deployed with robust guardrails to ensure compliance, data protection, and infrastructure safety. 
These include audit trails for AI actions, multi-stage reviews, access controls for sensitive data, and safeguards against unauthorized changes to production systems. These measures help maintain trust and accountability as AI agents take on greater responsibility.",{"header":636,"content":637},"Can agentic AI help modernize legacy code?","Yes, agentic AI is well-suited to tackling technical debt. It can automatically refactor legacy code, manage downstream dependencies like testing and CI/CD, and handle migration from outdated languages to modern alternatives. This helps accelerate modernization efforts while freeing developers to focus on innovation.",{"header":639,"content":640},"How should organizations structure human-AI collaboration for best results?","The most effective agentic AI strategies establish clear partnership models between humans and AI agents. These models include defined roles, transparent review processes, and metrics to measure joint performance. Organizations that maintain human oversight for high-risk decisions while leveraging automation for speed and efficiency tend to see higher adoption rates and better outcomes.",{"layout":5,"template":486,"articleType":487,"author":642,"featured":6,"gatedAsset":550,"isHighlighted":6,"authorName":432},"chandler-gibbons",{"title":644,"date":645,"description":646,"timeToRead":493,"heroImage":647,"keyTakeaways":648,"articleBody":652,"faq":653,"config":675},"Agentic AI: Unlocking developer potential at scale","2025-04-08","Explore how agentic AI is transforming software development, moving beyond code completion to create AI partners that proactively tackle complex tasks.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463876/kiw4eb54r8xtzztvbozf.jpg",[649,650,651],"AI agents can slash development time from weeks to hours by autonomously handling complex tasks like codebase modernization, while maintaining configurable human oversight for critical systems.","Unlike basic code assistants, AI 
agents can work with other agents to accomplish different tasks, freeing developers to focus on innovation and high-value problem-solving.","Specialized AI agents powered by different models can excel at specific tasks like security and testing, delivering better results than one-size-fits-all solutions.","AI has already changed how developers work. [GitLab research](https://about.gitlab.com/developer-survey/2024/ai/) found that 39% of DevSecOps professionals reported using AI for software development in 2024, up 16 percentage points from the previous year. AI-powered code assistants are now common tools that help teams write code faster, understand codebases, and create documentation. But now we’re seeing a big shift: the emergence of AI agents that work as active partners, not just passive helpers.\n\nThis change from reactive assistants to proactive agents is reshaping how developers build software. Agentic AI is making software creation easier for more people, driving a boom in innovation as more builders can create software that reaches billions of users. However, leaders will need to seek out agentic AI solutions with strong security and compliance guardrails to get the most out of this new wave of innovation without introducing unnecessary risk.\n\n## AI agents vs. AI assistants: What’s the difference?\nThe main difference between AI assistants and agents is how they behave. Code assistants are reactive, waiting for developers to ask questions or request tasks. While helpful for faster coding and understanding code, these assistants are passive in the development process.\n\nAI agents act more like team members. They exhibit reasoning and planning, and maintain context over different tasks, coupled with a certain degree of autonomy to make decisions, interact with other agents, and adapt to changing circumstances. 
With the shift to agents, AI becomes a true partner in building software.\n\nUnlike assistants that just help write code while teams handle everything else, AI agents can actively orchestrate complex processes, from security checks to compliance reviews. For example, a code review agent can automatically check code, find problems, and offer fixes. Where an assistant needs human input at each step, an agent can move between tasks based on project goals. Unlike simple assistants that can’t remember past interactions or learn from mistakes, agents can also learn and adapt over time.\n\n## The spectrum of autonomy\nOne of the most powerful aspects of AI agents is their configurability and level of interaction. While some agents can be highly interactive, others can execute complex tasks in the background with limited to no human interaction. Teams can therefore set different levels of human oversight based on the agent’s work and the task’s importance.\n\nFor simple tasks like summarizing code or drafting documentation, teams might let an agent work independently, only notifying a human team member when the task is finished. For critical tasks involving key business logic or sensitive data, teams can set up approval checkpoints or closely monitor the agent’s work.\n\nThis flexibility helps balance the speed of automation with the need for human control. It’s not all-or-nothing - teams can fine-tune the level of autonomy for different types of tasks and stages of the development lifecycle.\n\n## The power of specialization\nToday’s AI code assistants usually use a single large language model. 
But the future will bring many specialized agents, each powered by different models built for specific tasks.\n\nWe’re beginning to see the emergence of specialized agents for tasks such as:\n- Code modernization (converting codebases to newer language versions)\n- Security vulnerability detection and remediation\n- Test generation and execution\n- Performance optimization\n- Documentation generation\n- Root cause analysis for pipeline failures\n\nEach task works best with a model built specifically for that job. This specialization allows each agent to excel at its particular task rather than trying to be a jack-of-all-trades.\n\nWhat’s emerging is an ecosystem of specialized agents working together, each powered by different language models optimized for specific tasks. This multi-model approach promises to deliver better results than trying to handle all development tasks with a single, general-purpose model.\n\n## The real-world impact of AI agents\nTasks that once took weeks can now be done in hours with AI agents. For example, updating a large Java codebase to a newer version - work that might take a team weeks - can now be handled much faster by agents.\n\nMore importantly, AI agents help developers reach their highest potential. By handling routine tasks, agents free developers to focus on what they do best: solving complex problems and creating new solutions. This isn’t about replacing developers with AI, but boosting their abilities and letting them focus on higher-level thinking, innovation, and the creative work that needs human insight.\n\nWith AI agents, developers can work at a scale never before possible for individuals or teams. 
This shifts work from reactive, prompt-based tasks to proactive workflows that link all parts of software creation, helping with coding, planning, design, testing, deployment, and maintenance.\n\n## What to consider when adopting AI agents\nTo prepare for rapid growth in software development and code, companies need to plan ahead. Before adding AI agents to your process, focus on these key areas:\n\n1. **Think about how to boost real productivity, not just add new tools and processes for teams to learn**. By adopting [agentic AI workflows as part of a DevSecOps platform](https://about.gitlab.com/blog/2025/02/24/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/), you can help developers spend more time creating value for customers without contributing to [AI sprawl](https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/). The platform’s built-in reports and dashboards will also help you [measure success](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/) so you know your team is on the right track.\n2. **Seek out solutions that will work for your whole team**. The best AI agents make everyone more efficient, not just a select few developers.\n3. **Prioritize security and compliance**. As more and more production-ready code is generated by AI, a comprehensive DevSecOps platform is essential for secure software development at scale. If you work in a regulated industry, make sure your AI agent solution meets strict security and data privacy rules. Check if it can work offline or in [air-gapped systems](https://about.gitlab.com/the-source/ai/transforming-government-it-ai-for-air-gapped-environments/) if you need that level of security.\n4. **Look for solutions with enterprise control through human oversight**. AI agents should offer clear approval workflows and configurable guardrails that keep humans in the loop. 
This balance gives you the speed of automation while maintaining proper governance, which is essential for critical systems and strategic decisions.\n\nCompanies that use an end-to-end DevSecOps platform with automated security scanning, compliance guardrails, and standard workflows will be more equipped to harness the benefits of AI agents without adding unnecessary risk. Those without a platform will struggle to manage the complexity and risks of agentic AI while still delivering a safe and reliable customer experience.\n\n## Looking ahead\nWe’re just at the start of the AI agent revolution in software development. As these tools mature, we’ll see even better teamwork between human developers and AI agents, with agents becoming stronger partners in building software.\n\nLooking towards the future, there is significant potential for convergence between code assistants and AI agents. Code assistants will likely evolve to incorporate more advanced AI agent capabilities, such as increased autonomy in handling coding tasks, proactive problem-solving within the development workflow, and deeper integration with other development tools and processes. Future iterations might see code assistants taking on more complex coding tasks beyond simple generation, such as autonomously debugging, testing, and even deploying code based on high-level requirements, effectively becoming more autonomous “code agents.”\n\nSoftware has changed the world over the past five decades, but only a small fraction of people have the skills to build it. Yet these few developers reach billions through smartphones and the internet. Imagine a world where more people can build, secure, and deliver production-ready software. Agentic AI will make that happen.\n\nThe shift from passive assistants to active development partners is a big step forward in software development. 
As these specialized agents evolve, software development will be faster, more reliable, and more rewarding for developers working with these new AI partners.",[654,657,660,663,666,669,672],{"header":655,"content":656},"What is agentic AI in software development?","Agentic AI refers to autonomous AI agents that can reason, plan, and take initiative across tasks, unlike reactive code assistants that require human prompts. These agents act more like team members, performing complex tasks with minimal oversight and enabling proactive workflows throughout the software development lifecycle.",{"header":658,"content":659},"How do AI agents differ from traditional code assistants?","While code assistants respond to developer prompts, AI agents can independently complete multi-step tasks, coordinate with other agents, and adapt based on project goals. They can handle functions like security scans, test generation, and code reviews without needing manual intervention at every step.",{"header":661,"content":662},"What are the benefits of using AI agents for developers?","AI agents reduce manual workload by automating time-consuming tasks like updating codebases, running compliance checks, and generating documentation. This allows developers to focus on higher-value work such as innovation, problem-solving, and strategic development, ultimately accelerating delivery without compromising quality.",{"header":664,"content":665},"Can AI agents be customized for different levels of human oversight?","Yes. Teams can configure agent autonomy based on task criticality. 
For routine tasks, agents may operate independently, while for high-risk or business-critical operations, human approval checkpoints can be integrated to maintain governance and compliance.",{"header":667,"content":668},"Are specialized AI agents more effective than general-purpose models?","Specialized AI agents, each trained for a specific function, such as security, testing, or root cause analysis, typically outperform general-purpose models for their targeted tasks. This modular, multi-agent approach improves accuracy and efficiency by leveraging the strengths of domain-optimized models.",{"header":670,"content":671},"What should companies consider when adopting agentic AI?","Organizations should ensure that AI agents align with their security, compliance, and governance requirements. They should be integrated into an end-to-end DevSecOps platform to avoid AI sprawl, maintain control through human oversight, and support enterprise-wide adoption with consistent workflows.",{"header":673,"content":674},"How will agentic AI shape the future of software development?","Agentic AI will democratize software creation by enabling more people to build and manage production-grade software. As agents become more autonomous and integrated, they will drive faster innovation cycles, improve code quality, and make development more accessible, scalable, and secure.",{"layout":5,"template":486,"articleType":487,"author":488,"featured":331,"gatedAsset":27,"isHighlighted":6,"authorName":436},{"title":677,"date":678,"description":679,"timeToRead":462,"heroImage":680,"keyTakeaways":681,"articleBody":685,"faq":686,"config":705},"DevOps leaders: Fix this productivity blocker before adding AI","2025-01-30","Context switching is often a multi-faceted issue. 
AI can support developers by increasing productivity, but it’s not a one-size-fits-all solution.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464409/cuh1ieonfjcbck9zxnad.png",[682,683,684],"Context switching is known as the killer of productivity, leading to increased error rates and slowing down development velocity.","Context switching is caused by overcomplicated workflows and complex toolchains — systems that AI cannot solve alone.","AI can support developers in reducing context switching if it is thoughtfully integrated into development workflows.","Consider the barrage of notifications your development team receives every day. Emails, Slack notifications, meetings, issue updates, Jira boards, and more - across multiple projects at the same time - constantly redirect developers’ attention. On average, developers are interrupted [13 times per hour](https://library.oapen.org/viewer/web/viewer.html?file=/bitstream/handle/20.500.12657/22839/1007322.pdf?sequence=1&isAllowed=y#page=155).\n\nNever-ending context switching is draining your team’s productivity, breaking their concentration and making it much more challenging to finish tasks at hand - increasing the rate of errors, slowing down time-to-market, and leading to employee burnout.\n\nMany teams look to AI to solve productivity issues. However, if your challenges stem from context switching, you'll have to get to the root of the problem in order to get the most out of AI. 
Let’s dive deeper into the state of context switching among DevSecOps teams, strategies to reduce context switching, and how you can thoughtfully introduce AI to accelerate your teams’ workflows.\n\n## The costs of context switching\nContext switching has a tremendous impact on a developer’s daily experience, influencing productivity, security, and project deadlines.\n\n**Less productivity and more errors**: Developers spend only [20 seconds to 2 minutes](https://library.oapen.org/viewer/web/viewer.html?file=/bitstream/handle/20.500.12657/22839/1007322.pdf?sequence=1&isAllowed=y#page=155) on activities (such as writing code or running tests) before switching to another. This fragmentation leads to poorer code quality and more errors as developers cannot complete a task before a notification or required review breaks their concentration.\n\n**High rate of burnout**: Context switching also negatively impacts job satisfaction. According to [a study by the University of Irvine](https://ics.uci.edu/~gmark/chi08-mark.pdf), after only 20 minutes of interrupted tasks, people reported significantly higher stress, frustration, and pressure. With such a wide variety of interruptions, it can feel overwhelming to accomplish a single task. Over time, these frustrations can lead to employee burnout.\n\n## Common causes of context switching\nWhile some causes of context switching are more challenging to identify than others, understanding the underlying issues can help teams adjust their workflows.\n\n### Complex toolchains\n[Toolchains that include multiple point solutions](https://about.gitlab.com/blog/2022/08/24/too-many-toolchains-a-devops-platform-migration-is-the-answer/) as part of a single development workflow often complicate the development process. Consider a development team working on a deployment pipeline with multiple manual steps and approvals that each occur in a different tool. 
When teams work with many disparate tools, they hop not only between projects, but between multiple systems and interfaces just to complete basic tasks - one tool for code review, another for CI/CD, another for security scans, and another for deployment. This slows teams down as they navigate the software development lifecycle (SDLC).\n\nUnfortunately, working across a complex toolchain is the standard, not the exception. [GitLab’s 2024 Global DevSecOps report](https://about.gitlab.com/developer-survey/) found that 42% of respondents use 6-10 tools in their development tech stack, while 20% use over 11 tools.\n\n### Meeting and notification overload\nBeyond process and tooling challenges, developers face constant real-time interruptions. A company culture that is overly dependent on meetings can hinder productivity. Similarly, notifications such as Slack and email constantly draw attention away from a project, especially if employees feel pressured to respond immediately.\n\n## How engineering leaders can reduce context switching\nYou aren’t powerless in the battle against context switching. In fact, you can take several steps to minimize context switching across your organization.\n\n### Understand your team’s context switching patterns\nStart by investigating where context switching occurs within your team. 
You can leverage:\n\n- **Self-reporting methods**, such as surveys, questionnaires, and regular check-ins to get an anecdotal understanding of the amount of context switching that occurs.\n- **Digital tracking tools** like Toggl, RescueTime, or Clockify to track time spent on different tasks and identify task switching patterns.\n- **[Productivity analytics](https://docs.gitlab.com/ee/user/analytics/productivity_analytics.html)** through a DevSecOps platform like GitLab to pinpoint what issues are taking the longest and where workflows need to be streamlined.\n\nOnce you understand your team's patterns, here are key areas where you can reduce context switching:\n\n#### 1. Simplify your toolchain\nUse the data you gathered to consider how to best address context switching. Before jumping to AI as the productivity fix-all, take inventory of the tools currently being used by your teams. By understanding what tools are redundant, what can be consolidated, and where systems can be centralized, you’ll reduce the number of platforms developers use daily before adding on AI as another tool.\n\nGitLab’s report on [Navigating AI Maturity in DevSecOps](https://about.gitlab.com/developer-survey/2024/ai/) found that respondents using AI for software development were more likely to want to consolidate their toolchain than those not using AI tools. Interestingly, there wasn’t a significant difference in the number of tools each group used, which shows that AI may add to the context switching problem if it isn’t thoughtfully integrated.\n\n#### 2. 
Address cultural factors\nIf your teams are overwhelmed by meetings, notifications, or other internal commitments, there are several strategies you can put in place to increase focus time and reduce context switching.\n\n- **Create meeting-free days**: Designate one day each week when no meetings should be scheduled to allow for high-context, focused work.\n- **Time-block meetings**: Meetings typically require less concentration than project work. Tell your team to reserve their best focus hours for project work (for example, some people work better in the morning, others in the afternoon).\n- **Alternate who attends meetings**: Divide and conquer meetings across the team. This can give junior members more exposure and spread out the time each team member spends in meetings.\n- **Cancel meetings without a clear agenda**: Empower your team to ask for agendas before the meeting. Team members can then determine whether or not the meeting is relevant to their work.\n- **Set notifications to “do not disturb”**: Encourage team members to block time on their calendars for heads-down focus work. This includes setting statuses on applications like Slack or Discord to “do not disturb” to reduce interruptions.\n\nBy increasing quality focus time you can improve productivity, reduce stress, and create a happier work environment. Once you have done everything you can to simplify the toolchain and reduce internal disruptions, consider adding AI to further address bottlenecks across the SDLC.\n\n#### 3. Thoughtfully incorporate AI into workflows\nWhen you’re ready to add AI to your workflow, start with one designated AI tool, and run a pilot with a select group of team members. You can identify use cases that inform the tool’s effectiveness and performance, and receive feedback from the team on ease of use and overall value before fully integrating the tool into your systems. 
**The key is ensuring any AI tools are integrated into existing workflows rather than becoming another source of context switching themselves.**\n\nHere are a few ways teams can integrate AI to increase productivity:\n\n**Leverage an AI chatbot within a DevSecOps platform**: Developers can ask questions about documentation without leaving the IDE or platform where they’re writing or deploying code. In this way, [AI helps teams improve productivity](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/) and time-savings while decreasing the cognitive load associated with context switching.\n\n**Receive AI explanations of code**: Developers can learn what a block of code does and why it behaves a certain way without leaving the platform. This can be especially useful when team members are trying to understand portions of code written by others.\n\n**Explain vulnerabilities with AI**: When DevSecOps teams identify security flaws, they often spend hours determining the root cause with manual troubleshooting. Using AI, developers can receive [vulnerability explanations](https://about.gitlab.com/the-source/ai/understand-and-resolve-vulnerabilities-with-ai-powered-gitlab-duo/) that automatically generate relevant information about the vulnerability, provide resolution suggestions, and recommend code to fix it.\n\n## Solving context switching\nWhile reducing context switching looks different for every company and team, it often requires restructuring your toolchain, evaluating developer workflows, and reducing notifications and meetings.\n\nOnce you understand the elements contributing to your team’s productivity and workflow, you can intentionally incorporate AI to streamline manual tasks and improve efficiencies.",[687,690,693,696,699,702],{"header":688,"content":689},"Why is context switching such a problem for developer productivity?","Context switching interrupts a developer’s concentration and makes it harder to complete deep work. 
Frequent switching between tasks, tools, and communication channels breaks focus, increases errors, and slows down progress. Over time, this disruption can lead to burnout, reduced quality of code, and delays in project timelines.",{"header":691,"content":692},"What are the main causes of context switching in DevSecOps environments?","Complex toolchains and constant notifications are major contributors to context switching. When developers rely on multiple point solutions across the software development lifecycle, they must juggle various interfaces and workflows. Add to that a culture of excessive meetings and notification overload, and it becomes increasingly difficult for teams to stay focused on meaningful work.",{"header":694,"content":695},"Can simplifying the toolchain really make a difference?","Yes, streamlining tools is one of the most effective ways to reduce context switching. Consolidating platforms and removing redundant software can significantly lower the cognitive load placed on developers. This simplification not only reduces distractions but also creates a more consistent and efficient development workflow that helps teams stay focused.",{"header":697,"content":698},"What cultural changes can help reduce unnecessary interruptions?","Promoting a focus-friendly work environment starts with intentional practices. Limiting unnecessary meetings, encouraging meeting-free time blocks, and supporting “do not disturb” hours can make a big difference. Teams that value deep work and protect focused time empower developers to do their best work without constant disruption.",{"header":700,"content":701},"When is the right time to introduce AI into developer workflows?","AI can be incredibly helpful, but only if the underlying productivity challenges — like context switching — are first addressed. Once tooling and processes are streamlined, AI should be introduced carefully, beginning with limited use cases. 
This approach allows teams to test value, gather feedback, and ensure the AI tool fits naturally into the existing workflow rather than becoming another distraction.",{"header":703,"content":704},"How can AI help reduce context switching rather than contribute to it?","AI can improve productivity when it is integrated thoughtfully into a unified platform. Tools that provide in-context support — like AI chat for documentation, code explanations, and automated vulnerability analysis — keep developers focused by delivering help without forcing them to leave their workspace. These kinds of seamless integrations reduce the need for external searches, additional meetings, or manual troubleshooting.",{"layout":5,"template":486,"articleType":487,"author":706,"featured":6,"gatedAsset":25,"isHighlighted":6,"authorName":446},"julie-griffin",{"title":708,"date":709,"description":710,"timeToRead":462,"heroImage":711,"keyTakeaways":712,"articleBody":716,"faq":717,"config":733},"Reducing software development complexity with AI","2025-01-28","Discover how a strategic approach can help organizations maximize the benefits of AI without introducing extra complexity into software development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463941/oium18y7jy4pu6nxozkq.png",[713,714,715],"AI offers a promising solution to software development complexity by automating routine tasks like debugging, code reviews, and testing — but it requires thoughtful human oversight to avoid introducing unnecessary complexity.","The successful implementation of AI in software development involves a gradual approach, starting with low-risk areas, continuous adjustments, and the formulation of policies for AI usage.","AI serves a vital role in reducing software development challenges, including improving code reliability, meeting customer expectations, and enhancing cybersecurity measures.","Software development today involves managing an unprecedented number of moving parts. 
Teams are handling more languages, tools, and dependencies than ever while facing increased security requirements and shorter deployment cycles. AI offers a way to handle this growing complexity - but only with the right approach.\n\nAdopting any emerging technology can sometimes have unexpected consequences. Installing a new app-based home security system may require learning to use different tools or deal with false alarms. If you get an electric vehicle, you may experience lower fuel costs for your commute, but you’ll need to adjust to new controls and be mindful of where and how far you drive.\n\nIncorporating AI into the software development lifecycle takes this concept to the next level. AI can simplify work for developers by eliminating repetitive tasks - but as a leader, you’re likely weighing the promises of AI against very real concerns, from change management to security to the long-term career stability of your developers.\n\n## Understanding where AI fits\nSoftware development is a complex process that involves multiple steps, including coding, testing, debugging, and maintenance. 
Development teams face increasing pressure from multiple directions:\n\n- **Rapidly evolving technologies**: Developers may struggle to keep up with the latest tools and techniques as technology advances.\n- **Increasing customer expectations**: Customers are becoming more tech-savvy and have higher expectations for software quality, functionality, and security - putting additional pressure on developers to deliver complex and robust solutions.\n- **Legacy systems**: Many organizations still rely on outdated or legacy systems that can be difficult to integrate with newer technologies, resulting in complexity in the development process.\n- **Evolving security landscape**: As new, more advanced cyber threats emerge, developers must be constantly vigilant against security vulnerabilities, adding another layer of complexity.\n\nAs this complexity increases, it's not always clear where AI can deliver on its promises. The key is identifying where AI can have the most impact. One question I hear often is whether AI code assistants will help to reduce this complexity by augmenting developers’ current workflow or whether developers will need to adapt to an entirely new way of working. The answer, as I see it, is both. Human developers will need to get used to working with AI tools and large language models. And AI code assistants will adapt over time, tapping into more specialized models closely tailored to developers’ workflows. Before long, users will engage with [AI agents](https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/#the-future-of-applications-is-intelligent-adaptive-ai-agents) that respond intuitively and learn over time.\n\n## Starting smart with a phased implementation\nImplementing AI isn’t as simple as flipping a light switch. To harness the efficiencies of AI safely and strategically without introducing unnecessary complexity, I’ve found the most successful teams adopt a gradual approach. 
They start with low-risk areas to avoid pitfalls and allow development teams to experiment with how AI and other tools fit with their workflows. Be prepared: You might see a temporary dip in productivity before realizing long-term efficiency gains.\n\nThink about how changes ripple through your organization. Teams often face initial resistance to change, and in the case of AI, there is also a potential shift in code quality as AI-driven code volume increases. Understanding [how the entire software development lifecycle benefits from AI](https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/) is essential for successful adoption.\n\nFrom there, you’ll need clear guardrails and policies for AI usage, including employee guidelines, data sanitization practices, in-product disclosures, and moderation capabilities. What matters is staying focused on real results: continuously evaluating and adjusting how your teams use AI to ensure it's making development more efficient.\n\n## AI and cybersecurity\nStaying focused on what matters doesn’t just apply to automating code review and improving development cycles. Reducing complexity in software development also yields significant security benefits. The ever-increasing volume and sophistication of cyber attacks, combined with the complexity of organizations’ tech stacks, significantly contribute to security frustrations.\n\nBuilding large, multi-faceted software systems will always involve some complexity - that's unavoidable. However, you can take steps to minimize complications like difficult-to-maintain code repositories and redundant dependencies. 
When you let this unnecessary complexity creep in, you’re not just creating a larger attack surface - you’re giving your teams more security findings to sort through, prioritize, and address.\n\nThis is where your teams can use AI to minimize the potential negative security impacts of AI tools in other parts of the software development lifecycle. For example, [the new generation of AI-powered development tools](https://about.gitlab.com/gitlab-duo/) evaluate AI-generated code to ensure it doesn’t contain vulnerabilities. If vulnerabilities exist, the tool can help explain the expected impact and how developers can address the issue.\n\nAI creates additional security guardrails to prevent problems such as bad actors injecting harmful answers into large language models while helping your teams create more secure and compliant software. With predictive threat analysis, AI can scan code for security threats and automatically apply patches or reconfigure security settings in response to emerging vulnerabilities. Finally, compliance monitoring is another burden that AI development tools can help lift for software engineering teams.\n\n## Measuring success\n[GitLab research](https://about.gitlab.com/developer-survey/) has shown that software developers spend 25% or less of their day writing code; the rest is devoted to fixing errors, resolving security issues, and updating legacy systems. Automating these tasks with AI allows your teams to utilize their expertise more effectively and focus on problem-solving rather than recreating existing code. This reduces complexity, drives innovation by eliminating wasted effort, and enhances job satisfaction.\n\nFrom a business standpoint, objectives such as improving developer productivity and producing better, more secure code are key performance indicators (KPIs) that translate directly to improved cycle times and better results.\n\nThese are tangible improvements, but there are other areas where AI can reduce complexity. 
For example, code review has been shown to improve code but often creates bottlenecks as developers wait for review. AI streamlines code reviews and creates comprehensive testing scenarios, enhancing code reliability and reducing bugs, leading to improved software quality and higher customer satisfaction.\n\nFurthermore, AI can predict development bottlenecks and automate routine tasks, leading to more predictable release cycles and faster market entry. Its ability to rapidly and accurately tailor software to user feedback ensures that products meet customer needs and expectations more effectively.\n\n## The path forward: Controlling complexity with AI\nThe complexity of software development may continue to ebb and flow as development teams embed new AI technologies more deeply into their workflows and AI tools become more tailored to developers’ specific needs. But as long as your teams can transfer repetitive development tasks to AI while supervising AI output and intervening when needed, your organization should be able to manage complexity over the long term.\n\nBy providing your development teams with the proper training and time to experiment, you’ll find your organization better equipped to handle increasing complexity while delivering better, more secure software more efficiently.",[718,721,724,727,730],{"header":719,"content":720},"What is the best approach for implementing AI in software development?","A phased implementation strategy is ideal. Start with low-risk areas where AI can provide immediate efficiency gains, such as automated testing, documentation summarization, or code reviews. 
Gradually expand AI integration while monitoring its impact on workflows, ensuring that AI-driven solutions augment human developers rather than replacing essential development expertise.",{"header":722,"content":723},"How does AI impact software security and compliance?","AI enhances security by automating vulnerability detection, security patching, and compliance monitoring. It reduces complexity by scanning AI-generated code for weaknesses, ensuring compliance with industry standards, and providing predictive threat analysis. AI tools can also prevent bad actors from injecting harmful data into large language models while helping teams respond faster to emerging cybersecurity threats.",{"header":725,"content":726},"What are the biggest challenges when implementing AI in software development?","The main challenges include adapting to AI-driven workflows, ensuring code quality, managing security risks, and avoiding AI sprawl — the uncontrolled adoption of disconnected AI tools. Organizations must also establish clear guidelines for AI usage, including data sanitization, compliance monitoring, and AI-driven security guardrails to ensure AI adoption enhances efficiency rather than creating new complexities.",{"header":728,"content":729},"How should organizations measure the success of AI in software development?","Organizations should track key performance indicators (KPIs) such as developer productivity, cycle time reduction, code quality, security issue resolution time, and software reliability. AI’s ability to accelerate development cycles, reduce human error, and improve overall efficiency should be evaluated against real-world business outcomes like faster deployment, lower costs, and increased customer satisfaction.",{"header":731,"content":732},"How can AI help reduce complexity in software development?","AI streamlines software development by automating repetitive tasks such as code generation, bug detection, and security scanning. 
It helps developers manage multiple languages, tools, and dependencies while improving workflow efficiency. AI-powered tools can also identify bottlenecks, assist in debugging, and predict potential security vulnerabilities before they become major issues.",{"layout":5,"template":486,"articleType":487,"author":488,"featured":6,"gatedAsset":550,"isHighlighted":6,"authorName":436},{"title":735,"date":736,"description":737,"timeToRead":462,"heroImage":738,"keyTakeaways":739,"articleBody":743,"faq":744,"config":757},"Three challenges impacting your team’s AI productivity gains","2025-01-23","AI is becoming a critical part of software development — but there are growing pains. Learn more about common roadblocks and how to address them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464418/sekku5gned7o9tct0jze.png",[740,741,742],"AI can increase software development productivity by automating tasks, identifying insights from large datasets, and reducing time spent on repetitive tasks. However, there are challenges to achieving these productivity gains.","Organizations may face challenges such as an AI training gap, toolchain sprawl, and appropriately defining productivity metrics. Addressing these can help ensure the effective utilization of AI in software development.","To evaluate AI's effectiveness, organizations should measure ROI based on user adoption, time to market, revenue, and customer satisfaction metrics. Evaluation of the right metrics can help organizations better understand AI's impact on business outcomes.","Software development is at a turning point. AI promises to transform development workflows, but many organizations are discovering that integrating AI effectively requires more than just adopting new tools. 
[A GitLab research study](https://about.gitlab.com/developer-survey/) revealed that while executives are confident about AI adoption, 25% of individual contributors report their organizations aren’t providing adequate training and resources to help them use AI.\n\nAI can help teams tackle increasingly complex challenges, from code generation and security vulnerability detection to automated testing and project management. When implemented thoughtfully, AI allows developers to focus on innovation rather than repetition, leading to improved code quality. More importantly, AI’s ability to analyze vast datasets of code, builds, and deployments helps teams make informed decisions that accelerate delivery while reducing risks.\n\nHowever, as AI technology becomes more integrated into software development processes, organizations encounter three key challenges that can hinder these potential productivity gains.\n\n## 1. The AI training gap\nThe executive/developer perception gap isn’t surprising: While executives focus on AI’s strategic potential, development teams face the day-to-day reality of integrating these tools into their workflows. The disconnect often stems from organizations viewing AI as a potential replacement for software engineers, rather than a tool that enables more creative and strategic human-centered work. Software leaders should supplement their investments in AI with investments in training and development resources that allow software development teams to build momentum and motivation over time.\n\nIt’s important to call out here that your teams will need a grace period to determine how AI best fits their processes. Initially, productivity may decline as they adjust to new workflows. However, your teams will build trust in their new tools by testing how AI can best fit into their day-to-day workflows and see better results.\n\n## 2. 
AI-powered toolchain sprawl\nOne major factor that can detract from developer experience and impact overall productivity is [toolchain sprawl](https://about.gitlab.com/blog/2022/12/05/shake-off-sprawling-diy-toolchains-with-a-devsecops-platform/), or having multiple point solutions across the software development process. GitLab’s research found that two-thirds of DevSecOps professionals want to consolidate their toolchain, with many citing negative impacts on developer experience caused by context switching between tools.\n\nToolchain sprawl has additional drawbacks, such as adding cost and complexity, creating silos, and making it more challenging to standardize processes across teams. It also creates security concerns due to expanding attack surfaces and unnecessary handoff points. AI-powered point solutions compound these issues. In fact, GitLab’s research found that respondents whose organizations are currently using AI were more likely to want to consolidate their toolchains than those not using AI - even though there wasn’t a significant difference between the two groups in the number of tools respondents reported using.\n\nRather than attempting to integrate AI into unwieldy, complex toolchains, adopt consistent, strategic best practices that [minimize your teams’ context switching and cognitive load](https://about.gitlab.com/the-source/ai/devops-leaders-fix-this-productivity-blocker-before-adding-ai/) while reducing your organization’s total cost of ownership. Before incorporating new AI development tools, [evaluate your existing toolchains](https://about.gitlab.com/the-source/ai/overcome-ai-sprawl-with-a-value-stream-management-approach/) to determine areas where you can streamline or eliminate disparate tools to avoid the strain of integrating excess tools with AI-powered solutions.\n\n## 3. Unclear productivity metrics\nDeveloper productivity is a top concern for the C-suite. 
While measuring developer productivity has always been difficult, [AI has compounded the challenge](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/). You might agree that measuring developer productivity can help business growth, but most leaders aren’t effectively measuring productivity against business priorities. GitLab’s research revealed that less than half (42%) of C-level executives currently measure developer productivity within their organization and are happy with their approach.\n\nMany organizations struggle to quantify the impact of AI-powered tools on developer productivity or other real-world business outcomes. Traditional metrics, such as lines of code, code commits, or task completion, are often insufficient when assessing development’s impact on a business’s bottom line.\n\nThe best approach to modernizing measurement practices begins with consolidating quantitative data from throughout the software development lifecycle with insights from software developers on how AI is supporting or hindering their daily work.\n\n## Making AI work for your teams\nSuccessfully implementing AI in software development requires closing the gap between executive expectations and developer realities. Start where your team feels the most friction today— whether that’s providing proper training, consolidating toolchains, or rethinking traditional productivity metrics. Taking action now allows your teams to realize meaningful productivity gains, rather than just adding new tools.",[745,748,751,754],{"header":746,"content":747},"How is the gap between executive expectations and developer experience affecting AI adoption in software development?","While executives remain optimistic about AI’s strategic potential, many developers face challenges integrating these tools into daily workflows. 
This disconnect can result from a lack of training and support, with some organizations viewing AI as a replacement for developers rather than an enabler of more meaningful work. Addressing this gap requires investments in developer education and a grace period to adapt to new AI-driven workflows.",{"header":749,"content":750},"Why is toolchain sprawl a problem when implementing AI in software development?","Toolchain sprawl, or using multiple point solutions across development processes, can negatively impact developer experience by increasing context switching and complexity. AI-powered tools can worsen this issue if introduced into already fragmented toolchains, creating additional silos and security risks. Streamlining tools and adopting integrated solutions can reduce friction, improve productivity, and lower total cost of ownership.",{"header":752,"content":753},"What makes measuring AI-driven productivity difficult for organizations?","Many organizations struggle to quantify the value of AI tools using traditional metrics like lines of code or task completion. These measures often fall short in reflecting how AI impacts overall business outcomes. A more effective approach combines lifecycle-wide quantitative data with qualitative insights from developers to understand how AI supports or hinders day-to-day work.",{"header":755,"content":756},"What steps can organizations take to improve the impact of AI on developer productivity?","Organizations can begin by addressing the most pressing friction points for their teams. This might include offering better AI training, simplifying complex toolchains, or modernizing productivity metrics. 
A strategic, developer-first approach ensures AI is integrated in a way that enhances rather than complicates development workflows.",{"layout":5,"template":486,"articleType":487,"author":758,"featured":331,"gatedAsset":27,"isHighlighted":6,"authorName":451},"sabrina-farmer",{"title":760,"date":761,"description":762,"heroImage":763,"keyTakeaways":764,"articleBody":768,"config":769},"DORA insights: Where is AI really driving developer productivity?","2025-01-16","Discover valuable insights from the 2024 Accelerate State of DevOps Report and learn how you can harness AI to maximize team performance and innovation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464086/p04zmdk6h3bbkipeqelh.png",[765,766,767],"DORA research underlines the significance of developer experience, the emergence of platform engineering, and AI's role in software development across various levels.","AI has been shown to have positive impacts in enhancing team performance across the software development process; however, a comprehensive AI strategy is critical to ensure that benefits for individuals translate into benefits for the product.","Creating a supportive, valued, and motivated workspace is key to high performance and mitigating burnout, making it essential for organizations to ready their teams for AI's innovative potential.","For over a decade, the DORA research program has examined what distinguishes high-performing technology teams and organizations. Their four key metrics - lead time for changes, deployment frequency, change fail rate, and failed deployment recovery time - have become the industry standard for assessing software delivery performance. 
The [2024 Accelerate State of DevOps Report](https://cloud.google.com/resources/devops/state-of-devops?hl=en) highlights the ongoing importance of developer experience, the rise of platform engineering, and how the adoption of artificial intelligence (AI) affects software development across multiple levels.\n\nSoftware developers across all industries increasingly depend on emerging AI-powered development tools to minimize a wide range of repetitive tasks and boost team performance, security, and code quality - and over a third of developers report \"moderate\" to \"extreme\" productivity gains from using AI. However, effective change management and a comprehensive AI strategy are essential to address the challenges of early adoption, such as the AI training gap, “AI sprawl,” finding the optimal level of trust, and the need for a clear vision of success that is captured by a robust set of metrics.\n\nCreating a work environment where teams feel supported, valued, and motivated is crucial for achieving high performance and minimizing burnout. 
How can organizations ready their teams, processes, and cultures to harness the full potential of an AI strategy for driving innovation?\n\nIn this webinar, Derek DeBellis, Lead Researcher on Google's DORA team, Stephen Walters, Field CTO at GitLab, and Haim Snir, Senior Product Manager, Dev & Analytics at GitLab, reveal the key findings from the 2024 Accelerate State of DevOps DORA report.\n\n### Join us as we explore:\n\n- **Benefits and challenges of AI adoption:** Learn how AI boosts productivity, job satisfaction, retention, and code quality, and how to address potential roadblocks in early adoption.\n- **Platform engineering and AI:** Discover how platform engineering can elevate developer productivity and performance when combined with AI.\n- **Measuring performance with AI:** Understand how assessing the right quantitative metrics can help organizations better understand AI's impact on development workflows and business goals.",{"layout":5,"template":486,"articleType":770,"featured":6,"gatedAsset":771,"speakers":772,"isHighlighted":6,"authorName":-1},"Webinar","dora-insights",[773,774,775],"derek-debellis","stephen-walters","haim-snir",{"title":777,"date":778,"description":779,"timeToRead":555,"heroImage":780,"keyTakeaways":781,"articleBody":785,"faq":786,"config":802},"Agentic AI, self-hosted models, and more: AI trends for 2025","2024-12-18","Discover key trends in AI for software development, from on-premises model deployments to intelligent, adaptive AI agents.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464096/twyszwpyraghcxz1bruy.png",[782,783,784],"Artificial intelligence is already having a major impact on software development, enhancing code quality and efficiency by removing a wide range of tasks.","Software developers will work alongside AI agents that will facilitate real-time problem solving, rapid optimization of application performance, and overall improvement in software quality, enabling developers to concentrate on 
strategic decision-making.","Increased use of on-premises AI deployments, particularly in regulated industries, will give companies more control over data privacy and security, as well as the ability to customize their software according to individual needs.","According to [GitLab's 2024 research](https://about.gitlab.com/developer-survey/), 78% of organizations will use artificial intelligence in their software development processes within the next two years - a dramatic shift that's already transforming how teams build and deliver software. The research also shows that the number of organizations actively using AI has jumped from 23% to 39% in the last year alone.\n\nAs software development teams race to integrate AI into their workflows, major shifts are emerging that will fundamentally change how we build software. From intelligent AI agents that adapt in real time to the rise of customized on-premises models, here are three ways AI will significantly alter software development.\n\n## The future of applications is intelligent, adaptive AI agents\nWhile the first wave of AI in software development focused on reactive code assistants for code generation and completion, the future belongs to agentic AI. [Intelligent, adaptable AI agents](https://about.gitlab.com/blog/2024/06/27/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/) will surpass the limitations of traditional software. Rather than interacting with fixed interfaces and preset workflows, users will engage with AI agents that respond intuitively and learn over time.\n\nThese AI-powered agents will serve as the application, providing a more interactive and conversational experience. 
As AI agents can perform complex tasks, offer guidance, and learn from interactions in real time, agentic AI will lead to significantly more personalized and responsive applications, fundamentally reshaping how we use software.\n\n## AI assistants will evolve to become proactive collaborators\n[AI assistants are getting smarter](https://about.gitlab.com/gartner-mq-ai-code-assistants/), moving beyond reactive prompt-based interactions to proactive problem-solvers. As part of this evolution, AI-powered tools will become central hubs for development, anticipating developers’ needs and offering real-time suggestions for optimizing application performance, security, and maintenance. This new generation of AI assistants will tackle complex projects and tasks with little human interaction, accelerating the software development process. This shift will streamline the entire software development lifecycle, making it more accessible through a simple user interface.\n\nThe role of software developers will evolve alongside these advancements. AI will not replace human developers but will augment their capabilities, allowing them to focus on what they love most: solving complex technical problems. By automating routine tasks and providing expert guidance, AI assistants will empower developers to delve deeper into business problem-solving, continuously improve code quality, and explore new technologies and skills.\n\n## More companies will run customized models on-premises\nIn 2025, organizations will shift toward smaller and more specialized AI deployments. As open source models become more cost-effective and accessible, teams will increasingly opt to run customized versions within their own data centers. 
As a result, it will be cheaper, faster, and easier for organizations to [host their own large language models and fine-tune them to their individual needs](https://about.gitlab.com/releases/2024/10/17/gitlab-17-5-released/#use-self-hosted-model-for-gitlab-duo-code-suggestions). Companies will find they can combine their data with existing models and tailor the customer experience at a fraction of today’s costs.\n\nMeanwhile, increased compliance risks associated with AI will drive regulated industries - like financial institutions and government agencies - to deploy models in air-gapped environments for reduced latency and greater control over data privacy and security.\n\n## Conclusion\nThe future of software development is inextricably linked to AI. These technologies are transforming how software is created, delivered, and maintained. By embracing AI in all its forms - from generative AI to proactive AI assistants to fully autonomous AI agents - organizations can gain a competitive edge, improve efficiency, and deliver innovative solutions that meet customers’ evolving needs.\n\nThis transformation requires thoughtful preparation: strategic planning, investment in talent and infrastructure, and a commitment to continuous learning and adaptation. Organizations that successfully navigate this evolving landscape will be well-positioned to thrive in the digital age.\n",[787,790,793,796,799],{"header":788,"content":789},"Why are companies moving toward self-hosted AI models?","Organizations are shifting toward self-hosted AI models to enhance data privacy, reduce costs, and customize AI solutions for their specific needs. 
With advancements in open-source AI, companies can fine-tune models in on-premises environments, ensuring compliance with regulations and improving performance while maintaining control over sensitive data.",{"header":791,"content":792},"What are the benefits of running AI models in on-premises environments?","Deploying AI models on-premises offers organizations greater control over data security, improved compliance with regulatory requirements, and reduced latency. This approach is particularly valuable for industries handling sensitive data, such as finance, healthcare, and government agencies.",{"header":794,"content":795},"How are AI-powered coding assistants evolving?","AI coding assistants are transitioning from reactive tools to proactive collaborators. Future AI assistants will anticipate developer needs, provide intelligent recommendations, automate complex tasks, and enhance software security, ultimately making software development more efficient and accessible.",{"header":797,"content":798},"How can organizations prepare for AI-driven software development in 2025?","To successfully adopt AI-driven development, companies should invest in AI infrastructure, upskill developers, implement responsible AI governance, and explore hybrid AI solutions that balance cloud and on-premises deployment. Staying informed about AI trends will help teams leverage AI for innovation and efficiency.",{"header":800,"content":801},"What is agentic AI, and how will it impact software development?","Agentic AI refers to AI systems that operate autonomously, learning from interactions and adapting in real time. 
Unlike traditional AI coding assistants that react to prompts, agentic AI acts proactively, streamlining software development by automating workflows, improving efficiency, and personalizing user experiences.",{"layout":5,"template":486,"articleType":487,"author":488,"featured":6,"gatedAsset":550,"isHighlighted":6,"authorName":436},{"title":804,"date":805,"description":806,"timeToRead":807,"heroImage":808,"keyTakeaways":809,"articleBody":813,"faq":814,"config":830},"Overcome AI sprawl with a Value Stream Management approach","2024-12-12","Learn how an AI strategy based on Value Stream Management can stop AI sprawl and supply chain constraints and drive ROI.","7 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464425/vox2seqkjomxok5i56lh.png",[810,811,812],"A strategic approach to AI involves linking it to value streams, ensuring AI is implemented precisely where constraints exist for optimal value delivery.","Transitioning from project-based to flow-based thinking enhances team alignment and effectiveness in AI implementations.","AI should be seamlessly integrated within an effective Value Stream Management approach to eliminate inefficiencies and support organizational goals.","With the move to [platform engineering](https://about.gitlab.com/the-source/platform-and-infrastructure/driving-business-results-with-platform-engineering/), organizations can consolidate complex toolchains and deliver higher-quality software faster and with greater security. Now, as software development teams look to AI for further improvements, we’re seeing a concerning trend: the implementation of disconnected AI point solutions, creating “AI sprawl.”\n\nThis AI sprawl amplifies all of the toolchain sprawl problems listed above. Implementing AI as a point solution for a small group of individuals at one stage in the value stream is just as likely to negatively impact your business outcomes as it is to improve them. 
An 8x boost in development speed means little if it leads to an 8-fold increase in integration overhead, maintenance time, and data reconciliation.\n\nHaving a strategy to address AI sprawl is just as important as having a strategy to minimize toolchains - but what does that strategy look like? It’s more than simply the consolidation of AI tools. It’s also about joining up lean processes and establishing clear responsibilities and lines of communication among cross-functional teams. [Value Stream Management](https://about.gitlab.com/solutions/value-stream-management/) holds the answer.\n\n## What is Value Stream Management?\n\nValue Stream Management is a methodology that aims to identify areas for improvement in a process to help drive the flow of business value more efficiently. When applied to software development, Value Stream Management involves identifying and mapping out all the steps involved in delivering value to customers, analyzing and measuring the flow of work through these steps, and continuously improving the process through automation. In this case, _value_ means any outcome that benefits the customer - whether that's new features, better performance, increased reliability, or enhanced security.\n\nImplementing a Value Stream Management strategy brings numerous benefits to an organization, such as increased transparency, improved communication, reduced waste and bottlenecks, quicker feedback loops, and ultimately faster time to market. By gaining visibility into the entire software delivery process, organizations can identify areas for improvement and make data-driven decisions to continuously optimize their value stream.\n\n## Building a value stream and AI strategy\n\nReducing AI sprawl starts with identifying bottlenecks in your software value stream: your process is only as fast as its slowest step. In DevSecOps, our goal is a business outcome achieved by an IT system that is delivered via a fast, safe, and secure supply chain. 
AI’s purpose should be to limit or remove any potential constraints in the supply chain. _This supply chain is a value stream_, which is why Value Stream Management is fundamental for building an AI strategy.\n\nOne example of a constraint in a software development process is time wasted waiting for security vulnerability information, a security representative, or vulnerability details. AI-powered security scanning and resolution can eliminate this bottleneck.\n\nIf we look at the [Implementation Roadmap for Value Stream Management](https://www.vsmconsortium.org/implementation/the-value-stream-management-implementation-roadmap) from the Value Stream Management Consortium, we can see several important steps in building the strategy before any technology decisions are made.\n\n![Value Stream Management implementation roadmap](//images.ctfassets.net/xz1dnu24egyd/bwxFAtAfFcKQlD3kovd9C/29e282bcd324d071eefad40057e9433d/VSM-implementation-roadmap-no-text.png)\n\n_Diagram courtesy of [Value Stream Management Consortium](https://www.vsmconsortium.org/)_\n\nHere, I’ll focus on three steps. After assessing our current position and determining a vision of what we want to achieve, we must:\n\n- **Identify** our value streams\n- **Organize** roles and responsibilities for each of them\n- **Map** our people, processes, and technology to understand how all of this connects\n\nLet’s take a closer look at each stage.\n\n### 1. Identify value streams that deliver business outcomes\n\nUnderstanding your value streams is critical for AI adoption because it reveals _where_ and _how_ AI can actually improve delivery. The first step in building an AI strategy is to ask yourself: What are your organization’s main processes and workflows that drive business value? And which of those value streams have limitations or bottlenecks - points of constraint - that can be addressed with AI? 
The answers to these questions will tell you where AI can deliver the best results and what the end goal of using AI should be.\n\nWhen identifying your value streams, connect them directly to the business goals your IT systems are established to achieve. For AI to enable value delivery of the goal, you will need to consider the entire value stream, end-to-end, from idea to realization, with the required business objectives in mind. For example, certain value streams may have regulatory and compliance requirements that other value streams do not. These should be documented in the business goals of the value stream and used to define the potential AI requirements for each value stream.\n\n### 2. Organize people, processes, and tools around specific roles and responsibilities\n\nOver the last few decades, IT has moved from a **Project** mindset to a **Product** mindset. A project mindset focuses on delivering a specific result to fulfill a set of requirements, while a product mindset focuses on the bigger picture, including long-term success for users and customers. However, this shift has often just replaced activity-based silos with artifact-based silos - in other words, we've moved from teams organized by what they do (coding, testing, security) to teams organized by what they make (apps, APIs, platforms). Methods such as Scrum of Scrums try to address this, but typically swap under-collaboration with over-collaboration, where teams spend more time discussing than doing.\n\nWith Value Stream Management comes the concept of moving from **Product** to **Flow**. Instead of focusing on individual products, teams focus on how value moves - or flows - through the organization. Flow Engineering is then about designing team structures and handoffs around making that journey as smooth as possible.\n\nThis shift changes how we implement AI. AI solutions can’t focus on either a singular role or artifact. 
To remove handoffs and ensure alignment, AI must understand the scope and parameters of its operation, which teams it works with, and when it must be used.\n\nIn other words, AI must have a clearly defined role and responsibility, just as humans do. AI should understand its part in enabling flow along the value stream, working in an interactive manner with other people and AI tools. [Team Topologies](https://teamtopologies.com/) and a [Value Stream Reference Architecture](https://www.vsmconsortium.org/value-stream-reference-architectures) are invaluable at this stage because they provide a framework for designing and documenting a team structure that will help your team create value faster.\n\n### 3. Map the value stream to ensure everyone’s on the same page\n\nAI must be implemented precisely at the point of the constraint to provide the required benefit. In other words, adding AI to the wrong place in your process can actually make things worse. Let’s say security reviews are the slowest step in your workflow because developers need to spend time going back and forth with the security team to understand and address vulnerabilities. In this case, implementing AI only _before_ the security stage to help developers write more code faster is just going to make the bottleneck worse, because there will be even more code (and more potential vulnerabilities) for the team to sort through.\n\nBut how do we pinpoint where AI should operate? By mapping the existing workflow or golden path for a single value stream. 
This detailed mapping allows us to identify the precise point of constraint and determine whether AI will provide the required benefit to remove or reduce the impact.\n\nA value stream reference architecture lets us define team actions and map out an ideal future state, showing activities in their most efficient sequence and where AI fits into the bigger picture.\n\nA simplified example of a value stream map for developing a new software feature might look something like this: It starts with a customer request for a specific feature. Then, developers build the code, which is followed by testing and security scanning before the code is finally deployed to production. The value stream map should include each of these steps as a distinct section.\n\nContinuing with the example above, you’ve identified a bottleneck at the security stage. This point of constraint might have a couple of different potential solutions. Adopting an [end-to-end DevSecOps platform](https://about.gitlab.com/platform/) will allow you to shift security closer to the developers’ workflow to reduce the cognitive burden. You might also identify an AI solution to help developers understand and resolve vulnerabilities faster. In the previous stage, you would have defined the role and responsibility of AI in enabling flow along the value stream. 
Now, the value stream map captures this whole picture - which AI tools work where, what they're meant to achieve, and how they help value flow faster through your system.\n\n## Addressing AI sprawl: A value stream-based approach\n\nValue stream mapping helps prevent AI sprawl in several ways:\n\n- **Identifying** important value streams illustrates how different value streams can rely upon a single AI-powered platform to provide consistency and standards, particularly for regulatory needs.\n- **Organizing** people, processes, and tools around specific roles and responsibilities allows you to see how that platform should work holistically with context across the length of each value stream.\n- **Mapping** the value stream allows you to see where and how AI operates at different stages in the value stream so you can identify where duplicate efforts might be creating waste. This will enable flow in the value stream, remove handoffs, improve team alignment, and ensure that AI tools deliver on the organization’s goals.\n\nBy repeating the process for different value streams and AI solutions with different goals and roles, you’ll have a framework for your AI strategy.\n\n## Conclusion\n\nSoftware development teams can remove waste, improve operational efficiency, and ensure security by using technology to enhance and automate manual tasks across the software development lifecycle. Historically, this has been achieved through toolchain automation, but organizations can now leverage emerging technologies such as generative AI.\n\nBy identifying, organizing, and mapping AI to value streams, we can strategically implement AI to enable flow and remove waste. AI isn’t a standalone solution, but should rather be integrated into a holistic strategy with clear roles and responsibilities. 
By viewing AI through the lens of Value Stream Management, we see the real key to success: AI's effectiveness depends entirely on understanding how you manage your value streams.",[815,818,821,824,827],{"header":816,"content":817},"What role does AI play in accelerating security compliance within value streams?","AI enhances security compliance by automating vulnerability assessments, detecting threats in real time, and assisting teams with security policy enforcement. Within value streams, AI can reduce the time needed for compliance approvals and improve risk assessment accuracy.",{"header":819,"content":820},"How does Value Stream Management ensure AI is used effectively?","Value Stream Management ensures AI is integrated at points where it adds measurable value, such as removing bottlenecks, reducing manual effort, or improving security scanning. By mapping workflows end-to-end, organizations can strategically place AI tools where they will have the highest impact.",{"header":822,"content":823},"How can organizations align AI initiatives with business outcomes using Value Stream Management?","By identifying critical value streams and aligning AI with key business objectives, organizations ensure that AI investments drive tangible improvements in speed, cost efficiency, and product reliability. This approach prevents AI from becoming a siloed experiment and ensures it supports enterprise-wide innovation.",{"header":825,"content":826},"How does AI sprawl affect software development efficiency?","AI sprawl increases integration overhead, maintenance complexity, and data reconciliation issues. 
When multiple AI tools are introduced in isolation, they create redundant processes, fragmented workflows, and operational inefficiencies that slow down development instead of accelerating it.",{"header":828,"content":829},"What are the risks of implementing AI without a Value Stream Management strategy?","Without Value Stream Management, AI tools may be applied at inefficient points in the workflow, worsening bottlenecks rather than eliminating them. Duplicative AI models, lack of visibility, and misaligned automation can lead to wasted resources and increased operational costs.",{"layout":5,"template":486,"articleType":487,"author":774,"featured":6,"gatedAsset":831,"isHighlighted":6,"authorName":454},"source-lp-dora-insights-where-is-ai-really-driving-developer-productivity",{"title":833,"date":834,"description":835,"heroImage":836,"keyTakeaways":837,"articleBody":841,"config":842},"Modernizing government DevSecOps with artificial intelligence","2024-12-03","Discover how artificial intelligence in DevSecOps can revolutionize federal government operations and foster innovation across government agencies.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464229/eobtirh5sxuiw3mjodbx.png",[838,839,840],"Artificial intelligence is transforming how federal agencies approach their development lifecycle, helping to overcome hurdles such as strict security risks and a lack of skilled workers.","By 2025, AI is anticipated to be a core component of government operations, refining the software development lifecycle, updating legacy systems, improving security issues, and promoting efficiency.","Implementation of AI within federal government agencies could bring about substantial advancements in decision-making, effectiveness, and digital transformation, revolutionizing the sector and refining DevSecOps processes.","Artificial intelligence (AI) and machine learning present remarkable opportunities for enhancing efficiency and fostering innovation in government 
agencies. However, challenges related to legacy systems, security risks, and legislative gaps continue to hinder modernization efforts. By integrating AI with DevSecOps processes, development teams can strengthen code security and address these concerns. Furthermore, blending AI capabilities with human intelligence allows for a more comprehensive approach to security, ensuring that government operations are both future-proof and resilient against emerging threats.\n\n### Current state: Exploring AI across the software development lifecycle\nFederal agencies are increasingly exploring AI adoption to improve productivity by automating complex tasks in the software development process. However, agencies face hurdles such as outdated procurement laws, a shortage of skilled professionals, and the challenge of bridging the gap between modern digital solutions and legacy systems. These challenges highlight the need for evolving DevOps practices to support AI's role in government operations.\n\n> According to research, __67% of security professionals in government agencies__ find it challenging to get development teams to prioritize remediation of security issues.\n\n### Future vision: Embracing AI for enhanced DevSecOps\nLooking ahead to 2025 and beyond, AI is expected to become integral to federal government operations, particularly in streamlining the development lifecycle, identifying and addressing potential security risks, and modernizing legacy systems. AI-driven automation will allow agencies to move their focus from routine tasks to strategic initiatives, addressing inefficiencies and managing increased customer demand. Anticipated policy changes will align AI advancements with ethical standards and data privacy, ensuring responsible AI adoption. 
Additionally, AI will enhance secure software development by proactively identifying security issues and streamlining incident response, driving a new era of government efficiency and smarter cybersecurity.\n\n### What comes next?\nAI and machine learning are already automating routine tasks, enabling DevSecOps teams to dedicate more time to strategic initiatives, continuous improvement, and the enhancement of software development processes. As AI evolves, it will become a key partner for development teams across all stages of the software development lifecycle, augmenting their capabilities and enhancing productivity. This will drive significant improvements in government operations and security. Through thoughtful modernization efforts and integration, AI can transform public sector service delivery, creating a secure, efficient, and innovative foundation for transformation that better meets citizen needs.\n\nPresently, AI plays a crucial role in enhancing efficiency and driving digital transformation across federal agencies. 
As we look ahead to 2025 and beyond, AI is poised to advance these applications, further embedding itself in government operations and helping agencies build secure software, faster.\n\nRead the full whitepaper for a deep dive into the current and future role of AI and machine learning in federal government DevSecOps, focusing on its evolution in critical areas like incident response, security risks, and modernization strategy.",{"layout":5,"template":486,"articleType":843,"featured":6,"gatedAsset":844,"isHighlighted":6,"authorName":-1},"Guide","pf-modernizing-government-devsecops-with-artificial-intelligence",{"title":846,"date":847,"description":848,"heroImage":849,"keyTakeaways":850,"articleBody":854,"config":855},"AI-powered efficiency: Modernizing government in 2025","2024-12-01","Learn how AI accelerates workflows and maximizes productivity, enabling government agencies to maintain service delivery despite reduced headcount.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464139/gd4vt2w5igt0afe1mmmz.png",[851,852,853],"Government agencies face the dual challenge of maintaining service levels with reduced personnel while modernizing through AI. Agencies should focus on integrating AI capabilities across the software development lifecycle to maximize efficiency.","Public sector organizations can achieve measurable efficiency gains through AI-driven tools that accelerate development, enforce security protocols, and provide flexible deployment options spanning classified, air-gapped, and cloud environments.","The future of government efficiency isn't just about faster development — it's about intelligent, proactive operations that anticipate needs and solve problems before they arise, transforming how public services are delivered.","Artificial Intelligence (AI) is revolutionizing how government agencies operate in an era of workforce transitions and heightened efficiency mandates. 
Today's public sector organizations face unprecedented pressure to do more with less while simultaneously modernizing their technical infrastructure.\n\nThe challenges are substantial: Research shows over 75% of IT teams spend up to 25 hours weekly maintaining legacy systems rather than focusing on innovation. Meanwhile, 54% of public sector teams juggle more than six development tools, and 66% of DevOps teams want to consolidate their toolchains. This fragmentation becomes even more concerning when considering today's security landscape, with a 156% year-over-year increase in malicious packages infiltrating open source ecosystems.\n\nTo meet these challenges while protecting sensitive operations, government agencies require a fundamentally different approach - one built around three pillars of AI-driven efficiency:\n\nFirst, workforce empowerment is critical. AI must do more than help developers code - it must empower teams across the entire software lifecycle. Modern approaches use AI to automate everything from documentation to security compliance, allowing public sector workers to focus on innovation rather than maintenance.\n\nSecond, secure infrastructure provides the foundation. Traditional AI solutions typically rely on internet connections, creating security challenges for sensitive government services. Self-hosted AI capabilities enable agencies to harness AI's power within their secure environments - whether in classified facilities, private clouds, or regulated data centers.\n\nThird, measurable impact demonstrates value. With an emphasis on government efficiency, the ability to measure and demonstrate return on AI investments has never been more important. Advanced analytics dashboards transform how agencies track success, offering clear visibility into performance improvements across the software development lifecycle.\n\nThe scale of AI's potential in public services is staggering. 
Federal agencies have already identified more than 1,700 use cases spanning everything from space exploration to tax administration. Without AI, government agencies simply won’t be able to keep pace with the volume and complexity of modern operations.\n\nLooking ahead, the transformation extends beyond today's manually prompted AI assistance. The future lies in agentic AI that proactively works alongside development teams, anticipating issues and implementing solutions without constant human intervention. This evolution will enable government officials to move from reactive to proactive operations - particularly in areas like public safety, where continuously monitoring for threats can dramatically improve outcomes.\n\nAs agencies navigate these changes, ethical considerations remain paramount. The balance between innovation and security requires thoughtful implementation that protects sensitive data while maximizing efficiency gains. Unlike the private sector, government implementations must maintain complete data sovereignty while upholding public trust.\n\nDownload our comprehensive guide to discover how your organization can harness AI to transform operations while maintaining the highest standards of security and compliance - helping you deliver more effective public services even with reduced personnel.",{"layout":5,"template":486,"articleType":843,"featured":6,"gatedAsset":856,"isHighlighted":6,"authorName":-1},"pf-ai-powered-efficiency-modernizing-government-in-2025",{"title":858,"date":847,"description":859,"heroImage":860,"keyTakeaways":861,"articleBody":865,"config":866},"Transforming government IT: AI for air-gapped environments","Discover how government agencies can implement AI capabilities in secure environments while maintaining strict security and regulatory compliance.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464131/pipmg0ffhdazhtdtjck2.png",[862,863,864],"Air-gapped networks provide essential security for government 
agencies handling sensitive data, but traditionally limit access to artificial intelligence tools that rely on public internet connectivity for functionality.","Modern self-hosted AI solutions enable agencies to leverage advanced capabilities within isolated environments, maintaining complete data sovereignty while meeting mission-critical security standards like NIST FIPS and ICD 503.","Implementing AI in air-gapped systems helps government teams operate efficiently with reduced personnel, accelerate legacy system modernization, and maximize return on existing infrastructure investments without compromising security.","Government agencies face a growing dilemma in today's digital landscape. While artificial intelligence (AI) promises to transform operations and help teams do more with less, traditional AI implementations often conflict with the stringent security requirements of air-gapped environments. This fundamental tension creates what appears to be an impossible choice for CTOs and CIOs: either embrace AI capabilities and accept security risks or maintain strict security protocols and forgo the benefits of AI.\n\nThe reality is that agencies no longer need to make this difficult compromise. Emerging technologies now enable organizations to deploy sophisticated AI tools directly within secure environments - from classified facilities to private clouds and regulated data centers - without requiring external network connections.\n\n## The critical need for secure AI implementation\nWith the federal government spending over $100 billion annually on information technology and most funds directed toward maintaining existing systems, the need for efficient modernization has never been greater. 
Air-gapped deployments of AI represent a strategic opportunity to address this challenge.\n\nSelf-hosted AI solutions allow government agencies to process vast amounts of sensitive data safely, leveraging natural language processing and other advanced capabilities without exposing information to potential cyber threats. By maintaining complete control over AI infrastructure, organizations can satisfy rigorous compliance requirements while still harnessing AI's transformative potential.\n\nThis approach is particularly crucial for agencies operating critical infrastructure or handling classified information where internet access restrictions are non-negotiable. Modern self-hosted implementations bring AI capabilities directly into these controlled environments, effectively bridging the gap between innovation and security.\n\n## Driving efficiency through secure AI adoption\nThe benefits of deploying AI in air-gapped networks extend beyond simply meeting security requirements. Government teams facing resource constraints and reduced personnel can leverage these tools to dramatically improve operational efficiency across several dimensions:\n\n- Accelerating legacy system modernization while maintaining strict security protocols\n- Automating routine tasks to optimize workforce productivity\n- Enhancing vulnerability detection and resolution capabilities\n- Providing intelligent analysis of complex historical codebases\n- Transforming outdated systems into more maintainable, secure architectures\n\nFor agencies operating in highly regulated sectors, self-hosted AI solutions enable regulatory compliance while still providing the tools necessary to advance mission objectives. 
Rather than sending data outside secure boundaries for processing, all operations remain within the organization's control, creating a clear chain of custody that satisfies stringent requirements.\n\n## The path forward\nAs government agencies continue to face increasing pressure to modernize while maintaining the highest security standards, self-hosted AI solutions for disconnected environments offer a compelling path forward. By bringing AI capabilities directly into air-gapped systems while leveraging existing infrastructure investments, organizations can drive secure, cost-effective innovation while ensuring mission continuity with optimized teams.\n\nDownload our comprehensive white paper to explore how your agency can implement advanced AI capabilities within your secure infrastructure - without compromising your security posture.\n",{"layout":5,"template":486,"articleType":843,"featured":6,"gatedAsset":867,"isHighlighted":6,"authorName":-1},"pf-transforming-government-it-ai-for-air-gapped-environments",{"title":869,"date":870,"description":871,"timeToRead":462,"heroImage":872,"keyTakeaways":873,"articleBody":877,"faq":878,"config":894},"4 steps for measuring the impact of AI","2024-10-29","To judge the success of AI initiatives, an effective measurement framework is crucial. 
Here are four steps to help you focus on the right metrics.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463600/yv1v4ywk7hbobfvlxwhf.png",[874,875,876],"The effectiveness of AI in software development should not be measured solely by productivity metrics like code generation, but also by considering the impact of AI on error rates, maintenance, testing, and security.","Successful AI integration requires a holistic approach that combines data-driven insights from the entire software development lifecycle with qualitative insights from developers about AI's real impact on their work and strategies.","With the right approach, AI can enhance collaboration, improve customer experience, and support business goals without compromising software quality or security.","Artificial intelligence (AI) has rapidly evolved into a core part of organizations' technology stacks. AI-powered productivity tools promise to enhance efficiency by automating repetitive coding tasks. However, many organizations are struggling to quantify the business impact of their AI initiatives and are reevaluating metrics to ensure they align with desired outcomes, such as revenue growth or customer satisfaction. This is crucial for making informed decisions about AI usage.\n\nHistorically, measuring developer productivity has been challenging, with or without AI-powered tools. [Research conducted by GitLab](https://about.gitlab.com/developer-survey/) found that less than half of CxOs are happy with their organizations’ current approach to measuring developer productivity, and 36% feel their current productivity measurements are flawed.\n\nEvaluating the productivity of AI-enhanced coding requires a more nuanced approach than traditional metrics such as lines of code, code commits, or task completion. 
It necessitates shifting the focus to real-world business outcomes that balance development speed, software quality, and security.\n\nHere are a few steps organizations can take today to ensure they can measure the full impact of AI on software development processes.\n\n## 1. Set clear goals for implementing AI\nWhen implementing AI in software development, organizations must have clear goals and key performance indicators (KPIs) in place to measure success. This includes both short-term and long-term objectives that align with the overall business strategy. For example, a short-term goal could be to reduce code review time by 30% using AI-powered tools, while a long-term goal could be to improve customer satisfaction ratings through faster release cycles and higher quality code.\n\nAdditionally, organizational leaders should involve developers in setting these goals and metrics. Developers have firsthand experience with the impact of AI on their work and can provide valuable insights into how it has improved or hindered productivity. [GitLab research](https://about.gitlab.com/developer-survey/) showed that 63% of developers expect AI to significantly change their role in the next five years, and 56% feel that introducing AI into the software development lifecycle is risky. By asking developers where they see opportunities for AI to help them, as well as where they have concerns about AI, organizations can create more meaningful and relevant success metrics that reflect the actual business impact of AI on software development teams.\n\nIt's also important for organizations to regularly revisit and reevaluate these goals as they continue to integrate AI into their processes. Technology evolves quickly, and so do business processes and priorities. Setting clear goals allows teams to track progress and make adjustments as necessary.\n\n## 2. Look beyond coding metrics\nProductivity is more than acceptance rates or lines of code generated. 
Developers spend [more than 75%](https://about.gitlab.com/developer-survey/) of their time on tasks other than code generation. Efficient use of AI could therefore reduce the time developers spend reviewing, testing, and maintaining code.\n\nIn order to fully realize and appreciate the benefits of AI-aided software development, organizations should focus on a holistic view of [AI's impact on productivity](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/) and their bottom line across the software development lifecycle (SDLC). The optimal approach combines quantitative data from the entire SDLC with qualitative insights from developers about AI's real impact on their daily work and its influence on long-term development strategies.\n\nOne effective measurement technique is the [DORA framework](https://about.gitlab.com/solutions/value-stream-management/dora/), which assesses a development team's performance over a specific period. DORA metrics evaluate deployment frequency, lead time for changes, mean time to restore, change failure rate, and reliability. These performance metrics provide visibility into a team's agility, operational efficiency, and velocity, serving as proxies for how well an engineering organization balances speed, quality, and security.\n\nFurthermore, teams should utilize [value stream analytics](https://about.gitlab.com/solutions/value-stream-management/) to examine the complete workflow from concept to production. Value stream analytics continuously monitors metrics such as lead time, cycle time, deployment frequency, and production defects, focusing on business results rather than individual developer actions. This comprehensive approach surfaces data-driven insights to ensure a more productive and efficient development process.\n\n## 3. Prepare for growing pains\nWhile AI can accelerate code production, it can also contribute to technical debt if the resulting code lacks quality and security. 
AI-generated code often demands more time for review, testing, and maintenance. Developers might save time using AI initially, but this time is likely to be spent later in the software development lifecycle. Furthermore, any security flaws in AI-generated code will need attention from security teams, requiring additional time to address potential issues. As a result, development and security teams may initially be skeptical of AI.\n\nTo start, teams should develop best practices by working in lower-risk areas before expanding AI applications. This cautious approach ensures safe and sustainable scalability. For instance, AI can facilitate code generation, test generation, syntax correction, and documentation, helping teams build momentum and improve results while learning to use the tool more effectively.\n\nProductivity might dip initially as teams acclimate to new workflows. Organizations should provide a grace period for teams to determine how best to integrate AI into their processes.\n\n## 4. Integrate AI holistically with a DevSecOps platform\nOne way organizations can ease the growing pains of implementing AI in their development processes is by utilizing a DevSecOps platform that integrates AI capabilities - such as AI-powered code generation, discussion summaries, and vulnerability explanations - throughout the software development lifecycle. DevSecOps platforms provide a centralized and streamlined workflow for both developers and security teams, allowing them to collaborate more effectively and [catch potential issues earlier in the development process](https://about.gitlab.com/the-source/ai/4-ways-ai-can-help-devops-teams-improve-security/).\n\n[AI-powered code review and testing tools](https://about.gitlab.com/blog/2024/05/30/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/) within a DevSecOps platform can help identify and address security flaws or coding errors before they make it into production. 
This not only saves time but also reduces technical debt and improves overall software quality. When AI tools are part of an integrated platform, teams can also [blend AI with root cause analysis ](https://about.gitlab.com/blog/2024/06/06/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)to fix errors in CI/CD pipelines and release secure code faster. The goal is to apply automated code quality scanning and security scanning to all of the code the organization is producing, especially AI-generated code.\n\nIn addition, teams can easily track the [ROI of AI](https://about.gitlab.com/blog/2024/05/15/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) with a platform's built-in analytics, which provide valuable insights such as the impact of AI on productivity.\n\nAI will play a critical role in the evolution of DevSecOps platforms, reshaping how development, security, and operations teams collaborate to accelerate software development without sacrificing quality and security. Business leaders will want to see how their investments in AI-powered tools are paying off - and developers should embrace this scrutiny and leverage the opportunity to showcase how their work aligns with the organization’s broader goals.\n\nBy adopting a holistic approach that evaluates code quality, collaboration, downstream costs, and developer experience, teams can leverage AI technologies to enhance human efforts while also driving business impact.",[879,882,885,888,891],{"header":880,"content":881},"What are the biggest challenges of adopting AI in software development?","One major challenge is the adjustment period, as teams need time to adapt to AI-driven workflows. AI-generated code may also introduce security risks or technical debt if not properly reviewed. 
Organizations should implement security scanning, best practices, and continuous feedback loops to mitigate these risks while refining AI integration strategies.",{"header":883,"content":884},"Why are traditional coding metrics inadequate for evaluating AI-driven development?","Metrics such as lines of code or code commits fail to reflect AI's impact because they only measure raw output rather than efficiency or quality. Since AI automates tasks beyond just code generation, organizations should instead track workflow efficiency, issue resolution speed, and deployment frequency to get a more accurate picture of AI’s benefits.",{"header":886,"content":887},"How can organizations track the ROI of AI adoption?","Tracking AI’s ROI involves measuring efficiency gains, such as reduced cycle times and fewer production defects, alongside qualitative benefits like improved developer experience. A DevSecOps platform with built-in analytics provides visibility into AI-driven improvements, helping teams assess the tangible impact of AI on software development.",{"header":889,"content":890},"How can AI help reduce development time without sacrificing security?","AI streamlines repetitive tasks such as bug detection, test generation, and documentation, allowing developers to focus on more complex coding challenges. By integrating AI-driven security checks into DevSecOps platforms, teams can automate vulnerability detection and maintain security standards while accelerating development.",{"header":892,"content":893},"How can organizations effectively measure the impact of AI in software development?","Organizations should establish clear goals that align AI initiatives with business outcomes, such as improved software quality or faster deployments. Traditional coding metrics like lines of code are insufficient, so teams should use holistic productivity indicators like DORA metrics and value stream analytics. 
By focusing on efficiency, security, and real-world business impact, organizations can accurately assess AI’s role in development.",{"layout":5,"template":486,"articleType":487,"author":895,"gatedAsset":25,"isHighlighted":6,"authorName":455},"taylor-mccaslin",{"title":897,"date":870,"description":898,"timeToRead":807,"heroImage":899,"keyTakeaways":900,"articleBody":904,"faq":905,"config":918},"6 strategies to help developers accelerate AI adoption","AI in software development is here to stay. Here’s how leaders can create an environment that fosters innovation while acknowledging potential concerns.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464541/da4tvbmwsisqabz8i0mc.png",[901,902,903],"Integrating AI into software development processes can enhance developer productivity by streamlining workflows, allowing teams to focus on innovation over repetitive tasks.","Despite the benefits, successfully integrating AI-powered solutions into workflows can be challenging due to a lack of knowledge or resources, workflow adaptation difficulties, and fear of job loss.","Strategies for successful AI implementation include clarifying the goals and objectives of AI, establishing guardrails and workflows, and focusing on talent and culture transformation.","By integrating artificial intelligence (AI) into the coding process, software developers can spend more time on strategic tasks, reduce cognitive load, and deliver greater value.\n\nOrganizations are already making significant investments in AI. According to [GitLab’s 2024 Global DevSecOps Report](https://about.gitlab.com/developer-survey/), 78% of respondents said they are currently using AI in software development or plan to in the next two years, up from 64% in 2023. And organizations adopting AI are already seeing benefits, such as improved developer productivity, better code quality, and more secure code. 
[Embracing AI](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/) enables development teams to devote more time to creative problem-solving and innovation rather than time-consuming and repetitive tasks such as manually writing boilerplate code.\n\nDespite AI's clear benefits, teams may struggle to integrate AI tools successfully into their day-to-day processes. This challenge can be attributed to various factors, such as a lack of knowledge or resources, difficulty adapting existing workflows and tools, and the fear of losing jobs to automation. Nearly half (49%) of our survey respondents voiced concern that AI will replace their roles in the next five years.\n\nUnderstanding where your team is today is necessary to set them up for success when integrating AI. [Our research](https://about.gitlab.com/developer-survey/2024/ai/) shows that the majority (56%) of organizations are in the Evaluation and Exploration stage - meaning most teams have started to set achievable targets for AI adoption but haven’t actually started using it in their software development lifecycle.\n\nWhether you’re an early adopter or you’re still exploring the idea of AI, here are six strategies you can use to set your team up for success:\n\n## 1. Clarify the goals and objectives of AI adoption\nYour first step should be to create an AI governance model for your organization. What are the goals and objectives of AI adoption? How will it fit into your existing processes and workflows?\n\nIdentifying a leader to oversee AI strategy and implementation is critical. 
While some companies are beginning to hire a chief AI officer (CAIO), the role doesn’t have to be an immediate addition to the C-suite; it can be a transitional title that a VP assumes to coordinate AI usage across teams.\n\nThe primary goal is to identify and prioritize high-impact AI use cases that directly support business outcomes, focusing on areas where AI can create significant value, such as automation, personalization, or data-driven decision-making. It’s important to remember that AI success isn’t possible without first addressing the privacy, security, and legal requirements your organization might face and how AI adoption plays into continued compliance.\n\n## 2. Establish AI guardrails and workflows\nBefore incorporating AI into your development environment, you'll need to establish guidelines to ensure it is used responsibly and effectively. Set up automated testing, including using a security analyzer, to create a gating mechanism that ensures all AI-generated code is reviewed before being promoted to production. And beware of shadow AI - the latest variation of shadow IT - where workers adopt their own AI assistants while working on your code base, which can lead to the leakage of sensitive information and intellectual property.\n\nYou'll also want to think now about how your teams will use different machine learning models for different types of tasks. One size does not fit all. Large language models (LLMs) are often tuned for specific tasks, meaning teams that are using the same AI models across multiple use cases may not be getting optimal results. As you shop around for AI tools, look for vendors that allow you to use a variety of models tailored to specific use cases - this will save you from having to rip and replace later.\n\n## 3. Build a data-driven AI structure\nThe results that AI can drive for organizations are only as good as the data that AI systems have access to. 
Feeding data into your AI systems will allow you to tailor the results to your organization’s needs and improve efficiency and productivity across your software development lifecycle. However, long-term success requires a structured approach that allows data to be used across the organization to inform prompts and enhance generative AI outputs.\n\nTo that end, enterprises must:\n\n- Ensure robust data collection, storage, cleaning, and processing mechanisms.\n- Establish clear governance around data access, usage, security, and privacy, especially to ensure compliance with regulations like GDPR or CCPA.\n- Break down data silos to facilitate cross-department collaboration and leverage data across various parts of the organization. Now is the time for developers and data scientists to collaborate on using data warehouses and data lakes to facilitate access to training models and application usage.\n\n## 4. Focus on talent and culture transformation\nContinuous upskilling is critical to safely, securely, and responsibly unlocking AI’s potential. Build a team of data scientists, AI engineers, and other experts to design, develop, and implement AI solutions. Upskilling employees to ensure they can use and maintain AI systems effectively is critical. Finally, embracing AI is a journey, and it will require some [cultural shifts](https://about.gitlab.com/the-source/ai/5-ways-execs-can-support-their-devops-teams-with-ai/). To succeed, it is critical to foster a culture that embraces AI and data-driven decision-making. Encourage experimentation and innovation while addressing fears around automation and job displacement.\n\n## 5. Embrace iteration\nImplementing AI is an ongoing process. Adopt a continuous learning approach, where AI solutions are constantly refined and improved based on feedback, new data, and technological advances. Developers must be given an experimentation period to assess how AI fits into their individual workflows. 
It’s also important to note that there might be a short-term dip in productivity before the organization benefits from long-term gains. Managers must anticipate this by emphasizing transparency and accountability throughout the implementation and iteration cycles.\n\n## 6. Measure success beyond lines of code\nIt's true that metrics such as the number of tasks completed or lines of code written can be good proxies to help you identify areas where AI is having the biggest impact on your team. However, AI is more than just code generation. What really matters is how AI is driving metrics that are important to the business, such as how quickly teams are able to deliver value to customers, or the code quality of the final product.\n\nKnowing how many lines of code a team produced won’t tell you the full story here. Measuring success in AI adoption requires moving [beyond traditional productivity metrics](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/) and focusing on KPIs that demonstrate measurable business value, such as faster software delivery, improved developer satisfaction, and higher customer satisfaction scores.\n\n## Conclusion: Empowering developers through AI adoption\nEven if your organization has not fully embraced AI, the time to start is now. According to Gartner®, by 2028, 75% of enterprise software engineers will use AI coding assistants, up from less than 10% in early 2023 [1].\n\nThe adoption curve is steep, but we are still relatively early in the AI hype cycle. 
In fact, if your team is only just starting to look into adopting an AI code assistant, they may be well-positioned to avoid some of the growing pains early adopters have experienced.\n\nIn addition to the strategies above, adopting an [AI solution integrated into an end-to-end DevSecOps platform](/gitlab-duo/) can jumpstart success by supporting developers at every stage of their workflow.\n\nAs AI transforms the workplace, business leaders should ask how they can harness the power of AI across the software development lifecycle to accelerate innovation and drive tangible benefits for customers.\n\n[1] _Source: Gartner, Top 5 Strategic Technology Trends in Software Engineering for 2024, Joachim Herschmann, Manjunath Bhat, Frank O'Connor, Arun Batchu, Bill Blosen, May 2024. GARTNER is a registered trademark and service mark of Gartner, Inc. and/or its affiliates in the U.S. and internationally and is used herein with permission. All rights reserved._",[906,909,912,915],{"header":907,"content":908},"How can AI improve software development productivity?","AI enhances software development by automating repetitive tasks, reducing cognitive load, and enabling developers to focus on strategic problem-solving. AI-powered coding assistants can generate code, conduct automated testing, and improve code quality, leading to faster development cycles and more secure applications.",{"header":910,"content":911},"What are AI guardrails, and why are they important in software development?","Success metrics should go beyond traditional coding productivity, such as lines of code written. 
Instead, organizations should focus on business-impacting KPIs like faster software delivery, improved developer satisfaction, higher code quality, and better customer experience.",{"header":913,"content":914},"What are the biggest challenges in adopting AI for software development?","Common challenges include integrating AI into existing workflows, ensuring data privacy and security, overcoming resistance to change, and addressing concerns about job displacement. Organizations can mitigate these challenges by establishing clear AI governance, upskilling employees, and fostering a culture of experimentation and innovation.",{"header":916,"content":917},"What steps should developers take to prepare for AI-driven software development?","Developers should focus on continuous learning, gaining proficiency in AI tools, and understanding data-driven decision-making. Collaborating with AI engineers and data scientists, experimenting with AI-powered coding assistants, and staying updated on emerging AI trends will help developers maximize AI's potential in software development.",{"layout":5,"template":486,"articleType":487,"author":488,"featured":6,"gatedAsset":919,"isHighlighted":6,"authorName":436},"source-lp-how-to-think-about-developer-productivity-in-the-age-of-ai",{"title":921,"date":922,"description":923,"heroImage":924,"keyTakeaways":925,"articleBody":929,"config":930},"Getting started with AI in software development: A guide for leaders","2024-07-01","Learn how to gain a competitive edge by developing and implementing a well-defined strategy for AI-assisted software development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464236/w1ek9f8qevs63vcndaav.png",[926,927,928],"Leaders need to be clear that using AI isn’t about replacing developers — it’s about helping them do their jobs more efficiently by eliminating repetitive tasks.","The role of generative AI extends beyond helping software developers to write more lines of code — it enhances 
every phase of the software development lifecycle.","AI-powered tools can be used on their own but it’s easier when they’re part of a DevSecOps platform, which weaves security throughout the entire development and operations process.","Artificial intelligence (AI) is no longer just on the way - it’s now fundamental to how organizations build applications. Companies are leveraging generative AI to radically change the way their teams build, secure, and deliver software. According to the GitLab research, 78% of development, security, and operations professionals are either already using AI or plan to within the next two years - a significant increase from 64% last year.\n\nThe key benefits of AI are evident: it drives faster, more secure software development processes and offers a competitive edge. This transformation is not optional; 62% of C-level executives recognize AI integration in software development as essential to staying relevant.\n\n### AI is about empowering, not replacing\nManagers need to be clear that using AI isn’t about replacing human developers, software engineers, or any other members of the software development team. It’s about helping them do their jobs more efficiently, better and faster. It’s about empowerment. AI takes arduous and mundane tasks off people’s plates so they have more time to do what they love - create higher-quality software that can take on business-critical needs.\n\n### Key capabilities of AI in software development\nAI capabilities are powered by machine learning algorithms, large language models, computer vision, pattern recognition, and natural language processing. 
AI-powered tools - ranging from chatbots to code generation, vulnerability explanations, code refactoring, and anomaly detection - are transforming teams’ ability to uncover valuable insights and automate critical processes.\n\n### Using AI across the entire software development process\nAI’s role extends beyond code generation - it enhances every phase of the software development lifecycle. From planning and coding to testing and deployment, AI acts as the next generation of automation, supporting software developers, security teams, and operations. This holistic approach ensures that every team member benefits from AI’s capabilities.\n\n### Using AI in a DevSecOps platform\nArtificial intelligence capabilities can be used on their own but it’s easier when they’re part of a DevSecOps platform, which weaves security throughout the entire process to ensure that AI-generated code doesn't expose the organization to vulnerabilities. With an AI-driven platform, there’s no need to adopt a disparate collection of AI tools because they’re all seamlessly integrated in one application. The combination creates a powerful synergy that enhances security, automation, and efficiency across the entire development lifecycle.\n\n#### If you haven’t started using AI in your software development process, now is the time. If you have, it’s time to accelerate.\nIT leaders are perfectly positioned to help their teams not only adopt generative AI tools but maximize all the benefits that come with using them - adding efficiencies; eliminating routine tasks; improving code quality; giving developers more time to be innovative; and fostering, rather than replacing, human-to-human collaboration.\n\nManagers and executives can work with their teams to figure out the pain points in their processes and how AI can help simplify complex tasks and drive business goals. 
They also can strategize on what AI-powered solutions to start with and how to continue to add capabilities as they progress.\n\nNo matter where you are in your journey, there are clear steps you can take to put AI to work for you and your business. This ebook will guide you through how each role in the DevSecOps environment can take advantage of AI, and give you tangible takeaways for creating a strategic AI implementation plan that will ultimately help you create secure software faster.",{"layout":5,"template":486,"articleType":843,"featured":6,"gatedAsset":931,"isHighlighted":6,"authorName":-1},"pf-getting-started-with-ai-in-software-development-a-guide-for-leaders",{"title":402,"date":933,"description":934,"heroImage":935,"keyTakeaways":936,"articleBody":940,"config":941},"2024-03-08","Learn how AI tools can boost productivity, transform software development, and create a competitive advantage.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464346/mkoag8ukibtprc3vbo0u.png",[937,938,939],"Generative AI holds immense potential for enterprises by providing innovative solutions to a wide range of complex and tedious tasks, but its full implications are yet to be understood.","Companies need to introduce AI to stay competitive, but considering the inherent risks to safety and security is crucial in unlocking AI's power.","Planning long-term AI strategies to cater to shifting needs and fostering a culture of continuous upskilling and adoption are key to successful AI implementation.","To stay competitive, organizations can no longer ignore the transformative power of artificial intelligence (AI) and machine learning (ML) in software development.\n\nGenerative AI has already begun to revolutionize various aspects of the software development process, enabling teams to work more efficiently and effectively and creating a competitive advantage. 
As organizations seek to adapt to rapid technological changes, understanding the implications of AI is essential for maximizing its benefits.\n\n## Enhancing efficiency with AI tools\nOne of the primary ways AI is transforming software development is through automation. Repetitive tasks such as generating boilerplate code or debugging can be streamlined with AI-driven tools. For instance, AI-powered code generation tools can significantly decrease development time by suggesting context-aware code snippets, allowing developers to focus on more complex aspects of their projects. This leads not only to faster delivery times but also reduces the likelihood of human error. In addition, AI-powered code explanations can help developers understand code in a variety of programming languages more quickly.\n## Facilitating better decision-making\nAI can provide invaluable insights through data analysis, helping teams make informed decisions. By analyzing performance metrics and real-time insights, AI systems can suggest optimizations or highlight areas needing improvement. This capability to leverage data for thoughtful decision-making empowers development teams to create more user-centric applications without the guesswork often required in traditional development processes.\n\n## Fostering collaboration and knowledge sharing\nAI solutions can be a powerful tool for enhancing collaboration among development teams by breaking down silos and improving communication. Tools equipped with natural language processing can facilitate seamless communication between team members, enabling them to engage in more meaningful discussions about their projects. By interpreting and summarizing technical jargon or complex concepts, these tools can bridge the gap between cross-functional teams, such as software engineers, designers, and product managers. 
As a result, iterative feedback loops become more efficient, fostering a cohesive environment where innovative ideas can thrive.\n\n## Navigating AI uncertainties\nWe’ve talked with hundreds of GitLab customers, and some of their top questions remain unanswered:\n\n- Can code written by AI be trusted?\n- Will these tools deliver on promised increases in productivity and efficiency?\n- Will software developers use AI coding tools for intended scenarios, or will they stretch beyond appropriate applications too quickly, or perhaps not use them?\n- How might consumer-facing applications change as AI is integrated into them?\n- Does it matter what large language models an organization uses? What are the implications of using one model over another?\n\nHow your organization answers these questions can be the difference between a successful enterprise AI implementation, which can become a foundation for future innovation, and going back to the drawing board, which can lead to falling behind peers, competitors, and customer expectations.\n\n## Building the right AI approach for your software innovation strategy\nOne certainty is that generative AI tools are here to stay. Companies with software engineering organizations that want to recruit and retain top talent must introduce AI to the enterprise to compete. Continuous upskilling, adoption, and support are critical to safely, securely, and responsibly unlocking AI’s power. So, too, is mapping out long-term AI business strategies that ensure organizational freedom and choice as infrastructure and application needs shift, mature, and scale.\n\nThat’s why GitLab has created this guide for business leaders. 
Our aim is to help you think about these issues across the whole geography of your organization and prepare your C-suite, leadership team, and development teams for what AI can do today - and will do in the near future - to accelerate software development and transform customer experiences.",{"layout":5,"template":486,"articleType":843,"featured":6,"gatedAsset":942,"isHighlighted":6,"authorName":-1},"pf-ai-guide-for-enterprise-leaders-building-the-right-approach",{"title":944,"date":945,"description":946,"timeToRead":807,"heroImage":947,"keyTakeaways":948,"articleBody":952,"faq":953,"config":969},"How to put generative AI to work in your DevSecOps environment","2024-03-07","Learn how artificial intelligence, when integrated throughout the platform, can reap tangible rewards for organizations and their DevSecOps teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463955/b01uj40kjfhezhwiczhp.png",[949,950,951],"To fully leverage AI's potential in DevSecOps, it's essential to incorporate AI not just as a coding assistant, but throughout the entire software development lifecycle.","Consolidating AI tools across the organization reduces complexity, operational risks, and costs, fostering a streamlined and secure environment.","Evaluating AI's effectiveness requires more than traditional metrics like code production frequency. Implement standard workflows within your organization to capture comprehensive metrics such as vulnerability resolution times and code review efficiencies.","Generative AI has ushered in a new wave of innovation that's poised to help alleviate many tedious manual and time-consuming aspects of software development and delivery, and, as a result, improve developer experience and accelerate DevSecOps workflows. 
But to realize the full potential of generative AI, the technology has to be sprinkled not just at the point of code creation, but everywhere.\n\nAccording to our [2024 survey of more than 5,000 DevSecOps professionals](https://about.gitlab.com/developer-survey/2024/ai), code creation accounts for less than 25% of a developer's time. There are so many other critical tasks that happen from the first commit through to production that could benefit from the power of AI.\n\nAI can be infused at each stage to help shepherd software from idea to delivery, creating better, more secure software faster. For instance, something as commonplace as examining a failed build can be improved by using AI to assess what went wrong and how to fix it. Although AI does not eliminate the task, it can help [reduce the steps and time required to complete it](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/).\n\nHere is what your DevSecOps team can do to begin to understand - and measure - the impact of generative AI.\n\n## Start with an assessment of your workflows\n\nBefore you can fully realize the impact of AI, you’ll have to do some upfront work, including revisiting your workflows. You want to understand the ideal workflow you can build out to have consistency in your approach to using AI and have the [proper guardrails in place](https://about.gitlab.com/the-source/ai/velocity-with-guardrails-ai-automation/) to reduce any risks that AI might introduce.\n\nFor instance, if your team is writing code with generative AI, some of that AI-generated code might include security vulnerabilities. That's just how it works. So you'll need a [workflow in place to catch those vulnerabilities](https://about.gitlab.com/the-source/ai/4-ways-ai-can-help-devops-teams-improve-security/) and reduce the chance of them making it into production. 
Once you have this workflow, you can start to introduce a lot of AI functionality in a more consistent manner that will increase the velocity of your development process.\n\nHere's an example of how assessing your workflow upfront can improve the benefits you'll get from AI. While AI can automatically build tests for you, you wouldn't want it to do so after the code's already created. Developers are not part of the QA team because they only test what they've written. Generative AI acts similarly, so you need your workflow for an AI-generated test to start earlier - where developers can use details in issues to interactively generate unit tests for the code they want to write. By considering the workflow, they can create the merge requests with the test first, and then, when they pull the branch to start working on the implementation, their code suggestions are more robust because the context now includes the proper tests and their response hits will be much higher than if they started with the code directly.\n\nYou can't revamp all your workflows at once, so make sure to focus on those related to your biggest software development and delivery challenges, such as modernizing legacy code bases, handling an increase in security issues, or operating on ever-thinning budgets and staff.\n\n## Establish guardrails for AI\n\nYou'll also want to consider the risk of AI in terms of the data it's interacting with and make sure you're putting guardrails in place to mitigate that risk and meet your unique compliance needs. You'll want to consider the AI models you're using, whether you're accessing vector databases, and how large language models (LLMs) are being trained.\n\nFor these questions, you'll want to pull your legal, compliance, and DevSecOps teams together to ask tough questions of your AI providers. 
We provide some helpful guidance in the [GitLab AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) and [our blog post on building a transparency-first AI strategy](https://about.gitlab.com/the-source/ai/building-a-transparency-first-ai-strategy-7-questions-to-ask-your-devops/).\n\nAnother critical guardrail is streamlining how many separate AI tools you're using throughout the software development lifecycle and across your organization. The more tools used, the more complexity introduced, causing operational issues, oversight challenges, and potential security risks. In addition, numerous tools result in increased overhead costs.\n\n## Measure the impact of AI\n\nMeasuring the changes in productivity and other key metrics will be essential to [truly understanding the impact of AI in your organization](https://about.gitlab.com/the-source/ai/4-steps-for-measuring-the-impact-of-ai/). Typically, organizations would look at output from the perspective of how often they are shipping code into production, the [four DORA metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html), or the time it takes to remediate bugs. But that doesn't provide a holistic picture.\n\nAt GitLab, we measure the impact of AI by building out the standardization of workflows inside our hierarchy structure of groups and projects so we can roll up metrics from teams to business units and analyze those outputs directly inside the user interface.\n\nWhen you implement AI on top of this structure, you're able to see the increase in velocity, including the time it takes to resolve vulnerabilities and validate that merge requests have the right reviewers and the right tests, which reduces the time it takes to go through the code review process. You can see each stage inside GitLab, including dependencies, and the delta it takes the development team to get through those stages. Dashboards show what that speed looks like and make it easier to pivot based on that data. 
For instance, you can decide whether to release software into production.\n\n### Practical uses for an SDLC AI assistant\n\nHere are some practical ways to use AI assistants like [GitLab Duo](https://about.gitlab.com/gitlab-duo/) throughout the software development lifecycle.\n\n- **Write merge request descriptions:** Automate the creation of comprehensive descriptions for merge requests and quickly and accurately capture the essence of an MR's string of commits. It can also surface tasks that are missing based on the code that is written and the intent of the MR's linked issue.\n\n- **Explain code in natural language:** QA testers can use code explanations to quickly and easily understand code. For instance, if an MR includes code written in Rust and a complex set of methods, the QA tester can highlight the methods and receive a natural language readout of what the change is trying to do. This allows the QA tester to write much better test cases that will cover not just the sunny day but also rainy day scenarios.\n\n- **Root cause analysis of pipeline errors:** If your pipelines are becoming larger and you try to refactor them, you could break something, which can be difficult to troubleshoot – especially if you're executing a series of bash scripts or running a Docker image leveraging internal commands inside the image. You can run the errors you receive through generative AI and it will explain a possible root cause and a recommended solution that you can copy and paste directly back into your CI job.\n\n- **Vulnerability resolution:** In the rush to shift security left, engineering teams have had to quickly become security experts. 
With generative AI tools, engineers can access chat to learn what the vulnerability is, where it is in the code, and even open an automated MR with a possible fix – all within the development window, so no context-switching.\n\n## GitLab Duo: Your one-stop shop for impactful, generative AI features\n\nWe're building GitLab Duo, our expanding suite of AI-powered tools for our DevSecOps platform, with powerful generative AI models and cutting-edge technologies from hypercloud vendors. Today, [GitLab Duo has features in general availability, beta, and experimental phases](https://docs.gitlab.com/ee/user/ai_features.html), ranging from code assistant to conversational chat assistant to vulnerability explainer. When used consistently across the software development lifecycle, GitLab Duo will drive a 10x faster cycle time, helping organizations do more with less and allowing employees to spend their time on higher-value tasks.\n\nThe \"[Omdia Market Radar: AI-Assisted Software Development, 2023–24](https://learn.gitlab.com/devsecops-plat-ai/analyst-omdia-ai)\" report highlighted GitLab Duo as one of the products the analyst firm considers “suitable for enterprise-grade application development,\" noting that its “AI assistance is integrated throughout the SDLC pipeline.”\n\nHere is a look at GitLab Duo's features in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/s19nBOA2k_Y?si=qEcsZbpMChynYlfn\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->",[954,957,960,963,966],{"header":955,"content":956},"How can organizations measure the impact of AI on software development?","Organizations should look beyond traditional coding metrics and track improvements in speed, quality, and security. By analyzing AI's effect on code review times, vulnerability resolution speed, and developer productivity, teams can assess its true impact. 
Using AI-powered dashboards and analytics within a DevSecOps platform provides deeper insights into AI-driven efficiency gains.",{"header":958,"content":959},"What are the key steps to successfully implement AI in DevSecOps?","To effectively implement AI, organizations should start by assessing their workflows to identify areas where AI can provide value. Next, they should establish guardrails to ensure AI-generated code meets security and compliance requirements. Finally, teams should measure AI's impact using productivity metrics such as DORA and value stream analytics to track improvements in development speed and security.",{"header":961,"content":962},"What are some practical applications of AI in DevSecOps?","AI can be used for various DevSecOps tasks, including writing merge request descriptions, explaining complex code, performing root cause analysis of pipeline errors, and resolving security vulnerabilities. AI-powered assistants like GitLab Duo help teams automate these processes, reducing manual effort and improving software quality.",{"header":964,"content":965},"What security risks does AI introduce in software development?","AI-generated code can sometimes contain security vulnerabilities, increasing the risk of introducing flaws into production. To mitigate these risks, organizations should integrate AI with DevSecOps platforms that include automated security scanning, vulnerability detection, and compliance checks. This ensures that AI-generated code is reviewed and secured before deployment.",{"header":967,"content":968},"How can generative AI improve DevSecOps workflows?","Generative AI enhances DevSecOps by automating repetitive tasks, improving security, and accelerating development cycles. AI can assist with writing code, explaining complex logic, identifying vulnerabilities, and generating test cases. 
By integrating AI across the software development lifecycle (SDLC), organizations can streamline operations and improve efficiency.",{"layout":5,"template":486,"articleType":487,"author":895,"featured":6,"gatedAsset":26,"isHighlighted":6,"authorName":455},{"title":971,"date":972,"description":973,"timeToRead":555,"heroImage":974,"keyTakeaways":975,"articleBody":979,"faq":980,"config":990},"Understand and resolve vulnerabilities with AI-powered GitLab Duo","2024-02-21","Developers can find and fix vulnerabilities with auto explanation and auto-generated merge requests, ensuring a streamlined development process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464462/a8lhn3mrjyjuq55m1yqc.png",[976,977,978],"GitLab Duo uses AI to explain vulnerabilities, bridging knowledge gaps and speeding up issue resolution.","One-click AI fixes in GitLab Duo auto-generate merge requests, saving developers time on security patches.","GitLab Duo fosters proactive security by empowering developers to understand and resolve vulnerabilities efficiently.","In the dynamic world of software development, companies are dedicated to delivering quick and efficient innovations, and they recognize the importance of ensuring they deliver secured applications. GitLab, the most comprehensive [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, already provides built-in scans in the CI pipeline to deliver detailed scan reports that highlight potential issues within the code. 
However, not every developer is well-versed in cybersecurity or has encountered every type of vulnerability before, creating a knowledge gap that can lead to confusion and delays in addressing security concerns.\n\n![A vulnerability example detected by static application security testing](//images.ctfassets.net/xz1dnu24egyd/y6vNslaESqyGTlH3i215z/feef7fb70d00f92b77ea128157111a2e/resolve_vuln_-_image_1.png)\n\n\u003Ccenter>\u003Ci>A vulnerability example detected by static application security testing\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n## Resolving vulnerabilities with GitLab Duo (AI)\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) uses AI to help developers resolve vulnerabilities. Here's how.\n\n### Understanding vulnerabilities\nCritical vulnerabilities detected in developers' code can delay code merging, often necessitating assistance from security experts to resolve the issues promptly. This leads to extended periods of open merge requests and delays in releasing features. GitLab recognizes the knowledge gap and empowers developers to comprehend security vulnerabilities identified by scans using the [Vulnerability Explanation feature](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#explaining-a-vulnerability), which offers clear insights into detected vulnerabilities, potential risks with attack examples, and practical solutions for resolution, including example code snippets.\n\nVulnerability Explanation generates a dedicated overview of vulnerabilities. 
You can access this overview by clicking the \"Explain this vulnerability\" button within each vulnerability report.\n\n![Vulnerability Explanation example](//images.ctfassets.net/xz1dnu24egyd/7yUKBE7jH3IDkZTEKBZCsu/ea7abaa0b50bba1bcabe39beeab23d21/Screenshot_2024-02-20_at_3.30.44_PM.png)\n\u003Ccenter>\u003Ci>Vulnerability Explanation example\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nDevelopers can follow all sections in the explanation to swiftly address the vulnerabilities, fostering a culture where they are involved in threat mitigation. This involvement cultivates a sense of comfort and confidence in handling security concerns, ultimately promoting a more proactive and secure development environment.\n\n### Fixing vulnerabilities\n\nGitLab goes beyond just explaining detected vulnerabilities – now, with the power of AI, the platform can swiftly suggest a resolution with just one click. This feature automatically generates detailed merge requests containing all pertinent information about the vulnerability and its intended fix. Moreover, it even suggests the necessary code to address the vulnerability. This saves developers significant time. 
All that's left for the developer is to review the fix, make any necessary adjustments, and merge it.\n\n![Merge request, automatically generated by AI, including details of the vulnerability, and suggested code to resolve it](//images.ctfassets.net/xz1dnu24egyd/3QVnzhS1h1lTZ2vGK7QYUx/c5e272d2aa602a9be4e8b58c490393ae/resolvevuln_image_3.png)\n\nThe above image shows a merge request, automatically generated by AI, including details of the vulnerability, and suggested code to resolve it.\n\n## Take a product tour\n\nWe've prepared a brief product tour so you can quickly dive into the functionality and see it in action (click on the image and use the \"Next\" button to progress through the demo).\n\n[![vulnerability explanation product tour screenshot](//images.ctfassets.net/xz1dnu24egyd/5dzrs0w9PR1oxEP3dTujgf/f9a60d39c45802bcfb7e77b871e188a2/Screenshot_2024-02-22_at_8.03.16_AM.png)](https://tech-marketing.gitlab.io/static-demos/pt-explain-vulnerability.html)\n\n> __[Get started with GitLab Duo today](https://about.gitlab.com/gitlab-duo/)!__\n",[981,984,987],{"header":982,"content":983},"How does the Vulnerability Explanation feature improve security remediation?","The Vulnerability Explanation feature provides developers with detailed insights into detected security issues, including potential risks and best practices for resolution. By offering attack examples and step-by-step guidance, it helps bridge the cybersecurity knowledge gap among developers.",{"header":985,"content":986},"Why is AI-driven vulnerability management important in DevSecOps?","AI-driven vulnerability management accelerates threat detection and resolution, reducing delays in code deployment. 
By automating security scans, explanations, and fixes, AI enhances DevSecOps efficiency, minimizes security risks, and allows developers to focus on innovation.",{"header":988,"content":989},"How does GitLab Duo use AI to help developers resolve vulnerabilities?","GitLab Duo leverages AI to detect security vulnerabilities, provide clear explanations, and generate suggestions for fixing vulnerabilities.",{"layout":5,"template":486,"articleType":487,"author":991,"featured":6,"isHighlighted":6,"authorName":442},"iganbaruch",{"title":993,"date":994,"description":995,"timeToRead":493,"heroImage":996,"keyTakeaways":997,"articleBody":1001,"config":1002},"As AI becomes standard, watch for these 4 DevSecOps trends","2024-01-17","Harnessing AI to drive innovation and deliver enhanced customer value will be critical to staying competitive in the AI-driven marketplace.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464636/htrk5th3q0tq1rcfntkp.png",[998,999,1000],"AI in DevSecOps demands proactive advocacy for responsible use and addressing global trends like AI bias and privacy risks.","Embracing AI in code testing will redefine QA roles, requiring new skills and oversight for improved software quality.","GitLab Duo offers AI benefits with clear ownership and privacy commitments.","AI’s role in software development is reaching a pivotal moment - one that will compel organizations and their DevSecOps leaders to be more proactive in advocating for effective and responsible AI utilization.\n\nSimultaneously, developers and the wider DevSecOps community must prepare to address four global trends in AI: the increased use of AI in code testing, ongoing threats to IP ownership and privacy, a rise in AI bias, and - despite all of these challenges - an increased reliance on AI technologies. Successfully aligning with these trends will position organizations and DevSecOps teams for success. 
Ignoring them could stifle innovation or, worse, derail your business strategy.\n\n## From luxury to standard: Organizations will embrace AI across the board\nIntegrating AI will become standard, not a luxury, across all industries of products and services, leveraging DevSecOps to build AI functionality alongside the software that will leverage it. Harnessing AI to drive innovation and deliver enhanced customer value will be critical to staying competitive in the AI-driven marketplace.\n\nFrom my conversations with GitLab customers and monitoring industry trends, with organizations pushing the boundaries of efficiency through AI adoption, more than two-thirds of businesses will embed AI capabilities within their offerings by the end of 2024. Organizations are evolving from experimenting with AI to becoming AI-centric.\n\nTo prepare, organizations must invest in revising software development governance and emphasizing continuous learning and adaptation in AI technologies. This will require a cultural and strategic shift. It demands rethinking business processes, product development, and customer engagement strategies. And it requires training - which DevSecOps teams say they want and need. In our latest [Global DevSecOps Report](https://about.gitlab.com/developer-survey/), 81% of respondents said they would like more training on how to use AI effectively.\n\nAs AI becomes more sophisticated and integral to business operations, companies will need to navigate the ethical implications and societal impacts of their AI-driven solutions, ensuring that they contribute positively to their customers and communities.\n\n## AI will dominate code-testing workflows\n\nThe evolution of AI in DevSecOps is already transforming code testing, and the trend is expected to accelerate. 
GitLab’s research found that only 41% of DevSecOps teams currently use AI for automated test generation as part of software development, but that number is expected to reach 80% by the end of 2024 and approach 100% within two years.\n\nAs organizations integrate AI tools into their workflows, they are grappling with the challenges of aligning their current processes with the efficiency and scalability gains that AI can provide. This shift promises a radical increase in productivity and accuracy - but it also demands significant adjustments to traditional testing roles and practices. Adapting to AI-powered workflows requires training DevSecOps teams in AI oversight and fine-tuning AI systems to facilitate its integration into code testing to enhance software products’ overall quality and reliability.\n\nAdditionally, this trend will redefine the role of quality assurance professionals, requiring them to evolve their skills to oversee and enhance AI-based testing systems. It’s impossible to overstate the importance of human oversight, as AI systems will require continuous monitoring and guidance to be highly effective.\n\n## AI’s threat to IP and privacy in software security will accelerate\n\nThe growing adoption of AI-powered code creation increases the risk of AI-introduced vulnerabilities and the chance of widespread IP leakage and data privacy breaches affecting software security, corporate confidentiality, and customer data protection.\n\nTo mitigate those risks, businesses must prioritize robust IP and privacy protections in their AI adoption strategies and ensure that AI is implemented with full transparency about how it’s being used. Implementing stringent data governance policies and employing advanced detection systems will be crucial to identifying and addressing AI-related risks. 
Fostering heightened awareness of these issues through employee training and encouraging a proactive risk management culture is vital to safeguarding IP and data privacy.\n\nThe security challenges of AI also underscore the ongoing need to implement DevSecOps practices throughout the software development life cycle, where security and privacy are not afterthoughts but are integral parts of the development process from the outset. In short, businesses must keep security at the forefront when adopting AI - similar to the shift left concept within DevSecOps - to ensure that innovations leveraging AI do not come at the cost of security and privacy.\n\n## Brace for a rise in AI bias before we see better days\n\nWhile 2023 was AI’s breakout year, its rise put a spotlight on bias in algorithms. AI tools that rely on internet data for training inherit the full range of biases expressed across online content. This development poses a dual challenge: exacerbating existing biases and creating new ones that impact the fairness and impartiality of AI in DevSecOps.\n\nTo counteract pervasive bias, developers must focus on diversifying their training datasets, incorporating fairness metrics, and deploying bias-detection tools in AI models, as well as explore AI models designed for specific use cases. One promising avenue to explore is using AI feedback to evaluate AI models based on a clear set of principles, or a “constitution,” that establishes firm guidelines about what AI will and won’t do. Establishing ethical guidelines and training interventions are crucial to ensure unbiased AI outputs.\n\nOrganizations must establish robust data governance frameworks to ensure the quality and reliability of the data in their AI systems. 
AI systems are only as good as the data they process, and bad data can lead to inaccurate outputs and poor decisions.\n\nDevelopers and the broader tech community should demand and facilitate the development of unbiased AI through constitutional AI or reinforcement learning with human feedback aimed at reducing bias. This requires a concerted effort across AI providers and users to ensure responsible AI development that prioritizes fairness and transparency.\n\n## Preparing for the AI revolution in DevSecOps\nAs organizations ramp up their shift toward AI-centric business models, it’s not just about staying competitive - it’s also about survival. Business leaders and DevSecOps teams will need to confront the anticipated challenges amplified by using AI - whether they be threats to privacy, trust in what AI produces, or issues of cultural resistance.\n\nCollectively, these developments represent a new era in software development and security. Navigating these changes requires a comprehensive approach encompassing ethical AI development and use, vigilant security and governance measures, and a commitment to preserving privacy. 
The actions organizations and DevSecOps teams take now will set the course for the long-term future of AI in DevSecOps, ensuring its ethical, secure, and beneficial deployment.\n\n_This article was originally published January 7, 2024, on [TechCrunch](https://techcrunch.com/2024/01/07/as-ai-becomes-standard-watch-for-these-4-devsecops-trends/)._\n",{"layout":5,"template":486,"articleType":487,"author":1003,"featured":6,"isHighlighted":6,"authorName":434},"ddesanto",{"title":1005,"date":1006,"description":1007,"timeToRead":493,"heroImage":1008,"keyTakeaways":1009,"articleBody":1013,"faq":1014,"config":1030},"How AI helps DevSecOps teams improve productivity","2024-01-02","Learn how DevOps teams are using AI to save time and improve efficiency.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464559/fbvzbz6vxppsblv8sngf.png",[1010,1011,1012],"Utilizing AI in DevSecOps workflows significantly boosts efficiency and productivity by automating repetitive tasks, reducing context switching, and providing intelligent assistance.","AI tools can enhance code quality and security by offering real-time suggestions, summarizing potential threats, and providing remediation strategies.","Organizations should develop strategies addressing privacy, intellectual property, and data security concerns, ensuring AI implementations align with legal and ethical standards.","Artificial intelligence (AI) and machine learning (ML) in software development are here to stay, and DevSecOps teams are using them in many different ways to save time and improve productivity and efficiency.\n\nHere are a few ways development, security, and operations teams can incorporate AI into their DevOps processes.\n\n## 9 ways DevSecOps teams use AI\n\n### Ask questions in documentation using chatbots\nTo find answers faster and reduce context switching, DevSecOps teams can use AI-powered chatbots to ask questions and get relevant answers in real time from documentation or other large volumes of 
text. Instead of leaving the IDE or platform where they’re writing and deploying code to go search the web, developers can ask a built-in chatbot a question and get one concise answer without disrupting their flow.\n\n### Suggest tests and test files\nDevelopers can use AI to suggest tests and generate test files for their code, right in the merge request. This can help them enhance their testing, ensure they have appropriate test coverage for their changes, and reduce the time they have to spend writing and thinking about tests.\n\n### Summarize code changes\nWhen making a commit or merge request, developers can use AI to generate a written summary of the code changes. This can help developers save time when they’re committing changes and asking for code reviews, and AI can also help code reviewers save time - and likely provide a better review - by giving them more context on the changes made before they dive into the code.\n\n### Get suggestions for who can review code\nCode review is an important, but sometimes frustrating and time-consuming, process - especially if the right reviewer isn’t asked the first time.\n\nBy looking at the code changes and the project’s contribution graph, AI can automatically suggest a code reviewer who can provide faster and higher-quality feedback and catch potential issues. AI also can help save time by suggesting someone else to review the code if a suggested reviewer doesn't respond or if their review isn’t sufficient.\n\n### Summarize discussions\nWhen discussions get lengthy or convoluted, teams can use AI to summarize all the comments in an issue or ticket. 
This can help everyone get on the same page and efficiently understand the status of a project and what the next steps are, leading to more seamless collaboration and faster results.\n\n### Suggest code\n[AI-powered code suggestions](https://about.gitlab.com/blog/2024/06/11/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/) can help developers write code more efficiently by suggesting code right in their IDE while they’re developing. Developers can use AI to complete blocks of code, define and generate logic for function declarations, generate unit tests, suggest common code like regex patterns, and more. These capabilities can certainly make developers more efficient, but because less than 25% of developers’ time is spent on code development [according to our research](https://about.gitlab.com/developer-survey/), it’s just one piece of the puzzle.\n\n### Explain how a piece of code works\nDevelopers (or anyone on the DevOps team) can use AI to get a quick explanation of what a block of code does and why it's behaving the way it is – without leaving their workflow.\n\nAn AI-generated code explanation can be particularly helpful for developers trying to understand pieces of code that others have created or that are written in a language they’re less familiar with. And according to [our research](https://about.gitlab.com/developer-survey/), developers spend 13% of their time understanding what code does, so time savings here can really add up.\n\n### Summarize vulnerabilities in code\nUnderstanding a newly detected security vulnerability and how to fix it isn’t trivial, but AI-powered security tools can make it simpler and more efficient. An [AI-generated summary of a vulnerability](https://about.gitlab.com/blog/2024/07/15/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities/) helps developers and security professionals understand the vulnerability, how it could be exploited, and how to fix it. 
Some AI-powered tools can even provide a suggested mitigation with sample code. This can go a long way in [helping teams avoid potential security threats and security risks](https://about.gitlab.com/the-source/ai/4-ways-ai-can-help-devops-teams-improve-security/) with less effort.\n\n### Forecast productivity metrics\nUsing AI, software leaders can [forecast or predict productivity metrics](https://about.gitlab.com/blog/2024/05/15/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/) - such as deployment frequency - to identify trends and anomalies across the software development lifecycle. These actionable insights can help teams implement changes to improve their efficiency and DevSecOps processes.\n\n## The benefits of using AI in software development\nDevSecOps teams are using AI - or plan to use AI - to help them do many things, including:\n\n* Improve efficiency of their software delivery lifecycle\n* Speed up cycle times\n* Streamline compliance checks\n* Improve employee productivity\n* Improve security posture\n* Improve code quality\n* Improve customer satisfaction\n* Improve employee satisfaction and the developer experience\n* Improve collaboration between teams\n* Improve application performance\n* Automate repetitive tasks\n* Reduce operational costs\n* Reduce context switching and cognitive load\n* Reduce human error\n* Get new hires up to speed faster\n* Help employees [learn new programming languages](https://about.gitlab.com/blog/2023/10/12/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions/)\n\n## Avoiding privacy and security issues\nWhile there are numerous benefits to integrating AI into the software development process, it’s important to be aware of the potential risks as well as common issues and obstacles.\n\nAccording to our [research](https://about.gitlab.com/developer-survey/2024/ai/), privacy, security, and a lack of familiarity with AI-driven solutions were common obstacles respondents 
said they encountered or expect to encounter while implementing AI in the software development lifecycle. Of all the obstacles identified, concerns around privacy and data security were the most common response (34%), followed by the lack of appropriate skills (31%) and the lack of AI knowledge (30%).\n\nBusiness leaders should ensure that AI implementations adhere to established privacy and security standards. This involves integrating compliance checks and balances throughout the AI lifecycle to protect sensitive data and maintain user trust. It's also key to ensure you adopt AI tools that are transparent about how their machine learning models use your organization's data.\n\n## Get to know GitLab Duo\nAll the capabilities mentioned above - from code explanations to suggested tests - are part of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), the suite of AI capabilities built into GitLab’s DevSecOps platform. GitLab Duo helps DevSecOps teams boost efficiency, reduce cycle times, and prevent context switching with AI-assisted workflows in every phase of the software development lifecycle, all in a single application.\n\n> Learn why GitLab was named a Leader in the 2024 Gartner® Magic Quadrant™ for AI Code Assistants.\n> [Access the report](https://about.gitlab.com/gartner-mq-ai-code-assistants/){class=\"button\" data-ga-name=\"gartner magic quadrant\" data-ga-location=\"thesource\"}",[1015,1018,1021,1024,1027],{"header":1016,"content":1017},"How does AI contribute to better code security in DevOps?","AI-powered security tools identify, summarize, and suggest fixes for vulnerabilities in real time. 
They provide automated risk assessments and mitigation recommendations, helping teams detect threats earlier in the software development lifecycle and reduce security blind spots.",{"header":1019,"content":1020},"How can AI forecasting improve software development performance?","AI-driven analytics predict deployment frequency, cycle times, and productivity trends, allowing teams to identify inefficiencies, improve DevOps strategies, and proactively address bottlenecks before they impact performance.",{"header":1022,"content":1023},"How can AI improve the efficiency of DevSecOps teams?","AI enhances DevSecOps efficiency by automating repetitive tasks, suggesting code improvements, summarizing vulnerabilities, and streamlining compliance checks. It reduces context switching, speeds up workflows, and allows teams to focus on higher-value development and security efforts.",{"header":1025,"content":1026},"What are the key privacy and security risks when using AI in DevSecOps?","The primary risks include data privacy concerns, AI-generated code vulnerabilities, and a lack of transparency in how AI models handle proprietary data. Organizations should vet AI providers for compliance with security standards and ensure AI-powered workflows align with internal governance policies.",{"header":1028,"content":1029},"Can AI help developers understand complex or unfamiliar codebases?","Yes, AI-powered assistants explain code logic in natural language, making it easier for developers to quickly grasp existing code structures and dependencies. 
This is particularly useful when onboarding new developers or working with legacy code written in unfamiliar programming languages.",{"layout":5,"template":486,"articleType":487,"author":1031,"featured":6,"isHighlighted":6,"authorName":447},"kristina-weis",{"title":1033,"date":1034,"description":1035,"timeToRead":1036,"heroImage":1037,"keyTakeaways":1038,"articleBody":1042,"config":1043},"5 ways execs can support their DevOps teams with AI","2023-12-14","Learn how the AI capabilities within a DevSecOps platform can help teams boost productivity and collaboration.","8 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463727/lo1idgayu6d7ysofhlsn.png",[1039,1040,1041],"By automating routine tasks and providing tools like code suggestions, AI enhances developer efficiency and reduces stress, which can lead to higher job satisfaction and retention.","A strategic rollout of AI tools can free developers to focus on high-priority projects.","AI assists in summarizing code reviews and discussions, allowing team members to easily grasp and contribute to ongoing projects. This ultimately fosters a more collaborative and connected work environment.","Artificial intelligence (AI) is poised to radically improve the way DevSecOps teams build software. And IT leaders are positioned to help their teams maximize all the benefits that come with using AI - focusing on how the technology can add efficiencies, make developers’ jobs easier, and foster, rather than replace, human-to-human collaboration.\n\n“If developers have the right tools to get their jobs done efficiently, they’re happier and less stressed,” says Abubakar Siddiq Ango, developer evangelism program manager at GitLab. “And if developers are happier, less stressed, and less burned out, they’ll do their jobs better and they won’t be looking to leave for another job. So it means better productivity and retention. 
Big wins.”\n\n[AI capabilities built in an end-to-end DevSecOps platform](https://about.gitlab.com/gitlab-duo/) are about empowering developers and making their jobs easier. Think of AI as the next generation of automation, freeing developers to do the valuable work they love doing - writing innovative code.\n\nHere are five ways to help your DevSecOps teams use AI to transform the way they work:\n\n## 1. Boost developer confidence with AI training\n\nA few of the best things executives can do for their teams are to [automate routine tasks](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/) and make solving problems faster and more efficient, because that makes developers’ jobs easier, more interesting, and less stressful. And that’s just the job for AI.\n\nBy employing AI tools - like [code suggestions](https://about.gitlab.com/blog/2023/05/25/code-suggestions-improves-developer-productivity/), vulnerability summaries, and code explanations - developers are able to spend less time and mental energy on mundane, repetitive, and time-consuming tasks. And that takes a huge load off their backs and helps improve the quality of their work.\n\n“This is definitely going to improve developers’ jobs,” says Ango. “I’d say 70% of my time is spent on Googling this function or researching that. If I can get that understanding in seconds, instead of a few hours, I can spend all that time and energy actually writing code. AI does the mundane work so humans can spend their time on more important things.”\n\nTo get started with AI in a way that won’t simply add stress to DevSecOps team members, managers and executives should make sure their people have the training they need to feel comfortable with AI features. Actually, [our research](https://about.gitlab.com/developer-survey/2024/ai/) shows that nearly a third of respondents, 31%, are concerned they lack the appropriate skill set to employ AI or interpret AI output. 
Of course, training is always critical but with something as new as AI, leaders should take steps to ensure people begin using the technology with a lot of confidence and excitement.\n\n## 2. Work with teams to roll out AI strategically\n\nSpending less time on manual tasks means developers have more time to work on building features for the next project iteration or design the next big piece of software. It also means they have more time to return to projects that might have been pushed aside due to time constraints.\n\nBy using AI to generate code suggestions and explanations, or by using AI-powered root cause analysis to identify the cause of a problem, developers have more time to move projects forward and focus on bigger-picture needs.\n\n“I think executives and IT leaders need to understand that they’re helping people do more with AI,” says Karen Kwentus, senior solutions architect at GitLab. “These capabilities move repetitive tasks out of the way. When I’m developing, I’ve literally spent hours trying to figure out a problem. If AI can suggest code or summarize vulnerabilities so I don’t have to spend time doing that, that can save me hours. Then I’m suddenly doing more with the same amount of time.”\n\nAngo adds, “AI will lead to more efficiency in how developers can build software, secure software, and deploy software.”\n\nLeaders should stay current with what AI features are available and work with their teams to figure out what workflows to simplify first with AI. Where can AI be used to help developers lighten their load and make their work more efficient? Once an AI solution is in place and developers are seeing positive results, managers can work with their teams to see what projects or efforts have been delayed or back-burnered, and begin to prioritize getting that work back on track.\n\n## 3. Reinforce the importance of human-to-human collaboration\n\nOne of the major benefits of a DevSecOps platform is that it fosters a collaborative environment. 
By giving all team members - both within DevSecOps teams and throughout other departments in the company - visibility into the entire software development lifecycle, people from different teams are able to communicate about and help each other navigate around roadblocks and offer efficiency suggestions.\n\nAI capabilities support that.\n\n“When colleagues post comments about the code you’re building, it’s only helpful if you have time to take it in and absorb it all,” says Ango. “When someone asks for a review, AI can provide a summary of that request. And when people provide reviews, [AI can summarize those comments](https://about.gitlab.com/blog/2023/04/20/merge-request-changes-summary-ai/) so it’s easier to understand what everyone is saying about your project. Instead of disconnecting people, AI better connects them.\"\n\nHe adds, \"Workflow is AI enabled. AI improved. Not AI replaced.”\n\nAI does more than automate tasks. It helps team members communicate, creating more opportunities for human-to-human collaboration. Leaders can serve their teams by fostering an environment that encourages communication and collaboration, and reminding people that AI is opening that door for them.\n\n## 4. Encourage teams to share security responsibility\n\nUsing AI-powered [vulnerability summaries](https://about.gitlab.com/blog/2024/07/15/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities/) makes securing code more efficient, less mentally consuming, and faster.\n\nFor example, if a developer pushes code and gets an alert that a SQL injection has been detected, they might not immediately understand how their code is being impacted. But with AI, it’s easy to get an explanation of what the vulnerability is, how it affects the code, and how it impacts the entire piece of software - as well as suggestions for how to fix it.\n\n“If AI can explain a vulnerability and suggest a fix, then that’s exactly what I want,” says Kwentus. 
“Developers and security teams are ultimately responsible for implementing the remediation, but they’ll benefit from actionable AI prompting, context, and explanation. With more information, a user can triage and correct the issue faster.”\n\nAs IT leaders play a significant role in ensuring that DevSecOps teams are using automated security and compliance testing and alerts, they have a similar responsibility to make sure teams are using security-related AI tools, like vulnerability summaries. Team members are increasingly understanding that security is a shared responsibility. That means correcting problems shouldn’t just be left to a security team taking on issues at the end of a project. Developers creating the code can make use of AI capabilities to explain problems that pop up and use suggestions to correct them as soon as they’re found.\n\n> Learn more about [how AI can help DevOps teams improve security](https://about.gitlab.com/the-source/ai/4-ways-ai-can-help-devops-teams-improve-security/) and [how to put generative AI to work in your DevSecOps environment](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/).\n\n## 5. Find AI champions in your teams\n\nExecutives should create time to talk with their teams about the AI capabilities in their DevSecOps platform, and how these tools can ease their workloads. “Let your teams know what your goals are,” says Kwentus. “Give them information. Talk about saving time and mental energy. Tell them about spending less time researching vulnerabilities and spending more time writing code. They didn’t get into this job to do all of these other tasks. They want to write code and this will give them more time for that.”\n\nAnd by relieving their workload and stress, developers will be happier in their jobs. 
And happier people lead to better retention, which leads to more stable DevSecOps teams and less work for executives.\n\n“Developers get stressed when they’re trying to get something done but they keep hitting bottlenecks,” says Ango. “Getting rid of those bottlenecks will decrease their stress and burnout. And that’s easier on everyone.”\n\nIT leaders should, of course, focus on openly communicating with their teams about AI capabilities in their DevSecOps platforms, explaining how the features can make their jobs easier, and making sure they have the training they need to use the tools efficiently and with confidence.\n\nLeaders can make this communication easier by finding and empowering influential people on their teams who are excited about using AI and who will act as champions to encourage others to use the technology. By giving team members not only the tools that will make their jobs easier, but also the knowledge of how to use them, and the encouragement to adopt them, developers are likely to be happier in their jobs.",{"layout":5,"template":486,"articleType":487,"author":1044,"featured":6,"gatedAsset":1045,"isHighlighted":6,"authorName":453},"sharon-gaudin","source-lp-the-ultimate-playbook-for-high-performing-devsecops-teams",{"title":1047,"date":1048,"description":1049,"timeToRead":587,"heroImage":1050,"keyTakeaways":1051,"articleBody":1055,"faq":1056,"config":1072},"How AI can help DevOps teams improve security","2023-12-05","Find out how DevOps teams are using artificial intelligence and machine learning to improve security, minimize risk, and ship more secure code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463801/t2lucrovy8dadeimvk48.png",[1052,1053,1054],"AI and ML in software development is more than code generation — it can enhance security by mitigating vulnerabilities faster, making code reviews more efficient, and suggesting relevant tests to ensure proper coverage.","Nearly a third of DevSecOps teams already use AI for 
automated test generation. However, 55% feel that introducing AI into the software development lifecycle is risky.","Organizations should prioritize AI tools that do not train machine learning models with proprietary data or source code and are designed with a privacy-first approach.","Artificial intelligence (AI) and machine learning (ML) in software development aren't just about helping DevOps teams reduce repetitive tasks and ship code more efficiently. AI and ML can help organizations ship better, more secure code and minimize security risk to their organization and customers.\n\nHere are a few ways AI can help bolster your organization’s security:\n\n## Mitigate security vulnerabilities faster\nWhen a security vulnerability is detected, the first step in fixing it is understanding it - and this is a place where AI stands out. Traditional methods require teams to review code for vulnerabilities manually, which can be time-consuming and prone to human error. However, with AI, developers and security teams can generate summaries of potential vulnerabilities and how attackers might exploit them. More advanced AI-powered tools can even provide a suggested mitigation with sample code for each vulnerability - giving teams actionable insights on how to reduce security risks.\n\n## Make code reviews more efficient and effective\nWhen a developer's code is ready for review, there are a few ways AI can help speed things up and help catch potential issues.\n\nAI can help the author choose the best reviewer - one who's familiar with the code base and more likely to catch important issues, and less likely to ignore the code review request, say that someone else should review it, or provide insufficient feedback. 
While choosing the most appropriate code reviewers can be a complex task for a human, a machine learning algorithm can analyze the changes and the project’s contribution graph to help identify reviewers.\n\nAI also can generate a summary of the merge request to help reviewers quickly understand what they're being asked to review and to ease the code review handoff process.\n\n## Generate tests to ensure proper test coverage\nThoroughly testing code changes is one of the most important ways to ensure code works as expected and doesn’t introduce security issues - but writing tests can be time-consuming and difficult, so code is often pushed to production environments without appropriate test coverage.\n\nAI can look at code changes and suggest relevant tests along with test files, so developers can spend less time thinking about and writing tests and [more time coding](https://about.gitlab.com/the-source/ai/how-ai-helps-devsecops-teams-improve-productivity/).\n\nIn fact, many DevOps teams are already using AI to generate tests. In our [2024 survey of more than 5,000 DevSecOps professionals worldwide](https://about.gitlab.com/developer-survey/2024/ai), nearly a third (32%) of respondents whose organizations were using AI said they were using it for automated test generation.\n\n## Protect your proprietary data when using AI\nFor many organizations, it’s important that the efficiency gains of using AI and ML don’t come at the cost of privacy, security, or compliance. More than half of survey respondents (55%) said they feel that introducing AI into the software development process is risky. Concerns around privacy and data security were the top AI-related obstacle identified by respondents.\n\nBefore integrating AI into your software development processes, make sure to understand how your proprietary data will or won’t be used to train its machine learning models. 
Allowing DevOps teams to use the wrong AI tool can lead to painful and costly [leaks of top-secret data and source code](https://www.techradar.com/news/samsung-workers-leaked-company-secrets-by-using-chatgpt).\n\n> Find out what your DevSecOps team can do to begin to understand - and measure - the [impact of generative AI](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/).\n\n### Improve security with AI-powered DevSecOps workflows\n\nAI solutions like [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help DevOps teams use AI to improve security throughout their software development lifecycle with [capabilities](https://docs.gitlab.com/ee/user/ai_features.html) such as vulnerability summaries, suggested tests, suggested reviewers, and merge request summaries.\n\nGitLab Duo does not train ML models with customers’ proprietary data or source code and is designed with a privacy-first approach to help enterprises and regulated organizations adopt AI-powered workflows.",[1057,1060,1063,1066,1069],{"header":1058,"content":1059},"How can AI-powered DevSecOps workflows improve software security?","AI-powered DevSecOps workflows integrate security at every stage of development by providing vulnerability detection, risk analysis, automated testing, and secure code recommendations. By leveraging AI-driven security insights, teams can ship more secure software faster while reducing manual workload and human error.",{"header":1061,"content":1062},"Can AI assist with test generation to improve security?","Yes, AI can automatically generate tests to ensure proper code coverage and reduce the likelihood of security vulnerabilities going undetected. 
By analyzing code changes, AI tools suggest relevant unit tests, integration tests, and security tests, helping DevOps teams validate software without the burden of manually writing every test case.",{"header":1064,"content":1065},"How can AI help DevOps teams detect and mitigate security vulnerabilities?","AI can speed up vulnerability detection and mitigation by generating summaries of security risks and suggesting actionable fixes. Instead of manually reviewing code for vulnerabilities, DevOps teams can use AI-powered security tools to analyze code, identify weaknesses, and provide remediation suggestions, reducing the time it takes to address security threats.",{"header":1067,"content":1068},"What security risks are associated with using AI in software development?","The biggest risks of using AI in DevOps include privacy concerns, compliance issues, and potential data leaks. Organizations should carefully evaluate AI tools to ensure they do not train machine learning models using proprietary source code. AI solutions like GitLab Duo prioritize a privacy-first approach, ensuring that sensitive data remains protected.",{"header":1070,"content":1071},"How does AI enhance the efficiency of code reviews?","AI improves code review efficiency by suggesting the most relevant reviewers based on contribution history and expertise. It can also generate merge request summaries, helping reviewers quickly understand the changes and focus on key security risks. 
This reduces bottlenecks in the review process and ensures higher-quality security assessments.",{"layout":5,"template":486,"articleType":487,"author":1031,"featured":6,"gatedAsset":25,"isHighlighted":6,"authorName":447},{"title":1074,"date":1075,"description":1076,"timeToRead":462,"heroImage":1077,"keyTakeaways":1078,"articleBody":1081,"faq":1082,"config":1098},"Building a transparency-first AI strategy: 7 questions to ask your DevOps provider","2023-11-13","Learn what to ask before adopting an AI tool to avoid exposing sensitive data or compromising intellectual property rights.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751464514/nigg5kzkyyjxsjlhl45j.png",[1079,1080,1000],"AI boosts software efficiency but requires transparency to protect privacy and IP rights.","GitLab prioritizes transparency in AI use, ensuring data protection and customer trust.","AI enables organizations to enhance software development practices by boosting efficiency and reducing cycle times, but its use should not be at the cost of privacy and data security. Transparency around data protection and intellectual property should be a central part of any organization’s AI strategy. Transparency is even more critical for organizations using AI as part of DevOps as they need to know what they are agreeing to when using AI features and how updates will be communicated.\n\nAt GitLab, transparency is one of our [core values](https://handbook.gitlab.com/handbook/values/#transparency). As we continue to expand [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered capabilities for the entire software development lifecycle, transparency remains a top priority.\n\nGitLab’s [State of AI in Software Development report](https://about.gitlab.com/developer-survey/#ai) found that teams are feeling optimistic about their adoption of AI, and 83% of respondents said it is essential to implement AI in their software development processes to avoid falling behind. 
However, nearly as many respondents (79%) also expressed concern about AI tools having access to their private information and intellectual property.\n\nMany of our customers ask where they should begin when evaluating a new AI tool in their software development lifecycle. To help you get better visibility into the actions your DevOps provider is taking to protect your organization’s data and intellectual property, here are seven questions you can ask (as well as how GitLab Duo stacks up).\n\n## What large language models (LLMs) power the AI features in your platform?\n\nDifferent LLMs have different strengths, so setting up your AI architecture with multiple models for specific use cases can be a path to success. However, it’s important to ensure that DevOps providers are transparent about the LLMs they utilize for their AI features as well as details about where the LLMs are hosted.\n\nGitLab Duo features aren’t powered by a single model. We have built GitLab Duo with the flexibility to use the model that provides the best result for each use case. We continue our commitment to transparency by clearly identifying the models powering GitLab Duo features in our [publicly available documentation](https://docs.gitlab.com/ee/user/ai_features.html).\n\n## Who has control of and access to the models?\n\nEvery organization must be able to identify who has control of and access to the LLMs they are using. If a third party has control and access, are they listed by the DevOps provider as a subprocessor? 
If affiliates have control and access, are those affiliates clearly identified as a subprocessor?\n\nGitLab Duo is powered by third-party models hosted on cloud infrastructure, and the vendors of these models and the terms on which they provide services to GitLab were chosen as they support GitLab’s commitment to privacy and the protection of customer intellectual property.\n\nWe list all our subprocessors clearly on our [subprocessors page](https://about.gitlab.com/privacy/subprocessors/), and customers can [sign up](https://about.gitlab.com/privacy/subprocessors/#sign-up) to be notified when updates are made to this page.\n\n## What protection do you provide to alleviate customer concerns related to the perceived risks of using AI-generated output?\n\nIt's essential to know what protections a DevOps provider will provide regarding AI-generated output and how that guarantee will be met.\n\nGitLab will indemnify you and protect your right to use output generated by GitLab Duo including defending you from claims that output generated from GitLab Duo infringes a third party’s intellectual property rights.\n\n## How do I get the benefit of those protections? Are the protections automatic, or do I need to take any action to receive the protections?\n\nEven if you know that your DevOps provider includes protections related to the risks of using AI-generated output, it’s important to know what limitations, if any, are associated with those protections.\n\n[GitLab protects your right to use output generated by GitLab Duo](https://about.gitlab.com/handbook/legal/ai-functionality-terms/) as long as you:\n1. have not modified the output;\n2. have a valid right to use your inputs;\n3. have paid for the AI feature(s); and\n4. 
have evaluated the output before using or otherwise relying on it.\n\nAt this time, you do not need to enable or activate any features or filters to receive this protection.\n\n## Do I retain my intellectual property (IP) rights to input entered into AI features?\n\nIP is the foundation of an organization and, therefore, you must know how a DevOps provider will handle your rights in respect to inputs that you add to AI features.\n\nWith GitLab Duo, your inputs remain your content. GitLab makes no claim of ownership in your input.\n\n## Do I own the output (or suggestions) generated from AI features?\n\nPerhaps equally important is the question of whether you own what is generated from AI features - the output and suggestions - especially if they are incorporated into your software.\n\nWhile the legal and regulatory landscape related to AI-generated output is developing, GitLab’s position is clear. GitLab does not claim ownership of any output generated by GitLab Duo. Output generated by GitLab Duo can be used at your discretion and, if a third-party claim arises from your use of the output generated by GitLab Duo, GitLab will step in and defend you.\n\n## Where are the terms, policies, and commitments that govern the use of your AI features located?\n\nDevOps providers should be able to share specific documentation about how their AI features use your data.\n\nHere are the relevant resources for GitLab customers:\n- [GitLab Subscription Agreement](https://about.gitlab.com/handbook/legal/subscription-agreement/)\n- [AI Functionality Terms](https://about.gitlab.com/handbook/legal/ai-functionality-terms/)\n- [GitLab Privacy Statement](https://about.gitlab.com/privacy/)\n- [Acceptable Use Policy](https://about.gitlab.com/handbook/legal/acceptable-use-policy/)\n- [GitLab Duo Documentation](https://docs.gitlab.com/ee/user/ai_features.html)\n\n## Learn more\nWithout transparency from AI tool providers, organizations are unable to discern the risks around the handling of 
sensitive information and customer data, trade secrets, and the organization’s intellectual property rights. GitLab remains committed to privacy and transparency. With [GitLab Duo](https://about.gitlab.com/gitlab-duo/), enterprises and regulated organizations can adopt AI-powered workflows with confidence over how their sensitive data is being used.\n\nYou can learn more about GitLab’s privacy-first approach to AI in the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/ai_features.html).",[1083,1086,1089,1092,1095],{"header":1084,"content":1085},"What protections should organizations expect for AI-generated output?","Organizations should verify whether their DevOps provider offers indemnification for AI-generated output, meaning they will defend customers against legal claims related to AI-generated content. Providers should also specify whether any actions—such as reviewing AI-generated code or enabling certain features—are required to qualify for these protections.",{"header":1087,"content":1088},"Where can businesses find policies governing AI usage in DevOps?","AI transparency policies should be easily accessible within a provider’s subscription agreements, AI terms, privacy statements, and acceptable use policies. Organizations should review these documents to understand data handling, security commitments, and compliance requirements before integrating AI into their development workflows.",{"header":1090,"content":1091},"How can organizations verify the AI models used by their DevOps provider?","Before adopting AI-powered DevOps tools, organizations should ask providers which large language models (LLMs) they use, where they are hosted, and whether third-party vendors have access. 
A trustworthy provider should disclose this information in their documentation and offer customers a way to track changes, ensuring continued compliance with security and data protection policies.",{"header":1093,"content":1094},"Who owns the input and output data when using AI features?","Ownership of AI-generated content varies by provider, making it essential to clarify whether organizations retain rights over both input data (what users enter into AI tools) and output data (what AI generates). GitLab, for example, does not claim ownership of input or output data, ensuring businesses maintain control over their intellectual property.",{"header":1096,"content":1097},"Why is transparency important when adopting AI in DevOps?","Transparency ensures organizations understand how AI tools handle sensitive data, intellectual property, and security risks. Without clear policies from AI providers, businesses may unknowingly expose proprietary code or fail to comply with data protection regulations. 
A transparency-first approach helps mitigate these risks by providing visibility into AI model usage, data access, and ownership rights.",{"layout":5,"template":486,"articleType":487,"author":1099,"featured":6,"isHighlighted":6,"authorName":450},"rschulman",{"title":1101,"date":1102,"description":1103,"timeToRead":1036,"heroImage":1104,"keyTakeaways":1105,"articleBody":1109,"faq":1110,"config":1126},"Velocity with guardrails: AI, automation, and removing the security and speed tradeoff","2023-04-24","Learn what 'velocity with guardrails' means for you and how the DevSecOps Platform's features support your need for security and speed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463608/tle7cto9xpbrqlygzqex.png",[1106,1107,1108],"Technology teams face resource constraints and security challenges, intensified by limited budgets and a shortage of security engineers.","GitLab's DevSecOps platform leverages AI and automation to enhance security, streamline regulatory compliance, and boost developer productivity without sacrificing speed.","The Value Streams Dashboard provides strategic insights into metrics that help decision makers identify trends and patterns to optimize software delivery.","Technology teams are under intense pressure. They are resource constrained, but still need to have one foot firmly on the gas pedal to drive innovation and deliver value to their customers. And they need to do that while protecting their software supply chain – the seemingly endless amount of integrations and add-ons in today’s modern development environment.\n\nThe dynamic is brutal. Security engineers are outnumbered. One customer told me that for every 100 developers, there is only 1 security engineer. 
Couple that with dwindling budgets – according to the [2023 GitLab Global DevSecOps Report: Security Without Sacrifices](https://about.gitlab.com/developer-survey/), 85% of respondents said security budgets are flat or reduced – and you get a dynamic where speed and convenience will trump security and compliance.\n\nBut that dynamic does not need to be the norm.\n\nWe believe in a simple mantra: **Velocity with guardrails**. Artificial intelligence technologies and automation solutions accelerate code creation and, when paired with a comprehensive DevSecOps platform, create the security and compliance guardrails that every company needs. Velocity with guardrails means no more trading off the need for fast software innovation with the need for secure software development. Velocity with guardrails only happens in a world where AI and automation extend beyond code creation. In fact, our Global DevSecOps Report found that 62% of developers said they use AI/ML to check code and 65% of developers are using – or plan to use in the next three years – AI/ML in testing efforts.\n\nGiven the resource constraints DevSecOps teams face, automation and artificial intelligence become a strategic resource. Our DevSecOps Platform helps teams fill critical gaps while automatically enforcing policies, applying compliance frameworks, performing security tests using GitLab’s automation capabilities, and providing AI-assisted recommendations - which frees up resources.\n\nIn the past few months, we’ve introduced a host of new features and capabilities to bring this mantra to life. Here’s a taste.\n\n## Increase velocity with Code Suggestions\n\nEvery day, millions of developers use GitLab to contribute code. In February, we launched a Beta for our Code Suggestions feature, and since then, we’ve been working hard to make [Code Suggestions available to more developers](https://about.gitlab.com/releases/2023/04/22/gitlab-15-11-released/#code-suggestions-for-ultimate--premium-users). 
During Beta, Code Suggestions is free for all Ultimate and Premium customers. GitLab Code Suggestions can improve developer productivity, focus, and innovation without context switching and within a single DevSecOps platform.\n\n![code-suggestions](//images.ctfassets.net/xz1dnu24egyd/7bEYlrnjRV1uH1BNJ9fJt3/c4ca2364f5f3c6e239ae0d6776fd50ca/code-suggestions.png)\n\nCode Suggestions is only the start of our journey infusing AI/ML into all aspects of the software development lifecycle. Along with [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers), we have been [sharing previews of these AI/ML-powered features on our blog](https://about.gitlab.com/blog/tags.html#AI/ML) every Thursday in a weekly series.\n\n## AI-assisted vulnerability guidance\n\nAccording to our Global DevSecOps Report, security respondents who don’t use a DevSecOps platform were more likely to struggle to identify who can perform remediation and consider it difficult to understand vulnerability findings. To help teams identify an effective way to fix a vulnerability within the context of their specific code base, we have released an experimental feature that provides GitLab AI-assisted vulnerability recommendations leveraging the explanatory power of large language models. This capability combines basic vulnerability information with insights derived from the customer’s code to explain the vulnerability in context, demonstrate how it can be exploited, and provide an example fix. 
Initial testing shows significant promise in reducing the time to determine a fix for a vulnerability.\n\n![gitlab-Improper Restriction-XXE](//images.ctfassets.net/xz1dnu24egyd/4SZxxUHTRmKtwbqCb4v0tU/bfb043db336863342effb6aefb19b055/gitlab-Improper_Restriction-XXE.png)\n\nThis is just one of [a number of experimental AI-assisted capabilities](/blog/2023/04/24/ai-ml-in-devsecops-series/) we’ve shared in the past few months to improve developer productivity and software delivery efficiency.\n\n## Gain a new level of visibility with Value Streams Dashboard\n\nWith AI accelerating productivity, visibility and transparency have never been more important. Our new Value Streams Dashboard provides strategic insights into metrics that help decision makers identify trends and patterns to optimize software delivery. This data is grounded in [DORA4 metrics](https://docs.gitlab.com/ee/user/analytics/dora_metrics.html) and the [flow of value delivery](https://docs.gitlab.com/ee/user/group/value_stream_analytics) across projects and groups.\n\nThe Value Streams Dashboard offers visibility across every step of the software development lifecycle, without needing to buy or maintain a third-party tool. The result: Fewer tools, increased visibility, and more transparency, all within GitLab.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/819308062?h=752d064728\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Set license policies and scan software licenses for compliance\n\nViolating or breaching a license by using software with an incompatible license may result in an expensive lawsuit or many developer hours to remove problematic code. 
We recently released a new and improved [license compliance scanner](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/#new-license-compliance-scanner) along with [license approval policies](https://docs.gitlab.com/ee/user/compliance/license_approval_policies.html). The new scanner extracts license information from packages that are dual-licensed or have multiple licenses that apply and automatically parses and identifies more than 500 different types of licenses, a substantial increase from previously identifying only 20 types of licenses.\nLicense approval policies help minimize the risk that unapproved licenses are in use, saving organizations time and effort to manually ensure compliance.\n\n![set-license-policy](//images.ctfassets.net/xz1dnu24egyd/uc77uy7dLGNgYfP6WprlN/8a7a1a2de63a4135f63209a58ff5cdcf/set-license-policy.png)\n\n![dependencies list](//images.ctfassets.net/xz1dnu24egyd/1rwjZLoe7YEN2pjzAHWCAw/2dd136c6f2b20276242f94b61f804d7b/dependencies.png)\n\n## Protect secrets from being leaked\n\nA recent [string of attacks](https://securityboulevard.com/2023/02/secrets-exposed-why-modern-development-open-source-repositories-spill-secrets-en-masse/) pointed to leaked personal access tokens (PATs) in source code as the culprit. GitLab Secret Detection can protect against that. We now [automatically revoke PATs](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/#automatic-revocation-of-leaked-personal-access-tokens) leaked in public GitLab repositories, mitigating the risk of a developer mistakenly committing a PAT into their code. This capability helps protect GitLab users and their organizations from credential exposure and reduces risk to production applications.\n\n![personal-access-token](//images.ctfassets.net/xz1dnu24egyd/6h16lSbWhGFynU9mezUhxB/cbc2d1097e17fe377c12009f45406113/personal-access-token.png)\n\nWe are not stopping at remediating GitLab managed credentials. 
We now support [responding to leaked secrets in public projects](https://about.gitlab.com/releases/2023/04/22/gitlab-15-11-released/#automatic-response-to-leaked-secrets-on-any-public-branch) by revoking the credential or notifying the vendor who issued it. We’re actively expanding the list of supported vendors which [any SaaS vendor can join](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html#partner-program-for-leaked-credential-notifications) to help us secure any secret a developer might use.\n\n## Automatically enforce security policies\n\nManually enforcing security policies for different projects and code commits can be time-consuming. Applying automation to policy enforcement can prevent security rules from being bypassed without proper approval. Security teams can configure [policy rules](https://docs.gitlab.com/ee/user/application_security/policies/), such as requiring multiple approvers across various teams (e.g., QA, Business, Legal), a two-step approval process, and approval for exceptions for using out-of-policy licenses. Such policies can be applied to multiple development projects, at the group or subgroup level, to allow for ease in maintaining a single, centralized ruleset.\n\n![enforce-policies-approvals](//images.ctfassets.net/xz1dnu24egyd/pn3pi2IjVzYcnNHjlTVai/423b0b4bcf6b462d80669a0539e9892d/enforce-policies-approvals.png)\n\n## Avoid false positives in security testing\n\nSecurity professionals report that too many false positives rank among their top three frustrations, according to the GitLab 2023 Global DevSecOps Survey. 
Our [DAST API Analyzer](https://docs.gitlab.com/ee/user/application_security/dast_api) is now more accurate and reduces false positives by an estimated 78%, making it easier for DevSecOps teams to hone in on true security threats.\n\n![dast-vulnerabilities](//images.ctfassets.net/xz1dnu24egyd/Hj0qscGH4vFZNXX3L13BV/34f2f2de0f5ffc8f5f8ab30ec7014dde/dast-vulnerabilities.png)\n\nWe’ve also just introduced [vulnerability dismissal reasons](https://about.gitlab.com/releases/2023/04/22/gitlab-15-11-released/#vulnerability-dismissal-reasons) to help track why vulnerabilities were resolved to improve compliance tracking and audit reports.\n\n![vulnerability-dismissal](//images.ctfassets.net/xz1dnu24egyd/57bioAT9qKilh6uDjuxcZa/35ee045d552e0ad487255928ae7464ec/vulnerability-dismissal.png)\n\nWe've introduced a lot of new capabilities that enable our customers to achieve velocity with guardrails. Watch this 90-second video to see how GitLab secures your end-to-end software supply chain.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://player.vimeo.com/video/762685637?h=f96e969756\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## More velocity, more guardrails coming soon\n\nGitLab has an ambitious roadmap for 2023 to make it easier to integrate security into our customers’ software development lifecycle so they can deliver secure code easier and more efficiently. 
Capabilities coming soon include:\n\n- [Group and subgroup level dependency lists](https://gitlab.com/groups/gitlab-org/-/epics/8090) provide users a simple way to view their projects’ dependencies, as managing dependencies at the project level can be problematic for organizations with hundreds of projects.\n- [Continuous container and dependency scanning](https://gitlab.com/groups/gitlab-org/-/epics/7886) improves visibility and timeliness of vulnerability discovery by automatically scanning for new findings any time a new security advisory is published or code is changed.\n- [Management tools for compliance frameworks](https://gitlab.com/groups/gitlab-org/-/epics/9101) allow customers to apply the compliance frameworks to existing projects and multiple projects at once. Currently, customers can apply compliance frameworks and policies individually per project.\n- [SBOM ingestion](https://gitlab.com/groups/gitlab-org/-/epics/8024) will allow GitLab to import CycloneDX files from third-party tools to create a single source for all of the software’s dependencies giving greater system-wide visibility and helping to create actionable insights.\n\n> __Learn how to increase velocity securely with [Secure by Design principles](https://about.gitlab.com/the-source/security/strengthen-your-cybersecurity-strategy-with-secure-by-design/).__\n",[1111,1114,1117,1120,1123],{"header":1112,"content":1113},"What does \"velocity with guardrails\" mean in DevSecOps?","\"Velocity with guardrails\" refers to achieving fast software development while maintaining strong security and compliance measures. By leveraging AI and automation, organizations can accelerate code creation, automate security enforcement, and reduce risks without sacrificing speed.",{"header":1115,"content":1116},"How does GitLab automate security policy enforcement?","GitLab’s security automation ensures compliance by enforcing security rules across multiple projects. 
Teams can set policies such as multi-step approvals, license compliance checks, and automated secret detection, reducing security risks and improving regulatory adherence.",{"header":1118,"content":1119},"What is GitLab’s Value Streams Dashboard, and how does it help teams?","GitLab’s Value Streams Dashboard provides visibility into software delivery performance using key metrics. It helps organizations track trends, optimize workflows, and improve efficiency without relying on third-party analytics tools.",{"header":1121,"content":1122},"How does AI-assisted vulnerability guidance enhance security remediation?","AI-assisted vulnerability guidance in GitLab analyzes detected vulnerabilities, explains potential risks, and suggests example fixes. By leveraging large language models (LLMs), it helps developers quickly understand security threats and implement effective solutions with minimal manual effort.",{"header":1124,"content":1125},"How does GitLab’s AI-powered Code Suggestions improve developer productivity?","GitLab’s Code Suggestions feature uses AI to help developers write code faster by providing real-time recommendations within the DevSecOps platform. This reduces context switching, improves efficiency, and enables developers to focus on innovation while maintaining high code quality.",{"layout":5,"template":486,"articleType":487,"author":1127,"featured":6,"isHighlighted":6,"authorName":433},"dave-steer",[458,489,519,551,583,615],{"ai":11,"platform":363,"security":98},1751572790072]