From 36a0b65c8991681591f9c13e0ef83f3165efa375 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 11:00:24 -0500 Subject: [PATCH 01/58] Add badges for Codespaces and Dev Containers --- 068-AzureOpenAIApps/Student/Challenge-00-nolab.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md index 0ebe875196..e816636b16 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md @@ -40,6 +40,8 @@ A GitHub Codespace is a development environment that is hosted in the cloud that #### Use Github Codespaces +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack/068-AzureOpenAIApps) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/068-AzureOpenAIApps) | +|---|---| You must have a GitHub account to use GitHub Codespaces. If you do not have a GitHub account, you can [Sign Up Here](https://github.com/signup). GitHub Codespaces is available for developers in every organization. All personal GitHub.com accounts include a monthly quota of free usage each month. GitHub will provide users in the Free plan 120 core hours, or 60 hours of run time on a 2 core codespace, plus 15 GB of storage each month. From dafb3f1e4f7f248770be23f99205026ac93d5223 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 11:01:25 -0500 Subject: [PATCH 02/58] Fix formatting of GitHub Codespaces badge section --- 068-AzureOpenAIApps/Student/Challenge-00-nolab.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md index e816636b16..004d9535f9 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md @@ -40,8 +40,7 @@ A GitHub Codespace is a development environment that is hosted in the cloud that #### Use Github Codespaces -| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack/068-AzureOpenAIApps) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/068-AzureOpenAIApps) | -|---|---| +[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack/068-AzureOpenAIApps) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/068-AzureOpenAIApps) You must have a GitHub account to use GitHub Codespaces. If you do not have a GitHub account, you can [Sign Up Here](https://github.com/signup). GitHub Codespaces is available for developers in every organization. All personal GitHub.com accounts include a monthly quota of free usage each month. 
GitHub will provide users in the Free plan 120 core hours, or 60 hours of run time on a 2 core codespace, plus 15 GB of storage each month. From 9b41d56d9d61e7b61182008d58bb0f3a865ee371 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 17:39:32 -0500 Subject: [PATCH 03/58] Add generic root devcontainer and update Dev Containers button - Add generic .devcontainer/devcontainer.json at repo root - Updated Challenge-00-lab.md with working Dev Containers button - Root devcontainer uses universal image for broad compatibility - Allows users to start with generic container, then reopen specific folders in their specialized containers --- .devcontainer/devcontainer.json | 17 +++++++++++++++++ .../Student/Challenge-00-lab.md | 18 ++++++++++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..30bdc7d8a0 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,17 @@ +{ + "name": "WhatTheHack", + "image": "mcr.microsoft.com/devcontainers/universal:2", + "features": { + "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.vscode-json" + ] + } + }, + "remoteUser": "codespace" +} \ No newline at end of file diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 6888c28944..ae573b3f0b 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,6 +32,9 @@ Keep your credentials handy as you will also need them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack) | +|---|---| + A GitHub Codespace is a development environment that is hosted in the cloud that you access via a browser. All of the pre-requisite developer tools for this hack are pre-installed and available in the codespace. @@ -66,9 +69,13 @@ Your Codespace environment should load in a new browser tab. It will take approx Your developer environment is ready, hooray! Skip to section: [Setup Citrus Bus Application](#setup-citrus-bus-application) +<<<<<<< Updated upstream **NOTE:** If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. **NOTE:** GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser. If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds. 
If you want to have a better experience, you can also update the default timeout value in your personal setting page on Github. Refer to this page for instructions: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) +======= +**NOTE:** GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser. If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds. If you want to have a better experience, you can also update the default timeout value in your personal setting page on GitHub. Refer to this page for instructions: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) +>>>>>>> Stashed changes **NOTE:** Codespaces expire after 30 days unless you extend the expiration date. When a Codespace expires, the state of all files in it will be lost. @@ -95,9 +102,9 @@ You will next be setting up your local workstation so that it can use dev contai **NOTE:** On Windows, Dev Containers run in the Windows Subsystem for Linux (WSL). As of May 2025, WSL on Windows ARM64 does not currently support running the Azure Function Core Tools needed for this hackathon in x86_64 emulation using QEMU. IF you are using a Windows on ARM device, you will need to use a GitHub Codespace instead. -On Windows and Mac OS (**NOTE:** only tested on Apple Silicon): +On Windows and macOS (**NOTE:** only tested on Apple Silicon): - Download and install Docker Desktop -- (Mac OS only) In Docker Desktop settings, choose Apple Virtualization Framework for the Virtual Machine Manager. Also, click the checkbox to use Rosetta for x86_64/amd64 emulation on Apple Silicon +- (macOS only) In Docker Desktop settings, choose Apple Virtualization Framework for the Virtual Machine Manager. Also, click the checkbox to use Rosetta for x86_64/amd64 emulation on Apple Silicon - (Windows only) Install the Windows Subsystem for Linux along with a Linux distribution such as Ubuntu. You will need to copy the `Resources.zip` to your Linux home directory and unzip it there. - Open the root folder of the Student resource package in Visual Studio Code - You should get prompted to re-open the folder in a Dev Container. You can do that by clicking the Yes button, but if you miss it or hit no, you can also use the Command Palette in VS Code and select `Dev Containers: Reopen in Container` @@ -119,10 +126,17 @@ There are three major steps to setup the Sample Application: - [Setup App Backend](#setup-app-backend) - [Setup App Frontend](#setup-app-frontend) +<<<<<<< Updated upstream In your codespace, or student `Resources.zip` package, you fill find the following folders containing the frontend and backend API of the sample application to help you get started: - `/ContosoAIAppsBackend` - Contains an Azure function app that provides capabilities of processing data and interacting with Azure AI Services like Azure OpenAI and Azure Document Intelligence. - `/ContosoAIAppsFrontend` - Contains an Angular App that provides a user interface to some example virtual assistants. 
- `/data` - Contains various artifacts and data sources that will be used by the Citrus Bus application +======= +In your codespace, or student `Resources.zip` package, you will find the following folders containing the frontend and backend API of the sample application to help you get started: +- `/ContosoAIAppsBackend` - Contains an Azure Function app that provides capabilities of processing data and interacting with Azure AI Services like Azure OpenAI and Azure Document Intelligence. +- `/ContosoAIAppsFrontend` - Contains an Angular app that provides a user interface to some example virtual assistants. +- `/artifacts` - Contains various artifacts and data sources that will be used by the Citrus Bus application +>>>>>>> Stashed changes - `/infra` - Contains deployment script and Bicep templates to deploy Azure resources for hosting the Citrus Bus application in Azure. The apps also contain helper utilities, functions and tools to help you speed up development as well as hints to the challenges you will be taking on. From 7f44ea88c5eb51d573cb79a5beee199ecbae4fe3 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 17:48:54 -0500 Subject: [PATCH 04/58] Create devcontainer.json --- .devcontainer/devcontainer.json | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..002ae47923 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,17 @@ +{ + "name": "WhatTheHack", + "image": "mcr.microsoft.com/devcontainers/universal:2", + "features": { + "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.vscode-json" + ] + } + }, + "remoteUser": "codespace" +} From 19bb6ab7aa51f02e410ac065d90cb341913c690f Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 17:51:49 -0500 Subject: [PATCH 05/58] Update Dev Containers button to use TestCodespaceButton branch - Updated URL to include ref=TestCodespaceButton parameter - Ensures testing uses the branch with the root devcontainer - Users can now test the full dev container workflow --- .../.devcontainer/devcontainer.json | 17 +++++++++++++++++ 068-AzureOpenAIApps/Student/Challenge-00-lab.md | 2 +- 2 files changed, 18 insertions(+), 1 deletion(-) create mode 100644 068-AzureOpenAIApps/.devcontainer/devcontainer.json diff --git a/068-AzureOpenAIApps/.devcontainer/devcontainer.json b/068-AzureOpenAIApps/.devcontainer/devcontainer.json new file mode 100644 index 0000000000..30bdc7d8a0 --- /dev/null +++ b/068-AzureOpenAIApps/.devcontainer/devcontainer.json @@ -0,0 +1,17 @@ +{ + "name": "WhatTheHack", + "image": "mcr.microsoft.com/devcontainers/universal:2", + "features": { + "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.vscode-json" + ] + } + }, + "remoteUser": "codespace" +} \ No newline at end of file diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index ae573b3f0b..6d682b3b1d 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,7 +32,7 @@ Keep your credentials handy as you will also need 
them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. -| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack) | +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack&ref=TestCodespaceButton) | |---|---| From 9409b2065aa910ed88522d979c8bd0e5962b6585 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:00:58 -0500 Subject: [PATCH 06/58] Enhance root devcontainer with Python feature and postCreateCommand --- .devcontainer/devcontainer.json | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 30bdc7d8a0..6f94b1d4d8 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -4,14 +4,17 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {}, "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/github-cli:1": {} + "ghcr.io/devcontainers/features/github-cli:1": {}, + "ghcr.io/devcontainers/features/python:1": {} }, "customizations": { "vscode": { "extensions": [ - "ms-vscode.vscode-json" + "ms-vscode.vscode-json", + "ms-python.python" ] } }, + "postCreateCommand": "echo 'Dev container ready!'", "remoteUser": "codespace" } \ No newline at end of file From 71dacec48e504d8c8323d017c7e0be94bcefe13a Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:02:55 -0500 Subject: [PATCH 07/58] Update dev containers URL for testing --- 068-AzureOpenAIApps/Student/Challenge-00-lab.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 6d682b3b1d..1e10e2db34 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,7 +32,7 @@ Keep your credentials handy as you will also need them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. 
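As a quick aside, signing in to the Azure CLI with the credentials provided by your coach typically looks like the sketch below (device-code flow shown; the subscription name is a placeholder, not something defined by this hack):

```bash
# Sign in with the lab credentials; the device-code flow works in Codespaces and local terminals alike.
az login --use-device-code

# Confirm which subscription the CLI is pointed at.
az account show --output table

# If several subscriptions are listed, switch to the lab one explicitly (name is illustrative).
az account set --subscription "<lab-subscription-name>"
```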
-| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack&ref=TestCodespaceButton) | +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/tree/TestCodespaceButton) | |---|---| From 6d2f98dfd96728409846663627b16679534c4b32 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:03:41 -0500 Subject: [PATCH 08/58] Add generic root devcontainer for testing Dev Containers button --- .devcontainer/devcontainer.json | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 002ae47923..6f94b1d4d8 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -4,14 +4,17 @@ "features": { "ghcr.io/devcontainers/features/azure-cli:1": {}, "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/github-cli:1": {} + "ghcr.io/devcontainers/features/github-cli:1": {}, + "ghcr.io/devcontainers/features/python:1": {} }, "customizations": { "vscode": { "extensions": [ - "ms-vscode.vscode-json" + "ms-vscode.vscode-json", + "ms-python.python" ] } }, + "postCreateCommand": "echo 'Dev container ready!'", "remoteUser": "codespace" -} +} \ No newline at end of file From e7cfa89a184fa2b7a6a95e03ec38288a7c42435b Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:07:31 -0500 Subject: [PATCH 09/58] Add devcontainer.json at root level for Dev Containers detection --- .devcontainer.json | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .devcontainer.json diff --git a/.devcontainer.json b/.devcontainer.json new file mode 100644 index 0000000000..6f94b1d4d8 --- /dev/null +++ b/.devcontainer.json @@ -0,0 +1,20 @@ +{ + "name": "WhatTheHack", + "image": "mcr.microsoft.com/devcontainers/universal:2", + "features": { + "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/github-cli:1": {}, + "ghcr.io/devcontainers/features/python:1": {} + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.vscode-json", + "ms-python.python" + ] + } + }, + "postCreateCommand": "echo 'Dev container ready!'", + "remoteUser": "codespace" +} \ No newline at end of file From 037565ed3d50749e8556c31fd44a1d3782cdae8f Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:12:09 -0500 Subject: [PATCH 10/58] Try cloneInContainer command with folder parameter --- 068-AzureOpenAIApps/Student/Challenge-00-lab.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 1e10e2db34..10fb954838 100644 --- 
a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,7 +32,7 @@ Keep your credentials handy as you will also need them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. -| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/tree/TestCodespaceButton) | +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInContainer?url=https://github.com/perktime/WhatTheHack&folder=.devcontainer/068-AzureOpenAIApps) | |---|---| From 5ce08748781fe2a1881e07d174c65df65c1f0125 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:12:34 -0500 Subject: [PATCH 11/58] Add root level devcontainer.json for testing --- .devcontainer.json | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 .devcontainer.json diff --git a/.devcontainer.json b/.devcontainer.json new file mode 100644 index 0000000000..f59db1bff9 --- /dev/null +++ b/.devcontainer.json @@ -0,0 +1,9 @@ +{ + "name": "WhatTheHack Dev Container", + "image": "mcr.microsoft.com/devcontainers/universal:2-ubuntu", + "features": { + "ghcr.io/devcontainers/features/azure-cli:1": {}, + "ghcr.io/devcontainers/features/node:1": {}, + "ghcr.io/devcontainers/features/python:1": {} + } +} \ No newline at end of file From 9f1fd6d92b31e5ec614850b144b39b97f1337f54 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:12:59 -0500 Subject: [PATCH 12/58] Test simple cloneInVolume with branch parameter --- 068-AzureOpenAIApps/Student/Challenge-00-lab.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 10fb954838..11884d976b 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,7 +32,7 @@ Keep your credentials handy as you will also need them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. 
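If you want to confirm that a `devcontainer.json` builds before relying on any badge link, the Dev Containers CLI can bring the container up from a local checkout — a rough sketch, assuming Docker and Node.js are installed and the commands are run from the repository root (paths illustrative):

```bash
# One-time install of the Dev Containers CLI.
npm install -g @devcontainers/cli

# Build and start the container described by .devcontainer/devcontainer.json in the current folder.
devcontainer up --workspace-folder .

# Run a command inside the running container, e.g. to check that the azure-cli feature was applied.
devcontainer exec --workspace-folder . az --version
```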
-| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInContainer?url=https://github.com/perktime/WhatTheHack&folder=.devcontainer/068-AzureOpenAIApps) | +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack&branch=TestCodespaceButton) | |---|---| From 8b6d85e04a85bbe7ac76a26ab31b934a555d7ebd Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:17:42 -0500 Subject: [PATCH 13/58] Try openInContainer command --- 068-AzureOpenAIApps/Student/Challenge-00-lab.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 11884d976b..f45b5a73ba 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -32,7 +32,7 @@ Keep your credentials handy as you will also need them to login to the Azure CLI You will need a set of developer tools to work with the sample application for this hack. You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. 
-| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack&branch=TestCodespaceButton) | +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/openInContainer?url=https://github.com/perktime/WhatTheHack&branch=TestCodespaceButton) | |---|---| From d89f1bedd38f340b2d383b4064a8f1c458f22845 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 19 Sep 2025 18:19:49 -0500 Subject: [PATCH 14/58] Fix merge conflict in root devcontainer.json --- .devcontainer.json | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/.devcontainer.json b/.devcontainer.json index e050211fba..61bbe61eff 100644 --- a/.devcontainer.json +++ b/.devcontainer.json @@ -1,11 +1,9 @@ { -<<<<<<< HEAD - "name": "WhatTheHack", - "image": "mcr.microsoft.com/devcontainers/universal:2", + "name": "WhatTheHack Dev Container", + "image": "mcr.microsoft.com/devcontainers/universal:2-ubuntu", "features": { "ghcr.io/devcontainers/features/azure-cli:1": {}, "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/github-cli:1": {}, "ghcr.io/devcontainers/features/python:1": {} }, "customizations": { @@ -16,15 +14,5 @@ ] } }, - "postCreateCommand": "echo 'Dev container ready!'", - "remoteUser": "codespace" -======= - "name": "WhatTheHack Dev Container", - "image": "mcr.microsoft.com/devcontainers/universal:2-ubuntu", - "features": { - "ghcr.io/devcontainers/features/azure-cli:1": {}, - "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/python:1": {} - } ->>>>>>> TestCodespaceButton + "postCreateCommand": "echo 'Dev container ready!'" } \ No newline at end of file From 12ec55f9e1c751bf90da1eeff96e877be04404a7 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 23 Sep 2025 10:51:21 -0500 Subject: [PATCH 15/58] Enhanced markdown styling with emojis, better formatting, and custom CSS --- .../Student/Challenge-00-lab.md | 140 ++++++++----- _layouts/default.html | 1 + assets/css/custom.css | 190 ++++++++++++++++++ 3 files changed, 286 insertions(+), 45 deletions(-) create mode 100644 assets/css/custom.css diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 213014bebb..7e13d5a02e 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -1,47 +1,66 @@ -# Challenge 00 - Prerequisites - Ready, Set, GO! (Lab Provided) +# πŸš€ Challenge 00 - Prerequisites - Ready, Set, GO! (Lab Provided) -**[Home](../README.md)** - [Next Challenge >](./Challenge-01.md) +**[🏠 Home](../README.md)** - [Next Challenge > πŸ“‹](./Challenge-01.md) -## Introduction +--- -Thank you for participating in the Azure Open AI Apps What The Hack. An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. 
Before you can hack, you will still need to set up some prerequisites. +## πŸ‘‹ Introduction -## Description +> **Welcome to the Azure OpenAI Apps What The Hack!** +> +> An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. Before you can hack, you will still need to set up some prerequisites. + +--- + +## πŸ“‹ Description In this challenge, you will setup the necessary pre-requisites and environment to complete the rest of the hack, including: -- [Access Azure Subscription](#access-azure-subscription) -- [Setup Development Environment](#setup-development-environment) - - [Use GitHub Codespaces](#use-github-codespaces) - - [Use Local Workstation](#use-local-workstation) -- [Setup Citrus Bus Application](#setup-citrus-bus-application) - - [Get Azure Resource Settings](#get-azure-resource-settings) - - [Setup App Backend and Frontend](#setup-app-backend-and-frontend) - - [Setup App Backend](#setup-app-backend) - - [Setup App Frontend](#setup-app-frontend) +### 🎯 Quick Navigation +- [πŸ” Access Azure Subscription](#access-azure-subscription) +- [βš™οΈ Setup Development Environment](#setup-development-environment) + - [☁️ Use GitHub Codespaces](#use-github-codespaces) + - [πŸ’» Use Local Workstation](#use-local-workstation) +- [πŸ—οΈ Setup Citrus Bus Application](#setup-citrus-bus-application) + - [βš™οΈ Get Azure Resource Settings](#get-azure-resource-settings) + - [πŸ”§ Setup App Backend and Frontend](#setup-app-backend-and-frontend) + - [πŸ”™ Setup App Backend](#setup-app-backend) + - [🎨 Setup App Frontend](#setup-app-frontend) + +--- + +### πŸ” Access Azure Subscription -### Access Azure Subscription +> **πŸ“ Note:** You will be provided login credentials to an Azure subscription to complete this hack by your coach. -You will be provided login credentials to an Azure subscription to complete this hack by your coach. When you receive your credentials, make note of them and login to the Azure Portal: -- [Azure Portal](https://portal.azure.com) +When you receive your credentials, make note of them and login to the Azure Portal: -Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). +🌐 **[Azure Portal](https://portal.azure.com)** -### Setup Development Environment +⚠️ **Important:** Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). + +--- + +### βš™οΈ Setup Development Environment You will need a set of developer tools to work with the sample application for this hack. -You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. -| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack) | -|---|---| +> **πŸš€ Quick Start Options** +> +> Choose your preferred development environment: +
-A GitHub Codespace is a development environment that is hosted in the cloud that you access via a browser. All of the pre-requisite developer tools for this hack are pre-installed and available in the codespace. +| ☁️ **GitHub Codespaces** | πŸ–₯️ **Dev Containers** | πŸ’» **Local Workstation** | +|:---:|:---:|:---:| +| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack) | [πŸ“– Setup Guide](#use-local-workstation) | +| **Recommended** ⭐ | **VS Code Required** | **Manual Setup** | -- [Use GitHub Codespaces](#use-github-codespaces) -- [Use Local Workstation](#use-local-workstation) +
-**NOTE:** We highly recommend using GitHub Codespaces to make it easier to complete this hack. +> **πŸ’‘ Recommendation:** We highly recommend using GitHub Codespaces to make it easier to complete this hack. + +--- #### Use Github Codespaces @@ -201,29 +220,60 @@ npm start Open another terminal session in VSCode so that you can continue the rest of the challenges. The terminal sessions you opened to run the Frontend and Backend should remain running in the background. -## Success Criteria +--- + +## βœ… Success Criteria + +> **🎯 Challenge Complete!** +> +> To complete this challenge successfully, you should be able to accomplish the following: + +### πŸ”§ Development Environment +- βœ… Verify that you have a **bash shell** with the **Azure CLI** available +- βœ… Your **Azure Function Backend** is up and running +- βœ… Your **Frontend application** is reachable via HTTP (Browser) + +### ☁️ Azure Resources Deployed +Verify that you have the following resources deployed in Azure: + +
+ +| Service | Status | Purpose | +|---------|--------|---------| +| πŸ€– **Azure OpenAI Service** | βœ… Required | AI language models | +| πŸ” **Azure Cognitive Search** | βœ… Required | Search and indexing | +| πŸ’Ύ **Azure Storage Accounts** (2x) | βœ… Required | Blob storage | +| πŸ—„οΈ **Azure Cosmos DB** | βœ… Required | Database and containers | +| πŸ“¨ **Azure Service Bus** | βœ… Required | Message queuing | +| ⚑ **Azure Redis Cache** | βœ… Required | Caching layer | +| πŸ“„ **Azure Document Intelligence** | βœ… Required | Form processing | + +
+ +### πŸ§ͺ Functional Testing +- βœ… **Assistant Response Test**: Ask all assistants for their name from the front-end +- βœ… **Expected Result**: They should respond correctly with the configured names from system prompts + +--- -To complete this challenge successfully, you should be able to: +## πŸ“š Learning Resources -- Verify that you have a bash shell with the Azure CLI available. -- Verify that you have deployed the following resources in Azure: +> **πŸ’‘ Expand Your Knowledge** +> +> Here are essential resources to deepen your understanding of the technologies used: - - Azure OpenAI Service - - Azure Cognitive Search - - Two Azure Storage Accounts with Azure Blob Storage - - Azure Cosmos DB service with databases and containers - - Azure Service Bus with at least one queue set up - - Azure Redis Cache Instance - - Azure Document Intelligence Service (formerly Azure Form Recognizer) - -Your Azure Function Backend and Front End applications should be up and running and reachable via HTTP (Browser) +### πŸ€– AI & OpenAI +- πŸ”— [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) - Complete guide to Azure OpenAI +- πŸ”— [Document Intelligence Overview](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/overview?view=doc-intel-4.0.0) - Region and API version details -You should also be able to ask all the assistants for their name from the front-end and they should respond correctly with the correct name configured in the app's system prompts. +### πŸ› οΈ Development Tools +- πŸ”— [VS Code with GitHub Copilot](https://code.visualstudio.com/docs/copilot/setup-simplified?wt.md_id=AZ-MVP-5004796) - AI-powered coding assistant -## Learning Resources +### πŸ“– Additional Resources +- πŸ”— [Azure Functions Documentation](https://docs.microsoft.com/en-us/azure/azure-functions/) +- πŸ”— [Angular Framework Guide](https://angular.io/docs) +- πŸ”— [GitHub Codespaces Documentation](https://docs.github.com/en/codespaces) -Here are some resources that should provide you with background information and educational content on the resources you have just deployed +--- -- [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) -- [Document Intelligence Region/API Version Availability](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/overview?view=doc-intel-4.0.0) -- [VS Code with Github Copilot](https://code.visualstudio.com/docs/copilot/setup-simplified?wt.md_id=AZ-MVP-5004796) +πŸŽ‰ **Ready for the next challenge?** [Continue to Challenge 01 β†’](./Challenge-01.md) diff --git a/_layouts/default.html b/_layouts/default.html index 579626f46b..8134e89925 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -6,6 +6,7 @@ {% seo %} + B[Step 1] + B --> C[Step 2] + C --> D[Step 3] + D --> E[βœ… Success!] + + style A fill:#e1f5fe + style E fill:#e8f5e8 +``` + + + +### 🎯 Tasks to Complete + +- [πŸ“‹ Task 1](#task-1) +- [πŸ“‹ Task 2](#task-2) +- [πŸ“‹ Task 3](#task-3) + +--- + +### πŸ“‹ Task 1 + +1 **Task Title** + +[Task description] + +
+πŸ’‘ Pro Tip: [Helpful tip] +
+ +
+πŸ”§ Advanced Configuration (Optional) +
+ +[Advanced configuration details] + +
+
+ +--- + +## βœ… Success Criteria + +
+
🎯 Challenge [NUMBER] - Validation Phase
+
+ +
+🎯 Challenge Complete!
+To complete this challenge successfully, you should be able to accomplish the following: +
+ +### πŸ”§ Validation Checklist + +
+ +| 1 | **Requirement** | **Status** | **Description** | +|:---:|:---|:---:|:---| +| βœ… | **Requirement 1** | ⏳ Pending | Description | +| βœ… | **Requirement 2** | ⏳ Pending | Description | +| βœ… | **Requirement 3** | ⏳ Pending | Description | + +
+ +--- + +## πŸ“š Learning Resources + +
+πŸ’‘ Expand Your Knowledge
+Here are essential resources to deepen your understanding: +
+ +### πŸ”— Documentation +- πŸ“– [Resource 1](https://link) - Description +- πŸ“– [Resource 2](https://link) - Description + +### πŸŽ₯ Videos & Tutorials +- 🎬 [Video 1](https://link) - Description +- 🎬 [Video 2](https://link) - Description + +--- + +πŸŽ‰ **Ready for the next challenge?** [Continue to Challenge [NEXT] β†’](./Challenge-[NEXT].md) \ No newline at end of file diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 7e13d5a02e..4422abd463 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -2,19 +2,53 @@ **[🏠 Home](../README.md)** - [Next Challenge > πŸ“‹](./Challenge-01.md) +
+
Challenge 00 - Setup Phase
+
+ +![Azure](https://img.shields.io/badge/Azure-0078D4?style=for-the-badge&logo=microsoft-azure&logoColor=white) +![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white) +![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white) +![Angular](https://img.shields.io/badge/Angular-DD0031?style=for-the-badge&logo=angular&logoColor=white) +![Python](https://img.shields.io/badge/Python-3776AB?style=for-the-badge&logo=python&logoColor=white) + --- ## πŸ‘‹ Introduction -> **Welcome to the Azure OpenAI Apps What The Hack!** -> -> An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. Before you can hack, you will still need to set up some prerequisites. +
+πŸŽ‰ Welcome to the Azure OpenAI Apps What The Hack!
+An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. Before you can hack, you will still need to set up some prerequisites. +
--- ## πŸ“‹ Description -In this challenge, you will setup the necessary pre-requisites and environment to complete the rest of the hack, including: +
+ +### 🎯 Challenge Overview + +This challenge will guide you through setting up your development environment and the Citrus Bus application. Follow the steps below to get everything ready for the upcoming challenges. + +```mermaid +graph TD + A[πŸš€ Start Challenge] --> B[πŸ” Access Azure Subscription] + B --> C[βš™οΈ Setup Development Environment] + C --> D{Choose Environment} + D -->|Recommended| E[☁️ GitHub Codespaces] + D -->|Alternative| F[πŸ’» Local Workstation] + E --> G[πŸ—οΈ Setup Citrus Bus App] + F --> G + G --> H[πŸ§ͺ Test Application] + H --> I[βœ… Success!] + + style A fill:#e1f5fe + style I fill:#e8f5e8 + style E fill:#fff3e0 +``` + +
### 🎯 Quick Navigation - [πŸ” Access Azure Subscription](#access-azure-subscription) @@ -31,13 +65,19 @@ In this challenge, you will setup the necessary pre-requisites and environment t ### πŸ” Access Azure Subscription -> **πŸ“ Note:** You will be provided login credentials to an Azure subscription to complete this hack by your coach. +
+πŸ“ Important: You will be provided login credentials to an Azure subscription to complete this hack by your coach. +
When you receive your credentials, make note of them and login to the Azure Portal: -🌐 **[Azure Portal](https://portal.azure.com)** + -⚠️ **Important:** Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). +
+πŸ’‘ Pro Tip: Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). +
--- @@ -79,24 +119,39 @@ The GitHub Codespace for this hack will host the developer tools, sample applica **NOTE:** Make sure you do not sign in with your enterprise managed Github account. Once you are signed in: -- Verify that the `Dev container configuration` drop down is set to `068-AzureOpenAIApps` -- Click on the green "Create Codespace" button. +- βœ… Verify that the `Dev container configuration` drop down is set to `068-AzureOpenAIApps` +- βœ… Click on the green "Create Codespace" button + +
+
⏳ Creating Codespace (3-5 minutes)
+
Your Codespace environment should load in a new browser tab. It will take approximately 3-5 minutes the first time you create the codespace for it to load. -- When the codespace completes loading, you should find an instance of Visual Studio Code running in your browser with the files needed for this hackathon. +
+πŸŽ‰ Success! When the codespace completes loading, you should find an instance of Visual Studio Code running in your browser with the files needed for this hackathon. +
-Your developer environment is ready, hooray! Skip to section: [Setup Citrus Bus Application](#setup-citrus-bus-application) +Your developer environment is ready, hooray! Skip to section: [πŸ—οΈ Setup Citrus Bus Application](#setup-citrus-bus-application) -<<<<<<< Updated upstream -**NOTE:** If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. +
+πŸ“‹ Important Codespace Notes +
-**NOTE:** GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser. If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds. If you want to have a better experience, you can also update the default timeout value in your personal setting page on Github. Refer to this page for instructions: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) -======= -**NOTE:** GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser. If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds. If you want to have a better experience, you can also update the default timeout value in your personal setting page on GitHub. Refer to this page for instructions: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) ->>>>>>> Stashed changes +**Returning to Your Codespace:** +- If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. + +**Timeout Management:** +- GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser +- If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds +- For a better experience, you can update the default timeout value in your personal setting page on GitHub: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) -**NOTE:** Codespaces expire after 30 days unless you extend the expiration date. When a Codespace expires, the state of all files in it will be lost. +**Expiration:** +- Codespaces expire after 30 days unless you extend the expiration date +- When a Codespace expires, the state of all files in it will be lost + +
+
#### Use Local Workstation @@ -224,35 +279,66 @@ Open another terminal session in VSCode so that you can continue the rest of the ## βœ… Success Criteria -> **🎯 Challenge Complete!** -> -> To complete this challenge successfully, you should be able to accomplish the following: +
+
🎯 Challenge 00 - Validation Phase
+
+ +
+🎯 Challenge Complete!
+To complete this challenge successfully, you should be able to accomplish the following: +
-### πŸ”§ Development Environment -- βœ… Verify that you have a **bash shell** with the **Azure CLI** available -- βœ… Your **Azure Function Backend** is up and running -- βœ… Your **Frontend application** is reachable via HTTP (Browser) +### πŸ”§ Development Environment Checklist -### ☁️ Azure Resources Deployed -Verify that you have the following resources deployed in Azure: +
+ +| 1 | **Requirement** | **Status** | **Description** | +|:---:|:---|:---:|:---| +| πŸ–₯️ | **Bash Shell + Azure CLI** | ⏳ Pending | Verify command line access | +| ⚑ | **Azure Function Backend** | ⏳ Pending | Backend service running | +| 🌐 | **Frontend Application** | ⏳ Pending | Web app accessible via browser | + +
+ +### ☁️ Azure Resources Validation
-| Service | Status | Purpose | -|---------|--------|---------| -| πŸ€– **Azure OpenAI Service** | βœ… Required | AI language models | -| πŸ” **Azure Cognitive Search** | βœ… Required | Search and indexing | -| πŸ’Ύ **Azure Storage Accounts** (2x) | βœ… Required | Blob storage | -| πŸ—„οΈ **Azure Cosmos DB** | βœ… Required | Database and containers | -| πŸ“¨ **Azure Service Bus** | βœ… Required | Message queuing | -| ⚑ **Azure Redis Cache** | βœ… Required | Caching layer | -| πŸ“„ **Azure Document Intelligence** | βœ… Required | Form processing | +Verify that you have the following resources deployed in Azure: + +
+πŸ” Click to expand Azure Resources Checklist +
+ +| Service | Status | Purpose | Validation | +|---------|:------:|---------|------------| +| πŸ€– **Azure OpenAI Service** | βœ… Required | AI language models | Check in Azure Portal | +| πŸ” **Azure Cognitive Search** | βœ… Required | Search and indexing | Verify search service | +| πŸ’Ύ **Azure Storage Accounts** (2x) | βœ… Required | Blob storage | Check both accounts | +| πŸ—„οΈ **Azure Cosmos DB** | βœ… Required | Database and containers | Verify DB access | +| πŸ“¨ **Azure Service Bus** | βœ… Required | Message queuing | Check queue setup | +| ⚑ **Azure Redis Cache** | βœ… Required | Caching layer | Verify cache instance | +| πŸ“„ **Azure Document Intelligence** | βœ… Required | Form processing | Check service availability | + +
+
### πŸ§ͺ Functional Testing -- βœ… **Assistant Response Test**: Ask all assistants for their name from the front-end -- βœ… **Expected Result**: They should respond correctly with the configured names from system prompts + +
+ +πŸ§ͺ **Final Validation Steps:** + +1. **Assistant Response Test**: Ask all assistants for their name from the front-end +2. **Expected Result**: They should respond correctly with the configured names from system prompts + +
+πŸ’‘ Testing Tip: This validates that your entire application stack is working correctly from frontend to backend to AI services. +
+ +
--- diff --git a/_config.yml b/_config.yml index fcfca7fe99..4b58257aef 100644 --- a/_config.yml +++ b/_config.yml @@ -1,3 +1,10 @@ -theme: jekyll-theme-midnight +theme: jekyll-theme-cayman title: What The Hack -include: [CONTRIBUTING.md] \ No newline at end of file +description: A collection of challenge-based hackathons including student guides, coach guides, lecture presentations, sample/template code and sample solutions. +include: [CONTRIBUTING.md] +plugins: + - jekyll-optional-front-matter + - jekyll-paginate + - jekyll-readme-index + - jekyll-default-layout + - jekyll-relative-links \ No newline at end of file diff --git a/_layouts/default.html b/_layouts/default.html index 8134e89925..45696e3e27 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -8,6 +8,21 @@ + + B{Choose Path} + B -->|Easy| C[πŸ“š Follow Guide] + B -->|Advanced| D[πŸ”§ Custom Setup] + C --> E[βœ… Success] + D --> F[πŸ§ͺ Test Configuration] + F --> G{Tests Pass?} + G -->|Yes| E + G -->|No| H[πŸ› Debug Issues] + H --> F + + style A fill:#e1f5fe + style E fill:#e8f5e8 + style H fill:#ffebee +``` + +--- + +## πŸ’¬ Enhanced Blockquotes + +> **πŸ’‘ Pro Tip** +> +> This is an enhanced blockquote with modern styling. It includes beautiful gradients, shadows, and improved typography for better readability. + +> **🎯 Key Insight** +> +> Use these visual elements to make your documentation more engaging and easier to follow. Visual hierarchy helps users scan content quickly and find what they need. + +--- + +## πŸ–₯️ Code Blocks + +```bash +# Example terminal commands +npm install +npm start + +# With syntax highlighting +git add . +git commit -m "✨ Add new features" +git push origin main +``` + +```javascript +// JavaScript example with enhanced styling +function enhanceMarkdown() { + const elements = document.querySelectorAll('.enhanced'); + elements.forEach(el => { + el.classList.add('modern-styling'); + }); +} +``` + +--- + +## ✨ Conclusion + +These visual enhancements transform plain markdown into engaging, modern documentation that: + +- πŸ“ˆ **Improves user experience** with better visual hierarchy +- 🎯 **Increases engagement** through interactive elements +- πŸ“± **Works on all devices** with responsive design +- β™Ώ **Supports accessibility** with proper contrast and structure +- 🎨 **Looks professional** with modern design patterns + +Ready to use these in your own challenges? Check out the [Enhanced Template](../000-HowToHack/WTH-Challenge-Enhanced-Template.md)! 
\ No newline at end of file From 1805015c65ee13bf36986cb855bfb46006588492 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 23 Sep 2025 11:04:31 -0500 Subject: [PATCH 18/58] =?UTF-8?q?=F0=9F=8C=88=20Switch=20to=20bright,=20ch?= =?UTF-8?q?eerful=20minima=20theme=20with=20colorful=20enhancements?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ✨ Theme Changes: - 🎨 Switched from dark Cayman to bright, clean Minima theme - 🌈 Redesigned entire color palette with bright, friendly colors - β˜€οΈ Light background with colorful gradients throughout - 🎯 Maintained all interactive elements with brighter styling 🎨 Visual Improvements: - πŸ’™ Bright blue primary colors instead of dark themes - 🌸 Colorful gradients in alerts, cards, and buttons - πŸŒ… Light backgrounds with subtle gradients - πŸ”† Enhanced contrast for better readability - 🎨 Colorful progress bars with rainbow gradients - ✨ Bright, cheerful badge designs 🎯 Enhanced Elements: - πŸ“Š Redesigned all components for brightness - πŸŽͺ More colorful and engaging visual elements - πŸ“± Maintained mobile responsiveness - β™Ώ Kept accessibility features with better contrast --- _config.yml | 14 +- _layouts/default.html | 77 +++---- assets/css/custom.css | 476 ++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 508 insertions(+), 59 deletions(-) diff --git a/_config.yml b/_config.yml index 4b58257aef..df2c679e98 100644 --- a/_config.yml +++ b/_config.yml @@ -1,4 +1,4 @@ -theme: jekyll-theme-cayman +theme: minima title: What The Hack description: A collection of challenge-based hackathons including student guides, coach guides, lecture presentations, sample/template code and sample solutions. include: [CONTRIBUTING.md] @@ -7,4 +7,14 @@ plugins: - jekyll-paginate - jekyll-readme-index - jekyll-default-layout - - jekyll-relative-links \ No newline at end of file + - jekyll-relative-links +minima: + skin: classic + social_links: + github: perktime +author: + name: What The Hack + email: info@whathehack.com +header_pages: + - VISUAL-SHOWCASE.md + - CONTRIBUTING.md \ No newline at end of file diff --git a/_layouts/default.html b/_layouts/default.html index 45696e3e27..e0cfe987be 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -1,54 +1,39 @@ - - - - - + + -{% seo %} - - - - - - - - - + {%- include head.html -%} - -
- -
-
- - - -
- + {%- include header.html -%} + +
+
+ + + + {{ content }} +
+
-
- -
+ {%- include footer.html -%} @@ -60,5 +45,7 @@ gtag('config', 'G-E7MV34DNDL'); gtag('config', 'UA-173162534-1'); + + diff --git a/assets/css/custom.css b/assets/css/custom.css index c7e86b42b5..aa5e6fac4e 100644 --- a/assets/css/custom.css +++ b/assets/css/custom.css @@ -1,17 +1,469 @@ -/* Enhanced styling for What The Hack markdown pages */ +/* Bright and friendly styling for What The Hack markdown pages */ -/* Modern color palette */ +/* Bright color palette */ :root { - --primary-color: #0366d6; - --success-color: #28a745; - --warning-color: #ffc107; - --danger-color: #dc3545; - --info-color: #17a2b8; - --light-gray: #f8f9fa; - --border-color: #e1e4e8; - --text-muted: #6a737d; - --gradient-start: #155799; - --gradient-end: #159957; + --primary-color: #2196F3; + --success-color: #4CAF50; + --warning-color: #FF9800; + --danger-color: #F44336; + --info-color: #00BCD4; + --light-blue: #E3F2FD; + --light-green: #E8F5E8; + --light-orange: #FFF3E0; + --light-red: #FFEBEE; + --light-cyan: #E0F7FA; + --border-color: #E0E0E0; + --text-muted: #757575; + --gradient-start: #42A5F5; + --gradient-end: #66BB6A; + --background-light: #FAFAFA; +} + +/* Smooth animations */ +* { + transition: all 0.3s ease; +} + +/* Enhanced badges with bright colors */ +.badge { + display: inline-block; + padding: 0.4em 0.8em; + font-size: 0.8em; + font-weight: 600; + line-height: 1; + text-align: center; + white-space: nowrap; + vertical-align: baseline; + border-radius: 20px; + text-decoration: none; + margin: 0.2em; + box-shadow: 0 2px 4px rgba(0,0,0,0.1); +} + +.badge-primary { background: linear-gradient(135deg, #42A5F5, #1E88E5); color: white; } +.badge-success { background: linear-gradient(135deg, #66BB6A, #43A047); color: white; } +.badge-warning { background: linear-gradient(135deg, #FFA726, #FF8F00); color: white; } +.badge-danger { background: linear-gradient(135deg, #EF5350, #D32F2F); color: white; } +.badge-info { background: linear-gradient(135deg, #26C6DA, #00ACC1); color: white; } + +/* Bright and cheerful alert boxes */ +.alert { + padding: 1.2rem 1.5rem; + margin-bottom: 1.5rem; + border: none; + border-radius: 12px; + position: relative; + box-shadow: 0 4px 12px rgba(0,0,0,0.1); + border-left: 5px solid; +} + +.alert-info { + background: linear-gradient(135deg, var(--light-cyan) 0%, #B2EBF2 100%); + border-left-color: var(--info-color); + color: #006064; +} + +.alert-warning { + background: linear-gradient(135deg, var(--light-orange) 0%, #FFE0B2 100%); + border-left-color: var(--warning-color); + color: #E65100; +} + +.alert-success { + background: linear-gradient(135deg, var(--light-green) 0%, #C8E6C9 100%); + border-left-color: var(--success-color); + color: #1B5E20; +} + +.alert-danger { + background: linear-gradient(135deg, var(--light-red) 0%, #FFCDD2 100%); + border-left-color: var(--danger-color); + color: #B71C1C; +} + +/* Bright, friendly blockquotes */ +blockquote { + background: linear-gradient(135deg, #F3E5F5 0%, #E1BEE7 100%); + border-left: 5px solid #9C27B0; + margin: 1.5em 0; + padding: 1.2em 1.8em; + border-radius: 0 15px 15px 0; + box-shadow: 0 4px 12px rgba(156, 39, 176, 0.15); + position: relative; +} + +blockquote:before { + color: #9C27B0; + content: open-quote; + font-size: 3em; + line-height: 0.1em; + margin-right: 0.25em; + vertical-align: -0.4em; + opacity: 0.4; +} + +blockquote p { + display: inline; + font-style: italic; + color: #4A148C; + font-weight: 500; +} + +/* Bright code blocks */ +pre { + background: linear-gradient(135deg, #263238 0%, #37474F 100%); + color: 
#B0BEC5; + border: none; + border-radius: 12px; + padding: 1.5rem; + overflow-x: auto; + margin: 1.5rem 0; + box-shadow: 0 6px 20px rgba(0,0,0,0.15); + position: relative; +} + +pre:before { + content: "πŸ’» Code"; + position: absolute; + top: 0.8rem; + right: 1.2rem; + font-size: 0.8rem; + color: #78909C; + background: rgba(255,255,255,0.1); + padding: 0.2rem 0.5rem; + border-radius: 8px; + opacity: 0.8; +} + +code { + background: linear-gradient(135deg, #E8F5E8 0%, #C8E6C9 100%); + color: #2E7D32; + padding: 0.3em 0.6em; + border-radius: 6px; + font-size: 85%; + font-weight: 600; + box-shadow: 0 1px 3px rgba(0,0,0,0.1); +} + +/* Bright, modern button styling */ +.btn { + display: inline-block; + padding: 0.8rem 2rem; + margin: 0.3rem; + font-size: 1rem; + font-weight: 600; + line-height: 1.5; + text-align: center; + text-decoration: none; + white-space: nowrap; + vertical-align: middle; + cursor: pointer; + border: none; + border-radius: 25px; + transition: all 0.3s ease; + box-shadow: 0 4px 12px rgba(0,0,0,0.15); + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.btn:hover { + transform: translateY(-3px); + box-shadow: 0 8px 25px rgba(0,0,0,0.25); + text-decoration: none; +} + +.btn-primary { + background: linear-gradient(135deg, #42A5F5 0%, #1E88E5 100%); + color: white; +} + +.btn-success { + background: linear-gradient(135deg, #66BB6A 0%, #43A047 100%); + color: white; +} + +.btn-warning { + background: linear-gradient(135deg, #FFA726 0%, #FF8F00 100%); + color: white; +} + +.btn-info { + background: linear-gradient(135deg, #26C6DA 0%, #00ACC1 100%); + color: white; +} + +/* Bright, beautiful tables */ +table { + border-collapse: collapse; + width: 100%; + margin: 2rem 0; + box-shadow: 0 8px 25px rgba(0,0,0,0.1); + border-radius: 12px; + overflow: hidden; + background: white; +} + +th { + background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); + color: white; + padding: 1.2rem; + text-align: left; + font-weight: 700; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +td { + padding: 1.2rem; + border-bottom: 1px solid #F0F0F0; +} + +tr:nth-child(even) { + background: linear-gradient(135deg, #FAFAFA 0%, #F5F5F5 100%); +} + +tr:hover { + background: linear-gradient(135deg, var(--light-blue) 0%, #BBDEFB 100%); + transform: scale(1.01); +} + +/* Colorful progress indicators */ +.progress { + width: 100%; + height: 2rem; + background: linear-gradient(135deg, #EEEEEE 0%, #E0E0E0 100%); + border-radius: 25px; + overflow: hidden; + margin: 1.5rem 0; + box-shadow: inset 0 2px 4px rgba(0,0,0,0.1); +} + +.progress-bar { + height: 100%; + background: linear-gradient(90deg, #FF6B6B 0%, #4ECDC4 50%, #45B7D1 100%); + border-radius: 25px; + text-align: center; + line-height: 2rem; + color: white; + font-weight: 700; + font-size: 0.9rem; + transition: width 0.8s ease; + position: relative; + overflow: hidden; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.progress-bar:before { + content: ''; + position: absolute; + top: 0; + left: -100%; + width: 100%; + height: 100%; + background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent); + animation: shimmer 2s infinite; +} + +@keyframes shimmer { + 0% { left: -100%; } + 100% { left: 100%; } +} + +/* Bright, cheerful cards */ +.card { + background: linear-gradient(135deg, white 0%, #FAFAFA 100%); + border-radius: 20px; + padding: 2.5rem; + margin: 2rem 0; + box-shadow: 0 10px 30px rgba(0,0,0,0.1); + border: 1px solid var(--border-color); + position: relative; + overflow: 
hidden; +} + +.card:before { + content: ''; + position: absolute; + top: 0; + left: 0; + right: 0; + height: 5px; + background: linear-gradient(90deg, #FF6B6B 0%, #4ECDC4 25%, #45B7D1 50%, #96CEB4 75%, #FFEAA7 100%); +} + +.card:hover { + transform: translateY(-8px); + box-shadow: 0 20px 40px rgba(0,0,0,0.15); +} + +/* Colorful step indicators */ +.step { + display: inline-flex; + align-items: center; + justify-content: center; + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + color: white; + border-radius: 50%; + width: 3rem; + height: 3rem; + text-align: center; + line-height: 1; + margin-right: 1rem; + font-weight: bold; + font-size: 1.2rem; + box-shadow: 0 6px 15px rgba(102, 126, 234, 0.4); + position: relative; +} + +.step:after { + content: ''; + position: absolute; + width: 100%; + height: 100%; + border-radius: 50%; + background: inherit; + top: 0; + left: 0; + z-index: -1; + opacity: 0; + transform: scale(1.2); + animation: pulse 2s infinite; +} + +@keyframes pulse { + 0% { opacity: 0; transform: scale(1); } + 50% { opacity: 0.4; transform: scale(1.3); } + 100% { opacity: 0; transform: scale(1.5); } +} + +/* Bright collapsible sections */ +details { + border: 2px solid var(--border-color); + border-radius: 15px; + padding: 0; + margin: 1.5rem 0; + overflow: hidden; + box-shadow: 0 4px 12px rgba(0,0,0,0.1); +} + +summary { + background: linear-gradient(135deg, #E3F2FD 0%, #BBDEFB 100%); + padding: 1.2rem 2rem; + cursor: pointer; + font-weight: 700; + border-bottom: 2px solid var(--border-color); + position: relative; + color: #1565C0; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +summary:hover { + background: linear-gradient(135deg, #BBDEFB 0%, #90CAF9 100%); +} + +summary:after { + content: 'πŸ”½'; + position: absolute; + right: 2rem; + top: 50%; + transform: translateY(-50%); + transition: transform 0.3s ease; + font-size: 1.2rem; +} + +details[open] summary:after { + transform: translateY(-50%) rotate(180deg); +} + +details div { + padding: 2rem; + background: linear-gradient(135deg, white 0%, #F8F9FA 100%); +} + +/* Bright status indicators */ +.status { + display: inline-flex; + align-items: center; + gap: 0.5rem; + padding: 0.6rem 1.2rem; + border-radius: 25px; + font-size: 0.9rem; + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.5px; + box-shadow: 0 2px 8px rgba(0,0,0,0.1); +} + +.status-complete { + background: linear-gradient(135deg, var(--light-green) 0%, #C8E6C9 100%); + color: #1B5E20; + border: 2px solid var(--success-color); +} + +.status-pending { + background: linear-gradient(135deg, var(--light-orange) 0%, #FFE0B2 100%); + color: #E65100; + border: 2px solid var(--warning-color); +} + +.status-error { + background: linear-gradient(135deg, var(--light-red) 0%, #FFCDD2 100%); + color: #B71C1C; + border: 2px solid var(--danger-color); +} + +/* Bright page styling */ +body { + background: linear-gradient(135deg, #FAFAFA 0%, #F0F4F8 100%); + color: #333; +} + +/* Enhanced headings */ +h1, h2, h3, h4, h5, h6 { + background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); + -webkit-background-clip: text; + -webkit-text-fill-color: transparent; + background-clip: text; + font-weight: 700; +} + +/* Responsive design */ +@media (max-width: 768px) { + .card { + margin: 1rem 0; + padding: 1.5rem; + } + + .btn { + width: 100%; + margin: 0.5rem 0; + } + + table { + font-size: 0.9rem; + } + + .step { + width: 2.5rem; + height: 2.5rem; + margin-right: 0.75rem; + } +} + +/* Remove dark mode for bright theme */ +@media 
(prefers-color-scheme: dark) { + /* Override dark mode to keep bright theme */ + body { + background: linear-gradient(135deg, #FAFAFA 0%, #F0F4F8 100%) !important; + color: #333 !important; + } + + .card { + background: linear-gradient(135deg, white 0%, #FAFAFA 100%) !important; + color: #333 !important; + } } /* Smooth animations */ From 510e0dfaa2c3d972b5d2bf786dd08dfa8b78c2f4 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Mon, 12 Jan 2026 17:18:15 -0600 Subject: [PATCH 19/58] Refactor Azure OpenAI integration across notebooks; update to use Azure AD token provider, enhance error handling, and improve environment setup. Add Bicep module for Foundry project deployment with role assignments and diagnostic settings. --- .../Student/Challenge-00.md | 40 ++- .../Student/Challenge-02.md | 4 +- .../Student/Challenge-04.md | 27 +- .../Student/Challenge-06.md | 8 +- .../Student/Resources/infra/deploy.sh | 14 +- .../Student/Resources/infra/functions.sh | 2 +- .../Student/Resources/infra/main.bicep | 90 +------ .../Student/Resources/infra/main.bicepparam | 4 + .../Resources/infra/modules/aiServices.bicep | 4 +- .../infra/modules/applicationInsights.bicep | 2 +- .../Resources/infra/modules/document.bicep | 9 +- .../infra/modules/foundryProject.bicep | 243 ++++++++++++++++++ .../Student/Resources/infra/modules/hub.bicep | 202 --------------- .../Resources/infra/modules/project.bicep | 4 +- .../Resources/infra/modules/search.bicep | 7 +- .../notebooks/CH-01-PromptEngineering.ipynb | 98 +++++-- .../notebooks/CH-03-A-Grounding.ipynb | 48 ++-- .../notebooks/CH-03-B-Chunking.ipynb | 55 ++-- .../notebooks/CH-03-C-Embeddings.ipynb | 66 +++-- .../CH-04-A-RAG_for_structured_data.ipynb | 130 ++++++++-- .../CH-04-B-RAG_for_unstructured_data.ipynb | 58 ++--- .../notebooks/CH-5.7-RedTeaming.ipynb | 6 +- 22 files changed, 646 insertions(+), 475 deletions(-) create mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep delete mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index eb18e4340a..f5148aa7eb 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -60,7 +60,7 @@ Your Codespace environment should load in a new browser tab. It will take approx - When the codespace completes loading, you should find an instance of Visual Studio Code running in your browser with the files needed for this hackathon. -You are ready to run the Jupyter Notebook files, hooray! Skip to section: [Setup Azure AI Foundry Project and Hub](#Setup-Azure-AI-Foundry-Project-and-Hub) +You are ready to run the Jupyter Notebook files, hooray! Skip to section: [Setup Microsoft Foundry Project](#Setup-Microsoft-Foundry-Project) **NOTE:** If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. @@ -134,20 +134,20 @@ Once you have an Azure Machine Learning Studio Workspace set up, you can upload
-### Deploy Azure AI Foundry Resources +### Deploy Microsoft Foundry Resources Now that you have a Jupyter notebook environment setup, you need to: -- Deploy AI models and resources in Azure AI Foundry. +- Deploy AI models and resources in Microsoft Foundry. - Setup Jupyter Notebooks Configuration File We have provided an automation script that will perform these tasks for you. However, you may wish to complete these tasks manually to become more familiar with Azure AI Foundry. -- [Automate Azure AI Foundry Deployment](#automate-azure-ai-foundry-deployment) -- [Manual Azure AI Foundry Deployment](#manual-azure-ai-foundry-deployment) +- [Automate Microsoft Foundry Deployment](#automate-microsoft-foundry-deployment) +- [Manual Microsoft Foundry Deployment](#manual-microsoft-foundry-deployment) **NOTE:** If you are limited on time, we recommend using the automation script option. -#### Automate Azure AI Foundry Deployment +#### Automate Microsoft Foundry Deployment We have provided a deployment script and a set of Bicep templates which will deploy and configure the Azure AI resources which you will use for this hackathon. You can find these files in the `/infra` folder of your Codespace or the student `Resources.zip` package. @@ -174,36 +174,32 @@ cd infra chmod +x deploy.sh ./deploy.sh ``` -**NOTE:** By default, the script will create an Azure resource group for you named `rg-ai-foundry-secure`. You may optionally specify a `resourceGroupName` and/or `location` parameters if you need the resources deployed to a specific resource group or region. The default location is "`eastus`" if you don't specify one. +**NOTE:** By default, the script will create an Azure resource group for you named `rg-microsoft-foundry-secure`. You may optionally specify a `resourceGroupName` and/or `location` parameters if you need the resources deployed to a specific resource group or region. The default location is "`eastus`" if you don't specify one. ``` ./deploy.sh --resourceGroupName "[resource-group-name]" --location "[location]" ``` -#### Manual Azure AI Foundry Deployment +#### Manual Microsoft Foundry Deployment **NOTE:** You can skip this section if you chose to automate the deployment. -If you want to deploy the Azure AI Foundry resources, expand the section below and follow instructions there. +If you want to deploy the Microsoft Foundry resources, expand the section below and follow instructions there.
Click to expand/collapse Manual Deployment Instructions #### Setup Azure AI Foundry Project and Hub -Navigate to [AI Foundry](https://ai.azure.com) to create your Azure AI project and the needed resources. A project is used to organize your work and allows you to collaborate with others. A hub provides the hosting environment for your projects. An Azure AI hub can be used across multiple projects. - -- Click on the **+ Create Project** button. -- Give your project a name and click **Create a new hub**. - - Fill out a name for your hub. - - Click the **Next** button - - Click the **Customize** button - - Click **Create new AI Search**. - - Fill out a name for your Azure AI Search - - Click the **Next** button to finish setting up your Azure AI Search - - Click the **Next** button on the screen where it says **Create a hub for your projects** - - On the Review and Finish page, click the **Create** button -- The hub will create an Azure Open AI, Azure Blob, and an AI Service resource for you once it is finished. Resources are different Azure services you will use within the challenges. +Navigate to [AI Foundry](https://ai.azure.com) to create your Microsoft Foundry project. + +- Click on the **+ Create New** button. +- Choose Microsoft Foundry resource for the resource type. Click the **Next** button + - Fill out a name for your project. **Note:** You should not need to specify Advanced Options unless you need or want to change the region because of capacity contraints. Click the **Create** button +- From the Azure portal (or you can use an Infrastructure as Code approach if you prefer using Bicep/Terraform/ARM/CLI) + - Create an Azure AI Search service + - Specify a service name for your Azure AI Search. You can use the same resource group and location as the Microsoft Foundry resource. **Note:** Make sure you set the Pricing Tier to Standard (Basic/Free is not supported) + #### Deploy Azure OpenAI Models diff --git a/066-OpenAIFundamentals/Student/Challenge-02.md b/066-OpenAIFundamentals/Student/Challenge-02.md index 005062af52..389ef1da24 100644 --- a/066-OpenAIFundamentals/Student/Challenge-02.md +++ b/066-OpenAIFundamentals/Student/Challenge-02.md @@ -19,7 +19,7 @@ Questions you should be able to answer by the end of this challenge: - What model would you select to perform complex problem solving? - What model would you select to generate new names? -You will work in the Azure AI Foundry for this challenge. We recommend keeping the student guide and the Azure AI Foundry in two windows side by side as you work. This will also help to validate you have met the [success criteria](#success-criteria) below for this challenge. +You will work in the Microsoft Foundry for this challenge. We recommend keeping the student guide and the Microsoft Foundry in two windows side by side as you work. This will also help to validate you have met the [success criteria](#success-criteria) below for this challenge. This challenge is divided into the following sections: @@ -36,7 +36,7 @@ This challenge is divided into the following sections: Scenario: You are building a chatbot for a retail company that needs fast responses and safe outputs. Your goal is to explore the Model Catalog and identify models for this use case. There is no right or wrong answer here. #### Student Task 2.1 -- Go into the [Azure AI Foundry](https://ai.azure.com). +- Go into the [Microsoft Foundry](https://ai.azure.com). - Navigate to the Model Catalog and explore different models using the correct filters. 
- Identify which model can potentially help with the task at hand. - Share your findings with a peer and compare your choices. Did you pick the same models? Why or why not? diff --git a/066-OpenAIFundamentals/Student/Challenge-04.md b/066-OpenAIFundamentals/Student/Challenge-04.md index aa5a1e9c19..0550ae655a 100644 --- a/066-OpenAIFundamentals/Student/Challenge-04.md +++ b/066-OpenAIFundamentals/Student/Challenge-04.md @@ -4,8 +4,8 @@ ## Pre-requisites -- Azure Form Recognizer resource for extracting text from raw unstructured data -- Azure Cognitive Search resource for indexing and retrieving relevant information +- Azure Document Intelligence resource for extracting text from raw unstructured data +- Azure AI Search resource for indexing and retrieving relevant information - Azure OpenAI service for Generative AI Models and Embedding Models - Add required credentials of above resources in `.env` file - Install the required libraries in the `requirements.txt` file via ```pip install -r requirements.txt ``` if you have not already @@ -14,7 +14,7 @@ Knowledge bases are widely used in enterprises and can contain an extensive number of documents across various categories. Retrieving relevant content based on user queries is a challenging task. Traditionally, methods like Page Rank have been employed to accurately retrieve information at the document level. However, users still need to manually search within the document to find the specific and relevant information they need. The recent advancements in Foundation Models, such as the one developed by OpenAI, offer a solution through the use of "Retrieval Augmented Generation" techniques and encoding information like "Embeddings." These methods aid in finding the relevant information and then to answer or summarize the content to present to the user in a concise and succinct manner. -Retrieval augmented generation (RAG) is an innovative approach that combines the power of retrieval-based Knowledge bases, such as Azure Cognitive Search, and generative Large Language Models (LLMs), such as Azure OpenAI ChatGPT, to enhance the quality and relevance of generated outputs. This technique involves integrating a retrieval component into a generative model, enabling the retrieval of contextual and domain-specific information from the knowledge base. By incorporating this contextual knowledge alongside the original input, the model can generate desired outputs, such as summaries, information extraction, or question answering. In essence, the utilization of RAG with LLMs allows you to generate domain-specific text outputs by incorporating specific external data as part of the context provided to the LLMs. +Retrieval augmented generation (RAG) is an innovative approach that combines the power of retrieval-based Knowledge bases, such as Azure AI Search, and generative Large Language Models (LLMs), such as Azure OpenAI ChatGPT, to enhance the quality and relevance of generated outputs. This technique involves integrating a retrieval component into a generative model, enabling the retrieval of contextual and domain-specific information from the knowledge base. By incorporating this contextual knowledge alongside the original input, the model can generate desired outputs, such as summaries, information extraction, or question answering. In essence, the utilization of RAG with LLMs allows you to generate domain-specific text outputs by incorporating specific external data as part of the context provided to the LLMs. 
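As a rough sketch of how the two halves of RAG fit together in code (using the keyless Entra ID authentication style the challenge notebooks use), the snippet below retrieves a few chunks from a search index and hands them to the chat model as grounding context. It assumes the environment variables referenced by the notebooks (`OPENAI_API_BASE`, `OPENAI_API_VERSION`, `CHAT_MODEL_NAME`, `AZURE_AI_SEARCH_ENDPOINT`); the `product-reviews` index name and its `content` field are placeholders you would swap for whatever index you build in the notebook.

```python
import os
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from azure.search.documents import SearchClient
from openai import AzureOpenAI

credential = DefaultAzureCredential()

# 1. Retrieve: pull the most relevant chunks from the knowledge base (Azure AI Search).
search_client = SearchClient(
    endpoint=os.getenv("AZURE_AI_SEARCH_ENDPOINT"),
    index_name="product-reviews",  # placeholder index name - use your own
    credential=credential,
)
question = "What are some of the features and functionalities of the Gally Smartwatch?"
results = search_client.search(search_text=question, top=3)
context = "\n\n".join(doc["content"] for doc in results)  # assumes a 'content' field in the index

# 2. Generate: ground the chat model with the retrieved context.
openai_client = AzureOpenAI(
    azure_endpoint=os.getenv("OPENAI_API_BASE"),
    azure_ad_token_provider=get_bearer_token_provider(
        credential, "https://cognitiveservices.azure.com/.default"
    ),
    api_version=os.getenv("OPENAI_API_VERSION"),
)
response = openai_client.chat.completions.create(
    model=os.getenv("CHAT_MODEL_NAME"),
    messages=[
        {"role": "system", "content": "Answer only from the provided context."},
        {"role": "user", "content": f"Context:\n{context}\n\nQuestion: {question}"},
    ],
    temperature=0,
)
print(response.choices[0].message.content)
```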
RAG aims to overcome limitations found in purely generative models, including issues of factual accuracy, relevance, and coherence, often seen in the form of "hallucinations". By integrating retrieval into the generative process, RAG seeks to mitigate these challenges. The incorporation of retrieved information serves to "ground" the large language models (LLMs), ensuring that the generated content better aligns with the intended context, enhances factual correctness, and produces more coherent and meaningful outputs. @@ -43,28 +43,27 @@ If you are working locally or in the Cloud, you can find them in the `/notebooks To run a Jupyter notebook, navigate to it in your Codespace or open it in VS Code on your local workstation. You will find further instructions for the challenge, as well as in-line code blocks that you will interact with to complete the tasks for the challenge. Return here to the student guide after completing all tasks in the Jupyter notebook to validate you have met the [success criteria](#success-criteria) below for this challenge. -### Exploring RAG in Azure AI Foundry -Try RAG in the Azure AI Foundry portal with unstructured data. -1. This is the prerequisite to the following steps: Navigate to the [Azure Portal](https://portal.azure.com/#home) and find your resource group. Then navigate to the right storage account. On the left navigation, click `Networking`. Under `Firewalls and virtual networks`, select `Enabled from all networks`. -2. Navigate to [Azure AI Foundry](https://ai.azure.com/) and `Playgrounds` in the left navigation. Find the `Chat Playground`. -3. Feel free to keep the default model instructions or modify them. -4. Click on `Add your data` and then `+ Add a new data source`. Let's choose `Upload files` from the drop down of different data sources for this exercise. Grab the data provided in your Codespace under the `/data` folder in `ch2_1.5_product_review.txt`. -5. Click next and select your search service and vector index.On the next page, click `Add vector search to this search resource` and choose your AOAI Service connection. Finally, select `Create vector index`. -6. Once complete, you should be able to chat with the data we added earlier. -7. Ask **What are some of the features and functionalities of the Gally Smartwatch?** +### Exploring RAG in Microsoft Foundry +Try RAG in the Microsoft portal with unstructured data. +1. Navigate to [Microsoft Foundry](https://ai.azure.com/) and `Playgrounds` in the left navigation. Find the `Chat Playground`. +2. Feel free to keep the default model instructions or modify them. +3. Click on `Add your data` and then `+ Add a new data source`. Let's choose `Upload files` from the drop down of different data sources for this exercise. Grab the data provided in your Codespace under the `/data` folder in `ch2_1.5_product_review.txt`. +4. Click next and select your search service and vector index.On the next page, click `Add vector search to this search resource` and choose your AOAI Service connection. Finally, select `Create vector index`. +5. Once complete, you should be able to chat with the data we added earlier. +6. 
Ask **What are some of the features and functionalities of the Gally Smartwatch?** ## Success Criteria To complete this challenge successfully, you should be able to: - Verify that you have extracted text from raw unstructured data using the Azure Document Intelligence API into a more structured format such as JSON -- Verify that you have created an index using Azure Cognitive Search based on the type of data you are dealing with and load data into the index. +- Verify that you have created an index using Azure AI Search based on the type of data you are dealing with and load data into the index. - Demonstrate the use of Iterative Prompt Development to write effective prompts for your AI tasks ## Learning Resources - [Use OpenAI GPT with your Enterprise Data](https://techcommunity.microsoft.com/t5/startups-at-microsoft/use-openai-gpt-with-your-enterprise-data/ba-p/3817141) -- [ChatGPT + Enterprise data with Azure OpenAI and Cognitive Search](https://github.com/Azure-Samples/azure-search-openai-demo) +- [ChatGPT + Enterprise data with Azure OpenAI and AI Search](https://github.com/Azure-Samples/azure-search-openai-demo) - [Build Industry-Specific LLMs Using Retrieval Augmented Generation](https://towardsdatascience.com/build-industry-specific-llms-using-retrieval-augmented-generation-af9e98bb6f68) ## Advanced Challenges (Optional) diff --git a/066-OpenAIFundamentals/Student/Challenge-06.md b/066-OpenAIFundamentals/Student/Challenge-06.md index 97aef595e7..c528b3b072 100644 --- a/066-OpenAIFundamentals/Student/Challenge-06.md +++ b/066-OpenAIFundamentals/Student/Challenge-06.md @@ -12,7 +12,7 @@ In this challenge, you will create a basic agent. ### Setup -1. Log into your [AI Foundry portal](ai.azure.com) +1. Log into your [Microsoft Foundry portal](ai.azure.com) 2. In your project's left-hand pane, navigate to `My assets -> Models and endpoints`. 3. On the Model deployments tab, click the `+ Deploy model` button and select `Deploy base model` from the drop down. 4. Search for the gpt-4o-mini model, select it, and confirm the deployment. @@ -38,12 +38,12 @@ To complete this challenge successfully, you should be able to: - Identify tools available to extend an agents capabilities ## Conclusion -In this Challenge, you explored creating an AI Agent through the Azure AI Foundry portal. This developer friendly experience integrates with several tools, knowledge connections, and systems. As you start or continue to develop your AI applications, think about the coordination needed between different agents and their roles. What would be some important considerations with multi-agent systems when handling complex tasks? +In this Challenge, you explored creating an agent through the Microsoft Foundry portal. This developer friendly experience integrates with several tools, knowledge connections, and systems. As you start or continue to develop your AI applications, think about the coordination needed between different agents and their roles. What would be some important considerations with multi-agent systems when handling complex tasks? ## Learning Resources -- [Overview of Azure AI Agents](https://learn.microsoft.com/en-us/azure/ai-services/agents/?view=azure-python-preview) -- These steps are listed here along with many other prompts: [AI Agents in AI Foundry](https://techcommunity.microsoft.com/blog/educatordeveloperblog/step-by-step-tutorial-building-an-ai-agent-using-azure-ai-foundry/4386122) . 
+- [Overview of Microsoft Agents](https://learn.microsoft.com/en-us/azure/ai-services/agents/?view=azure-python-preview) +- These steps are listed here along with many other prompts: [Agents in AI Foundry](https://techcommunity.microsoft.com/blog/educatordeveloperblog/step-by-step-tutorial-building-an-ai-agent-using-azure-ai-foundry/4386122) . diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index 6e851de2c5..9b8ee868eb 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -7,7 +7,7 @@ source ./functions.sh declare -A variables=( [template]="main.bicep" [parameters]="main.bicepparam" - [resourceGroupName]="rg-ai-foundry-secure" + [resourceGroupName]="rg-microsoft-foundry-secure" [location]="eastus" [validateTemplate]=0 [useWhatIf]=0 @@ -90,9 +90,8 @@ deploymentOutputs=$(az deployment group create \ --parameters $parameters \ --parameters location=$location \ --parameters userObjectId=$userObjectId \ - --query 'properties.outputs' -o json) + --query 'properties.outputs' -o json 2>/dev/null | grep -A 9999 '^{') - #echo $deploymentOutputs if [[ $? == 0 ]]; then echo "[$template] Bicep template deployment succeeded" else @@ -113,7 +112,7 @@ environment_sample_file="../.env.sample" # check if the .env file already exists and back it up if it does if [[ -f "$environment_file" ]]; then - random_chars=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) + random_chars=$(LC_ALL=C tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) mv "$environment_file" "${environment_file}-${random_chars}.bak" echo -e "\e[33mWarning: Existing .env file found. Backed up to ${environment_file}-${random_chars}.bak\e[0m" else @@ -127,15 +126,10 @@ source $environment_sample_file # Extract values from JSON and write to .env file with double quotes around values echo "Populating .env file..." 
-echo "OPENAI_API_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesKey')\"" >> $environment_file echo "OPENAI_API_BASE=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesOpenAiEndpoint')\"" >> $environment_file -echo "AZURE_AI_SEARCH_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchKey')\"" >> $environment_file echo "AZURE_AI_SEARCH_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchEndpoint')\"" >> $environment_file echo "DOCUMENT_INTELLIGENCE_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentEndpoint')\"" >> $environment_file -echo "DOCUMENT_INTELLIGENCE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentKey')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_ACCOUNT_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountName')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountKey')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_CONNECTION_STRING=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountConnectionString')\"" >> $environment_file +echo "AZURE_AI_PROJECT_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesProjectEndpoint')\"" >> $environment_file # Warning: this assumes the first deployed model is the chat model used by the Jupyter notebooks echo "CHAT_MODEL_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.deployedModels[0].name')\"" >> $environment_file diff --git a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh index 02ae19a2fe..3ef25acf12 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh @@ -17,7 +17,7 @@ function authenticate_to_azure { parse_args() { # $1 - The associative array name containing the argument definitions and default values # $2 - The arguments passed to the script - local -n arg_defs=$1 + local -n arg_defs=$1 # this won't work by default on the Mac zsh shell, but works in bash. brew install bash and then /opt/homebrew/bin/bash ./deploy.sh to use it. shift local args=("$@") diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep index 3f60ee1fe1..cd5d867379 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep @@ -11,49 +11,13 @@ param location string = resourceGroup().location @description('Specifies the name of the Network Security Perimeter.') param nspName string = '' -@description('Specifies the name Azure AI Hub workspace.') -param hubName string = '' - -@description('Specifies the friendly name of the Azure AI Hub workspace.') -param hubFriendlyName string = 'Demo AI Hub' - -@description('Specifies the description for the Azure AI Hub workspace displayed in Azure AI Foundry.') -param hubDescription string = 'This is a demo hub for use in Azure AI Foundry.' 
- -@description('Specifies the Isolation mode for the managed network of the Azure AI Hub workspace.') -@allowed([ - 'AllowInternetOutbound' - 'AllowOnlyApprovedOutbound' - 'Disabled' -]) -param hubIsolationMode string = 'Disabled' - -@description('Specifies the public network access for the Azure AI Hub workspace.') -param hubPublicNetworkAccess string = 'Enabled' - -@description('Specifies the authentication method for the OpenAI Service connection.') -@allowed([ - 'ApiKey' - 'AAD' - 'ManagedIdentity' - 'None' -]) -param connectionAuthType string = 'AAD' - -@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. If set to identity, the workspace will create the system datastores with no credentials.') -@allowed([ - 'identity' - 'accessKey' -]) -param systemDatastoresAuthMode string = 'identity' - -@description('Specifies the name for the Azure AI Foundry Hub Project workspace.') +@description('Specifies the name for the Microsoft Foundry Project.') param projectName string = '' -@description('Specifies the friendly name for the Azure AI Foundry Hub Project workspace.') -param projectFriendlyName string = 'AI Foundry Hub Project' +@description('Specifies the friendly name for the Microsoft Foundry Project.') +param projectFriendlyName string = 'Microsoft Foundry Project' -@description('Specifies the public network access for the Azure AI Project workspace.') +@description('Specifies the public network access for the Microsoft Foundry Project.') param projectPublicNetworkAccess string = 'Enabled' @description('Specifies the name of the Azure Log Analytics resource.') @@ -90,9 +54,6 @@ param aiServicesIdentity object = { @description('Specifies an optional subdomain name used for token-based authentication.') param aiServicesCustomSubDomainName string = '' -@description('Specifies whether disable the local authentication via API key.') -param aiServicesDisableLocalAuth bool = false - @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -287,7 +248,7 @@ module storageAccount 'modules/storageAccount.bicep' = { networkAclsDefaultAction: storageAccountANetworkAclsDefaultAction supportsHttpsTrafficOnly: storageAccountSupportsHttpsTrafficOnly workspaceId: workspace.outputs.id - + // role assignments userObjectId: userObjectId aiServicesPrincipalId: aiServices.outputs.principalId @@ -306,7 +267,6 @@ module aiServices 'modules/aiServices.bicep' = { customSubDomainName: empty(aiServicesCustomSubDomainName) ? toLower('ai-services-${suffix}') : aiServicesCustomSubDomainName - disableLocalAuth: aiServicesDisableLocalAuth publicNetworkAccess: aiServicesPublicNetworkAccess deployments: openAiDeployments workspaceId: workspace.outputs.id @@ -316,13 +276,12 @@ module aiServices 'modules/aiServices.bicep' = { } } -module hub 'modules/hub.bicep' = { - name: 'hub' +module project 'modules/foundryProject.bicep' = { + name: 'project' params: { // workspace organization - name: empty(hubName) ? toLower('hub-${suffix}') : hubName - friendlyName: hubFriendlyName - description_: hubDescription + name: empty(projectName) ? toLower('project-${suffix}') : projectName + friendlyName: projectFriendlyName location: location tags: tags @@ -332,31 +291,9 @@ module hub 'modules/hub.bicep' = { containerRegistryId: acrEnabled ? 
containerRegistry.outputs.id : '' keyVaultId: keyVault.outputs.id storageAccountId: storageAccount.outputs.id - connectionAuthType: connectionAuthType - systemDatastoresAuthMode: systemDatastoresAuthMode - - // workspace configuration - publicNetworkAccess: hubPublicNetworkAccess - isolationMode: hubIsolationMode - workspaceId: workspace.outputs.id - - // role assignments - userObjectId: userObjectId - } -} - -module project 'modules/project.bicep' = { - name: 'project' - params: { - // workspace organization - name: empty(projectName) ? toLower('project-${suffix}') : projectName - friendlyName: projectFriendlyName - location: location - tags: tags // workspace configuration publicNetworkAccess: projectPublicNetworkAccess - hubId: hub.outputs.id workspaceId: workspace.outputs.id // role assignments @@ -388,6 +325,7 @@ module document 'modules/document.bicep' = { params: { name: 'document-${suffix}' location: location + customSubDomainName: toLower('document-intelligence-${suffix}') } } @@ -398,16 +336,8 @@ output deploymentInfo object = { aiServicesName: aiServices.outputs.name aiServicesEndpoint: aiServices.outputs.endpoint aiServicesOpenAiEndpoint: aiServices.outputs.openAiEndpoint - aiServicesKey: aiServices.outputs.key1 - hubName: hub.outputs.name projectName: project.outputs.name - documentKey: document.outputs.key1 documentEndpoint: document.outputs.endpoint - searchKey: search.outputs.primaryKey searchEndpoint: search.outputs.endpoint - storageAccountName: storageAccount.outputs.name - storageAccountId: storageAccount.outputs.id - storageAccountConnectionString: storageAccount.outputs.connectionString - storageAccountKey: storageAccount.outputs.primaryKey deployedModels: aiServices.outputs.deployedModels } diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam index c644af659f..1e173d12af 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam @@ -5,6 +5,10 @@ param userObjectId = '' param keyVaultEnablePurgeProtection = false param acrEnabled = false param nspEnabled = false +//param aiServicesDisableLocalAuth = false +param storageAccountAllowSharedKeyAccess = true +//param documentDisableLocalAuth = false + //The first model in the list will be the default model for the Jupyter notebooks param openAiDeployments = [ { diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep index 31bd1c25a1..af5954f338 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep @@ -175,8 +175,8 @@ output endpoint string = aiServices.properties.endpoint output openAiEndpoint string = aiServices.properties.endpoints['OpenAI Language Model Instance API'] output principalId string = aiServices.identity.principalId #disable-next-line outputs-should-not-contain-secrets -output key1 string = aiServices.listKeys().key1 +//output key1 string = aiServices.listKeys().key1 // Output the deployed model names output deployedModels array = [for deployment in deployments: { name: deployment.model.name -}] \ No newline at end of file +}] diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep index 69cb91a519..a30fb1ecf2 
100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep @@ -20,7 +20,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { properties: { Application_Type: 'web' DisableIpMasking: false - DisableLocalAuth: false + //DisableLocalAuth: false Flow_Type: 'Bluefield' ForceCustomerStorageForProfiler: false ImmediatePurgeDataOn30Days: true diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep index 5b07c6624c..ce2f51a628 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep @@ -4,6 +4,9 @@ param name string @description('Location where the Azure Document Intelligence will be created.') param location string +@description('Custom subdomain name for the Azure Document Intelligence.') +param customSubDomainName string + resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: name location: location @@ -11,10 +14,10 @@ resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: 'S0' } kind: 'FormRecognizer' - properties: { + properties: { + customSubDomainName: customSubDomainName + } } -#disable-next-line outputs-should-not-contain-secrets -output key1 string = account.listKeys().key1 output endpoint string = account.properties.endpoint diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep new file mode 100644 index 0000000000..8112bef441 --- /dev/null +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep @@ -0,0 +1,243 @@ +// Parameters +@description('Specifies the name') +param name string + +@description('Specifies the location.') +param location string + +@description('Specifies the resource tags.') +param tags object + +@description('The SKU name to use for the Microsoft Foundry Project') +param skuName string = 'Basic' + +@description('The SKU tier to use for the Microsoft Foundry Project') +@allowed(['Basic', 'Free', 'Premium', 'Standard']) +param skuTier string = 'Basic' + +@description('Specifies the display name') +param friendlyName string = name + +@description('Specifies the public network access for the Foundry project.') +@allowed([ + 'Disabled' + 'Enabled' +]) +param publicNetworkAccess string = 'Enabled' + +@description('Specifies the resource ID of the application insights resource for storing diagnostics logs') +param applicationInsightsId string + +@description('Specifies the resource ID of the container registry resource for storing docker images') +param containerRegistryId string + +@description('Specifies the resource ID of the key vault resource for storing connection strings') +param keyVaultId string + +@description('Specifies the resource ID of the storage account resource for storing experimentation outputs') +param storageAccountId string + +@description('Specifies the name of the Azure AI Services resource') +param aiServicesName string + +@description('Specifies the resource id of the Log Analytics workspace.') +param workspaceId string + +@description('Specifies the object id of a Microsoft Entra ID user. 
In general, this the object id of the system administrator who deploys the Azure resources.') +param userObjectId string = '' + +@description('Specifies the principal id of the Azure AI Services.') +param aiServicesPrincipalId string = '' + +@description('Optional. The name of logs that will be streamed.') +@allowed([ + 'AmlComputeClusterEvent' + 'AmlComputeClusterNodeEvent' + 'AmlComputeJobEvent' + 'AmlComputeCpuGpuUtilization' + 'AmlRunStatusChangedEvent' + 'ModelsChangeEvent' + 'ModelsReadEvent' + 'ModelsActionEvent' + 'DeploymentReadEvent' + 'DeploymentEventACI' + 'DeploymentEventAKS' + 'InferencingOperationAKS' + 'InferencingOperationACI' + 'EnvironmentChangeEvent' + 'EnvironmentReadEvent' + 'DataLabelChangeEvent' + 'DataLabelReadEvent' + 'DataSetChangeEvent' + 'DataSetReadEvent' + 'PipelineChangeEvent' + 'PipelineReadEvent' + 'RunEvent' + 'RunReadEvent' +]) +param logsToEnable array = [ + 'AmlComputeClusterEvent' + 'AmlComputeClusterNodeEvent' + 'AmlComputeJobEvent' + 'AmlComputeCpuGpuUtilization' + 'AmlRunStatusChangedEvent' + 'ModelsChangeEvent' + 'ModelsReadEvent' + 'ModelsActionEvent' + 'DeploymentReadEvent' + 'DeploymentEventACI' + 'DeploymentEventAKS' + 'InferencingOperationAKS' + 'InferencingOperationACI' + 'EnvironmentChangeEvent' + 'EnvironmentReadEvent' + 'DataLabelChangeEvent' + 'DataLabelReadEvent' + 'DataSetChangeEvent' + 'DataSetReadEvent' + 'PipelineChangeEvent' + 'PipelineReadEvent' + 'RunEvent' + 'RunReadEvent' +] + +@description('Optional. The name of metrics that will be streamed.') +@allowed([ + 'AllMetrics' +]) +param metricsToEnable array = [ + 'AllMetrics' +] + +// Variables +var diagnosticSettingsName = 'diagnosticSettings' +var logs = [ + for log in logsToEnable: { + category: log + enabled: true + retentionPolicy: { + enabled: true + days: 0 + } + } +] + +var metrics = [ + for metric in metricsToEnable: { + category: metric + timeGrain: null + enabled: true + retentionPolicy: { + enabled: true + days: 0 + } + } +] + +// Resources +resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' existing = { + name: aiServicesName +} + +// Standalone Foundry Project (not hub-based) +resource project 'Microsoft.MachineLearningServices/workspaces@2024-04-01-preview' = { + name: name + location: location + tags: tags + sku: { + name: skuName + tier: skuTier + } + // Note: For standalone Foundry projects, kind is NOT set to 'Project' + // Omitting the kind property creates a standalone workspace that works with Foundry + identity: { + type: 'SystemAssigned' + } + properties: { + // organization + friendlyName: friendlyName + hbiWorkspace: false + v1LegacyMode: false + publicNetworkAccess: publicNetworkAccess + + // dependent resources - directly on the project (not inherited from hub) + keyVault: keyVaultId + storageAccount: storageAccountId + applicationInsights: applicationInsightsId + containerRegistry: containerRegistryId == '' ? 
null : containerRegistryId + systemDatastoresAuthMode: 'identity' + } + + // Create AI Services connection directly on the standalone project + resource aiServicesConnection 'connections@2024-01-01-preview' = { + name: toLower('${aiServices.name}-connection') + properties: { + category: 'AIServices' + target: aiServices.properties.endpoint + authType: 'AAD' + isSharedToAll: true + metadata: { + ApiType: 'Azure' + ResourceId: aiServices.id + } + } + } +} + +resource azureAIDeveloperRoleDefinition 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + name: '64702f94-c441-49e6-a78b-ef80e0188fee' + scope: subscription() +} + +resource azureMLDataScientistRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + name: 'f6c7c914-8db3-469d-8ca1-694a8f32e121' + scope: subscription() +} + +// This resource defines the Azure AI Developer role, which provides permissions for managing Azure AI resources, including deployments and configurations +resource aiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { + name: guid(project.id, azureAIDeveloperRoleDefinition.id, userObjectId) + scope: project + properties: { + roleDefinitionId: azureAIDeveloperRoleDefinition.id + principalType: 'User' + principalId: userObjectId + } +} + +// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Microsoft Foundry +resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { + name: guid(project.id, azureMLDataScientistRole.id, userObjectId) + scope: project + properties: { + roleDefinitionId: azureMLDataScientistRole.id + principalType: 'User' + principalId: userObjectId + } +} + +// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Microsoft Foundry +resource azureMLDataScientistManagedIdentityRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesPrincipalId)) { + name: guid(project.id, azureMLDataScientistRole.id, aiServicesPrincipalId) + scope: project + properties: { + roleDefinitionId: azureMLDataScientistRole.id + principalType: 'ServicePrincipal' + principalId: aiServicesPrincipalId + } +} + +resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { + name: diagnosticSettingsName + scope: project + properties: { + workspaceId: workspaceId + logs: logs + metrics: metrics + } +} + +// Outputs +output name string = project.name +output id string = project.id +output principalId string = project.identity.principalId diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep deleted file mode 100644 index c0cfe4165c..0000000000 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep +++ /dev/null @@ -1,202 +0,0 @@ -// Parameters -@description('Specifies the name') -param name string - -@description('Specifies the location.') -param location string - -@description('Specifies the resource tags.') -param tags object - -@description('The SKU name to use for the AI Foundry Hub Resource') -param skuName string = 'Basic' - -@description('The SKU tier to use for the AI Foundry Hub Resource') -@allowed(['Basic', 'Free', 'Premium', 'Standard']) -param skuTier string = 'Basic' - -@description('Specifies the display name') -param friendlyName string = 
name - -@description('Specifies the description') -param description_ string - -@description('Specifies the Isolation mode for the managed network of a machine learning workspace.') -@allowed([ - 'AllowInternetOutbound' - 'AllowOnlyApprovedOutbound' - 'Disabled' -]) -param isolationMode string = 'Disabled' - -@description('Specifies the public network access for the machine learning workspace.') -@allowed([ - 'Disabled' - 'Enabled' -]) -param publicNetworkAccess string = 'Enabled' - -@description('Specifies the resource ID of the application insights resource for storing diagnostics logs') -param applicationInsightsId string - -@description('Specifies the resource ID of the container registry resource for storing docker images') -param containerRegistryId string - -@description('Specifies the resource ID of the key vault resource for storing connection strings') -param keyVaultId string - -@description('Specifies the resource ID of the storage account resource for storing experimentation outputs') -param storageAccountId string - -@description('Specifies thename of the Azure AI Services resource') -param aiServicesName string - -@description('Specifies the authentication method for the OpenAI Service connection.') -@allowed([ - 'ApiKey' - 'AAD' - 'ManagedIdentity' - 'None' -]) -param connectionAuthType string = 'AAD' - -@description('Specifies the name for the Azure OpenAI Service connection.') -param aiServicesConnectionName string = '' - -@description('Specifies the resource id of the Log Analytics workspace.') -param workspaceId string - -@description('Specifies the object id of a Miccrosoft Entra ID user. In general, this the object id of the system administrator who deploys the Azure resources.') -param userObjectId string = '' - -@description('Optional. The name of logs that will be streamed.') -@allowed([ - 'ComputeInstanceEvent' -]) -param logsToEnable array = [ - 'ComputeInstanceEvent' -] - -@description('Optional. The name of metrics that will be streamed.') -@allowed([ - 'AllMetrics' -]) -param metricsToEnable array = [ - 'AllMetrics' -] - -@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. 
If set to identity, the workspace will create the system datastores with no credentials.') -@allowed([ - 'identity' - 'accessKey' -]) -param systemDatastoresAuthMode string = 'identity' - -// Variables -var diagnosticSettingsName = 'diagnosticSettings' -var logs = [ - for log in logsToEnable: { - category: log - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -var metrics = [ - for metric in metricsToEnable: { - category: metric - timeGrain: null - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -// Resources -resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' existing = { - name: aiServicesName -} - -resource hub 'Microsoft.MachineLearningServices/workspaces@2024-04-01-preview' = { - name: name - location: location - tags: tags - sku: { - name: skuName - tier: skuTier - } - kind: 'Hub' - identity: { - type: 'SystemAssigned' - } - properties: { - // organization - friendlyName: friendlyName - description: description_ - managedNetwork: { - isolationMode: isolationMode - } - publicNetworkAccess: publicNetworkAccess - - // dependent resources - keyVault: keyVaultId - storageAccount: storageAccountId - applicationInsights: applicationInsightsId - containerRegistry: containerRegistryId == '' ? null : containerRegistryId - systemDatastoresAuthMode: systemDatastoresAuthMode - } - - resource aiServicesConnection 'connections@2024-01-01-preview' = { - name: !empty(aiServicesConnectionName) ? aiServicesConnectionName : toLower('${aiServices.name}-connection') - properties: { - category: 'AIServices' - target: aiServices.properties.endpoint - authType: connectionAuthType - isSharedToAll: true - metadata: { - ApiType: 'Azure' - ResourceId: aiServices.id - } - credentials: connectionAuthType == 'ApiKey' - ? 
{ - key: aiServices.listKeys().key1 - } - : null - } - } -} - -resource azureMLDataScientistRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { - name: 'f6c7c914-8db3-469d-8ca1-694a8f32e121' - scope: subscription() -} - -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry -resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { - name: guid(hub.id, azureMLDataScientistRole.id, userObjectId) - scope: hub - properties: { - roleDefinitionId: azureMLDataScientistRole.id - principalType: 'User' - principalId: userObjectId - } -} - -resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { - name: diagnosticSettingsName - scope: hub - properties: { - workspaceId: workspaceId - logs: logs - metrics: metrics - } -} - -// Outputs -output name string = hub.name -output id string = hub.id diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep index ba3fe208cb..0ce1acc760 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep @@ -155,7 +155,7 @@ resource aiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022 } } -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry +// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Microsoft Foundry resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { name: guid(project.id, azureMLDataScientistRole.id, userObjectId) scope: project @@ -166,7 +166,7 @@ resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAss } } -// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Azure AI Foundry +// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Microsoft Foundry resource azureMLDataScientistManagedIdentityRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesPrincipalId)) { name: guid(project.id, azureMLDataScientistRole.id, aiServicesPrincipalId) scope: project diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep index 60c837d8fb..e7c3c7c7c4 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep @@ -14,9 +14,12 @@ resource search 'Microsoft.Search/searchServices@2023-11-01' = { replicaCount: 1 partitionCount: 1 hostingMode: 'default' + authOptions: { + aadOrApiKey: { + aadAuthFailureMode: 'http401WithBearerChallenge' + } + } } } -#disable-next-line outputs-should-not-contain-secrets -output primaryKey string = search.listAdminKeys().primaryKey output endpoint string = 'https://${name}.search.windows.net' diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb index 
ccfa510fbf..242fce4795 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb @@ -37,7 +37,7 @@ }, "source": [ "## 1. Parameter Experimentation\n", - "Let's first set up the Challenge. Load the API key and relevant Python libraries using the cells below." + "Let's first set up the Challenge. These cells install the required Python packages, load the environment variables, and relevant Python libraries using the cells below." ] }, { @@ -90,12 +90,12 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -r ../requirements-old.txt" + "%pip install -r ../requirements.txt" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "gather": { "logged": 1686932813309 @@ -115,8 +115,11 @@ "import openai\n", "import os\n", "import json\n", + "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())" + "load_dotenv(find_dotenv())\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n" ] }, { @@ -138,9 +141,10 @@ }, "outputs": [], "source": [ - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", @@ -149,8 +153,14 @@ "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", "\n", - "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n" + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")\n" ] }, { @@ -215,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "gather": { "logged": 1686938673045 @@ -234,19 +244,19 @@ "source": [ "def get_chat_completion(prompt, model=chat_model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + " model=chat_model,\n", " messages=messages,\n", " temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return response.choices[0].message[\"content\"]" + " return response.choices[0].message.content" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": { "gather": { "logged": 1686938550664 @@ -264,14 +274,13 @@ "outputs": [], "source": [ "def get_completion_from_messages(messages, model=chat_model, temperature=0):\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + " model=chat_model,\n", " messages=messages,\n", " temperature=temperature # this is the degree of randomness of the model's output\n", " )\n", "\n", - " return response.choices[0].message[\"content\"]\n", - "\n" + " return response.choices[0].message.content\n" ] }, { @@ -673,7 +682,7 @@ }, { 
"cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": { "gather": { "logged": 1685081594233 @@ -742,7 +751,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": { "gather": { "logged": 1685059771050 @@ -883,7 +892,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1510,7 +1519,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": { "gather": { "logged": 1685051978623 @@ -1525,7 +1534,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To determine the most decorated individual athlete at the Sydney 2000 Olympic Games, we need to follow a step-by-step approach:\\n\\n1. **Identify the Event**: The Sydney 2000 Olympic Games were held from September 15 to October 1, 2000.\\n\\n2. **Research the Medalists**: We need to look into the medalists from the Sydney 2000 Olympics to find out who won the most medals.\\n\\n3. **Focus on Individual Athletes**: We are interested in individual athletes, not teams or countries.\\n\\n4. **Consult Reliable Sources**: Use reliable sources such as the official Olympic website, sports databases, and historical records.\\n\\n5. **Analyze the Data**: Compare the number of medals won by individual athletes.\\n\\n### Step-by-Step Analysis:\\n\\n- **Research**: According to the official Olympic records and sports databases, the Sydney 2000 Olympics featured many outstanding performances.\\n\\n- **Identify Top Performers**: Swimmer Ian Thorpe from'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "text = f\"\"\"\n", " The 2020 Summer Olympics, officially the Games of the XXXII Olympiad and also known as Tokyo 2020, was an international multi-sport event held from 23 July to 8 August 2021 in Tokyo, Japan, with some preliminary events that began on 21 July 2021. Tokyo was selected as the host city during the 125th IOC Session in Buenos Aires, Argentina, on 7 September 2013.Originally scheduled to take place from 24 July to 9 August 2020, the event was postponed to 2021 on 24 March 2020 due to the global COVID-19 pandemic, the first such instance in the history of the Olympic Games (previous games had been cancelled but not rescheduled). However, the event retained the Tokyo 2020 branding for marketing purposes. It was largely held behind closed doors with no public spectators permitted due to the declaration of a state of emergency in the Greater Tokyo Area in response to the pandemic, the first and only Olympic Games to be held without official spectators. The Games were the most expensive ever, with total spending of over $20 billion.The Games were the fourth Olympic Games to be held in Japan, following the 1964 Summer Olympics (Tokyo), 1972 Winter Olympics (Sapporo), and 1998 Winter Olympics (Nagano). Tokyo became the first city in Asia to hold the Summer Olympic Games twice. The 2020 Games were the second of three consecutive Olympics to be held in East Asia, following the 2018 Winter Olympics in Pyeongchang, South Korea and preceding the 2022 Winter Olympics in Beijing, China. 
Due to the one-year postponement, Tokyo 2020 was the first and only Olympic Games to have been held in an odd-numbered year and the first Summer Olympics since 1900 to be held in a non-leap year.\\nNew events were introduced in existing sports, including 3x3 basketball, freestyle BMX and mixed gender team events in a number of existing sports, as well as the return of madison cycling for men and an introduction of the same event for women. New IOC policies also allowed the host organizing committee to add new sports to the Olympic program for just one Games. The disciplines added by the Japanese Olympic Committee were baseball and softball, karate, sport climbing, surfing and skateboarding, the last four of which made their Olympic debuts, and the last three of which will remain on the Olympic program.The United States topped the medal count by both total golds (39) and total medals (113), with China finishing second by both respects (38 and 89). Host nation Japan finished third, setting a record for the most gold medals and total medals ever won by their delegation at an Olympic Games with 27 and 58. Great Britain finished fourth, with a total of 22 gold and 64 medals. The Russian delegation competing as the ROC finished fifth with 20 gold medals and third in the overall medal count, with 71 medals. Bermuda, the Philippines and Qatar won their first-ever Olympic gold medals. Burkina Faso, San Marino and Turkmenistan also won their first-ever Olympic medals.'\n", @@ -1538,7 +1558,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1550,7 +1570,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To determine the largest time zone difference between the top two countries that won the most gold medals in the 2020 Tokyo Olympics, we need to identify these countries and their respective time zones.\\n\\n1. **Identify the top two countries by gold medals:**\\n - The United States won the most gold medals with 39.\\n - China finished second with 38 gold medals.\\n\\n2. **Determine the time zones for each country:**\\n - The United States spans multiple time zones, but the primary time zones are Eastern Standard Time (EST, UTC-5), Central Standard Time (CST, UTC-6), Mountain Standard Time (MST, UTC-7), and Pacific Standard Time (PST, UTC-8). For simplicity, we can consider the Eastern Standard Time (EST, UTC-5) as a representative time zone for the U.S.\\n - China operates on China Standard Time (CST, UTC+8), which is used nationwide.\\n\\n3. **Calculate'" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Another example\n", "prompt = f\"\"\"\n", @@ -1580,7 +1611,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": { "gather": { "logged": 1685053144682 @@ -1595,7 +1626,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To find out how many more silver and bronze medals the United States has over Great Britain, we need to calculate the number of silver and bronze medals each country has and then find the difference.\\n\\nFirst, calculate the number of silver and bronze medals for each country:\\n\\n1. **United States:**\\n - Total medals: 113\\n - Gold medals: 39\\n - Silver and bronze medals: 113 - 39 = 74\\n\\n2. 
**Great Britain:**\\n - Total medals: 64\\n - Gold medals: 22\\n - Silver and bronze medals: 64 - 22 = 42\\n\\nNow, find the difference in the number of silver and bronze medals between the United States and Great Britain:\\n\\n74 (United States) - 42 (Great Britain) = 32\\n\\nThe United States has 32 more silver and bronze medals than Great Britain.'" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Notice how this response may not be ideal, or the most accurate.\n", "prompt = f\"\"\"\n", @@ -1879,7 +1921,7 @@ "name": "python38-azureml" }, "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -1893,7 +1935,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb index 195c83072e..84993e5715 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb @@ -35,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -44,7 +44,9 @@ "import json\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())" + "load_dotenv(find_dotenv())\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider" ] }, { @@ -58,23 +60,33 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "#API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", + "#assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", + "#openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "openai.api_base = RESOURCE_ENDPOINT\n", "\n", + "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "\n", - "model=os.getenv(\"CHAT_MODEL_NAME\")" + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", + "\n", + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")" ] }, { @@ -88,20 +100,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "def get_chat_completion(prompt, model=model):\n", + "def get_chat_completion(prompt, model=chat_model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + 
" model=chat_model,\n", " messages=messages,\n", " temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return response.choices[0].message[\"content\"]" + " return response.choices[0].message.content" ] }, { @@ -130,7 +142,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=model)\n", + "model_response = get_chat_completion(prompt, model=chat_model)\n", "print(f\"Response: {model_response}\\n\")\n" ] }, @@ -156,7 +168,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=model)\n", + "model_response = get_chat_completion(prompt, model=chat_model)\n", "print(f\"Response: {model_response}\\n\")" ] }, @@ -183,7 +195,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -197,7 +209,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb index b5e623f23b..a4deb96601 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb @@ -44,31 +44,36 @@ "metadata": {}, "outputs": [], "source": [ + "%pip install langchain langchain-text-splitters\n", + "\n", "import openai\n", "import PyPDF3\n", "import os\n", "import json\n", "import tiktoken\n", "import spacy\n", - "from openai.error import InvalidRequestError\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", "load_dotenv(find_dotenv())\n", - "\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "from spacy.lang.en import English \n", "nlp = spacy.load(\"en_core_web_sm\")\n", "\n", "import langchain\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter" + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "from openai import BadRequestError" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Set up your environment to access your Azure OpenAI keys. Refer to your Azure OpenAI resource in the Azure Portal to retrieve information regarding your Azure OpenAI endpoint and keys. \n", - "\n", - "For security purposes, store your sensitive information in an .env file." + "This cell sets up your Python environment to access your Azure OpenAI endpoint and sets up various openai settings from your .env file. 
" ] }, { @@ -77,19 +82,26 @@ "metadata": {}, "outputs": [], "source": [ - "# Load your OpenAI credentials\n", - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "openai.api_base = RESOURCE_ENDPOINT\n", "\n", + "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "model=os.getenv(\"CHAT_MODEL_NAME\")\n" + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", + "\n", + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")" ] }, { @@ -164,12 +176,13 @@ "outputs": [], "source": [ "document = open(r'Insert PDF file path', 'rb') \n", + "\n", "doc_helper = PyPDF3.PdfFileReader(document)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -194,12 +207,16 @@ "\n", "try:\n", " final_prompt = prompt + q\n", - " response = openai.ChatCompletion.create(engine=model, messages=final_prompt, max_tokens=50)\n", - " answer = response.choices[0].text.strip()\n", + " response = client.chat.completions.create(\n", + " model=chat_model, \n", + " messages=[{\"role\": \"user\", \"content\": final_prompt}], \n", + " max_tokens=50\n", + " )\n", + " answer = response.choices[0].message.content.strip()\n", " print(f\"{q}\\n{answer}\\n\")\n", "\n", - "except InvalidRequestError as e:\n", - " print(e.error)\n", + "except BadRequestError as e:\n", + " print(e)\n", "\n" ] }, @@ -387,7 +404,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -401,7 +418,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb index 9e88ed1da1..de1b9484a3 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb @@ -52,7 +52,6 @@ "source": [ "! pip install num2words\n", "! pip install plotly\n", - "! pip install \"openai==0.28.1\" \n", "! 
pip install nptyping" ] }, @@ -62,7 +61,6 @@ "metadata": {}, "outputs": [], "source": [ - "import openai\n", "import os\n", "import re \n", "import requests\n", @@ -70,11 +68,36 @@ "from num2words import num2words \n", "import pandas as pd \n", "import numpy as np\n", - "from openai.embeddings_utils import get_embedding, cosine_similarity \n", "import tiktoken\n", "from dotenv import load_dotenv\n", "from tenacity import retry, wait_random_exponential, stop_after_attempt\n", - "load_dotenv() " + "from sklearn.metrics.pairwise import cosine_similarity as sklearn_cosine_similarity\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "\n", + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", + "\n", + "# Initialize the Azure OpenAI client\n", + "client = AzureOpenAI(\n", + " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")\n", + "\n", + "# Define helper functions using the OpenAI 1.x API\n", + "@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n", + "def get_embedding(text: str, engine: str) -> list:\n", + "\ttext = text.replace(\"\\n\", \" \")\n", + "\tresponse = client.embeddings.create(input=[text], model=engine)\n", + "\treturn response.data[0].embedding\n", + "\n", + "def cosine_similarity(a, b):\n", + "\treturn np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))" ] }, { @@ -88,15 +111,12 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ - "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", - "openai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n", - "openai.api_base = os.environ.get(\"OPENAI_API_BASE\")\n", - "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "embedding_model=os.getenv(\"EMBEDDING_MODEL_NAME\")" + "# Get the embedding model name from environment\n", + "embedding_model = os.getenv(\"EMBEDDING_MODEL_NAME\")" ] }, { @@ -119,7 +139,7 @@ "\n", "input=\"I would like to order a pizza\"\n", "\n", - "# Add code here " + "# Add code here: Create embedding using the helper function\n" ] }, { @@ -127,7 +147,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The openai.Embedding.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." + "The client.embeddings.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." 
] }, { @@ -148,6 +168,7 @@ "outputs": [], "source": [ "df=pd.read_csv(os.path.join(os.getcwd(),r'Enter path here'))\n", + "\n", "df" ] }, @@ -163,9 +184,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "398" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n", "shortened_df['n_tokens'] = shortened_df[\"name\"].apply(lambda x: len(tokenizer.encode(x)))\n", @@ -195,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -234,7 +266,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -248,7 +280,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb index c35a148f3e..67fc0b57d3 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb @@ -106,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "69bd738e", "metadata": {}, "outputs": [], @@ -119,9 +119,8 @@ "import pandas as pd\n", "import numpy as np\n", "from sklearn.metrics.pairwise import cosine_similarity\n", - "\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "# Azure Cognitive Search imports\n", - "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", "from azure.search.documents.indexes.models import (\n", @@ -143,7 +142,12 @@ "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()" + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n" ] }, { @@ -160,19 +164,19 @@ "# Initialize Semantic Kernel\n", "kernel = sk.Kernel()\n", "\n", - "# Add Azure OpenAI Chat Completion service\n", + "# Add Azure OpenAI Chat Completion service with Entra ID authentication\n", "chat_service = AzureChatCompletion(\n", " deployment_name=chat_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " api_key=os.environ['OPENAI_API_KEY']\n", + " ad_token_provider=token_provider\n", ")\n", "kernel.add_service(chat_service)\n", "\n", - "# Add Azure OpenAI Text Embedding service \n", + "# Add Azure OpenAI Text Embedding service with Entra ID authentication\n", "embedding_service = AzureTextEmbedding(\n", " deployment_name=embedding_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " api_key=os.environ['OPENAI_API_KEY']\n", + " ad_token_provider=token_provider\n", ")\n", "kernel.add_service(embedding_service)\n", "\n", @@ -206,10 +210,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Create a Cognitive Search Index client\n", + "# Create a Cognitive Search Index client with Entra ID 
authentication\n", + "from azure.identity import AzureCliCredential\n", + "\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", - "credential = AzureKeyCredential(key)\n", + "\n", + "# Use AzureCliCredential for local development (more reliable than DefaultAzureCredential)\n", + "credential = AzureCliCredential()\n", "\n", "index_name = \"news-index\"\n", "\n", @@ -322,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "32689db7-4337-42d9-b8f9-4cbd9d98a850", "metadata": { "gather": { @@ -571,7 +578,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "id": "56354758-427f-4af9-94b9-96a25946e9a5", "metadata": { "gather": { @@ -587,7 +594,98 @@ } } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generated embeddings for 11 chunks\n", + "\n", + "Query: What did Laurene Jobs say about Hillary Clinton?\n", + "\n", + "Result 1 (Score: 0.913):\n", + "She is one of America’s greatest modern creations. Laurene Jobs, pictured, widow of Apple's Steve, has strongly backed Hillary Clinton for president . Laurene Jobs said that Hillary Clinton, right, ha...\n", + "\n", + "Result 2 (Score: 0.904):\n", + "Apple founder Steve Jobs' widow Laurene has told of her admiration for Democratic White House front-runner Hillary Clinton. Ms Jobs, 51, called former First Lady Hillary a 'revolutionary' woman, and a...\n", + "\n", + "Result 3 (Score: 0.829):\n", + "'It matters, of course, that Hillary is a woman. But what matters more is what kind of woman she is.' Mrs Clinton announced her intention to seek the Democratic nomination on Sunday - and set upon the...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + 
"/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in 
matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n" + ] + } + ], "source": [ "# Create embeddings for document chunks\n", "embeddings = []\n", @@ -819,7 +917,7 @@ "name": "python3" }, "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -833,7 +931,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb index e4ca2b4acd..82f33157c6 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -49,6 +49,7 @@ "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "from azure.search.documents.indexes.models import (\n", " SearchIndex,\n", " SearchField,\n", @@ -66,12 +67,17 @@ "import numpy as np\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()" + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -80,10 +86,11 @@ "# Initialize the Azure OpenAI client for the latest version\n", "from openai import AzureOpenAI\n", "\n", + "# Initialize the Azure OpenAI client\n", "client = AzureOpenAI(\n", - " api_key=os.environ['OPENAI_API_KEY'],\n", - " api_version=os.environ['OPENAI_API_VERSION'],\n", - " azure_endpoint=os.environ['OPENAI_API_BASE']\n", + " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", ")\n", "\n", "chat_model = os.environ['CHAT_MODEL_NAME']\n", @@ -99,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -111,18 +118,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "from azure.core.credentials import AzureKeyCredential\n", "from azure.ai.formrecognizer import DocumentAnalysisClient\n", "\n", - "endpoint = os.environ[\"AZURE_DOC_INTELLIGENCE_ENDPOINT\"]\n", - "key = os.environ[\"AZURE_DOC_INTELLIGENCE_KEY\"]\n", + "endpoint = os.environ[\"DOCUMENT_INTELLIGENCE_ENDPOINT\"]\n", + "\n", + "# Use Entra ID authentication instead of API key\n", + "credential = DefaultAzureCredential()\n", "\n", "document_analysis_client = DocumentAnalysisClient(\n", - " endpoint=endpoint, credential=AzureKeyCredential(key)\n", + " 
endpoint=endpoint, credential=credential\n", ")" ] }, @@ -138,7 +146,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -211,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -263,8 +271,7 @@ "source": [ "# Create an SDK client\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", - "credential = AzureKeyCredential(key)\n", + "credential = DefaultAzureCredential()\n", "\n", "index_name = \"research-paper-index\"\n", "\n", @@ -338,7 +345,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -374,7 +381,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -421,7 +428,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -488,7 +495,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -545,18 +552,11 @@ "answer = query_search(\"what is prompt tuning?\", 10)\n", "print(answer)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -570,7 +570,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb index 0bf538deef..77eecbbdb0 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "94bcb00a", "metadata": {}, "outputs": [], @@ -199,7 +199,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -213,7 +213,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" } }, "nbformat": 4, From 6f630deb5af765c181af0a15976376881083e1aa Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 09:30:37 -0600 Subject: [PATCH 20/58] Update references from AI Foundry to Microsoft Foundry in coach guides; clarify resource creation process and troubleshooting steps for students. --- 066-OpenAIFundamentals/Coach/Solution-00.md | 6 +++--- 066-OpenAIFundamentals/Coach/Solution-02.md | 2 +- 066-OpenAIFundamentals/Coach/Solution-04.md | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/066-OpenAIFundamentals/Coach/Solution-00.md b/066-OpenAIFundamentals/Coach/Solution-00.md index d9e2b9293f..cc3f5493b8 100644 --- a/066-OpenAIFundamentals/Coach/Solution-00.md +++ b/066-OpenAIFundamentals/Coach/Solution-00.md @@ -6,17 +6,17 @@ Challenge-00 is all about helping the student set up the prerequisites for this hack. This includes necessary installations, environment options, and other libraries needed. 
-They will be creating all needed Azure resources through the Azure AI Foundry. Once they create a hub, they will have an AOAI, AI Search, Azure Document Intelligence, and Azure Storage Account deployed. They will get the credentials for AOAI, AI Search, and Document Intelligence through the AI Foundry. For Azure Storage, they will need to navigate to the Azure Portal. +They will be creating all needed Azure resources through Microsoft Foundry. If they use the deployment script, all of the required Azure resources will be created for them. Otherwise, they will need to create the required resources in the Azure Portal and the Foundry portal. Once they create a project, they will have an AOAI, AI Search, Azure Document Intelligence, and Azure Storage Account deployed. They will get the credentials for AOAI, AI Search, and Document Intelligence through Microsoft Foundry. For Azure Storage, they will need to navigate to the Azure Portal if they are doing it manually. **NOTE:** Target | Endpoint | Base can be used interchangeably. -**NOTE:** For all of the challenges, if a student changes any variables in their .env file, they will need to re-run those cells that load the .env file and set the variables in Python. They can check the values of their Jupyter variables by clicking the Jupyter tab in Visual Studio Code. +**NOTE:** For all of the challenges, if a student changes any variables in their .env file, they will need to re-run those cells that load the .env file and set the variables in Python. They can check the values of their Jupyter variables by clicking the Jupyter tab in Visual Studio Code. If Visual Studio Code hangs during cell execution, they will need to restart the kernel which means they will need to re-run any cells that load the .env file and set the variables in Python again. ### Model Deployment Challenge 0 has the students deploy multiple models that will be used for the following: - One model will be used by the Jupyter notebooks for Challenges 1, 3, 4, & 5. The notebooks expect to find the name of this model in the `CHAT_MODEL_NAME` value of the `.env` file. -- A second model will be used in Challenge 2 for model comparison with the Leaderboards in Azure AI Foundry. +- A second model will be used in Challenge 2 for model comparison with the Leaderboards in Microsoft Foundry. - A text embedding model will be used in the Jupyter notebooks for Challenges 3 & 4. The notebooks expect to find the name of this model in the `EMBEDDING_MODEL_NAME` value of the `.env` file. Students can use different/newer models than the ones listed in the student guide when this hack was published. Most models should work fine. Just ensure the values set in the `.env` file to match the names of the models deployed. diff --git a/066-OpenAIFundamentals/Coach/Solution-02.md b/066-OpenAIFundamentals/Coach/Solution-02.md index 0da1824c17..1d7d4c169f 100644 --- a/066-OpenAIFundamentals/Coach/Solution-02.md +++ b/066-OpenAIFundamentals/Coach/Solution-02.md @@ -28,7 +28,7 @@ Some possible model choices include: 1. GPT-4o and GPT-4o Mini 2. GPT-4o and GPT-5 Mini -For Model Router, students will be deploying an instance of model router in AI Foundry and prompting it with different questions in the chat playground to see how the queries are automatically sent to the different LLMs in depending on their complexity. 
+For Model Router, students will be deploying an instance of model router in Microsoft Foundry and prompting it with different questions in the chat playground to see how the queries are automatically sent to the different LLMs depending on their complexity.
 
 The router may choose the model **`gpt-5-nano-2025-08-07`** consistently for the given prompts. This model is known for its ultra low latency and fast responses for simple tasks. Encourage students to try longer, multi-step reasoning prompts to trigger a different model.
diff --git a/066-OpenAIFundamentals/Coach/Solution-04.md b/066-OpenAIFundamentals/Coach/Solution-04.md
index 35787ef4a0..fa063ffcac 100644
--- a/066-OpenAIFundamentals/Coach/Solution-04.md
+++ b/066-OpenAIFundamentals/Coach/Solution-04.md
@@ -16,4 +16,4 @@ Known Bugs
 - **CH4 EXTRACTING FILES: operation returned an invalid status `inefficient storage`** \
   **Solution:** If some of the files are extracted, students can continue working with those and ignore the error message.
 - Rate Limits \
-  **Solution:** Have students go into their model deployments in the AI Studio and adjust the TPM allocation for that model to a higher value using the toggle.
+  **Solution:** Have students go into their model deployments in the Microsoft Foundry and adjust the TPM allocation for that model to a higher value using the toggle.

From 80a5e1ad4911b1b2243e3a59972224dbcb25292b Mon Sep 17 00:00:00 2001
From: Pete Rodriguez 
Date: Tue, 13 Jan 2026 10:17:40 -0600
Subject: [PATCH 21/58] Update references from AI Foundry to Microsoft Foundry across multiple challenge documents for consistency and clarity.

---
 .../Student/Challenge-00.md     | 34 ++++++-------------
 .../Student/Challenge-01.md     |  4 +--
 .../Student/Challenge-02-Old.md |  6 ++--
 .../Student/Challenge-02.md     |  4 +--
 .../Student/Challenge-05.md     | 34 +++++++++----------
 .../Student/Challenge-06.md     |  2 +-
 6 files changed, 36 insertions(+), 48 deletions(-)

diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md
index f5148aa7eb..6ae97f12dc 100644
--- a/066-OpenAIFundamentals/Student/Challenge-00.md
+++ b/066-OpenAIFundamentals/Student/Challenge-00.md
@@ -12,7 +12,7 @@ In this challenge, you will set up the necessary prerequisites and environment t
 - [Setup Jupyter Notebook Environment](#setup-jupyter-notebook-environment)
   - [GitHub Codespaces](#setup-github-codespace)
   - [Local Workstation](#setup-local-workstation)
-- [Deploy Azure AI Foundry Resources](#deploy-azure-ai-foundry-resources)
+- [Deploy Microsoft Foundry Resources](#deploy-azure-ai-foundry-resources)
 
 ### Azure Subscription
 
@@ -140,7 +140,7 @@ Now that you have a Jupyter notebook environment setup, you need to:
 - Deploy AI models and resources in Microsoft Foundry.
 - Setup Jupyter Notebooks Configuration File
 
-We have provided an automation script that will perform these tasks for you. However, you may wish to complete these tasks manually to become more familiar with Azure AI Foundry.
+We have provided an automation script that will perform these tasks for you. However, you may wish to complete these tasks manually to become more familiar with Microsoft Foundry.
 
 - [Automate Microsoft Foundry Deployment](#automate-microsoft-foundry-deployment)
 - [Manual Microsoft Foundry Deployment](#manual-microsoft-foundry-deployment)
@@ -182,16 +182,16 @@ chmod +x deploy.sh
 
 #### Manual Microsoft Foundry Deployment
 
-**NOTE:** You can skip this section if you chose to automate the deployment.
+**NOTE:** You can skip this section if you chose to automate the deployment. It is strongly recommended that you use the automated approach. If you'd like to understand more about what the automated approach is doing, you can use GitHub Copilot to explain what the deployment script and associated Bicep files are doing. If you want to deploy the Microsoft Foundry resources manually, expand the section below and follow the instructions there.
Click to expand/collapse Manual Deployment Instructions -#### Setup Azure AI Foundry Project and Hub +#### Setup Azure Microsoft Foundry Project -Navigate to [AI Foundry](https://ai.azure.com) to create your Microsoft Foundry project. +Navigate to [Microsoft Foundry](https://ai.azure.com) to create your Microsoft Foundry project. - Click on the **+ Create New** button. - Choose Microsoft Foundry resource for the resource type. Click the **Next** button @@ -205,7 +205,7 @@ Navigate to [AI Foundry](https://ai.azure.com) to create your Microsoft Foundry Now we will deploy the needed large language models from Azure OpenAI. -- Navigate to the [AI Foundry](https://ai.azure.com) +- Navigate to the [Microsoft Foundry](https://ai.azure.com) - On the left navigation bar, under My Assets, click on Models + endpoints. Click the Deploy Model button and select Deploy base model - Deploy the following 3 models in your Azure OpenAI resource. - `gpt-4o` @@ -222,21 +222,9 @@ You will find the `.env.sample` file in the root of the codespace. If you are wo - Rename the file from `.env.sample` to `.env`. - Add all the required Azure resource credentials in the `.env` file. This includes: Azure OpenAI, model deployments, AI Search, Azure Document Intelligence, and Azure Blob - - For **Azure OpenAI and Model Deployments**, you can find these credentials in Azure AI Foundry: - - Navigate to the [AI Foundry](https://ai.azure.com) - - Navigate to your project. In the lower left corner, click on the link to Management Center. It is also under Project details. - - Click on Connected resources under your project - - Click the name of your Azure OpenAI Service to see its details. Copy the Target URL and API Key for `OPENAI_API_BASE` and `OPEN_API_KEY`, respectively into the `.env` file - - From the **`Manage connect resources in this project`** screen, click the Name with the type **`AIServices`**. The AI Services deployment is a multi-service resource that allows you to access multiple Azure AI services like Document Intelligence with a single key and endpoint. Copy the Target URL and the API Key for `AZURE_DOC_INTELLIGENCE_ENDPOINT` and `AZURE_DOC_INTELLIGENCE_KEY`, respectively into the `.env` file - - In the [Azure Portal](portal.azure.com), navigate to the resource group you made when creating your hub within the AI Foundry. - - Locate your **AI Search** service that you created earlier - - From the **Overview**, copy the URL for `AZURE_AI_SEARCH_ENDPOINT` in the .env file - - Under **`Settings`** go to Keys, copy the admin key into `AZURE_AI_SEARCH_KEY` in the `.env` file - - Model deployment names should be the same as the ones populated in the `.env.sample` file especially if you have deployed a different model due to quota issues. - - For **Azure Blob**, you can find these credentials in the [Azure Portal](portal.azure.com). - - In the Azure Portal, navigate to the resource group you made when creating your hub within the AI Foundry. - - Click on your **`Storage account`** resource - - Click on **`Security + networking`** and find **`Access keys`**. You should be able to see the **`Storage account name`**, **`key`**, and **`Connection string`**. 
+ - For **Azure OpenAI and Model Deployments**, you can find these credentials in Azure Microsoft Foundry: + - Navigate to the [Microsoft Foundry](https://ai.azure.com) + - You will need the values for `OPENAI_API_BASE`, `AZURE_DOC_INTELLIGENCE_ENDPOINT`, `AZURE_AI_SEARCH_ENDPOINT`, `AZURE_AI_PROJECT_ENDPOINT`, and `AZURE_BLOB_STORAGE_ACCOUNT_NAME` to put in your `.env` file. Use your favorite search tool or Github Copilot to figure out where to retrieve these values either in the Foundry Portal, Azure Portal, or using the Azure CLI. **TIP:** Learn more about using `.env` files [here](https://dev.to/edgar_montano/how-to-setup-env-in-python-4a83#:~:text=How%20to%20setup%20a%20.env%20file%201%201.To,file%20using%20the%20following%20format%3A%20...%20More%20items). @@ -259,7 +247,7 @@ If using GitHub Codespaces: - `.env` <= Renamed from `.env.sample` - `.gitignore` - `requirements.txt` -- Verify that you have created the Project and Hub in your AI Foundry. +- Verify that you have created the Project and Hub in your Microsoft Foundry. - Verify that you have the following resources: Azure OpenAI, deployed the necessary models, AI Search, Document Intelligence, Azure Blob. If working on a local workstation: @@ -272,7 +260,7 @@ If working on a local workstation: - `.env` <= Renamed from `.env.sample` - `.gitignore` - `requirements.txt` -- Verify that you have created the Project and Hub in your AI Foundry. +- Verify that you have created the Project and Hub in your Microsoft Foundry. - Verify that you have the following resources: Azure OpenAI, deployed the necessary models, AI Search, Document Intelligence, Azure Blob. ## Learning Resources diff --git a/066-OpenAIFundamentals/Student/Challenge-01.md b/066-OpenAIFundamentals/Student/Challenge-01.md index 65097dde87..95f8d479ad 100644 --- a/066-OpenAIFundamentals/Student/Challenge-01.md +++ b/066-OpenAIFundamentals/Student/Challenge-01.md @@ -4,8 +4,8 @@ ## Prerequisites -* Ensure you have the needed resources from the previous challenge in [AI Foundry](https://ai.azure.com/) -* Update the `.env.sample` file (and save as `.env`) with your respective resource credentials if you haven't already +* Ensure you have the needed resources from the previous challenge in [Microsoft Foundry](https://ai.azure.com/) +* Update the `.env.sample` file (and save as `.env`) with your respective resource credentials if you haven't already done so. ## Introduction diff --git a/066-OpenAIFundamentals/Student/Challenge-02-Old.md b/066-OpenAIFundamentals/Student/Challenge-02-Old.md index b4bde945a3..31a4b24b0d 100644 --- a/066-OpenAIFundamentals/Student/Challenge-02-Old.md +++ b/066-OpenAIFundamentals/Student/Challenge-02-Old.md @@ -17,7 +17,7 @@ Questions you should be able to answer by the end of this challenge: - What model would you select to perform complex problem solving? - What model would you select to generate new names? -You will work in the Azure AI Foundry for this challenge. We recommend keeping the student guide and the Azure AI Foundry in two windows side by side as you work. This will also help to validate you have met the [success criteria](#success-criteria) below for this challenge. +You will work in the Azure Microsoft Foundry for this challenge. We recommend keeping the student guide and the Azure Microsoft Foundry in two windows side by side as you work. This will also help to validate you have met the [success criteria](#success-criteria) below for this challenge. 
This challenge is divided into the following sections: @@ -33,7 +33,7 @@ This challenge is divided into the following sections: Scenario: You are part of a research team working on getting information from biotech news articles. Your goal is to explore the Model Catalog and identify some suitable models for accurate question answering. There is no right or wrong answer here. #### Student Task 2.1 -- Go into the [Azure AI Foundry](https://ai.azure.com). +- Go into the [Microsoft Foundry](https://ai.azure.com). - Navigate to the Model Catalog and explore different models using the correct filters. - Identify which models can potentially improve the accuracy of the task at hand. @@ -53,7 +53,7 @@ Scenario: You are part of a research team working on getting information from bi ### 2.4 Prompt Flow Scenario: You are a product manager at a multinational tech company, and your team is developing an advanced AI-powered virtual assistant to provide real-time customer support. The company is deciding between GPT-3.5 Turbo and GPT-4 to power the virtual assistant. Your task is to evaluate both models to determine which one best meets the company's needs for handling diverse customer inquiries efficiently and effectively. -Navigate to the AI Foundry and click on your project. You should be able to see **Prompt flow** under Tools in the navigation bar. Create a new **standard flow** to solve the tasks below and compare the responses from different models. For each task, you will see the provided prompts that you can test against the deployed models. +Navigate to the Microsoft Foundry and click on your project. You should be able to see **Prompt flow** under Tools in the navigation bar. Create a new **standard flow** to solve the tasks below and compare the responses from different models. For each task, you will see the provided prompts that you can test against the deployed models. **NOTE:** If you get this **User Error: This request is not authorized to perform this operation using this permission. Please grant workspace/registry read access to the source storage account.** when you create a new **standard** flow using the default name, then please append some random characters to the name or create a unique name for your flow. diff --git a/066-OpenAIFundamentals/Student/Challenge-02.md b/066-OpenAIFundamentals/Student/Challenge-02.md index 389ef1da24..77cd2e780a 100644 --- a/066-OpenAIFundamentals/Student/Challenge-02.md +++ b/066-OpenAIFundamentals/Student/Challenge-02.md @@ -75,7 +75,7 @@ Scenario: You are a product manager at a multinational tech company, and your te ### 2.4 Model Router #### Student Task 2.4 -- Navigate to AI Foundry and deploy an instance of model router in the same project as your other models +- Navigate to Microsoft Foundry and deploy an instance of model router in the same project as your other models - In Chat Playground use the model router deployment and prompt it with a variety of questions ranging simple to difficult. You can use the sample prompts below or come up with your own! Note how different models are used for each query (you can see this switch in the metadata on top of the prompt). - After trying the below prompts navigate to a browser window and open Copilot. Ask Copilot the pricing for the three different models each query used. Note the price difference for each model. The smart routing is optimizing cost by using light weight models (which are cheaper) for the easier prompts! 
@@ -105,7 +105,7 @@ To complete this challenge successfully, you should be able to: ## Learning Resources - [Overview of Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models) -- [Use Model Router for Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/model-router) +- [Use Model Router for Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/openai/how-to/model-router) - [Azure OpenAI Pricing Page](https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/) - [Request for Quota Increase](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR4xPXO648sJKt4GoXAed-0pURVJWRU4yRTMxRkszU0NXRFFTTEhaT1g1NyQlQCN0PWcu) - [Customize Models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/fine-tuning?pivots=programming-language-studio) diff --git a/066-OpenAIFundamentals/Student/Challenge-05.md b/066-OpenAIFundamentals/Student/Challenge-05.md index a6d0ed17ba..c03758e662 100644 --- a/066-OpenAIFundamentals/Student/Challenge-05.md +++ b/066-OpenAIFundamentals/Student/Challenge-05.md @@ -27,13 +27,13 @@ This challenge is divided into the following sections: - [5.6 Protected Material Detection](#56-protected-material-detection-preview) - [5.7 Red Teaming](#57-red-teaming) -For each section of this Challenge, you will work in [Azure AI Foundry](https://ai.azure.com). We recommend keeping the student guide and the Azure AI Foundry in two windows side by side as you work. This will also help to validate you have met the success criteria below for this challenge. +For each section of this Challenge, you will work in [Microsoft Foundry](https://ai.azure.com). We recommend keeping the student guide and Microsoft Foundry in two windows side by side as you work. This will also help to validate you have met the success criteria below for this challenge. -**NOTE:** Previously, each of the Content Safety services were hosted in their own portals. As of July 2024, they have been integrated into Azure AI Foundry. While searching for documentation of these services, you may find references to their original stand-alone portals. You should access these services via Azure AI Foundry for this hack. +**NOTE:** Previously, each of the Content Safety services were hosted in their own portals. As of July 2024, they have been integrated into Microsoft Foundry. While searching for documentation of these services, you may find references to their original stand-alone portals. You should access these services via Microsoft Foundry for this hack. ### Access Content Safety Service - TEMPORARY STEP -Azure AI Services are constantly changing. As of July 2024, the Azure AI Foundry does not automatically grant your user access to the Content Safety service. You will need to perform this task manually. We are adding these detailed steps here to complete this challenge today. We anticipate these steps will not be required in the near future when Azure AI Foundry should handle this automatically. +Azure AI Services are constantly changing. As of July 2024, Microsoft Foundry does not automatically grant your user access to the Content Safety service. You will need to perform this task manually. We are adding these detailed steps here to complete this challenge today. We anticipate these steps will not be required in the near future when Microsoft Foundry should handle this automatically. 
Follow these steps to grant your user account access to the Content Safety service: @@ -51,7 +51,7 @@ Follow these steps to grant your user account access to the Content Safety servi After the role assignment completes in the Azure Portal, you will need to wait 1-3 minutes and then follow one additional step: -- Log out of [Azure AI Foundry](https://ai.azure.com), and then log back in. This will ensure your login token is refreshed with the new permissions for Content Safety. +- Log out of [Microsoft Foundry](https://ai.azure.com), and then log back in. This will ensure your login token is refreshed with the new permissions for Content Safety. You should now be prepared to complete the rest of this challenge! @@ -63,7 +63,7 @@ Your Azure AI Services resource includes Content Safety. You may refer to this [ 1. [Understand harm categories](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/concepts/harm-categories?tabs=warning) defined by Microsoft. -2. In the [AI Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. +2. In the [Microsoft Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. 3. Try out the following features in Content Safety using provided sample text and data, or come up with your own examples. Analyze the moderation results. Try viewing the code! @@ -90,7 +90,7 @@ Let's configure a content filtering system both for user input (prompts) and LLM #### Student Task 5.2: Create a Custom Content Filter -1. Configure a content filter following these [instructions for the Azure AI Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/content-filtering#create-a-content-filter). Select the AI project in your AI Hub that contains any model deployments you made in the previous Challenges. Design a content filter that could hypothetically apply to an internal or external tool in your workplace. Or get creative and come up with a scenario that could use a filter, such as an online school forum. +1. Configure a content filter following these [instructions for Microsoft Foundry](https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/content-filtering#create-a-content-filter). Select the AI project in your AI Hub that contains any model deployments you made in the previous Challenges. Design a content filter that could hypothetically apply to an internal or external tool in your workplace. Or get creative and come up with a scenario that could use a filter, such as an online school forum. 2. In the "Input Filter" step, configure the four content categories. Keep "Prompt shields for jailbreak attacks" and "Prompt shields for indirect attacks" toggled to "Off" (default) for now. @@ -121,7 +121,7 @@ Learn more about PII in the [documentation](https://learn.microsoft.com/en-us/az #### Student Task 5.3: PII Detection, Redaction, and Extraction -1. In the [AI Foundry](https://ai.azure.com/), navigate to your Project and the "AI Services" in the navigation pane. From here, you should find the option to try out "Language + Translator" capabilities. +1. In the [Microsoft Foundry](https://ai.azure.com/), navigate to your Project and the "AI Services" in the navigation pane. From here, you should find the option to try out "Language + Translator" capabilities. * How do you enable redacting PII? How does the output hide those entities? 
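If you prefer to see the redaction behavior from code, a small sketch with the `azure-ai-textanalytics` package illustrates it; the endpoint and key variables below are placeholders for your own Language resource:

```python
# Illustrative sketch: PII detection and redaction with the Azure AI Language SDK.
# LANGUAGE_ENDPOINT and LANGUAGE_KEY are placeholder environment variables.
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    endpoint=os.environ["LANGUAGE_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["LANGUAGE_KEY"]),
)

documents = ["Call Jane Doe at 555-555-0100 or email jane.doe@example.com."]
result = client.recognize_pii_entities(documents)[0]

print(result.redacted_text)  # detected entities are masked with asterisks
for entity in result.entities:
    print(entity.text, entity.category, round(entity.confidence_score, 2))
```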
@@ -144,7 +144,7 @@ Any application system that relies on data to provide answers should be mindful Learn what Ungroundedness and Groundedness are, as well as how [Groundedness Detection](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/concepts/groundedness) on Azure works, via the [Microsoft Technical Blog](https://techcommunity.microsoft.com/t5/ai-azure-ai-services-blog/detect-and-mitigate-ungrounded-model-outputs/ba-p/4099261). #### Student Task 5.4.1 -In the [AI Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. +In the [Microsoft Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. Try out the following features in Content Safety using provided sample text and data, or come up with your own examples. Analyze the results. Try viewing the code! @@ -157,7 +157,7 @@ Protecting your LLM application from bad actors is equally important as moderati Attacks can occur through user prompts as well as documents that contain hidden embedded instructions to gain unauthorized control over the LLM session. Read more about [subtypes of user prompt attacks](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/concepts/jailbreak-detection). These are considered "input attacks." #### Student Task 5.5.1 -In the [AI Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. +In the [Microsoft Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. Try out the following features in Content Safety using provided sample text and data, or come up with your own examples. Analyze the results. What is the difference between a direct and indirect attack? @@ -165,13 +165,13 @@ Try out the following features in Content Safety using provided sample text and #### Student Task 5.5.2: Update and test your custom content filter using Prompt Shields -1. Revisit the custom content filter you created earlier in the Azure AI Foundry. +1. Revisit the custom content filter you created earlier in Microsoft Foundry. 2. In the **Input filter** tab, toggle the setting for **Prompt shields for jailbreak attacks** and **Prompt shields for indirect attacks** to either **Annotate only** or **Annotate and block**. Keep in mind, for future implementations, that **Annotate and block** can reduce token usage compared to **Annotate only**, which will still return the completed output. 3. Apply the updated filter to one of your deployed models. -4. In the "Chat" tab of the Playground in the Azure AI Foundry, experiment with your updated content filter. +4. In the "Chat" tab of the Playground in Microsoft Foundry, experiment with your updated content filter. Here are some example jailbreak attacks to prompt your protected model. Copy and paste the following prompts to evaluate the LLM's filtered responses: @@ -188,17 +188,17 @@ Here are some example jailbreak attacks to prompt your protected model. Copy and ### 5.6 Protected Material Detection Preview #### Student Task 5.6.1 -In the [AI Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. 
+In [Microsoft Foundry](https://ai.azure.com/), navigate to your Project and the **AI Services** pane. From here, you should find the option to try out Content Safety capabilities. Try out the following features in Content Safety using provided sample text and data, or come up with your own examples. Analyze the results. * "Protected material detection for text" or "Protected material detection for code" #### Student Task #5.6.2: Update and test your custom content filter using Protected Material Detection -1. Revisit the custom content filter you created earlier in the Azure AI Foundry. +1. Revisit the custom content filter you created earlier in Microsoft Foundry. 2. In the "Output filter" tab, toggle the setting for "Protected material for text" to either "Annotate only" or "Annotate and block." Keep in mind, for future implementations, that "Annotate and block" can reduce token usage compared to "Annotate only," which will still return the completed output. 3. Apply the updated filter to one of your deployed models. -4. In the "Chat" tab of the Playground in the Azure AI Foundry, experiment with your updated content filter. +4. In the "Chat" tab of the Playground in Microsoft Foundry, experiment with your updated content filter. Here is a sample prompt for testing purposes: `to everyone, the best things in life are free. the stars belong to everyone, they gleam there for you and me. the flowers in spring, the robins that sing, the sunbeams that shine, they\'re yours, they\'re mine. and love can come to everyone, the best things in life are` @@ -227,9 +227,9 @@ To complete this challenge successfully, you should be able to: - Identify tools available to identify and mitigate harms in LLMs ## Conclusion -In this Challenge, you explored principles and practical tools to implement Responsible AI with an LLM system through the Azure AI Foundry. Understanding how to apply Responsible AI principles is essential for maintaining user trust and integrity within AI-driven platforms. +In this Challenge, you explored principles and practical tools to implement Responsible AI with an LLM system through Microsoft Foundry. Understanding how to apply Responsible AI principles is essential for maintaining user trust and integrity within AI-driven platforms. -Throughout this Challenge, you have explored the importance of detecting and managing harmful content, as well as the necessity of personally identifiable information (PII) detection and redaction in generative AI applications. By engaging with Azure AI tools in the AI Foundry, you have gained practical experience in moderating content, filtering out undesirable material, and protecting sensitive data. +Throughout this Challenge, you have explored the importance of detecting and managing harmful content, as well as the necessity of personally identifiable information (PII) detection and redaction in generative AI applications. By engaging with Azure AI tools in Microsoft Foundry, you have gained practical experience in moderating content, filtering out undesirable material, and protecting sensitive data. As you move forward, remember the significance of grounding responses in accurate data to prevent the propagation of misinformation and safeguard against input attacks. There are many ways to mitigate harms, and securing your application responsibly is an ongoing endeavor. We encourage you to continuously strive to enhance the safety and reliability of your AI systems, keeping in mind the evolving landscape of digital content safety. 
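For teams that want to script these checks rather than click through the portal, a minimal sketch with the `azure-ai-contentsafety` package shows the same harm-category analysis; the endpoint and key variables are placeholders for your own resource:

```python
# Illustrative sketch: harm-category analysis with the Azure AI Content Safety SDK.
# CONTENT_SAFETY_ENDPOINT and CONTENT_SAFETY_KEY are placeholder environment variables.
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.contentsafety import ContentSafetyClient
from azure.ai.contentsafety.models import AnalyzeTextOptions

client = ContentSafetyClient(
    endpoint=os.environ["CONTENT_SAFETY_ENDPOINT"],
    credential=AzureKeyCredential(os.environ["CONTENT_SAFETY_KEY"]),
)

response = client.analyze_text(AnalyzeTextOptions(text="Sample text to moderate."))
for item in response.categories_analysis:
    # Severity 0 means safe; higher values indicate more severe content in that category.
    print(item.category, item.severity)
```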
@@ -249,5 +249,5 @@ As you move forward, remember the significance of grounding responses in accurat - [New Updates in AI Content Safety](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/whats-new) - [eBook](https://aka.ms/contentsafetyebook) - [Infuse Responsible AI tools and practices in your LLMOps Microsoft Azure Blog](https://azure.microsoft.com/en-us/blog/infuse-responsible-ai-tools-and-practices-in-your-llmops/) -- [Introducing AI Red Teaming Agent: Accelerate your AI safety and security journey with Azure AI Foundry](https://devblogs.microsoft.com/foundry/ai-red-teaming-agent-preview/) +- [Introducing AI Red Teaming Agent: Accelerate your AI safety and security journey with Microsoft Foundry](https://devblogs.microsoft.com/foundry/ai-red-teaming-agent-preview/) diff --git a/066-OpenAIFundamentals/Student/Challenge-06.md b/066-OpenAIFundamentals/Student/Challenge-06.md index c528b3b072..e2fe3a67ac 100644 --- a/066-OpenAIFundamentals/Student/Challenge-06.md +++ b/066-OpenAIFundamentals/Student/Challenge-06.md @@ -43,7 +43,7 @@ In this Challenge, you explored creating an agent through the Microsoft Foundry ## Learning Resources - [Overview of Microsoft Agents](https://learn.microsoft.com/en-us/azure/ai-services/agents/?view=azure-python-preview) -- These steps are listed here along with many other prompts: [Agents in AI Foundry](https://techcommunity.microsoft.com/blog/educatordeveloperblog/step-by-step-tutorial-building-an-ai-agent-using-azure-ai-foundry/4386122) . +- These steps are listed here along with many other prompts: [Agents in Microsoft Foundry](https://techcommunity.microsoft.com/blog/educatordeveloperblog/step-by-step-tutorial-building-an-ai-agent-using-azure-ai-foundry/4386122) . From 83bbdad3096035fc174b07c813bc15c48a662c20 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 11:57:23 -0600 Subject: [PATCH 22/58] temporary change to WTH OpenAI Fundamentals Codespace Repo URL so it can be used prior to getting the PR approved in the main repo --- 066-OpenAIFundamentals/Student/Challenge-00.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index 6ae97f12dc..768737d16b 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -47,7 +47,8 @@ You can see your balance of available codespace hours on the [GitHub billing pag The GitHub Codespace for this hack will host the Jupyter Notebook files, configuration files, and other data files needed for this event. Here are the steps you will need to follow: - A GitHub repo containing the student resources and Codespace for this hack is hosted here: - - [WTH OpenAI Fundamentals Codespace Repo](https://aka.ms/wth/openaifundamentals/codespace) + - [WTH OpenAI Fundamentals Codespace Repo](https://github.com/perktime/wth-openaifundamentals-codespace) + - Please open this link and sign in with your personal Github account. **NOTE:** Make sure you do not sign in with your enterprise managed Github account. From ee4fb636ae2553a4695b4eb2eea5be0abe7467fd Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 12:20:00 -0600 Subject: [PATCH 23/58] Update notebook for Challenge 04-A: Enhance clarity and structure in the introduction and use case sections. 
--- 066-OpenAIFundamentals/Student/Resources/infra/main.bicep | 2 +- .../Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep index cd5d867379..995108e05a 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep @@ -288,7 +288,7 @@ module project 'modules/foundryProject.bicep' = { // dependent resources aiServicesName: aiServices.outputs.name applicationInsightsId: applicationInsights.outputs.id - containerRegistryId: acrEnabled ? containerRegistry.outputs.id : '' + containerRegistryId: acrEnabled ? containerRegistry!.outputs.id : '' keyVaultId: keyVault.outputs.id storageAccountId: storageAccount.outputs.id diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb index 67fc0b57d3..95db7eb709 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb @@ -324,7 +324,7 @@ } }, "source": [ - "## Section 1: Leveraging Cognitive Search to extract relevant article based on the query " + "## Section 1: Leveraging Azure AI Search to extract relevant article based on the query " ] }, { From 1f5b97dd0b1f9ef4a84e08430f2da52693ec4177 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 12:23:30 -0600 Subject: [PATCH 24/58] Enhance error handling in Bicep template deployment; fetch and display deployment error details on failure. --- 066-OpenAIFundamentals/Student/Resources/infra/deploy.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index 9b8ee868eb..b13ddc941d 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -96,7 +96,9 @@ deploymentOutputs=$(az deployment group create \ echo "[$template] Bicep template deployment succeeded" else echo "Failed to deploy [$template] Bicep template" - exit + echo "Fetching deployment error details..." + az deployment group show --resource-group "$resourceGroupName" --name "$deploymentName" --query 'properties.error' -o json + exit 1 fi json=$deploymentOutputs From fe5c5df90e5fcbce05ab55f0b54c347db254f762 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 12:26:43 -0600 Subject: [PATCH 25/58] Remove unused parameters and sensitive output keys from Bicep templates for AI Services and Storage Account. 
--- .../Student/Resources/infra/modules/aiServices.bicep | 7 +------ .../Student/Resources/infra/modules/storageAccount.bicep | 5 ----- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep index af5954f338..4f44b5017f 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep @@ -21,9 +21,6 @@ param tags object @description('Specifies an optional subdomain name used for token-based authentication.') param customSubDomainName string = '' -@description('Specifies whether disable the local authentication via API key.') -param disableLocalAuth bool = false - @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -73,7 +70,6 @@ resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = tags: tags properties: { customSubDomainName: customSubDomainName - disableLocalAuth: disableLocalAuth publicNetworkAccess: publicNetworkAccess } } @@ -174,8 +170,7 @@ output name string = aiServices.name output endpoint string = aiServices.properties.endpoint output openAiEndpoint string = aiServices.properties.endpoints['OpenAI Language Model Instance API'] output principalId string = aiServices.identity.principalId -#disable-next-line outputs-should-not-contain-secrets -//output key1 string = aiServices.listKeys().key1 + // Output the deployed model names output deployedModels array = [for deployment in deployments: { name: deployment.model.name diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep index 85c59f88b2..ed31e47894 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep @@ -248,8 +248,3 @@ resource blobServiceDiagnosticSettings 'Microsoft.Insights/diagnosticSettings@20 // Outputs output id string = storageAccount.id output name string = storageAccount.name -#disable-next-line outputs-should-not-contain-secrets -output primaryKey string = storageAccount.listKeys().keys[0].value - -#disable-next-line outputs-should-not-contain-secrets -output connectionString string = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${storageAccount.listKeys().keys[0].value};EndpointSuffix=core.windows.net' From d3dd937a0dd18ba3836f378e988c87948f1cb07a Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 13:08:30 -0600 Subject: [PATCH 26/58] Update link to WTH OpenAI Fundamentals Codespace Repo for improved access to codespaces --- 066-OpenAIFundamentals/Student/Challenge-00.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index 768737d16b..a96ec7a9fa 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -47,7 +47,7 @@ You can see your balance of available codespace hours on the [GitHub billing pag The GitHub Codespace for this hack will host the Jupyter Notebook files, configuration files, and other data files needed for this event. 
Here are the steps you will need to follow: - A GitHub repo containing the student resources and Codespace for this hack is hosted here: - - [WTH OpenAI Fundamentals Codespace Repo](https://github.com/perktime/wth-openaifundamentals-codespace) + - [WTH OpenAI Fundamentals Codespace Repo](https://github.com/perktime/wth-openaifundamentals-codespace/codespaces) - Please open this link and sign in with your personal Github account. From f2586fcaeee83e5f088baf4f89a6115835547947 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 13:10:43 -0600 Subject: [PATCH 27/58] Update link to WTH OpenAI Fundamentals Codespace Repo for direct access to codespaces --- 066-OpenAIFundamentals/Student/Challenge-00.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index a96ec7a9fa..9d0f0ea38f 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -47,7 +47,7 @@ You can see your balance of available codespace hours on the [GitHub billing pag The GitHub Codespace for this hack will host the Jupyter Notebook files, configuration files, and other data files needed for this event. Here are the steps you will need to follow: - A GitHub repo containing the student resources and Codespace for this hack is hosted here: - - [WTH OpenAI Fundamentals Codespace Repo](https://github.com/perktime/wth-openaifundamentals-codespace/codespaces) + - [WTH OpenAI Fundamentals Codespace Repo](https://codespaces.new/perktime/wth-openaifundamentals-codespace) - Please open this link and sign in with your personal Github account. From af14747739e6a6f8d39fdebe2055424cd144d5db Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 13 Jan 2026 13:14:42 -0600 Subject: [PATCH 28/58] Update link to WTH OpenAI Fundamentals Codespace Repo for improved access with devcontainer configuration --- 066-OpenAIFundamentals/Student/Challenge-00.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index 9d0f0ea38f..0bbc14c64c 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -47,7 +47,7 @@ You can see your balance of available codespace hours on the [GitHub billing pag The GitHub Codespace for this hack will host the Jupyter Notebook files, configuration files, and other data files needed for this event. Here are the steps you will need to follow: - A GitHub repo containing the student resources and Codespace for this hack is hosted here: - - [WTH OpenAI Fundamentals Codespace Repo](https://codespaces.new/perktime/wth-openaifundamentals-codespace) + - [WTH OpenAI Fundamentals Codespace Repo](https://codespaces.new/perktime/wth-openaifundamentals-codespace?devcontainer_path=.devcontainer/devcontainer.json) - Please open this link and sign in with your personal Github account. From 9b57f91be11d41ca8bfa275b52b9d317d585f857 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 15:17:43 -0600 Subject: [PATCH 29/58] Remove outdated access instructions for Content Safety Removed temporary steps for accessing the Content Safety service as they are no longer necessary because they are done in the deployment script. 
--- .../Student/Challenge-05.md | 24 ------------------- 1 file changed, 24 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-05.md b/066-OpenAIFundamentals/Student/Challenge-05.md index c03758e662..d99ef1169e 100644 --- a/066-OpenAIFundamentals/Student/Challenge-05.md +++ b/066-OpenAIFundamentals/Student/Challenge-05.md @@ -31,30 +31,6 @@ For each section of this Challenge, you will work in [Microsoft Foundry](https:/ **NOTE:** Previously, each of the Content Safety services were hosted in their own portals. As of July 2024, they have been integrated into Microsoft Foundry. While searching for documentation of these services, you may find references to their original stand-alone portals. You should access these services via Microsoft Foundry for this hack. -### Access Content Safety Service - TEMPORARY STEP - -Azure AI Services are constantly changing. As of July 2024, Microsoft Foundry does not automatically grant your user access to the Content Safety service. You will need to perform this task manually. We are adding these detailed steps here to complete this challenge today. We anticipate these steps will not be required in the near future when Microsoft Foundry should handle this automatically. - -Follow these steps to grant your user account access to the Content Safety service: - -- In the [Azure Portal](https://portal.azure.com), navigate to the resource group where your AI resources are deployed -- Navigate to the **Azure AI services** resource -- Click **Access control (IAM)** from the left menu -- Click the **+ ADD** button, then select **Add role assignment** -- On the **Add role assignment** screen, type "Cognitive Services User" in the search box -- In the list of roles, click/highlight the **Cognitive Services User** row -- Click the **NEXT** button at the bottom of the screen -- Click **+ Select Members** -- In the "Select Members" pane that appears, select YOUR user account from the list of users. (This should be **`ODL_User_XXXXXX@azureholXXXX.onmicrosoft.com`**) -- Click the **SELECT** button -- Click the **Review & Assign** button to complete the role assignment - -After the role assignment completes in the Azure Portal, you will need to wait 1-3 minutes and then follow one additional step: - -- Log out of [Microsoft Foundry](https://ai.azure.com), and then log back in. This will ensure your login token is refreshed with the new permissions for Content Safety. - -You should now be prepared to complete the rest of this challenge! - ### 5.1 Harmful Content Detection Your Azure AI Services resource includes Content Safety. You may refer to this [table for region availability](https://learn.microsoft.com/en-us/azure/ai-services/content-safety/overview#region-availability) to confirm your region has the pertinent features for the tasks in this Challenge. From 696299731b7ec8d7be3566dc0b581747cf9608af Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 15:18:24 -0600 Subject: [PATCH 30/58] Update Challenge-06.md by removing setup steps Removed setup instructions for deploying the model. gpt-4o-mini is already deployed through the deployment script. 
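Since gpt-4o-mini is already deployed by the deployment script, the basic agent in Challenge 06 can also be created from code. The sketch below is illustrative only: the project endpoint variable, agent name, and instructions are assumptions, and the `azure-ai-projects` surface is still evolving, so method names may differ slightly between package versions:

```python
# Illustrative sketch: create a basic agent against the existing gpt-4o-mini deployment.
# AZURE_AI_PROJECT_ENDPOINT, the agent name, and the instructions are assumptions.
import os
from azure.identity import DefaultAzureCredential
from azure.ai.projects import AIProjectClient

project = AIProjectClient(
    endpoint=os.environ["AZURE_AI_PROJECT_ENDPOINT"],
    credential=DefaultAzureCredential(),
)

agent = project.agents.create_agent(
    model="gpt-4o-mini",  # already deployed by the deployment script
    name="wth-basic-agent",
    instructions="You are a helpful assistant for hack participants.",
)
print("Created agent:", agent.id)
```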
--- 066-OpenAIFundamentals/Student/Challenge-06.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-06.md b/066-OpenAIFundamentals/Student/Challenge-06.md index e2fe3a67ac..9388ab2f57 100644 --- a/066-OpenAIFundamentals/Student/Challenge-06.md +++ b/066-OpenAIFundamentals/Student/Challenge-06.md @@ -10,13 +10,6 @@ Integrating agents into an application after implementing Retrieval-Augmented Ge In this challenge, you will create a basic agent. -### Setup - -1. Log into your [Microsoft Foundry portal](ai.azure.com) -2. In your project's left-hand pane, navigate to `My assets -> Models and endpoints`. -3. On the Model deployments tab, click the `+ Deploy model` button and select `Deploy base model` from the drop down. -4. Search for the gpt-4o-mini model, select it, and confirm the deployment. - ### Creating the Agent 1. In the left-hand pane, under `Build & Customize`, select `Agents` 2. Select your Azure OpenAI resource and hit `Let's go`. From dbf2efb912eca084437d054696b3d25a16b10db7 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 15:24:34 -0600 Subject: [PATCH 31/58] Rename challenge title from 'Trustworthy' to 'Responsible' --- 066-OpenAIFundamentals/Student/Challenge-05.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-05.md b/066-OpenAIFundamentals/Student/Challenge-05.md index d99ef1169e..0f87d6c39f 100644 --- a/066-OpenAIFundamentals/Student/Challenge-05.md +++ b/066-OpenAIFundamentals/Student/Challenge-05.md @@ -1,4 +1,4 @@ -# Challenge 05 - Trustworthy AI +# Challenge 05 - Responsible AI [< Previous Challenge](./Challenge-04.md) - **[Home](../README.md)** - [Next Challenge >](./Challenge-06.md) From 30a93a05eab4ba677398d4708ac10e6705ecc7cc Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 16:00:22 -0600 Subject: [PATCH 32/58] Delete 000-HowToHack/WTH-Challenge-Enhanced-Template.md --- .../WTH-Challenge-Enhanced-Template.md | 115 ------------------ 1 file changed, 115 deletions(-) delete mode 100644 000-HowToHack/WTH-Challenge-Enhanced-Template.md diff --git a/000-HowToHack/WTH-Challenge-Enhanced-Template.md b/000-HowToHack/WTH-Challenge-Enhanced-Template.md deleted file mode 100644 index 5ce948ad69..0000000000 --- a/000-HowToHack/WTH-Challenge-Enhanced-Template.md +++ /dev/null @@ -1,115 +0,0 @@ -# πŸš€ Challenge [NUMBER] - [TITLE] - -**[🏠 Home](../README.md)** - [< Previous Challenge](./Challenge-[PREV].md) - [Next Challenge > πŸ“‹](./Challenge-[NEXT].md) - -
-
Challenge [NUMBER] - [PHASE]
-
- -![Badge1](https://img.shields.io/badge/Technology-Color?style=for-the-badge&logo=logoname&logoColor=white) -![Badge2](https://img.shields.io/badge/Technology-Color?style=for-the-badge&logo=logoname&logoColor=white) - ---- - -## πŸ‘‹ Introduction - -
-🎯 Challenge Objective:
-[Describe what the user will accomplish in this challenge] -
- ---- - -## πŸ“‹ Description - -
- -### 🎯 Challenge Overview - -[Challenge description and context] - -```mermaid -graph TD - A[πŸš€ Start] --> B[Step 1] - B --> C[Step 2] - C --> D[Step 3] - D --> E[βœ… Success!] - - style A fill:#e1f5fe - style E fill:#e8f5e8 -``` - -
- -### 🎯 Tasks to Complete - -- [πŸ“‹ Task 1](#task-1) -- [πŸ“‹ Task 2](#task-2) -- [πŸ“‹ Task 3](#task-3) - ---- - -### πŸ“‹ Task 1 - -1 **Task Title** - -[Task description] - -
-πŸ’‘ Pro Tip: [Helpful tip] -
- -
-πŸ”§ Advanced Configuration (Optional) -
- -[Advanced configuration details] - -
-
- ---- - -## βœ… Success Criteria - -
-
🎯 Challenge [NUMBER] - Validation Phase
-
- -
-🎯 Challenge Complete!
-To complete this challenge successfully, you should be able to accomplish the following: -
- -### πŸ”§ Validation Checklist - -
- -| 1 | **Requirement** | **Status** | **Description** | -|:---:|:---|:---:|:---| -| βœ… | **Requirement 1** | ⏳ Pending | Description | -| βœ… | **Requirement 2** | ⏳ Pending | Description | -| βœ… | **Requirement 3** | ⏳ Pending | Description | - -
- ---- - -## πŸ“š Learning Resources - -
-πŸ’‘ Expand Your Knowledge
-Here are essential resources to deepen your understanding: -
- -### πŸ”— Documentation -- πŸ“– [Resource 1](https://link) - Description -- πŸ“– [Resource 2](https://link) - Description - -### πŸŽ₯ Videos & Tutorials -- 🎬 [Video 1](https://link) - Description -- 🎬 [Video 2](https://link) - Description - ---- - -πŸŽ‰ **Ready for the next challenge?** [Continue to Challenge [NEXT] β†’](./Challenge-[NEXT].md) \ No newline at end of file From d9f5cd8a345472a79de12dfa0b5168e732194529 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 16:01:34 -0600 Subject: [PATCH 33/58] Delete .devcontainer/devcontainer.json --- .devcontainer/devcontainer.json | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 .devcontainer/devcontainer.json diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json deleted file mode 100644 index 6f94b1d4d8..0000000000 --- a/.devcontainer/devcontainer.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "name": "WhatTheHack", - "image": "mcr.microsoft.com/devcontainers/universal:2", - "features": { - "ghcr.io/devcontainers/features/azure-cli:1": {}, - "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/github-cli:1": {}, - "ghcr.io/devcontainers/features/python:1": {} - }, - "customizations": { - "vscode": { - "extensions": [ - "ms-vscode.vscode-json", - "ms-python.python" - ] - } - }, - "postCreateCommand": "echo 'Dev container ready!'", - "remoteUser": "codespace" -} \ No newline at end of file From 406ebd3ba05ae12bd78d118e27a603dc1b8696fe Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 16:06:33 -0600 Subject: [PATCH 34/58] Fix formatting issue in known bugs section --- 066-OpenAIFundamentals/Coach/Solution-04.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Coach/Solution-04.md b/066-OpenAIFundamentals/Coach/Solution-04.md index fa063ffcac..50d6240760 100644 --- a/066-OpenAIFundamentals/Coach/Solution-04.md +++ b/066-OpenAIFundamentals/Coach/Solution-04.md @@ -16,4 +16,4 @@ Known Bugs - **CH4 EXTRACTING FILES: operation returned an invalid status `inefficient storage`** \ **Solution:** If some of the files are extracted, students can continue working with those and ignore the error message. - Rate Limits \ - **Solution:** Have students go into their model deployments in the Microsoft Foundryand adjust the TPM allocation for that model to a higher value using the toggle. + **Solution:** Have students go into their model deployments in the Microsoft Foundry and adjust the TPM allocation for that model to a higher value using the toggle. From cd192b0d999e60ad6975502a54939762317dc4ef Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Tue, 20 Jan 2026 16:08:21 -0600 Subject: [PATCH 35/58] Update project verification instructions Removed reference to 'Hub' in project verification steps. --- 066-OpenAIFundamentals/Student/Challenge-00.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index 0bbc14c64c..bcf27a375f 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -248,7 +248,7 @@ If using GitHub Codespaces: - `.env` <= Renamed from `.env.sample` - `.gitignore` - `requirements.txt` -- Verify that you have created the Project and Hub in your Microsoft Foundry. +- Verify that you have created the Project in Microsoft Foundry. 
- Verify that you have the following resources: Azure OpenAI, deployed the necessary models, AI Search, Document Intelligence, Azure Blob. If working on a local workstation: @@ -261,7 +261,7 @@ If working on a local workstation: - `.env` <= Renamed from `.env.sample` - `.gitignore` - `requirements.txt` -- Verify that you have created the Project and Hub in your Microsoft Foundry. +- Verify that you have created the Project in your Microsoft Foundry. - Verify that you have the following resources: Azure OpenAI, deployed the necessary models, AI Search, Document Intelligence, Azure Blob. ## Learning Resources From 4a842cbfe58facbc0473eb544712701f5ae6ac83 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 08:56:12 -0600 Subject: [PATCH 36/58] Reverted changes back to match what's in WTH master (will be using codespaces changes instead) --- .../Student/Resources/infra/deploy.sh | 18 +- .../Student/Resources/infra/functions.sh | 2 +- .../Student/Resources/infra/main.bicep | 92 +++++++- .../Student/Resources/infra/main.bicepparam | 4 - .../Resources/infra/modules/aiServices.bicep | 9 +- .../infra/modules/applicationInsights.bicep | 2 +- .../Resources/infra/modules/document.bicep | 9 +- .../Student/Resources/infra/modules/hub.bicep | 202 ++++++++++++++++++ .../Resources/infra/modules/project.bicep | 4 +- .../Resources/infra/modules/search.bicep | 7 +- .../infra/modules/storageAccount.bicep | 5 + .../notebooks/CH-01-PromptEngineering.ipynb | 98 +++------ .../notebooks/CH-03-A-Grounding.ipynb | 48 ++--- .../notebooks/CH-03-B-Chunking.ipynb | 55 ++--- .../notebooks/CH-03-C-Embeddings.ipynb | 66 ++---- .../CH-04-A-RAG_for_structured_data.ipynb | 132 ++---------- .../CH-04-B-RAG_for_unstructured_data.ipynb | 58 ++--- .../notebooks/CH-5.7-RedTeaming.ipynb | 6 +- 18 files changed, 446 insertions(+), 371 deletions(-) create mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index b13ddc941d..6e851de2c5 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -7,7 +7,7 @@ source ./functions.sh declare -A variables=( [template]="main.bicep" [parameters]="main.bicepparam" - [resourceGroupName]="rg-microsoft-foundry-secure" + [resourceGroupName]="rg-ai-foundry-secure" [location]="eastus" [validateTemplate]=0 [useWhatIf]=0 @@ -90,15 +90,14 @@ deploymentOutputs=$(az deployment group create \ --parameters $parameters \ --parameters location=$location \ --parameters userObjectId=$userObjectId \ - --query 'properties.outputs' -o json 2>/dev/null | grep -A 9999 '^{') + --query 'properties.outputs' -o json) + #echo $deploymentOutputs if [[ $? == 0 ]]; then echo "[$template] Bicep template deployment succeeded" else echo "Failed to deploy [$template] Bicep template" - echo "Fetching deployment error details..." 
- az deployment group show --resource-group "$resourceGroupName" --name "$deploymentName" --query 'properties.error' -o json - exit 1 + exit fi json=$deploymentOutputs @@ -114,7 +113,7 @@ environment_sample_file="../.env.sample" # check if the .env file already exists and back it up if it does if [[ -f "$environment_file" ]]; then - random_chars=$(LC_ALL=C tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) + random_chars=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) mv "$environment_file" "${environment_file}-${random_chars}.bak" echo -e "\e[33mWarning: Existing .env file found. Backed up to ${environment_file}-${random_chars}.bak\e[0m" else @@ -128,10 +127,15 @@ source $environment_sample_file # Extract values from JSON and write to .env file with double quotes around values echo "Populating .env file..." +echo "OPENAI_API_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesKey')\"" >> $environment_file echo "OPENAI_API_BASE=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesOpenAiEndpoint')\"" >> $environment_file +echo "AZURE_AI_SEARCH_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchKey')\"" >> $environment_file echo "AZURE_AI_SEARCH_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchEndpoint')\"" >> $environment_file echo "DOCUMENT_INTELLIGENCE_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentEndpoint')\"" >> $environment_file -echo "AZURE_AI_PROJECT_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesProjectEndpoint')\"" >> $environment_file +echo "DOCUMENT_INTELLIGENCE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentKey')\"" >> $environment_file +echo "AZURE_BLOB_STORAGE_ACCOUNT_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountName')\"" >> $environment_file +echo "AZURE_BLOB_STORAGE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountKey')\"" >> $environment_file +echo "AZURE_BLOB_STORAGE_CONNECTION_STRING=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountConnectionString')\"" >> $environment_file # Warning: this assumes the first deployed model is the chat model used by the Jupyter notebooks echo "CHAT_MODEL_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.deployedModels[0].name')\"" >> $environment_file diff --git a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh index 3ef25acf12..02ae19a2fe 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh @@ -17,7 +17,7 @@ function authenticate_to_azure { parse_args() { # $1 - The associative array name containing the argument definitions and default values # $2 - The arguments passed to the script - local -n arg_defs=$1 # this won't work by default on the Mac zsh shell, but works in bash. brew install bash and then /opt/homebrew/bin/bash ./deploy.sh to use it. 
+ local -n arg_defs=$1 shift local args=("$@") diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep index 995108e05a..3f60ee1fe1 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep @@ -11,13 +11,49 @@ param location string = resourceGroup().location @description('Specifies the name of the Network Security Perimeter.') param nspName string = '' -@description('Specifies the name for the Microsoft Foundry Project.') +@description('Specifies the name Azure AI Hub workspace.') +param hubName string = '' + +@description('Specifies the friendly name of the Azure AI Hub workspace.') +param hubFriendlyName string = 'Demo AI Hub' + +@description('Specifies the description for the Azure AI Hub workspace displayed in Azure AI Foundry.') +param hubDescription string = 'This is a demo hub for use in Azure AI Foundry.' + +@description('Specifies the Isolation mode for the managed network of the Azure AI Hub workspace.') +@allowed([ + 'AllowInternetOutbound' + 'AllowOnlyApprovedOutbound' + 'Disabled' +]) +param hubIsolationMode string = 'Disabled' + +@description('Specifies the public network access for the Azure AI Hub workspace.') +param hubPublicNetworkAccess string = 'Enabled' + +@description('Specifies the authentication method for the OpenAI Service connection.') +@allowed([ + 'ApiKey' + 'AAD' + 'ManagedIdentity' + 'None' +]) +param connectionAuthType string = 'AAD' + +@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. 
If set to identity, the workspace will create the system datastores with no credentials.') +@allowed([ + 'identity' + 'accessKey' +]) +param systemDatastoresAuthMode string = 'identity' + +@description('Specifies the name for the Azure AI Foundry Hub Project workspace.') param projectName string = '' -@description('Specifies the friendly name for the Microsoft Foundry Project.') -param projectFriendlyName string = 'Microsoft Foundry Project' +@description('Specifies the friendly name for the Azure AI Foundry Hub Project workspace.') +param projectFriendlyName string = 'AI Foundry Hub Project' -@description('Specifies the public network access for the Microsoft Foundry Project.') +@description('Specifies the public network access for the Azure AI Project workspace.') param projectPublicNetworkAccess string = 'Enabled' @description('Specifies the name of the Azure Log Analytics resource.') @@ -54,6 +90,9 @@ param aiServicesIdentity object = { @description('Specifies an optional subdomain name used for token-based authentication.') param aiServicesCustomSubDomainName string = '' +@description('Specifies whether disable the local authentication via API key.') +param aiServicesDisableLocalAuth bool = false + @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -248,7 +287,7 @@ module storageAccount 'modules/storageAccount.bicep' = { networkAclsDefaultAction: storageAccountANetworkAclsDefaultAction supportsHttpsTrafficOnly: storageAccountSupportsHttpsTrafficOnly workspaceId: workspace.outputs.id - + // role assignments userObjectId: userObjectId aiServicesPrincipalId: aiServices.outputs.principalId @@ -267,6 +306,7 @@ module aiServices 'modules/aiServices.bicep' = { customSubDomainName: empty(aiServicesCustomSubDomainName) ? toLower('ai-services-${suffix}') : aiServicesCustomSubDomainName + disableLocalAuth: aiServicesDisableLocalAuth publicNetworkAccess: aiServicesPublicNetworkAccess deployments: openAiDeployments workspaceId: workspace.outputs.id @@ -276,24 +316,47 @@ module aiServices 'modules/aiServices.bicep' = { } } -module project 'modules/foundryProject.bicep' = { - name: 'project' +module hub 'modules/hub.bicep' = { + name: 'hub' params: { // workspace organization - name: empty(projectName) ? toLower('project-${suffix}') : projectName - friendlyName: projectFriendlyName + name: empty(hubName) ? toLower('hub-${suffix}') : hubName + friendlyName: hubFriendlyName + description_: hubDescription location: location tags: tags // dependent resources aiServicesName: aiServices.outputs.name applicationInsightsId: applicationInsights.outputs.id - containerRegistryId: acrEnabled ? containerRegistry!.outputs.id : '' + containerRegistryId: acrEnabled ? containerRegistry.outputs.id : '' keyVaultId: keyVault.outputs.id storageAccountId: storageAccount.outputs.id + connectionAuthType: connectionAuthType + systemDatastoresAuthMode: systemDatastoresAuthMode + + // workspace configuration + publicNetworkAccess: hubPublicNetworkAccess + isolationMode: hubIsolationMode + workspaceId: workspace.outputs.id + + // role assignments + userObjectId: userObjectId + } +} + +module project 'modules/project.bicep' = { + name: 'project' + params: { + // workspace organization + name: empty(projectName) ? 
toLower('project-${suffix}') : projectName + friendlyName: projectFriendlyName + location: location + tags: tags // workspace configuration publicNetworkAccess: projectPublicNetworkAccess + hubId: hub.outputs.id workspaceId: workspace.outputs.id // role assignments @@ -325,7 +388,6 @@ module document 'modules/document.bicep' = { params: { name: 'document-${suffix}' location: location - customSubDomainName: toLower('document-intelligence-${suffix}') } } @@ -336,8 +398,16 @@ output deploymentInfo object = { aiServicesName: aiServices.outputs.name aiServicesEndpoint: aiServices.outputs.endpoint aiServicesOpenAiEndpoint: aiServices.outputs.openAiEndpoint + aiServicesKey: aiServices.outputs.key1 + hubName: hub.outputs.name projectName: project.outputs.name + documentKey: document.outputs.key1 documentEndpoint: document.outputs.endpoint + searchKey: search.outputs.primaryKey searchEndpoint: search.outputs.endpoint + storageAccountName: storageAccount.outputs.name + storageAccountId: storageAccount.outputs.id + storageAccountConnectionString: storageAccount.outputs.connectionString + storageAccountKey: storageAccount.outputs.primaryKey deployedModels: aiServices.outputs.deployedModels } diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam index 1e173d12af..c644af659f 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam @@ -5,10 +5,6 @@ param userObjectId = '' param keyVaultEnablePurgeProtection = false param acrEnabled = false param nspEnabled = false -//param aiServicesDisableLocalAuth = false -param storageAccountAllowSharedKeyAccess = true -//param documentDisableLocalAuth = false - //The first model in the list will be the default model for the Jupyter notebooks param openAiDeployments = [ { diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep index 4f44b5017f..31bd1c25a1 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep @@ -21,6 +21,9 @@ param tags object @description('Specifies an optional subdomain name used for token-based authentication.') param customSubDomainName string = '' +@description('Specifies whether disable the local authentication via API key.') +param disableLocalAuth bool = false + @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -70,6 +73,7 @@ resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = tags: tags properties: { customSubDomainName: customSubDomainName + disableLocalAuth: disableLocalAuth publicNetworkAccess: publicNetworkAccess } } @@ -170,8 +174,9 @@ output name string = aiServices.name output endpoint string = aiServices.properties.endpoint output openAiEndpoint string = aiServices.properties.endpoints['OpenAI Language Model Instance API'] output principalId string = aiServices.identity.principalId - +#disable-next-line outputs-should-not-contain-secrets +output key1 string = aiServices.listKeys().key1 // Output the deployed model names output deployedModels array = [for deployment in deployments: { name: deployment.model.name -}] +}] \ No newline at end of file diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep 
b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep index a30fb1ecf2..69cb91a519 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep @@ -20,7 +20,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { properties: { Application_Type: 'web' DisableIpMasking: false - //DisableLocalAuth: false + DisableLocalAuth: false Flow_Type: 'Bluefield' ForceCustomerStorageForProfiler: false ImmediatePurgeDataOn30Days: true diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep index ce2f51a628..5b07c6624c 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep @@ -4,9 +4,6 @@ param name string @description('Location where the Azure Document Intelligence will be created.') param location string -@description('Custom subdomain name for the Azure Document Intelligence.') -param customSubDomainName string - resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: name location: location @@ -14,10 +11,10 @@ resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: 'S0' } kind: 'FormRecognizer' - properties: { - customSubDomainName: customSubDomainName - + properties: { } } +#disable-next-line outputs-should-not-contain-secrets +output key1 string = account.listKeys().key1 output endpoint string = account.properties.endpoint diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep new file mode 100644 index 0000000000..c0cfe4165c --- /dev/null +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep @@ -0,0 +1,202 @@ +// Parameters +@description('Specifies the name') +param name string + +@description('Specifies the location.') +param location string + +@description('Specifies the resource tags.') +param tags object + +@description('The SKU name to use for the AI Foundry Hub Resource') +param skuName string = 'Basic' + +@description('The SKU tier to use for the AI Foundry Hub Resource') +@allowed(['Basic', 'Free', 'Premium', 'Standard']) +param skuTier string = 'Basic' + +@description('Specifies the display name') +param friendlyName string = name + +@description('Specifies the description') +param description_ string + +@description('Specifies the Isolation mode for the managed network of a machine learning workspace.') +@allowed([ + 'AllowInternetOutbound' + 'AllowOnlyApprovedOutbound' + 'Disabled' +]) +param isolationMode string = 'Disabled' + +@description('Specifies the public network access for the machine learning workspace.') +@allowed([ + 'Disabled' + 'Enabled' +]) +param publicNetworkAccess string = 'Enabled' + +@description('Specifies the resource ID of the application insights resource for storing diagnostics logs') +param applicationInsightsId string + +@description('Specifies the resource ID of the container registry resource for storing docker images') +param containerRegistryId string + +@description('Specifies the resource ID of the key vault resource for storing connection strings') +param keyVaultId string + +@description('Specifies the resource ID of the storage account resource for storing experimentation outputs') +param storageAccountId string + 
+@description('Specifies thename of the Azure AI Services resource') +param aiServicesName string + +@description('Specifies the authentication method for the OpenAI Service connection.') +@allowed([ + 'ApiKey' + 'AAD' + 'ManagedIdentity' + 'None' +]) +param connectionAuthType string = 'AAD' + +@description('Specifies the name for the Azure OpenAI Service connection.') +param aiServicesConnectionName string = '' + +@description('Specifies the resource id of the Log Analytics workspace.') +param workspaceId string + +@description('Specifies the object id of a Miccrosoft Entra ID user. In general, this the object id of the system administrator who deploys the Azure resources.') +param userObjectId string = '' + +@description('Optional. The name of logs that will be streamed.') +@allowed([ + 'ComputeInstanceEvent' +]) +param logsToEnable array = [ + 'ComputeInstanceEvent' +] + +@description('Optional. The name of metrics that will be streamed.') +@allowed([ + 'AllMetrics' +]) +param metricsToEnable array = [ + 'AllMetrics' +] + +@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. If set to identity, the workspace will create the system datastores with no credentials.') +@allowed([ + 'identity' + 'accessKey' +]) +param systemDatastoresAuthMode string = 'identity' + +// Variables +var diagnosticSettingsName = 'diagnosticSettings' +var logs = [ + for log in logsToEnable: { + category: log + enabled: true + retentionPolicy: { + enabled: true + days: 0 + } + } +] + +var metrics = [ + for metric in metricsToEnable: { + category: metric + timeGrain: null + enabled: true + retentionPolicy: { + enabled: true + days: 0 + } + } +] + +// Resources +resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' existing = { + name: aiServicesName +} + +resource hub 'Microsoft.MachineLearningServices/workspaces@2024-04-01-preview' = { + name: name + location: location + tags: tags + sku: { + name: skuName + tier: skuTier + } + kind: 'Hub' + identity: { + type: 'SystemAssigned' + } + properties: { + // organization + friendlyName: friendlyName + description: description_ + managedNetwork: { + isolationMode: isolationMode + } + publicNetworkAccess: publicNetworkAccess + + // dependent resources + keyVault: keyVaultId + storageAccount: storageAccountId + applicationInsights: applicationInsightsId + containerRegistry: containerRegistryId == '' ? null : containerRegistryId + systemDatastoresAuthMode: systemDatastoresAuthMode + } + + resource aiServicesConnection 'connections@2024-01-01-preview' = { + name: !empty(aiServicesConnectionName) ? aiServicesConnectionName : toLower('${aiServices.name}-connection') + properties: { + category: 'AIServices' + target: aiServices.properties.endpoint + authType: connectionAuthType + isSharedToAll: true + metadata: { + ApiType: 'Azure' + ResourceId: aiServices.id + } + credentials: connectionAuthType == 'ApiKey' + ? 
{ + key: aiServices.listKeys().key1 + } + : null + } + } +} + +resource azureMLDataScientistRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { + name: 'f6c7c914-8db3-469d-8ca1-694a8f32e121' + scope: subscription() +} + +// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry +resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { + name: guid(hub.id, azureMLDataScientistRole.id, userObjectId) + scope: hub + properties: { + roleDefinitionId: azureMLDataScientistRole.id + principalType: 'User' + principalId: userObjectId + } +} + +resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { + name: diagnosticSettingsName + scope: hub + properties: { + workspaceId: workspaceId + logs: logs + metrics: metrics + } +} + +// Outputs +output name string = hub.name +output id string = hub.id diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep index 0ce1acc760..ba3fe208cb 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep @@ -155,7 +155,7 @@ resource aiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022 } } -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Microsoft Foundry +// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { name: guid(project.id, azureMLDataScientistRole.id, userObjectId) scope: project @@ -166,7 +166,7 @@ resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAss } } -// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Microsoft Foundry +// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Azure AI Foundry resource azureMLDataScientistManagedIdentityRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesPrincipalId)) { name: guid(project.id, azureMLDataScientistRole.id, aiServicesPrincipalId) scope: project diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep index e7c3c7c7c4..60c837d8fb 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep @@ -14,12 +14,9 @@ resource search 'Microsoft.Search/searchServices@2023-11-01' = { replicaCount: 1 partitionCount: 1 hostingMode: 'default' - authOptions: { - aadOrApiKey: { - aadAuthFailureMode: 'http401WithBearerChallenge' - } - } } } +#disable-next-line outputs-should-not-contain-secrets +output primaryKey string = search.listAdminKeys().primaryKey output endpoint string = 'https://${name}.search.windows.net' diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep index ed31e47894..85c59f88b2 
100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep @@ -248,3 +248,8 @@ resource blobServiceDiagnosticSettings 'Microsoft.Insights/diagnosticSettings@20 // Outputs output id string = storageAccount.id output name string = storageAccount.name +#disable-next-line outputs-should-not-contain-secrets +output primaryKey string = storageAccount.listKeys().keys[0].value + +#disable-next-line outputs-should-not-contain-secrets +output connectionString string = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${storageAccount.listKeys().keys[0].value};EndpointSuffix=core.windows.net' diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb index 242fce4795..ccfa510fbf 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb @@ -37,7 +37,7 @@ }, "source": [ "## 1. Parameter Experimentation\n", - "Let's first set up the Challenge. These cells install the required Python packages, load the environment variables, and relevant Python libraries using the cells below." + "Let's first set up the Challenge. Load the API key and relevant Python libraries using the cells below." ] }, { @@ -90,12 +90,12 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -r ../requirements.txt" + "%pip install -r ../requirements-old.txt" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "gather": { "logged": 1686932813309 @@ -115,11 +115,8 @@ "import openai\n", "import os\n", "import json\n", - "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())\n", - "from openai import AzureOpenAI\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n" + "load_dotenv(find_dotenv())" ] }, { @@ -141,10 +138,9 @@ }, "outputs": [], "source": [ - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n", + "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", + "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", + "openai.api_key = API_KEY\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", @@ -153,14 +149,8 @@ "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "openai.azure_ad_token_provider = token_provider\n", - "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", "\n", - "client = AzureOpenAI(\n", - " azure_endpoint=RESOURCE_ENDPOINT,\n", - " azure_ad_token_provider=token_provider,\n", - " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", - ")\n" + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n" ] }, { @@ -225,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": { "gather": { "logged": 1686938673045 @@ -244,19 +234,19 @@ "source": [ "def get_chat_completion(prompt, model=chat_model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = client.chat.completions.create(\n", - " model=chat_model,\n", + " response = openai.ChatCompletion.create(\n", + " engine=model,\n", " messages=messages,\n", " 
temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return response.choices[0].message.content" + " return response.choices[0].message[\"content\"]" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": { "gather": { "logged": 1686938550664 @@ -274,13 +264,14 @@ "outputs": [], "source": [ "def get_completion_from_messages(messages, model=chat_model, temperature=0):\n", - " response = client.chat.completions.create(\n", - " model=chat_model,\n", + " response = openai.ChatCompletion.create(\n", + " engine=model,\n", " messages=messages,\n", " temperature=temperature # this is the degree of randomness of the model's output\n", " )\n", "\n", - " return response.choices[0].message.content\n" + " return response.choices[0].message[\"content\"]\n", + "\n" ] }, { @@ -682,7 +673,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": { "gather": { "logged": 1685081594233 @@ -751,7 +742,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": { "gather": { "logged": 1685059771050 @@ -892,7 +883,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": null, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1519,7 +1510,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { "gather": { "logged": 1685051978623 @@ -1534,18 +1525,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To determine the most decorated individual athlete at the Sydney 2000 Olympic Games, we need to follow a step-by-step approach:\\n\\n1. **Identify the Event**: The Sydney 2000 Olympic Games were held from September 15 to October 1, 2000.\\n\\n2. **Research the Medalists**: We need to look into the medalists from the Sydney 2000 Olympics to find out who won the most medals.\\n\\n3. **Focus on Individual Athletes**: We are interested in individual athletes, not teams or countries.\\n\\n4. **Consult Reliable Sources**: Use reliable sources such as the official Olympic website, sports databases, and historical records.\\n\\n5. **Analyze the Data**: Compare the number of medals won by individual athletes.\\n\\n### Step-by-Step Analysis:\\n\\n- **Research**: According to the official Olympic records and sports databases, the Sydney 2000 Olympics featured many outstanding performances.\\n\\n- **Identify Top Performers**: Swimmer Ian Thorpe from'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "text = f\"\"\"\n", " The 2020 Summer Olympics, officially the Games of the XXXII Olympiad and also known as Tokyo 2020, was an international multi-sport event held from 23 July to 8 August 2021 in Tokyo, Japan, with some preliminary events that began on 21 July 2021. Tokyo was selected as the host city during the 125th IOC Session in Buenos Aires, Argentina, on 7 September 2013.Originally scheduled to take place from 24 July to 9 August 2020, the event was postponed to 2021 on 24 March 2020 due to the global COVID-19 pandemic, the first such instance in the history of the Olympic Games (previous games had been cancelled but not rescheduled). However, the event retained the Tokyo 2020 branding for marketing purposes. 
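The hunks above toggle the notebook's helper between the legacy openai 0.28 call style (`openai.ChatCompletion.create(engine=...)`) and the 1.x client style (`client.chat.completions.create(model=...)`). For reference, here is a minimal, self-contained sketch of the 1.x variant, not taken from the patch itself; it assumes the `.env` file supplies `OPENAI_API_BASE`, `OPENAI_API_VERSION`, and `CHAT_MODEL_NAME`, and that the signed-in identity has access to the Azure OpenAI resource.

```python
# Minimal sketch (not part of the patch): keyless chat completion with openai>=1.x.
# Assumes OPENAI_API_BASE, OPENAI_API_VERSION and CHAT_MODEL_NAME are set in .env.
import os

from dotenv import load_dotenv, find_dotenv
from openai import AzureOpenAI
from azure.identity import DefaultAzureCredential, get_bearer_token_provider

load_dotenv(find_dotenv())

token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)
client = AzureOpenAI(
    azure_endpoint=os.environ["OPENAI_API_BASE"],
    azure_ad_token_provider=token_provider,
    api_version=os.environ["OPENAI_API_VERSION"],
)

def get_chat_completion(prompt: str, model: str = os.environ.get("CHAT_MODEL_NAME", "")) -> str:
    # Single user turn; temperature=0 keeps the answer as deterministic as possible.
    response = client.chat.completions.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0,
        max_tokens=200,
    )
    return response.choices[0].message.content

print(get_chat_completion("Say hello in one short sentence."))
```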
It was largely held behind closed doors with no public spectators permitted due to the declaration of a state of emergency in the Greater Tokyo Area in response to the pandemic, the first and only Olympic Games to be held without official spectators. The Games were the most expensive ever, with total spending of over $20 billion.The Games were the fourth Olympic Games to be held in Japan, following the 1964 Summer Olympics (Tokyo), 1972 Winter Olympics (Sapporo), and 1998 Winter Olympics (Nagano). Tokyo became the first city in Asia to hold the Summer Olympic Games twice. The 2020 Games were the second of three consecutive Olympics to be held in East Asia, following the 2018 Winter Olympics in Pyeongchang, South Korea and preceding the 2022 Winter Olympics in Beijing, China. Due to the one-year postponement, Tokyo 2020 was the first and only Olympic Games to have been held in an odd-numbered year and the first Summer Olympics since 1900 to be held in a non-leap year.\\nNew events were introduced in existing sports, including 3x3 basketball, freestyle BMX and mixed gender team events in a number of existing sports, as well as the return of madison cycling for men and an introduction of the same event for women. New IOC policies also allowed the host organizing committee to add new sports to the Olympic program for just one Games. The disciplines added by the Japanese Olympic Committee were baseball and softball, karate, sport climbing, surfing and skateboarding, the last four of which made their Olympic debuts, and the last three of which will remain on the Olympic program.The United States topped the medal count by both total golds (39) and total medals (113), with China finishing second by both respects (38 and 89). Host nation Japan finished third, setting a record for the most gold medals and total medals ever won by their delegation at an Olympic Games with 27 and 58. Great Britain finished fourth, with a total of 22 gold and 64 medals. The Russian delegation competing as the ROC finished fifth with 20 gold medals and third in the overall medal count, with 71 medals. Bermuda, the Philippines and Qatar won their first-ever Olympic gold medals. Burkina Faso, San Marino and Turkmenistan also won their first-ever Olympic medals.'\n", @@ -1558,7 +1538,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1570,18 +1550,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To determine the largest time zone difference between the top two countries that won the most gold medals in the 2020 Tokyo Olympics, we need to identify these countries and their respective time zones.\\n\\n1. **Identify the top two countries by gold medals:**\\n - The United States won the most gold medals with 39.\\n - China finished second with 38 gold medals.\\n\\n2. **Determine the time zones for each country:**\\n - The United States spans multiple time zones, but the primary time zones are Eastern Standard Time (EST, UTC-5), Central Standard Time (CST, UTC-6), Mountain Standard Time (MST, UTC-7), and Pacific Standard Time (PST, UTC-8). For simplicity, we can consider the Eastern Standard Time (EST, UTC-5) as a representative time zone for the U.S.\\n - China operates on China Standard Time (CST, UTC+8), which is used nationwide.\\n\\n3. 
**Calculate'" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Another example\n", "prompt = f\"\"\"\n", @@ -1611,7 +1580,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": { "gather": { "logged": 1685053144682 @@ -1626,18 +1595,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To find out how many more silver and bronze medals the United States has over Great Britain, we need to calculate the number of silver and bronze medals each country has and then find the difference.\\n\\nFirst, calculate the number of silver and bronze medals for each country:\\n\\n1. **United States:**\\n - Total medals: 113\\n - Gold medals: 39\\n - Silver and bronze medals: 113 - 39 = 74\\n\\n2. **Great Britain:**\\n - Total medals: 64\\n - Gold medals: 22\\n - Silver and bronze medals: 64 - 22 = 42\\n\\nNow, find the difference in the number of silver and bronze medals between the United States and Great Britain:\\n\\n74 (United States) - 42 (Great Britain) = 32\\n\\nThe United States has 32 more silver and bronze medals than Great Britain.'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Notice how this response may not be ideal, or the most accurate.\n", "prompt = f\"\"\"\n", @@ -1921,7 +1879,7 @@ "name": "python38-azureml" }, "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -1935,7 +1893,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.11.13" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb index 84993e5715..195c83072e 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb @@ -35,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -44,9 +44,7 @@ "import json\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())\n", - "from openai import AzureOpenAI\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider" + "load_dotenv(find_dotenv())" ] }, { @@ -60,33 +58,23 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "#API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "#assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "#openai.api_key = API_KEY\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n", + "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", + "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", + "openai.api_key = API_KEY\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "\n", "openai.api_base = RESOURCE_ENDPOINT\n", + "\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = 
os.getenv(\"OPENAI_API_VERSION\")\n", - "openai.azure_ad_token_provider = token_provider\n", - "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", - "\n", - "client = AzureOpenAI(\n", - " azure_endpoint=RESOURCE_ENDPOINT,\n", - " azure_ad_token_provider=token_provider,\n", - " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", - ")" + "\n", + "model=os.getenv(\"CHAT_MODEL_NAME\")" ] }, { @@ -100,20 +88,20 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "def get_chat_completion(prompt, model=chat_model):\n", + "def get_chat_completion(prompt, model=model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = client.chat.completions.create(\n", - " model=chat_model,\n", + " response = openai.ChatCompletion.create(\n", + " engine=model,\n", " messages=messages,\n", " temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return response.choices[0].message.content" + " return response.choices[0].message[\"content\"]" ] }, { @@ -142,7 +130,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=chat_model)\n", + "model_response = get_chat_completion(prompt, model=model)\n", "print(f\"Response: {model_response}\\n\")\n" ] }, @@ -168,7 +156,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=chat_model)\n", + "model_response = get_chat_completion(prompt, model=model)\n", "print(f\"Response: {model_response}\\n\")" ] }, @@ -195,7 +183,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -209,7 +197,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.10.13" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb index a4deb96601..b5e623f23b 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb @@ -44,36 +44,31 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install langchain langchain-text-splitters\n", - "\n", "import openai\n", "import PyPDF3\n", "import os\n", "import json\n", "import tiktoken\n", "import spacy\n", + "from openai.error import InvalidRequestError\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", "load_dotenv(find_dotenv())\n", - "from openai import AzureOpenAI\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n", + "\n", "from spacy.lang.en import English \n", "nlp = spacy.load(\"en_core_web_sm\")\n", "\n", "import langchain\n", - "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", - "from openai import BadRequestError" + "from langchain.text_splitter import RecursiveCharacterTextSplitter" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "This cell sets up your Python environment to access your Azure OpenAI endpoint and sets up various openai settings from your .env file. " + "Set up your environment to access your Azure OpenAI keys. 
Refer to your Azure OpenAI resource in the Azure Portal to retrieve information regarding your Azure OpenAI endpoint and keys. \n", + "\n", + "For security purposes, store your sensitive information in an .env file." ] }, { @@ -82,26 +77,19 @@ "metadata": {}, "outputs": [], "source": [ - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n", + "# Load your OpenAI credentials\n", + "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", + "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", + "openai.api_key = API_KEY\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "\n", "openai.api_base = RESOURCE_ENDPOINT\n", + "\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "openai.azure_ad_token_provider = token_provider\n", - "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", - "\n", - "client = AzureOpenAI(\n", - " azure_endpoint=RESOURCE_ENDPOINT,\n", - " azure_ad_token_provider=token_provider,\n", - " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", - ")" + "model=os.getenv(\"CHAT_MODEL_NAME\")\n" ] }, { @@ -176,13 +164,12 @@ "outputs": [], "source": [ "document = open(r'Insert PDF file path', 'rb') \n", - "\n", "doc_helper = PyPDF3.PdfFileReader(document)" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -207,16 +194,12 @@ "\n", "try:\n", " final_prompt = prompt + q\n", - " response = client.chat.completions.create(\n", - " model=chat_model, \n", - " messages=[{\"role\": \"user\", \"content\": final_prompt}], \n", - " max_tokens=50\n", - " )\n", - " answer = response.choices[0].message.content.strip()\n", + " response = openai.ChatCompletion.create(engine=model, messages=final_prompt, max_tokens=50)\n", + " answer = response.choices[0].text.strip()\n", " print(f\"{q}\\n{answer}\\n\")\n", "\n", - "except BadRequestError as e:\n", - " print(e)\n", + "except InvalidRequestError as e:\n", + " print(e.error)\n", "\n" ] }, @@ -404,7 +387,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -418,7 +401,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.10.13" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb index de1b9484a3..9e88ed1da1 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb @@ -52,6 +52,7 @@ "source": [ "! pip install num2words\n", "! pip install plotly\n", + "! pip install \"openai==0.28.1\" \n", "! 
pip install nptyping" ] }, @@ -61,6 +62,7 @@ "metadata": {}, "outputs": [], "source": [ + "import openai\n", "import os\n", "import re \n", "import requests\n", @@ -68,36 +70,11 @@ "from num2words import num2words \n", "import pandas as pd \n", "import numpy as np\n", + "from openai.embeddings_utils import get_embedding, cosine_similarity \n", "import tiktoken\n", "from dotenv import load_dotenv\n", "from tenacity import retry, wait_random_exponential, stop_after_attempt\n", - "from sklearn.metrics.pairwise import cosine_similarity as sklearn_cosine_similarity\n", - "from openai import AzureOpenAI\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "\n", - "load_dotenv()\n", - "\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n", - "\n", - "# Initialize the Azure OpenAI client\n", - "client = AzureOpenAI(\n", - " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", - " azure_ad_token_provider=token_provider,\n", - " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", - ")\n", - "\n", - "# Define helper functions using the OpenAI 1.x API\n", - "@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n", - "def get_embedding(text: str, engine: str) -> list:\n", - "\ttext = text.replace(\"\\n\", \" \")\n", - "\tresponse = client.embeddings.create(input=[text], model=engine)\n", - "\treturn response.data[0].embedding\n", - "\n", - "def cosine_similarity(a, b):\n", - "\treturn np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))" + "load_dotenv() " ] }, { @@ -111,12 +88,15 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "# Get the embedding model name from environment\n", - "embedding_model = os.getenv(\"EMBEDDING_MODEL_NAME\")" + "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", + "openai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n", + "openai.api_base = os.environ.get(\"OPENAI_API_BASE\")\n", + "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", + "embedding_model=os.getenv(\"EMBEDDING_MODEL_NAME\")" ] }, { @@ -139,7 +119,7 @@ "\n", "input=\"I would like to order a pizza\"\n", "\n", - "# Add code here: Create embedding using the helper function\n" + "# Add code here " ] }, { @@ -147,7 +127,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The client.embeddings.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." + "The openai.Embedding.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." 
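At this point in the series the embeddings notebook pins `openai==0.28.1` and calls `openai.Embedding.create()`, while other patches in the series use the openai 1.x client with Microsoft Entra ID. As a reference, here is a hedged, self-contained sketch of the 1.x equivalent; it assumes `OPENAI_API_BASE`, `OPENAI_API_VERSION`, and `EMBEDDING_MODEL_NAME` are set in `.env`, that the embedding deployment exists, and the two sentences compared are placeholders.

```python
# Hedged sketch (not part of the patch): create embeddings and compare them with
# cosine similarity using the openai>=1.x client and keyless authentication.
import os

import numpy as np
from dotenv import load_dotenv, find_dotenv
from openai import AzureOpenAI
from azure.identity import DefaultAzureCredential, get_bearer_token_provider

load_dotenv(find_dotenv())

client = AzureOpenAI(
    azure_endpoint=os.environ["OPENAI_API_BASE"],
    azure_ad_token_provider=get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    ),
    api_version=os.environ["OPENAI_API_VERSION"],
)
embedding_model = os.environ["EMBEDDING_MODEL_NAME"]

def get_embedding(text: str) -> np.ndarray:
    # Newlines can degrade embedding quality, so collapse them first.
    response = client.embeddings.create(input=[text.replace("\n", " ")], model=embedding_model)
    return np.array(response.data[0].embedding)

def cosine_similarity(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

print(cosine_similarity(get_embedding("I would like to order a pizza"),
                        get_embedding("Can I get a margherita delivered?")))
```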
] }, { @@ -168,7 +148,6 @@ "outputs": [], "source": [ "df=pd.read_csv(os.path.join(os.getcwd(),r'Enter path here'))\n", - "\n", "df" ] }, @@ -184,20 +163,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "398" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n", "shortened_df['n_tokens'] = shortened_df[\"name\"].apply(lambda x: len(tokenizer.encode(x)))\n", @@ -227,7 +195,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -266,7 +234,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -280,7 +248,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.10.13" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb index 95db7eb709..c35a148f3e 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb @@ -106,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "69bd738e", "metadata": {}, "outputs": [], @@ -119,8 +119,9 @@ "import pandas as pd\n", "import numpy as np\n", "from sklearn.metrics.pairwise import cosine_similarity\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "\n", "# Azure Cognitive Search imports\n", + "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", "from azure.search.documents.indexes.models import (\n", @@ -142,12 +143,7 @@ "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()\n", - "\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")\n" + "load_dotenv()" ] }, { @@ -164,19 +160,19 @@ "# Initialize Semantic Kernel\n", "kernel = sk.Kernel()\n", "\n", - "# Add Azure OpenAI Chat Completion service with Entra ID authentication\n", + "# Add Azure OpenAI Chat Completion service\n", "chat_service = AzureChatCompletion(\n", " deployment_name=chat_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " ad_token_provider=token_provider\n", + " api_key=os.environ['OPENAI_API_KEY']\n", ")\n", "kernel.add_service(chat_service)\n", "\n", - "# Add Azure OpenAI Text Embedding service with Entra ID authentication\n", + "# Add Azure OpenAI Text Embedding service \n", "embedding_service = AzureTextEmbedding(\n", " deployment_name=embedding_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " ad_token_provider=token_provider\n", + " api_key=os.environ['OPENAI_API_KEY']\n", ")\n", "kernel.add_service(embedding_service)\n", "\n", @@ -210,13 +206,10 @@ "metadata": {}, "outputs": [], "source": [ - "# Create a Cognitive Search Index client with Entra ID authentication\n", - "from azure.identity import 
AzureCliCredential\n", - "\n", + "# Create a Cognitive Search Index client\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "\n", - "# Use AzureCliCredential for local development (more reliable than DefaultAzureCredential)\n", - "credential = AzureCliCredential()\n", + "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", + "credential = AzureKeyCredential(key)\n", "\n", "index_name = \"news-index\"\n", "\n", @@ -324,12 +317,12 @@ } }, "source": [ - "## Section 1: Leveraging Azure AI Search to extract relevant article based on the query " + "## Section 1: Leveraging Cognitive Search to extract relevant article based on the query " ] }, { "cell_type": "code", - "execution_count": 25, + "execution_count": null, "id": "32689db7-4337-42d9-b8f9-4cbd9d98a850", "metadata": { "gather": { @@ -578,7 +571,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "id": "56354758-427f-4af9-94b9-96a25946e9a5", "metadata": { "gather": { @@ -594,98 +587,7 @@ } } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generated embeddings for 11 chunks\n", - "\n", - "Query: What did Laurene Jobs say about Hillary Clinton?\n", - "\n", - "Result 1 (Score: 0.913):\n", - "She is one of America’s greatest modern creations. Laurene Jobs, pictured, widow of Apple's Steve, has strongly backed Hillary Clinton for president . Laurene Jobs said that Hillary Clinton, right, ha...\n", - "\n", - "Result 2 (Score: 0.904):\n", - "Apple founder Steve Jobs' widow Laurene has told of her admiration for Democratic White House front-runner Hillary Clinton. Ms Jobs, 51, called former First Lady Hillary a 'revolutionary' woman, and a...\n", - "\n", - "Result 3 (Score: 0.829):\n", - "'It matters, of course, that Hillary is a woman. But what matters more is what kind of woman she is.' 
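The hunks above swap the Azure AI Search credential between `AzureKeyCredential` and an Entra ID credential. Below is a small sketch, not part of the patch, that supports both paths; it assumes `AZURE_AI_SEARCH_ENDPOINT` (and, for the key path, `AZURE_AI_SEARCH_KEY`) are in `.env`, that the `news-index` index built by this notebook has already been populated, and the sample query simply reflects the notebook's news data.

```python
# Hedged sketch: connect to Azure AI Search with either a key or Microsoft Entra ID.
# Assumes AZURE_AI_SEARCH_ENDPOINT (and optionally AZURE_AI_SEARCH_KEY) are in .env
# and that the "news-index" index from this notebook already exists.
import os

from azure.core.credentials import AzureKeyCredential
from azure.identity import DefaultAzureCredential
from azure.search.documents import SearchClient
from dotenv import load_dotenv, find_dotenv

load_dotenv(find_dotenv())

endpoint = os.environ["AZURE_AI_SEARCH_ENDPOINT"]
key = os.getenv("AZURE_AI_SEARCH_KEY")

# Use the key when one is configured; otherwise fall back to keyless Entra ID auth.
credential = AzureKeyCredential(key) if key else DefaultAzureCredential()

search_client = SearchClient(endpoint=endpoint, index_name="news-index", credential=credential)

for result in search_client.search(search_text="Hillary Clinton", top=3):
    # Results behave like dictionaries; "@search.score" holds the relevance score.
    print(result["@search.score"])
```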
Mrs Clinton announced her intention to seek the Democratic nomination on Sunday - and set upon the...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered 
in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n" - ] - } - ], + "outputs": [], "source": [ "# Create embeddings for document chunks\n", "embeddings = []\n", @@ -917,7 +819,7 @@ "name": "python3" }, "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -931,7 +833,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.11.13" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb index 82f33157c6..e4ca2b4acd 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -49,7 +49,6 @@ "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "from 
azure.search.documents.indexes.models import (\n", " SearchIndex,\n", " SearchField,\n", @@ -67,17 +66,12 @@ "import numpy as np\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()\n", - "\n", - "token_provider = get_bearer_token_provider(\n", - " DefaultAzureCredential(),\n", - " \"https://cognitiveservices.azure.com/.default\"\n", - ")" + "load_dotenv()" ] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -86,11 +80,10 @@ "# Initialize the Azure OpenAI client for the latest version\n", "from openai import AzureOpenAI\n", "\n", - "# Initialize the Azure OpenAI client\n", "client = AzureOpenAI(\n", - " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", - " azure_ad_token_provider=token_provider,\n", - " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + " api_key=os.environ['OPENAI_API_KEY'],\n", + " api_version=os.environ['OPENAI_API_VERSION'],\n", + " azure_endpoint=os.environ['OPENAI_API_BASE']\n", ")\n", "\n", "chat_model = os.environ['CHAT_MODEL_NAME']\n", @@ -106,7 +99,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -118,19 +111,18 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ + "from azure.core.credentials import AzureKeyCredential\n", "from azure.ai.formrecognizer import DocumentAnalysisClient\n", "\n", - "endpoint = os.environ[\"DOCUMENT_INTELLIGENCE_ENDPOINT\"]\n", - "\n", - "# Use Entra ID authentication instead of API key\n", - "credential = DefaultAzureCredential()\n", + "endpoint = os.environ[\"AZURE_DOC_INTELLIGENCE_ENDPOINT\"]\n", + "key = os.environ[\"AZURE_DOC_INTELLIGENCE_KEY\"]\n", "\n", "document_analysis_client = DocumentAnalysisClient(\n", - " endpoint=endpoint, credential=credential\n", + " endpoint=endpoint, credential=AzureKeyCredential(key)\n", ")" ] }, @@ -146,7 +138,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -219,7 +211,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -271,7 +263,8 @@ "source": [ "# Create an SDK client\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "credential = DefaultAzureCredential()\n", + "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", + "credential = AzureKeyCredential(key)\n", "\n", "index_name = \"research-paper-index\"\n", "\n", @@ -345,7 +338,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -381,7 +374,7 @@ }, { "cell_type": "code", - "execution_count": 23, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -428,7 +421,7 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -495,7 +488,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -552,11 +545,18 @@ "answer = query_search(\"what is prompt tuning?\", 10)\n", "print(answer)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -570,7 +570,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": 
"ipython3", - "version": "3.13.11" + "version": "3.11.13" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb index 77eecbbdb0..0bf538deef 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": null, "id": "94bcb00a", "metadata": {}, "outputs": [], @@ -199,7 +199,7 @@ ], "metadata": { "kernelspec": { - "display_name": ".venv (3.13.11)", + "display_name": "Python 3", "language": "python", "name": "python3" }, @@ -213,7 +213,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.13.11" + "version": "3.11.13" } }, "nbformat": 4, From 19fbd883a6590ed65dbe758602b0d41083df3aea Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 09:00:57 -0600 Subject: [PATCH 37/58] Update notes on .env file handling --- 066-OpenAIFundamentals/Student/Challenge-00.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index bcf27a375f..d345bfce5e 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -229,7 +229,7 @@ You will find the `.env.sample` file in the root of the codespace. If you are wo **TIP:** Learn more about using `.env` files [here](https://dev.to/edgar_montano/how-to-setup-env-in-python-4a83#:~:text=How%20to%20setup%20a%20.env%20file%201%201.To,file%20using%20the%20following%20format%3A%20...%20More%20items). -**NOTE:** We have also provided a `.gitignore` file that should prevent you from accidentally committing your renamed `.env` file to a Git repo during this hack. +**NOTE:** We have also provided a `.gitignore` file that should prevent you from accidentally committing your own `.env` file to a Git repo during this hack. **NOTE:** On MacOS, files that start with a `.` are hidden files and are not viewable in Finder when browsing the file system. They will be visible in both VS Code or GitHub Codespaces. @@ -245,7 +245,7 @@ If using GitHub Codespaces: - Verify you have the following files & folders available in the Codespace: - `/data` - `/notebooks` - - `.env` <= Renamed from `.env.sample` + - `.env` <= Copied from `.env.sample` - `.gitignore` - `requirements.txt` - Verify that you have created the Project in Microsoft Foundry. From 7fc594300652c8d65fc7692517cda3e719240b37 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 09:02:22 -0600 Subject: [PATCH 38/58] Update Challenge-00.md Changed Codespace link --- 066-OpenAIFundamentals/Student/Challenge-00.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index d345bfce5e..3523855f1b 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -47,8 +47,7 @@ You can see your balance of available codespace hours on the [GitHub billing pag The GitHub Codespace for this hack will host the Jupyter Notebook files, configuration files, and other data files needed for this event. 
Here are the steps you will need to follow: - A GitHub repo containing the student resources and Codespace for this hack is hosted here: - - [WTH OpenAI Fundamentals Codespace Repo](https://codespaces.new/perktime/wth-openaifundamentals-codespace?devcontainer_path=.devcontainer/devcontainer.json) - + - [WTH OpenAI Fundamentals Codespace Repo](https://aka.ms/wth/openaifundamentals/codespace) - Please open this link and sign in with your personal Github account. **NOTE:** Make sure you do not sign in with your enterprise managed Github account. From 6be2ed54f3ad2d58b9957f21856ae403c565cf16 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 15:43:12 -0600 Subject: [PATCH 39/58] Refactor Azure OpenAI integration across notebooks; update dependencies and environment setup. Add easy.txt for quick fixes. --- .../Student/Resources/infra/deploy.sh | 18 +-- .../Student/Resources/infra/functions.sh | 2 +- .../Student/Resources/infra/main.bicep | 92 ++----------- .../Student/Resources/infra/main.bicepparam | 4 + .../Resources/infra/modules/aiServices.bicep | 9 +- .../infra/modules/applicationInsights.bicep | 2 +- .../Resources/infra/modules/document.bicep | 9 +- .../Resources/infra/modules/project.bicep | 4 +- .../Resources/infra/modules/search.bicep | 7 +- .../infra/modules/storageAccount.bicep | 5 - .../notebooks/CH-01-PromptEngineering.ipynb | 98 +++++++++---- .../notebooks/CH-03-A-Grounding.ipynb | 43 +++--- .../notebooks/CH-03-B-Chunking.ipynb | 55 +++++--- .../notebooks/CH-03-C-Embeddings.ipynb | 66 ++++++--- .../CH-04-A-RAG_for_structured_data.ipynb | 130 +++++++++++++++--- .../CH-04-B-RAG_for_unstructured_data.ipynb | 58 ++++---- .../notebooks/CH-5.7-RedTeaming.ipynb | 6 +- 17 files changed, 366 insertions(+), 242 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index 6e851de2c5..b13ddc941d 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -7,7 +7,7 @@ source ./functions.sh declare -A variables=( [template]="main.bicep" [parameters]="main.bicepparam" - [resourceGroupName]="rg-ai-foundry-secure" + [resourceGroupName]="rg-microsoft-foundry-secure" [location]="eastus" [validateTemplate]=0 [useWhatIf]=0 @@ -90,14 +90,15 @@ deploymentOutputs=$(az deployment group create \ --parameters $parameters \ --parameters location=$location \ --parameters userObjectId=$userObjectId \ - --query 'properties.outputs' -o json) + --query 'properties.outputs' -o json 2>/dev/null | grep -A 9999 '^{') - #echo $deploymentOutputs if [[ $? == 0 ]]; then echo "[$template] Bicep template deployment succeeded" else echo "Failed to deploy [$template] Bicep template" - exit + echo "Fetching deployment error details..." + az deployment group show --resource-group "$resourceGroupName" --name "$deploymentName" --query 'properties.error' -o json + exit 1 fi json=$deploymentOutputs @@ -113,7 +114,7 @@ environment_sample_file="../.env.sample" # check if the .env file already exists and back it up if it does if [[ -f "$environment_file" ]]; then - random_chars=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) + random_chars=$(LC_ALL=C tr -dc 'a-zA-Z0-9' < /dev/urandom | head -c 5) mv "$environment_file" "${environment_file}-${random_chars}.bak" echo -e "\e[33mWarning: Existing .env file found. 
Backed up to ${environment_file}-${random_chars}.bak\e[0m" else @@ -127,15 +128,10 @@ source $environment_sample_file # Extract values from JSON and write to .env file with double quotes around values echo "Populating .env file..." -echo "OPENAI_API_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesKey')\"" >> $environment_file echo "OPENAI_API_BASE=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesOpenAiEndpoint')\"" >> $environment_file -echo "AZURE_AI_SEARCH_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchKey')\"" >> $environment_file echo "AZURE_AI_SEARCH_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchEndpoint')\"" >> $environment_file echo "DOCUMENT_INTELLIGENCE_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentEndpoint')\"" >> $environment_file -echo "DOCUMENT_INTELLIGENCE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentKey')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_ACCOUNT_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountName')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_KEY=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountKey')\"" >> $environment_file -echo "AZURE_BLOB_STORAGE_CONNECTION_STRING=\"$(echo "$json" | jq -r '.deploymentInfo.value.storageAccountConnectionString')\"" >> $environment_file +echo "AZURE_AI_PROJECT_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesProjectEndpoint')\"" >> $environment_file # Warning: this assumes the first deployed model is the chat model used by the Jupyter notebooks echo "CHAT_MODEL_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.deployedModels[0].name')\"" >> $environment_file diff --git a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh index 02ae19a2fe..3ef25acf12 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/functions.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/functions.sh @@ -17,7 +17,7 @@ function authenticate_to_azure { parse_args() { # $1 - The associative array name containing the argument definitions and default values # $2 - The arguments passed to the script - local -n arg_defs=$1 + local -n arg_defs=$1 # this won't work by default on the Mac zsh shell, but works in bash. brew install bash and then /opt/homebrew/bin/bash ./deploy.sh to use it. shift local args=("$@") diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep index 3f60ee1fe1..995108e05a 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep @@ -11,49 +11,13 @@ param location string = resourceGroup().location @description('Specifies the name of the Network Security Perimeter.') param nspName string = '' -@description('Specifies the name Azure AI Hub workspace.') -param hubName string = '' - -@description('Specifies the friendly name of the Azure AI Hub workspace.') -param hubFriendlyName string = 'Demo AI Hub' - -@description('Specifies the description for the Azure AI Hub workspace displayed in Azure AI Foundry.') -param hubDescription string = 'This is a demo hub for use in Azure AI Foundry.' 
- -@description('Specifies the Isolation mode for the managed network of the Azure AI Hub workspace.') -@allowed([ - 'AllowInternetOutbound' - 'AllowOnlyApprovedOutbound' - 'Disabled' -]) -param hubIsolationMode string = 'Disabled' - -@description('Specifies the public network access for the Azure AI Hub workspace.') -param hubPublicNetworkAccess string = 'Enabled' - -@description('Specifies the authentication method for the OpenAI Service connection.') -@allowed([ - 'ApiKey' - 'AAD' - 'ManagedIdentity' - 'None' -]) -param connectionAuthType string = 'AAD' - -@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. If set to identity, the workspace will create the system datastores with no credentials.') -@allowed([ - 'identity' - 'accessKey' -]) -param systemDatastoresAuthMode string = 'identity' - -@description('Specifies the name for the Azure AI Foundry Hub Project workspace.') +@description('Specifies the name for the Microsoft Foundry Project.') param projectName string = '' -@description('Specifies the friendly name for the Azure AI Foundry Hub Project workspace.') -param projectFriendlyName string = 'AI Foundry Hub Project' +@description('Specifies the friendly name for the Microsoft Foundry Project.') +param projectFriendlyName string = 'Microsoft Foundry Project' -@description('Specifies the public network access for the Azure AI Project workspace.') +@description('Specifies the public network access for the Microsoft Foundry Project.') param projectPublicNetworkAccess string = 'Enabled' @description('Specifies the name of the Azure Log Analytics resource.') @@ -90,9 +54,6 @@ param aiServicesIdentity object = { @description('Specifies an optional subdomain name used for token-based authentication.') param aiServicesCustomSubDomainName string = '' -@description('Specifies whether disable the local authentication via API key.') -param aiServicesDisableLocalAuth bool = false - @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -287,7 +248,7 @@ module storageAccount 'modules/storageAccount.bicep' = { networkAclsDefaultAction: storageAccountANetworkAclsDefaultAction supportsHttpsTrafficOnly: storageAccountSupportsHttpsTrafficOnly workspaceId: workspace.outputs.id - + // role assignments userObjectId: userObjectId aiServicesPrincipalId: aiServices.outputs.principalId @@ -306,7 +267,6 @@ module aiServices 'modules/aiServices.bicep' = { customSubDomainName: empty(aiServicesCustomSubDomainName) ? toLower('ai-services-${suffix}') : aiServicesCustomSubDomainName - disableLocalAuth: aiServicesDisableLocalAuth publicNetworkAccess: aiServicesPublicNetworkAccess deployments: openAiDeployments workspaceId: workspace.outputs.id @@ -316,47 +276,24 @@ module aiServices 'modules/aiServices.bicep' = { } } -module hub 'modules/hub.bicep' = { - name: 'hub' +module project 'modules/foundryProject.bicep' = { + name: 'project' params: { // workspace organization - name: empty(hubName) ? toLower('hub-${suffix}') : hubName - friendlyName: hubFriendlyName - description_: hubDescription + name: empty(projectName) ? 
toLower('project-${suffix}') : projectName + friendlyName: projectFriendlyName location: location tags: tags // dependent resources aiServicesName: aiServices.outputs.name applicationInsightsId: applicationInsights.outputs.id - containerRegistryId: acrEnabled ? containerRegistry.outputs.id : '' + containerRegistryId: acrEnabled ? containerRegistry!.outputs.id : '' keyVaultId: keyVault.outputs.id storageAccountId: storageAccount.outputs.id - connectionAuthType: connectionAuthType - systemDatastoresAuthMode: systemDatastoresAuthMode - - // workspace configuration - publicNetworkAccess: hubPublicNetworkAccess - isolationMode: hubIsolationMode - workspaceId: workspace.outputs.id - - // role assignments - userObjectId: userObjectId - } -} - -module project 'modules/project.bicep' = { - name: 'project' - params: { - // workspace organization - name: empty(projectName) ? toLower('project-${suffix}') : projectName - friendlyName: projectFriendlyName - location: location - tags: tags // workspace configuration publicNetworkAccess: projectPublicNetworkAccess - hubId: hub.outputs.id workspaceId: workspace.outputs.id // role assignments @@ -388,6 +325,7 @@ module document 'modules/document.bicep' = { params: { name: 'document-${suffix}' location: location + customSubDomainName: toLower('document-intelligence-${suffix}') } } @@ -398,16 +336,8 @@ output deploymentInfo object = { aiServicesName: aiServices.outputs.name aiServicesEndpoint: aiServices.outputs.endpoint aiServicesOpenAiEndpoint: aiServices.outputs.openAiEndpoint - aiServicesKey: aiServices.outputs.key1 - hubName: hub.outputs.name projectName: project.outputs.name - documentKey: document.outputs.key1 documentEndpoint: document.outputs.endpoint - searchKey: search.outputs.primaryKey searchEndpoint: search.outputs.endpoint - storageAccountName: storageAccount.outputs.name - storageAccountId: storageAccount.outputs.id - storageAccountConnectionString: storageAccount.outputs.connectionString - storageAccountKey: storageAccount.outputs.primaryKey deployedModels: aiServices.outputs.deployedModels } diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam index c644af659f..1e173d12af 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam @@ -5,6 +5,10 @@ param userObjectId = '' param keyVaultEnablePurgeProtection = false param acrEnabled = false param nspEnabled = false +//param aiServicesDisableLocalAuth = false +param storageAccountAllowSharedKeyAccess = true +//param documentDisableLocalAuth = false + //The first model in the list will be the default model for the Jupyter notebooks param openAiDeployments = [ { diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep index 31bd1c25a1..4f44b5017f 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep @@ -21,9 +21,6 @@ param tags object @description('Specifies an optional subdomain name used for token-based authentication.') param customSubDomainName string = '' -@description('Specifies whether disable the local authentication via API key.') -param disableLocalAuth bool = false - @description('Specifies whether or not public endpoint access is allowed for this account..') @allowed([ 'Enabled' @@ -73,7 +70,6 @@ resource 
aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = tags: tags properties: { customSubDomainName: customSubDomainName - disableLocalAuth: disableLocalAuth publicNetworkAccess: publicNetworkAccess } } @@ -174,9 +170,8 @@ output name string = aiServices.name output endpoint string = aiServices.properties.endpoint output openAiEndpoint string = aiServices.properties.endpoints['OpenAI Language Model Instance API'] output principalId string = aiServices.identity.principalId -#disable-next-line outputs-should-not-contain-secrets -output key1 string = aiServices.listKeys().key1 + // Output the deployed model names output deployedModels array = [for deployment in deployments: { name: deployment.model.name -}] \ No newline at end of file +}] diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep index 69cb91a519..a30fb1ecf2 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep @@ -20,7 +20,7 @@ resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { properties: { Application_Type: 'web' DisableIpMasking: false - DisableLocalAuth: false + //DisableLocalAuth: false Flow_Type: 'Bluefield' ForceCustomerStorageForProfiler: false ImmediatePurgeDataOn30Days: true diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep index 5b07c6624c..ce2f51a628 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep @@ -4,6 +4,9 @@ param name string @description('Location where the Azure Document Intelligence will be created.') param location string +@description('Custom subdomain name for the Azure Document Intelligence.') +param customSubDomainName string + resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: name location: location @@ -11,10 +14,10 @@ resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { name: 'S0' } kind: 'FormRecognizer' - properties: { + properties: { + customSubDomainName: customSubDomainName + } } -#disable-next-line outputs-should-not-contain-secrets -output key1 string = account.listKeys().key1 output endpoint string = account.properties.endpoint diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep index ba3fe208cb..0ce1acc760 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/project.bicep @@ -155,7 +155,7 @@ resource aiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022 } } -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry +// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Microsoft Foundry resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { name: guid(project.id, azureMLDataScientistRole.id, userObjectId) scope: project @@ -166,7 +166,7 @@ resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAss } } -// This role 
assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Azure AI Foundry +// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Microsoft Foundry resource azureMLDataScientistManagedIdentityRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesPrincipalId)) { name: guid(project.id, azureMLDataScientistRole.id, aiServicesPrincipalId) scope: project diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep index 60c837d8fb..e7c3c7c7c4 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/search.bicep @@ -14,9 +14,12 @@ resource search 'Microsoft.Search/searchServices@2023-11-01' = { replicaCount: 1 partitionCount: 1 hostingMode: 'default' + authOptions: { + aadOrApiKey: { + aadAuthFailureMode: 'http401WithBearerChallenge' + } + } } } -#disable-next-line outputs-should-not-contain-secrets -output primaryKey string = search.listAdminKeys().primaryKey output endpoint string = 'https://${name}.search.windows.net' diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep index 85c59f88b2..ed31e47894 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/storageAccount.bicep @@ -248,8 +248,3 @@ resource blobServiceDiagnosticSettings 'Microsoft.Insights/diagnosticSettings@20 // Outputs output id string = storageAccount.id output name string = storageAccount.name -#disable-next-line outputs-should-not-contain-secrets -output primaryKey string = storageAccount.listKeys().keys[0].value - -#disable-next-line outputs-should-not-contain-secrets -output connectionString string = 'DefaultEndpointsProtocol=https;AccountName=${name};AccountKey=${storageAccount.listKeys().keys[0].value};EndpointSuffix=core.windows.net' diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb index ccfa510fbf..242fce4795 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb @@ -37,7 +37,7 @@ }, "source": [ "## 1. Parameter Experimentation\n", - "Let's first set up the Challenge. Load the API key and relevant Python libraries using the cells below." + "Let's first set up the Challenge. These cells install the required Python packages, load the environment variables, and relevant Python libraries using the cells below." 
] }, { @@ -90,12 +90,12 @@ "metadata": {}, "outputs": [], "source": [ - "%pip install -r ../requirements-old.txt" + "%pip install -r ../requirements.txt" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { "gather": { "logged": 1686932813309 @@ -115,8 +115,11 @@ "import openai\n", "import os\n", "import json\n", + "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())" + "load_dotenv(find_dotenv())\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n" ] }, { @@ -138,9 +141,10 @@ }, "outputs": [], "source": [ - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", @@ -149,8 +153,14 @@ "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", "\n", - "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n" + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")\n" ] }, { @@ -215,7 +225,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": { "gather": { "logged": 1686938673045 @@ -234,19 +244,19 @@ "source": [ "def get_chat_completion(prompt, model=chat_model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + " model=chat_model,\n", " messages=messages,\n", " temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return response.choices[0].message[\"content\"]" + " return response.choices[0].message.content" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": { "gather": { "logged": 1686938550664 @@ -264,14 +274,13 @@ "outputs": [], "source": [ "def get_completion_from_messages(messages, model=chat_model, temperature=0):\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + " model=chat_model,\n", " messages=messages,\n", " temperature=temperature # this is the degree of randomness of the model's output\n", " )\n", "\n", - " return response.choices[0].message[\"content\"]\n", - "\n" + " return response.choices[0].message.content\n" ] }, { @@ -673,7 +682,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": { "gather": { "logged": 1685081594233 @@ -742,7 +751,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": { "gather": { "logged": 1685059771050 @@ -883,7 +892,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 18, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1510,7 +1519,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 19, "metadata": { "gather": { 
"logged": 1685051978623 @@ -1525,7 +1534,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To determine the most decorated individual athlete at the Sydney 2000 Olympic Games, we need to follow a step-by-step approach:\\n\\n1. **Identify the Event**: The Sydney 2000 Olympic Games were held from September 15 to October 1, 2000.\\n\\n2. **Research the Medalists**: We need to look into the medalists from the Sydney 2000 Olympics to find out who won the most medals.\\n\\n3. **Focus on Individual Athletes**: We are interested in individual athletes, not teams or countries.\\n\\n4. **Consult Reliable Sources**: Use reliable sources such as the official Olympic website, sports databases, and historical records.\\n\\n5. **Analyze the Data**: Compare the number of medals won by individual athletes.\\n\\n### Step-by-Step Analysis:\\n\\n- **Research**: According to the official Olympic records and sports databases, the Sydney 2000 Olympics featured many outstanding performances.\\n\\n- **Identify Top Performers**: Swimmer Ian Thorpe from'" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "text = f\"\"\"\n", " The 2020 Summer Olympics, officially the Games of the XXXII Olympiad and also known as Tokyo 2020, was an international multi-sport event held from 23 July to 8 August 2021 in Tokyo, Japan, with some preliminary events that began on 21 July 2021. Tokyo was selected as the host city during the 125th IOC Session in Buenos Aires, Argentina, on 7 September 2013.Originally scheduled to take place from 24 July to 9 August 2020, the event was postponed to 2021 on 24 March 2020 due to the global COVID-19 pandemic, the first such instance in the history of the Olympic Games (previous games had been cancelled but not rescheduled). However, the event retained the Tokyo 2020 branding for marketing purposes. It was largely held behind closed doors with no public spectators permitted due to the declaration of a state of emergency in the Greater Tokyo Area in response to the pandemic, the first and only Olympic Games to be held without official spectators. The Games were the most expensive ever, with total spending of over $20 billion.The Games were the fourth Olympic Games to be held in Japan, following the 1964 Summer Olympics (Tokyo), 1972 Winter Olympics (Sapporo), and 1998 Winter Olympics (Nagano). Tokyo became the first city in Asia to hold the Summer Olympic Games twice. The 2020 Games were the second of three consecutive Olympics to be held in East Asia, following the 2018 Winter Olympics in Pyeongchang, South Korea and preceding the 2022 Winter Olympics in Beijing, China. Due to the one-year postponement, Tokyo 2020 was the first and only Olympic Games to have been held in an odd-numbered year and the first Summer Olympics since 1900 to be held in a non-leap year.\\nNew events were introduced in existing sports, including 3x3 basketball, freestyle BMX and mixed gender team events in a number of existing sports, as well as the return of madison cycling for men and an introduction of the same event for women. New IOC policies also allowed the host organizing committee to add new sports to the Olympic program for just one Games. 
The disciplines added by the Japanese Olympic Committee were baseball and softball, karate, sport climbing, surfing and skateboarding, the last four of which made their Olympic debuts, and the last three of which will remain on the Olympic program.The United States topped the medal count by both total golds (39) and total medals (113), with China finishing second by both respects (38 and 89). Host nation Japan finished third, setting a record for the most gold medals and total medals ever won by their delegation at an Olympic Games with 27 and 58. Great Britain finished fourth, with a total of 22 gold and 64 medals. The Russian delegation competing as the ROC finished fifth with 20 gold medals and third in the overall medal count, with 71 medals. Bermuda, the Philippines and Qatar won their first-ever Olympic gold medals. Burkina Faso, San Marino and Turkmenistan also won their first-ever Olympic medals.'\n", @@ -1538,7 +1558,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 20, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1550,7 +1570,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To determine the largest time zone difference between the top two countries that won the most gold medals in the 2020 Tokyo Olympics, we need to identify these countries and their respective time zones.\\n\\n1. **Identify the top two countries by gold medals:**\\n - The United States won the most gold medals with 39.\\n - China finished second with 38 gold medals.\\n\\n2. **Determine the time zones for each country:**\\n - The United States spans multiple time zones, but the primary time zones are Eastern Standard Time (EST, UTC-5), Central Standard Time (CST, UTC-6), Mountain Standard Time (MST, UTC-7), and Pacific Standard Time (PST, UTC-8). For simplicity, we can consider the Eastern Standard Time (EST, UTC-5) as a representative time zone for the U.S.\\n - China operates on China Standard Time (CST, UTC+8), which is used nationwide.\\n\\n3. **Calculate'" + ] + }, + "execution_count": 20, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Another example\n", "prompt = f\"\"\"\n", @@ -1580,7 +1611,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": { "gather": { "logged": 1685053144682 @@ -1595,7 +1626,18 @@ } } }, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'To find out how many more silver and bronze medals the United States has over Great Britain, we need to calculate the number of silver and bronze medals each country has and then find the difference.\\n\\nFirst, calculate the number of silver and bronze medals for each country:\\n\\n1. **United States:**\\n - Total medals: 113\\n - Gold medals: 39\\n - Silver and bronze medals: 113 - 39 = 74\\n\\n2. 
**Great Britain:**\\n - Total medals: 64\\n - Gold medals: 22\\n - Silver and bronze medals: 64 - 22 = 42\\n\\nNow, find the difference in the number of silver and bronze medals between the United States and Great Britain:\\n\\n74 (United States) - 42 (Great Britain) = 32\\n\\nThe United States has 32 more silver and bronze medals than Great Britain.'" + ] + }, + "execution_count": 21, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Notice how this response may not be ideal, or the most accurate.\n", "prompt = f\"\"\"\n", @@ -1879,7 +1921,7 @@ "name": "python38-azureml" }, "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -1893,7 +1935,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb index 195c83072e..aa7f612044 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-A-Grounding.ipynb @@ -35,7 +35,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -44,7 +44,9 @@ "import json\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", - "load_dotenv(find_dotenv())" + "load_dotenv(find_dotenv())\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider" ] }, { @@ -62,19 +64,26 @@ "metadata": {}, "outputs": [], "source": [ - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "openai.api_base = RESOURCE_ENDPOINT\n", "\n", + "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "\n", - "model=os.getenv(\"CHAT_MODEL_NAME\")" + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", + "\n", + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")" ] }, { @@ -88,20 +97,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "def get_chat_completion(prompt, model=model):\n", + "def get_chat_completion(prompt, model=chat_model):\n", " messages = [{\"role\": \"user\", \"content\": prompt}]\n", - " response = openai.ChatCompletion.create(\n", - " engine=model,\n", + " response = client.chat.completions.create(\n", + " model=chat_model,\n", " messages=messages,\n", " temperature=0, # this is the degree of randomness of the model's output\n", " max_tokens = 200,\n", " top_p = 1.0\n", " )\n", - " return 
response.choices[0].message[\"content\"]" + " return response.choices[0].message.content" ] }, { @@ -130,7 +139,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=model)\n", + "model_response = get_chat_completion(prompt, model=chat_model)\n", "print(f\"Response: {model_response}\\n\")\n" ] }, @@ -156,7 +165,7 @@ "Enter Question Here\n", "\"\"\"\n", "\n", - "model_response = get_chat_completion(prompt, model=model)\n", + "model_response = get_chat_completion(prompt, model=chat_model)\n", "print(f\"Response: {model_response}\\n\")" ] }, @@ -183,7 +192,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -197,7 +206,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb index b5e623f23b..a4deb96601 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-B-Chunking.ipynb @@ -44,31 +44,36 @@ "metadata": {}, "outputs": [], "source": [ + "%pip install langchain langchain-text-splitters\n", + "\n", "import openai\n", "import PyPDF3\n", "import os\n", "import json\n", "import tiktoken\n", "import spacy\n", - "from openai.error import InvalidRequestError\n", "\n", "from dotenv import load_dotenv, find_dotenv\n", "load_dotenv(find_dotenv())\n", - "\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "from spacy.lang.en import English \n", "nlp = spacy.load(\"en_core_web_sm\")\n", "\n", "import langchain\n", - "from langchain.text_splitter import RecursiveCharacterTextSplitter" + "from langchain_text_splitters import RecursiveCharacterTextSplitter\n", + "from openai import BadRequestError" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Set up your environment to access your Azure OpenAI keys. Refer to your Azure OpenAI resource in the Azure Portal to retrieve information regarding your Azure OpenAI endpoint and keys. \n", - "\n", - "For security purposes, store your sensitive information in an .env file." + "This cell sets up your Python environment to access your Azure OpenAI endpoint and sets up various openai settings from your .env file. 
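As a rough, self-contained sketch of what the later chunking cells do with these imports, the snippet below splits a passage with `RecursiveCharacterTextSplitter` (now imported from `langchain_text_splitters`) and counts tokens per chunk with `tiktoken`; the sample text and the `chunk_size`/`chunk_overlap` values are assumptions for illustration, not values prescribed by the notebook.

```python
# Illustrative chunking example using the packages this notebook installs
# (langchain-text-splitters and tiktoken). Values below are examples only.
import tiktoken
from langchain_text_splitters import RecursiveCharacterTextSplitter

text = (
    "Azure OpenAI models have a fixed context window, so long documents are "
    "usually split into overlapping chunks before they are embedded or "
    "summarized. Overlap helps preserve context across chunk boundaries."
)

splitter = RecursiveCharacterTextSplitter(
    chunk_size=100,    # maximum characters per chunk
    chunk_overlap=20,  # characters shared between neighbouring chunks
)
chunks = splitter.split_text(text)

encoding = tiktoken.get_encoding("cl100k_base")
for i, chunk in enumerate(chunks):
    print(f"chunk {i}: {len(encoding.encode(chunk))} tokens -> {chunk!r}")
```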
" ] }, { @@ -77,19 +82,26 @@ "metadata": {}, "outputs": [], "source": [ - "# Load your OpenAI credentials\n", - "API_KEY = os.getenv(\"OPENAI_API_KEY\")\n", - "assert API_KEY, \"ERROR: Azure OpenAI Key is missing\"\n", - "openai.api_key = API_KEY\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", "\n", "RESOURCE_ENDPOINT = os.getenv(\"OPENAI_API_BASE\",\"\").strip()\n", "assert RESOURCE_ENDPOINT, \"ERROR: Azure OpenAI Endpoint is missing\"\n", "assert \"openai.azure.com\" in RESOURCE_ENDPOINT.lower(), \"ERROR: Azure OpenAI Endpoint should be in the form: \\n\\n\\t.openai.azure.com\"\n", - "openai.api_base = RESOURCE_ENDPOINT\n", "\n", + "openai.api_base = RESOURCE_ENDPOINT\n", "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "model=os.getenv(\"CHAT_MODEL_NAME\")\n" + "openai.azure_ad_token_provider = token_provider\n", + "chat_model=os.getenv(\"CHAT_MODEL_NAME\")\n", + "\n", + "client = AzureOpenAI(\n", + " azure_endpoint=RESOURCE_ENDPOINT,\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")" ] }, { @@ -164,12 +176,13 @@ "outputs": [], "source": [ "document = open(r'Insert PDF file path', 'rb') \n", + "\n", "doc_helper = PyPDF3.PdfFileReader(document)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ @@ -194,12 +207,16 @@ "\n", "try:\n", " final_prompt = prompt + q\n", - " response = openai.ChatCompletion.create(engine=model, messages=final_prompt, max_tokens=50)\n", - " answer = response.choices[0].text.strip()\n", + " response = client.chat.completions.create(\n", + " model=chat_model, \n", + " messages=[{\"role\": \"user\", \"content\": final_prompt}], \n", + " max_tokens=50\n", + " )\n", + " answer = response.choices[0].message.content.strip()\n", " print(f\"{q}\\n{answer}\\n\")\n", "\n", - "except InvalidRequestError as e:\n", - " print(e.error)\n", + "except BadRequestError as e:\n", + " print(e)\n", "\n" ] }, @@ -387,7 +404,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -401,7 +418,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb index 9e88ed1da1..de1b9484a3 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb @@ -52,7 +52,6 @@ "source": [ "! pip install num2words\n", "! pip install plotly\n", - "! pip install \"openai==0.28.1\" \n", "! 
pip install nptyping" ] }, @@ -62,7 +61,6 @@ "metadata": {}, "outputs": [], "source": [ - "import openai\n", "import os\n", "import re \n", "import requests\n", @@ -70,11 +68,36 @@ "from num2words import num2words \n", "import pandas as pd \n", "import numpy as np\n", - "from openai.embeddings_utils import get_embedding, cosine_similarity \n", "import tiktoken\n", "from dotenv import load_dotenv\n", "from tenacity import retry, wait_random_exponential, stop_after_attempt\n", - "load_dotenv() " + "from sklearn.metrics.pairwise import cosine_similarity as sklearn_cosine_similarity\n", + "from openai import AzureOpenAI\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "\n", + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n", + "\n", + "# Initialize the Azure OpenAI client\n", + "client = AzureOpenAI(\n", + " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", + ")\n", + "\n", + "# Define helper functions using the OpenAI 1.x API\n", + "@retry(wait=wait_random_exponential(min=1, max=20), stop=stop_after_attempt(6))\n", + "def get_embedding(text: str, engine: str) -> list:\n", + "\ttext = text.replace(\"\\n\", \" \")\n", + "\tresponse = client.embeddings.create(input=[text], model=engine)\n", + "\treturn response.data[0].embedding\n", + "\n", + "def cosine_similarity(a, b):\n", + "\treturn np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))" ] }, { @@ -88,15 +111,12 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ - "openai.api_type = os.getenv(\"OPENAI_API_TYPE\")\n", - "openai.api_key = os.environ.get(\"OPENAI_API_KEY\")\n", - "openai.api_base = os.environ.get(\"OPENAI_API_BASE\")\n", - "openai.api_version = os.getenv(\"OPENAI_API_VERSION\")\n", - "embedding_model=os.getenv(\"EMBEDDING_MODEL_NAME\")" + "# Get the embedding model name from environment\n", + "embedding_model = os.getenv(\"EMBEDDING_MODEL_NAME\")" ] }, { @@ -119,7 +139,7 @@ "\n", "input=\"I would like to order a pizza\"\n", "\n", - "# Add code here " + "# Add code here: Create embedding using the helper function\n" ] }, { @@ -127,7 +147,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The openai.Embedding.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." + "The client.embeddings.create() method will take a list of text - here we have a single sentence - and then will return a list containing a single embedding. You can use these embeddings when searching, providing recommendations, classification, and more." 
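As a minimal, self-contained sketch of that pattern, the snippet below creates a few embeddings with the keyless client and compares them with cosine similarity, mirroring the `get_embedding` and `cosine_similarity` helpers added to this notebook. The `EMBEDDING_MODEL_NAME` deployment name comes from the `.env` file, and the comparison sentences (other than the pizza example above) are assumptions for illustration.

```python
# Create a few embeddings and compare them with cosine similarity.
# Mirrors the helper functions defined in this notebook; sentences are examples.
import os

import numpy as np
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)
client = AzureOpenAI(
    azure_endpoint=os.environ["OPENAI_API_BASE"],
    azure_ad_token_provider=token_provider,
    api_version=os.environ["OPENAI_API_VERSION"],
)

def embed(text: str) -> np.ndarray:
    # embeddings.create accepts a list of inputs; we pass one string and read
    # back the single embedding vector the service returns for it.
    response = client.embeddings.create(
        input=[text.replace("\n", " ")],
        model=os.environ["EMBEDDING_MODEL_NAME"],
    )
    return np.array(response.data[0].embedding)

def cosine(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

pizza = embed("I would like to order a pizza")
delivery = embed("Can I get a margherita delivered?")
finance = embed("The stock market closed higher today")

print("pizza vs delivery:", round(cosine(pizza, delivery), 3))  # expected to be higher
print("pizza vs finance: ", round(cosine(pizza, finance), 3))   # expected to be lower
```

Ranking stored chunks by this similarity against a query embedding is the same retrieval step the later RAG notebooks perform against Azure AI Search.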
] }, { @@ -148,6 +168,7 @@ "outputs": [], "source": [ "df=pd.read_csv(os.path.join(os.getcwd(),r'Enter path here'))\n", + "\n", "df" ] }, @@ -163,9 +184,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "398" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n", "shortened_df['n_tokens'] = shortened_df[\"name\"].apply(lambda x: len(tokenizer.encode(x)))\n", @@ -195,7 +227,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, "outputs": [], "source": [ @@ -234,7 +266,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3 (ipykernel)", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -248,7 +280,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb index c35a148f3e..67fc0b57d3 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb @@ -106,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "69bd738e", "metadata": {}, "outputs": [], @@ -119,9 +119,8 @@ "import pandas as pd\n", "import numpy as np\n", "from sklearn.metrics.pairwise import cosine_similarity\n", - "\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "# Azure Cognitive Search imports\n", - "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", "from azure.search.documents.indexes.models import (\n", @@ -143,7 +142,12 @@ "from semantic_kernel.connectors.ai.open_ai import AzureChatPromptExecutionSettings\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()" + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")\n" ] }, { @@ -160,19 +164,19 @@ "# Initialize Semantic Kernel\n", "kernel = sk.Kernel()\n", "\n", - "# Add Azure OpenAI Chat Completion service\n", + "# Add Azure OpenAI Chat Completion service with Entra ID authentication\n", "chat_service = AzureChatCompletion(\n", " deployment_name=chat_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " api_key=os.environ['OPENAI_API_KEY']\n", + " ad_token_provider=token_provider\n", ")\n", "kernel.add_service(chat_service)\n", "\n", - "# Add Azure OpenAI Text Embedding service \n", + "# Add Azure OpenAI Text Embedding service with Entra ID authentication\n", "embedding_service = AzureTextEmbedding(\n", " deployment_name=embedding_model,\n", " endpoint=os.environ['OPENAI_API_BASE'],\n", - " api_key=os.environ['OPENAI_API_KEY']\n", + " ad_token_provider=token_provider\n", ")\n", "kernel.add_service(embedding_service)\n", "\n", @@ -206,10 +210,13 @@ "metadata": {}, "outputs": [], "source": [ - "# Create a Cognitive Search Index client\n", + "# Create a Cognitive Search Index client with Entra ID 
authentication\n", + "from azure.identity import AzureCliCredential\n", + "\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", - "credential = AzureKeyCredential(key)\n", + "\n", + "# Use AzureCliCredential for local development (more reliable than DefaultAzureCredential)\n", + "credential = AzureCliCredential()\n", "\n", "index_name = \"news-index\"\n", "\n", @@ -322,7 +329,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "id": "32689db7-4337-42d9-b8f9-4cbd9d98a850", "metadata": { "gather": { @@ -571,7 +578,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "id": "56354758-427f-4af9-94b9-96a25946e9a5", "metadata": { "gather": { @@ -587,7 +594,98 @@ } } }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Generated embeddings for 11 chunks\n", + "\n", + "Query: What did Laurene Jobs say about Hillary Clinton?\n", + "\n", + "Result 1 (Score: 0.913):\n", + "She is one of America’s greatest modern creations. Laurene Jobs, pictured, widow of Apple's Steve, has strongly backed Hillary Clinton for president . Laurene Jobs said that Hillary Clinton, right, ha...\n", + "\n", + "Result 2 (Score: 0.904):\n", + "Apple founder Steve Jobs' widow Laurene has told of her admiration for Democratic White House front-runner Hillary Clinton. Ms Jobs, 51, called former First Lady Hillary a 'revolutionary' woman, and a...\n", + "\n", + "Result 3 (Score: 0.829):\n", + "'It matters, of course, that Hillary is a woman. But what matters more is what kind of woman she is.' Mrs Clinton announced her intention to seek the Democratic nomination on Sunday - and set upon the...\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + 
"/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in 
matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", + " ret = a @ b\n", + "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", + " ret = a @ b\n" + ] + } + ], "source": [ "# Create embeddings for document chunks\n", "embeddings = []\n", @@ -819,7 +917,7 @@ "name": "python3" }, "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -833,7 +931,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "microsoft": { "host": { diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb index e4ca2b4acd..82f33157c6 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-B-RAG_for_unstructured_data.ipynb @@ -36,7 +36,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -49,6 +49,7 @@ "from azure.core.credentials import AzureKeyCredential\n", "from azure.search.documents.indexes import SearchIndexClient \n", "from azure.search.documents import SearchClient\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", "from azure.search.documents.indexes.models import (\n", " SearchIndex,\n", " SearchField,\n", @@ -66,12 +67,17 @@ "import numpy as np\n", "\n", "from dotenv import load_dotenv\n", - "load_dotenv()" + "load_dotenv()\n", + "\n", + "token_provider = get_bearer_token_provider(\n", + " DefaultAzureCredential(),\n", + " \"https://cognitiveservices.azure.com/.default\"\n", + ")" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -80,10 +86,11 @@ "# Initialize the Azure OpenAI client for the latest version\n", "from openai import AzureOpenAI\n", "\n", + "# Initialize the Azure OpenAI client\n", "client = AzureOpenAI(\n", - " api_key=os.environ['OPENAI_API_KEY'],\n", - " api_version=os.environ['OPENAI_API_VERSION'],\n", - " azure_endpoint=os.environ['OPENAI_API_BASE']\n", + " azure_endpoint=os.getenv(\"OPENAI_API_BASE\"),\n", + " azure_ad_token_provider=token_provider,\n", + " api_version=os.getenv(\"OPENAI_API_VERSION\")\n", ")\n", "\n", "chat_model = os.environ['CHAT_MODEL_NAME']\n", @@ -99,7 +106,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -111,18 +118,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "from azure.core.credentials import AzureKeyCredential\n", "from azure.ai.formrecognizer import DocumentAnalysisClient\n", "\n", - "endpoint = os.environ[\"AZURE_DOC_INTELLIGENCE_ENDPOINT\"]\n", - "key = os.environ[\"AZURE_DOC_INTELLIGENCE_KEY\"]\n", + "endpoint = os.environ[\"DOCUMENT_INTELLIGENCE_ENDPOINT\"]\n", + "\n", + "# Use Entra ID authentication instead of API key\n", + "credential = DefaultAzureCredential()\n", "\n", "document_analysis_client = DocumentAnalysisClient(\n", - " endpoint=endpoint, credential=AzureKeyCredential(key)\n", + " 
endpoint=endpoint, credential=credential\n", ")" ] }, @@ -138,7 +146,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ @@ -211,7 +219,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, "outputs": [], "source": [ @@ -263,8 +271,7 @@ "source": [ "# Create an SDK client\n", "service_endpoint = os.getenv(\"AZURE_AI_SEARCH_ENDPOINT\") \n", - "key = os.getenv(\"AZURE_AI_SEARCH_KEY\")\n", - "credential = AzureKeyCredential(key)\n", + "credential = DefaultAzureCredential()\n", "\n", "index_name = \"research-paper-index\"\n", "\n", @@ -338,7 +345,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 21, "metadata": {}, "outputs": [], "source": [ @@ -374,7 +381,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -421,7 +428,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, "outputs": [], "source": [ @@ -488,7 +495,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 29, "metadata": {}, "outputs": [], "source": [ @@ -545,18 +552,11 @@ "answer = query_search(\"what is prompt tuning?\", 10)\n", "print(answer)" ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -570,7 +570,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" }, "orig_nbformat": 4 }, diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb index 0bf538deef..77eecbbdb0 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-5.7-RedTeaming.ipynb @@ -94,7 +94,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "id": "94bcb00a", "metadata": {}, "outputs": [], @@ -199,7 +199,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": ".venv (3.13.11)", "language": "python", "name": "python3" }, @@ -213,7 +213,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.13" + "version": "3.13.11" } }, "nbformat": 4, From 0e49a81667ae93f55c4295c28ba44c7f0eb6b380 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:06:19 -0600 Subject: [PATCH 40/58] Update theme in _config.yml to jekyll-theme-midnight and clean up unnecessary configurations. --- _config.yml | 21 ++------------------- 1 file changed, 2 insertions(+), 19 deletions(-) diff --git a/_config.yml b/_config.yml index df2c679e98..fcfca7fe99 100644 --- a/_config.yml +++ b/_config.yml @@ -1,20 +1,3 @@ -theme: minima +theme: jekyll-theme-midnight title: What The Hack -description: A collection of challenge-based hackathons including student guides, coach guides, lecture presentations, sample/template code and sample solutions. 
-include: [CONTRIBUTING.md] -plugins: - - jekyll-optional-front-matter - - jekyll-paginate - - jekyll-readme-index - - jekyll-default-layout - - jekyll-relative-links -minima: - skin: classic - social_links: - github: perktime -author: - name: What The Hack - email: info@whathehack.com -header_pages: - - VISUAL-SHOWCASE.md - - CONTRIBUTING.md \ No newline at end of file +include: [CONTRIBUTING.md] \ No newline at end of file From a65b0277d1eeec39cddab908c1d44092d09f3494 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:06:27 -0600 Subject: [PATCH 41/58] Remove .devcontainer.json and VISUAL-SHOWCASE.md files to streamline the repository. --- .devcontainer.json | 18 ---- VISUAL-SHOWCASE.md | 221 --------------------------------------------- 2 files changed, 239 deletions(-) delete mode 100644 .devcontainer.json delete mode 100644 VISUAL-SHOWCASE.md diff --git a/.devcontainer.json b/.devcontainer.json deleted file mode 100644 index 61bbe61eff..0000000000 --- a/.devcontainer.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "name": "WhatTheHack Dev Container", - "image": "mcr.microsoft.com/devcontainers/universal:2-ubuntu", - "features": { - "ghcr.io/devcontainers/features/azure-cli:1": {}, - "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/python:1": {} - }, - "customizations": { - "vscode": { - "extensions": [ - "ms-vscode.vscode-json", - "ms-python.python" - ] - } - }, - "postCreateCommand": "echo 'Dev container ready!'" -} \ No newline at end of file diff --git a/VISUAL-SHOWCASE.md b/VISUAL-SHOWCASE.md deleted file mode 100644 index e19d275222..0000000000 --- a/VISUAL-SHOWCASE.md +++ /dev/null @@ -1,221 +0,0 @@ -# 🎨 Visual Elements Showcase - -This page demonstrates all the enhanced visual elements available in the What The Hack repository. - ---- - -## πŸ“Š Progress Bars - -
-
25% Complete
-
- -
-
75% Complete
-
- -
-
100% Complete!
-
- ---- - -## 🚨 Alert Boxes - -
-πŸ’‘ Info: This is an informational message with helpful tips! -
- -
-βœ… Success: Great job! You've completed this step successfully. -
- -
-⚠️ Warning: Please pay attention to this important information. -
- -
-🚫 Error: Something went wrong. Please check your configuration. -
- ---- - -## 🏷️ Badges - -Technology badges: -![Azure](https://img.shields.io/badge/Azure-0078D4?style=for-the-badge&logo=microsoft-azure&logoColor=white) -![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white) -![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white) -![Python](https://img.shields.io/badge/Python-3776AB?style=for-the-badge&logo=python&logoColor=white) -![Docker](https://img.shields.io/badge/Docker-2496ED?style=for-the-badge&logo=docker&logoColor=white) - -Status badges: -Primary -Success -Warning -Danger -Info - ---- - -## 🎯 Step Indicators - -1 **First Step** - This is the first step in the process - -2 **Second Step** - This is the second step - -3 **Final Step** - This is the final step - ---- - -## πŸ“‹ Status Indicators - -βœ… Complete -⏳ Pending -❌ Error - ---- - -## πŸ”˜ Buttons - -Primary Button -Success Button -Warning Button -Info Button - ---- - -## πŸ“¦ Cards - -
- -### 🎯 Card Example - -This is a modern card container with enhanced styling. It includes: -- Beautiful box shadows -- Hover effects -- Gradient top border -- Responsive design - -Cards are perfect for grouping related content together. - -
- ---- - -## πŸ—‚οΈ Collapsible Sections - -
-πŸ”§ Advanced Configuration -
- -This is a collapsible section that can contain: -- Detailed configuration steps -- Advanced troubleshooting information -- Optional content that doesn't clutter the main flow -- Code examples and technical details - -```bash -# Example command -az group create --name myResourceGroup --location eastus -``` - -
-
- -
-πŸ“š Additional Resources -
- -Here you can include: -- Links to documentation -- Video tutorials -- Related articles -- Community resources - -
-
- ---- - -## πŸ“Š Enhanced Tables - -| Feature | Status | Description | -|---------|:------:|-------------| -| 🎨 Modern Theme | βœ… Active | Cayman theme with gradients | -| πŸ“± Responsive Design | βœ… Active | Mobile-optimized | -| πŸŒ™ Dark Mode | βœ… Active | Auto-detection | -| πŸ“ˆ Mermaid Diagrams | βœ… Active | Interactive diagrams | -| β™Ώ Accessibility | βœ… Active | WCAG compliant | - ---- - -## πŸ“ˆ Mermaid Diagrams - -```mermaid -graph TD - A[πŸš€ Start Challenge] --> B{Choose Path} - B -->|Easy| C[πŸ“š Follow Guide] - B -->|Advanced| D[πŸ”§ Custom Setup] - C --> E[βœ… Success] - D --> F[πŸ§ͺ Test Configuration] - F --> G{Tests Pass?} - G -->|Yes| E - G -->|No| H[πŸ› Debug Issues] - H --> F - - style A fill:#e1f5fe - style E fill:#e8f5e8 - style H fill:#ffebee -``` - ---- - -## πŸ’¬ Enhanced Blockquotes - -> **πŸ’‘ Pro Tip** -> -> This is an enhanced blockquote with modern styling. It includes beautiful gradients, shadows, and improved typography for better readability. - -> **🎯 Key Insight** -> -> Use these visual elements to make your documentation more engaging and easier to follow. Visual hierarchy helps users scan content quickly and find what they need. - ---- - -## πŸ–₯️ Code Blocks - -```bash -# Example terminal commands -npm install -npm start - -# With syntax highlighting -git add . -git commit -m "✨ Add new features" -git push origin main -``` - -```javascript -// JavaScript example with enhanced styling -function enhanceMarkdown() { - const elements = document.querySelectorAll('.enhanced'); - elements.forEach(el => { - el.classList.add('modern-styling'); - }); -} -``` - ---- - -## ✨ Conclusion - -These visual enhancements transform plain markdown into engaging, modern documentation that: - -- πŸ“ˆ **Improves user experience** with better visual hierarchy -- 🎯 **Increases engagement** through interactive elements -- πŸ“± **Works on all devices** with responsive design -- β™Ώ **Supports accessibility** with proper contrast and structure -- 🎨 **Looks professional** with modern design patterns - -Ready to use these in your own challenges? Check out the [Enhanced Template](../000-HowToHack/WTH-Challenge-Enhanced-Template.md)! \ No newline at end of file From 376be409f0405c479f578c7e95b28ebb3fdcf1d8 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:08:55 -0600 Subject: [PATCH 42/58] Refactor default.html layout for improved structure and readability; streamline HTML elements and remove unnecessary includes. --- _layouts/default.html | 63 +++++++++++++++++++++---------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/_layouts/default.html b/_layouts/default.html index e0cfe987be..579626f46b 100644 --- a/_layouts/default.html +++ b/_layouts/default.html @@ -1,39 +1,38 @@ - - + + + + + + +{% seo %} + + + + + + + + + - {%- include head.html -%} +
- +
+
+ + + +
- {%- include header.html -%} - -
-
- - - - {{ content }} -
-
- {%- include footer.html -%} +
+ +
@@ -45,7 +44,5 @@ gtag('config', 'G-E7MV34DNDL'); gtag('config', 'UA-173162534-1'); - - From 2ce2cc3e39784df6c44a45487b7010d4cb4012c0 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:09:59 -0600 Subject: [PATCH 43/58] Remove custom.css file to streamline styles and eliminate unused CSS --- assets/css/custom.css | 926 ------------------------------------------ 1 file changed, 926 deletions(-) delete mode 100644 assets/css/custom.css diff --git a/assets/css/custom.css b/assets/css/custom.css deleted file mode 100644 index aa5e6fac4e..0000000000 --- a/assets/css/custom.css +++ /dev/null @@ -1,926 +0,0 @@ -/* Bright and friendly styling for What The Hack markdown pages */ - -/* Bright color palette */ -:root { - --primary-color: #2196F3; - --success-color: #4CAF50; - --warning-color: #FF9800; - --danger-color: #F44336; - --info-color: #00BCD4; - --light-blue: #E3F2FD; - --light-green: #E8F5E8; - --light-orange: #FFF3E0; - --light-red: #FFEBEE; - --light-cyan: #E0F7FA; - --border-color: #E0E0E0; - --text-muted: #757575; - --gradient-start: #42A5F5; - --gradient-end: #66BB6A; - --background-light: #FAFAFA; -} - -/* Smooth animations */ -* { - transition: all 0.3s ease; -} - -/* Enhanced badges with bright colors */ -.badge { - display: inline-block; - padding: 0.4em 0.8em; - font-size: 0.8em; - font-weight: 600; - line-height: 1; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: 20px; - text-decoration: none; - margin: 0.2em; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -.badge-primary { background: linear-gradient(135deg, #42A5F5, #1E88E5); color: white; } -.badge-success { background: linear-gradient(135deg, #66BB6A, #43A047); color: white; } -.badge-warning { background: linear-gradient(135deg, #FFA726, #FF8F00); color: white; } -.badge-danger { background: linear-gradient(135deg, #EF5350, #D32F2F); color: white; } -.badge-info { background: linear-gradient(135deg, #26C6DA, #00ACC1); color: white; } - -/* Bright and cheerful alert boxes */ -.alert { - padding: 1.2rem 1.5rem; - margin-bottom: 1.5rem; - border: none; - border-radius: 12px; - position: relative; - box-shadow: 0 4px 12px rgba(0,0,0,0.1); - border-left: 5px solid; -} - -.alert-info { - background: linear-gradient(135deg, var(--light-cyan) 0%, #B2EBF2 100%); - border-left-color: var(--info-color); - color: #006064; -} - -.alert-warning { - background: linear-gradient(135deg, var(--light-orange) 0%, #FFE0B2 100%); - border-left-color: var(--warning-color); - color: #E65100; -} - -.alert-success { - background: linear-gradient(135deg, var(--light-green) 0%, #C8E6C9 100%); - border-left-color: var(--success-color); - color: #1B5E20; -} - -.alert-danger { - background: linear-gradient(135deg, var(--light-red) 0%, #FFCDD2 100%); - border-left-color: var(--danger-color); - color: #B71C1C; -} - -/* Bright, friendly blockquotes */ -blockquote { - background: linear-gradient(135deg, #F3E5F5 0%, #E1BEE7 100%); - border-left: 5px solid #9C27B0; - margin: 1.5em 0; - padding: 1.2em 1.8em; - border-radius: 0 15px 15px 0; - box-shadow: 0 4px 12px rgba(156, 39, 176, 0.15); - position: relative; -} - -blockquote:before { - color: #9C27B0; - content: open-quote; - font-size: 3em; - line-height: 0.1em; - margin-right: 0.25em; - vertical-align: -0.4em; - opacity: 0.4; -} - -blockquote p { - display: inline; - font-style: italic; - color: #4A148C; - font-weight: 500; -} - -/* Bright code blocks */ -pre { - background: linear-gradient(135deg, #263238 0%, #37474F 100%); - color: #B0BEC5; - 
border: none; - border-radius: 12px; - padding: 1.5rem; - overflow-x: auto; - margin: 1.5rem 0; - box-shadow: 0 6px 20px rgba(0,0,0,0.15); - position: relative; -} - -pre:before { - content: "πŸ’» Code"; - position: absolute; - top: 0.8rem; - right: 1.2rem; - font-size: 0.8rem; - color: #78909C; - background: rgba(255,255,255,0.1); - padding: 0.2rem 0.5rem; - border-radius: 8px; - opacity: 0.8; -} - -code { - background: linear-gradient(135deg, #E8F5E8 0%, #C8E6C9 100%); - color: #2E7D32; - padding: 0.3em 0.6em; - border-radius: 6px; - font-size: 85%; - font-weight: 600; - box-shadow: 0 1px 3px rgba(0,0,0,0.1); -} - -/* Bright, modern button styling */ -.btn { - display: inline-block; - padding: 0.8rem 2rem; - margin: 0.3rem; - font-size: 1rem; - font-weight: 600; - line-height: 1.5; - text-align: center; - text-decoration: none; - white-space: nowrap; - vertical-align: middle; - cursor: pointer; - border: none; - border-radius: 25px; - transition: all 0.3s ease; - box-shadow: 0 4px 12px rgba(0,0,0,0.15); - text-transform: uppercase; - letter-spacing: 0.5px; -} - -.btn:hover { - transform: translateY(-3px); - box-shadow: 0 8px 25px rgba(0,0,0,0.25); - text-decoration: none; -} - -.btn-primary { - background: linear-gradient(135deg, #42A5F5 0%, #1E88E5 100%); - color: white; -} - -.btn-success { - background: linear-gradient(135deg, #66BB6A 0%, #43A047 100%); - color: white; -} - -.btn-warning { - background: linear-gradient(135deg, #FFA726 0%, #FF8F00 100%); - color: white; -} - -.btn-info { - background: linear-gradient(135deg, #26C6DA 0%, #00ACC1 100%); - color: white; -} - -/* Bright, beautiful tables */ -table { - border-collapse: collapse; - width: 100%; - margin: 2rem 0; - box-shadow: 0 8px 25px rgba(0,0,0,0.1); - border-radius: 12px; - overflow: hidden; - background: white; -} - -th { - background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - color: white; - padding: 1.2rem; - text-align: left; - font-weight: 700; - text-transform: uppercase; - letter-spacing: 0.5px; -} - -td { - padding: 1.2rem; - border-bottom: 1px solid #F0F0F0; -} - -tr:nth-child(even) { - background: linear-gradient(135deg, #FAFAFA 0%, #F5F5F5 100%); -} - -tr:hover { - background: linear-gradient(135deg, var(--light-blue) 0%, #BBDEFB 100%); - transform: scale(1.01); -} - -/* Colorful progress indicators */ -.progress { - width: 100%; - height: 2rem; - background: linear-gradient(135deg, #EEEEEE 0%, #E0E0E0 100%); - border-radius: 25px; - overflow: hidden; - margin: 1.5rem 0; - box-shadow: inset 0 2px 4px rgba(0,0,0,0.1); -} - -.progress-bar { - height: 100%; - background: linear-gradient(90deg, #FF6B6B 0%, #4ECDC4 50%, #45B7D1 100%); - border-radius: 25px; - text-align: center; - line-height: 2rem; - color: white; - font-weight: 700; - font-size: 0.9rem; - transition: width 0.8s ease; - position: relative; - overflow: hidden; - text-transform: uppercase; - letter-spacing: 0.5px; -} - -.progress-bar:before { - content: ''; - position: absolute; - top: 0; - left: -100%; - width: 100%; - height: 100%; - background: linear-gradient(90deg, transparent, rgba(255,255,255,0.3), transparent); - animation: shimmer 2s infinite; -} - -@keyframes shimmer { - 0% { left: -100%; } - 100% { left: 100%; } -} - -/* Bright, cheerful cards */ -.card { - background: linear-gradient(135deg, white 0%, #FAFAFA 100%); - border-radius: 20px; - padding: 2.5rem; - margin: 2rem 0; - box-shadow: 0 10px 30px rgba(0,0,0,0.1); - border: 1px solid var(--border-color); - position: relative; - overflow: hidden; -} 
- -.card:before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 5px; - background: linear-gradient(90deg, #FF6B6B 0%, #4ECDC4 25%, #45B7D1 50%, #96CEB4 75%, #FFEAA7 100%); -} - -.card:hover { - transform: translateY(-8px); - box-shadow: 0 20px 40px rgba(0,0,0,0.15); -} - -/* Colorful step indicators */ -.step { - display: inline-flex; - align-items: center; - justify-content: center; - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); - color: white; - border-radius: 50%; - width: 3rem; - height: 3rem; - text-align: center; - line-height: 1; - margin-right: 1rem; - font-weight: bold; - font-size: 1.2rem; - box-shadow: 0 6px 15px rgba(102, 126, 234, 0.4); - position: relative; -} - -.step:after { - content: ''; - position: absolute; - width: 100%; - height: 100%; - border-radius: 50%; - background: inherit; - top: 0; - left: 0; - z-index: -1; - opacity: 0; - transform: scale(1.2); - animation: pulse 2s infinite; -} - -@keyframes pulse { - 0% { opacity: 0; transform: scale(1); } - 50% { opacity: 0.4; transform: scale(1.3); } - 100% { opacity: 0; transform: scale(1.5); } -} - -/* Bright collapsible sections */ -details { - border: 2px solid var(--border-color); - border-radius: 15px; - padding: 0; - margin: 1.5rem 0; - overflow: hidden; - box-shadow: 0 4px 12px rgba(0,0,0,0.1); -} - -summary { - background: linear-gradient(135deg, #E3F2FD 0%, #BBDEFB 100%); - padding: 1.2rem 2rem; - cursor: pointer; - font-weight: 700; - border-bottom: 2px solid var(--border-color); - position: relative; - color: #1565C0; - text-transform: uppercase; - letter-spacing: 0.5px; -} - -summary:hover { - background: linear-gradient(135deg, #BBDEFB 0%, #90CAF9 100%); -} - -summary:after { - content: 'πŸ”½'; - position: absolute; - right: 2rem; - top: 50%; - transform: translateY(-50%); - transition: transform 0.3s ease; - font-size: 1.2rem; -} - -details[open] summary:after { - transform: translateY(-50%) rotate(180deg); -} - -details div { - padding: 2rem; - background: linear-gradient(135deg, white 0%, #F8F9FA 100%); -} - -/* Bright status indicators */ -.status { - display: inline-flex; - align-items: center; - gap: 0.5rem; - padding: 0.6rem 1.2rem; - border-radius: 25px; - font-size: 0.9rem; - font-weight: 600; - text-transform: uppercase; - letter-spacing: 0.5px; - box-shadow: 0 2px 8px rgba(0,0,0,0.1); -} - -.status-complete { - background: linear-gradient(135deg, var(--light-green) 0%, #C8E6C9 100%); - color: #1B5E20; - border: 2px solid var(--success-color); -} - -.status-pending { - background: linear-gradient(135deg, var(--light-orange) 0%, #FFE0B2 100%); - color: #E65100; - border: 2px solid var(--warning-color); -} - -.status-error { - background: linear-gradient(135deg, var(--light-red) 0%, #FFCDD2 100%); - color: #B71C1C; - border: 2px solid var(--danger-color); -} - -/* Bright page styling */ -body { - background: linear-gradient(135deg, #FAFAFA 0%, #F0F4F8 100%); - color: #333; -} - -/* Enhanced headings */ -h1, h2, h3, h4, h5, h6 { - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - background-clip: text; - font-weight: 700; -} - -/* Responsive design */ -@media (max-width: 768px) { - .card { - margin: 1rem 0; - padding: 1.5rem; - } - - .btn { - width: 100%; - margin: 0.5rem 0; - } - - table { - font-size: 0.9rem; - } - - .step { - width: 2.5rem; - height: 2.5rem; - margin-right: 0.75rem; - } -} - -/* Remove dark mode for bright theme */ -@media 
(prefers-color-scheme: dark) { - /* Override dark mode to keep bright theme */ - body { - background: linear-gradient(135deg, #FAFAFA 0%, #F0F4F8 100%) !important; - color: #333 !important; - } - - .card { - background: linear-gradient(135deg, white 0%, #FAFAFA 100%) !important; - color: #333 !important; - } -} - -/* Smooth animations */ -* { - transition: all 0.3s ease; -} - -/* Enhanced badges */ -.badge { - display: inline-block; - padding: 0.25em 0.6em; - font-size: 0.75em; - font-weight: 700; - line-height: 1; - text-align: center; - white-space: nowrap; - vertical-align: baseline; - border-radius: 0.375rem; - text-decoration: none; - margin: 0.2em; -} - -.badge-primary { background-color: var(--primary-color); color: white; } -.badge-success { background-color: var(--success-color); color: white; } -.badge-warning { background-color: var(--warning-color); color: black; } -.badge-danger { background-color: var(--danger-color); color: white; } -.badge-info { background-color: var(--info-color); color: white; } - -/* Modern alert boxes */ -.alert { - padding: 1rem 1.25rem; - margin-bottom: 1rem; - border: 1px solid transparent; - border-radius: 0.5rem; - position: relative; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -.alert-info { - color: #0c5460; - background-color: #d1ecf1; - border-color: #bee5eb; - border-left: 4px solid var(--info-color); -} - -.alert-warning { - color: #856404; - background-color: #fff3cd; - border-color: #ffeaa7; - border-left: 4px solid var(--warning-color); -} - -.alert-success { - color: #155724; - background-color: #d4edda; - border-color: #c3e6cb; - border-left: 4px solid var(--success-color); -} - -.alert-danger { - color: #721c24; - background-color: #f8d7da; - border-color: #f1b0b7; - border-left: 4px solid var(--danger-color); -} - -/* Enhanced blockquotes */ -blockquote { - background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); - border-left: 5px solid var(--primary-color); - margin: 1.5em 0; - padding: 1em 1.5em; - quotes: "\201C""\201D""\2018""\2019"; - position: relative; - border-radius: 0 8px 8px 0; - box-shadow: 0 2px 8px rgba(0,0,0,0.1); -} - -blockquote:before { - color: var(--primary-color); - content: open-quote; - font-size: 3em; - line-height: 0.1em; - margin-right: 0.25em; - vertical-align: -0.4em; - opacity: 0.3; -} - -blockquote p { - display: inline; - font-style: italic; -} - -/* Enhanced code blocks */ -pre { - background: linear-gradient(135deg, #2d3748 0%, #1a202c 100%); - color: #e2e8f0; - border: none; - border-radius: 8px; - padding: 1.5rem; - overflow-x: auto; - margin: 1.5rem 0; - box-shadow: 0 4px 12px rgba(0,0,0,0.2); - position: relative; -} - -pre:before { - content: "πŸ’» Code"; - position: absolute; - top: 0.5rem; - right: 1rem; - font-size: 0.8rem; - color: #a0aec0; - opacity: 0.7; -} - -code { - background-color: #f1f3f4; - color: #d73a49; - padding: 0.2em 0.4em; - border-radius: 3px; - font-size: 85%; -} - -/* Modern button styling */ -.btn { - display: inline-block; - padding: 0.75rem 1.5rem; - margin: 0.25rem; - font-size: 1rem; - font-weight: 500; - line-height: 1.5; - text-align: center; - text-decoration: none; - white-space: nowrap; - vertical-align: middle; - cursor: pointer; - border: 1px solid transparent; - border-radius: 0.5rem; - transition: all 0.15s ease-in-out; - box-shadow: 0 2px 4px rgba(0,0,0,0.1); -} - -.btn:hover { - transform: translateY(-2px); - box-shadow: 0 4px 8px rgba(0,0,0,0.2); -} - -.btn-primary { - color: #fff; - background: linear-gradient(135deg, #667eea 0%, #764ba2 100%); - 
border-color: transparent; -} - -.btn-success { - color: #fff; - background: linear-gradient(135deg, #4ecdc4 0%, #44a08d 100%); - border-color: transparent; -} - -.btn-warning { - color: #212529; - background: linear-gradient(135deg, #ffeaa7 0%, #fab1a0 100%); - border-color: transparent; -} - -.btn-info { - color: #fff; - background: linear-gradient(135deg, #74b9ff 0%, #0984e3 100%); - border-color: transparent; -} - -/* Enhanced tables */ -table { - border-collapse: collapse; - width: 100%; - margin: 2rem 0; - box-shadow: 0 4px 8px rgba(0,0,0,0.1); - border-radius: 8px; - overflow: hidden; -} - -th { - background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - color: white; - padding: 1rem; - text-align: left; - font-weight: 600; -} - -td { - padding: 1rem; - border-bottom: 1px solid var(--border-color); -} - -tr:nth-child(even) { - background-color: #f8f9fa; -} - -tr:hover { - background-color: #e3f2fd; -} - -/* Progress indicators */ -.progress { - width: 100%; - height: 1.5rem; - background-color: #e9ecef; - border-radius: 0.75rem; - overflow: hidden; - margin: 1rem 0; - box-shadow: inset 0 1px 2px rgba(0,0,0,0.1); -} - -.progress-bar { - height: 100%; - background: linear-gradient(90deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - border-radius: 0.75rem; - text-align: center; - line-height: 1.5rem; - color: white; - font-weight: 500; - transition: width 0.6s ease; - position: relative; - overflow: hidden; -} - -.progress-bar:before { - content: ''; - position: absolute; - top: 0; - left: -100%; - width: 100%; - height: 100%; - background: linear-gradient(90deg, transparent, rgba(255,255,255,0.2), transparent); - animation: shimmer 2s infinite; -} - -@keyframes shimmer { - 0% { left: -100%; } - 100% { left: 100%; } -} - -/* Card-like containers */ -.card { - background: white; - border-radius: 12px; - padding: 2rem; - margin: 2rem 0; - box-shadow: 0 8px 25px rgba(0,0,0,0.1); - border: 1px solid var(--border-color); - position: relative; - overflow: hidden; -} - -.card:before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 4px; - background: linear-gradient(90deg, var(--gradient-start) 0%, var(--gradient-end) 100%); -} - -.card:hover { - transform: translateY(-4px); - box-shadow: 0 12px 35px rgba(0,0,0,0.15); -} - -/* Step indicators */ -.step { - display: inline-flex; - align-items: center; - justify-content: center; - background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - color: white; - border-radius: 50%; - width: 2.5rem; - height: 2.5rem; - text-align: center; - line-height: 1; - margin-right: 1rem; - font-weight: bold; - box-shadow: 0 4px 8px rgba(0,0,0,0.2); - position: relative; -} - -.step:after { - content: ''; - position: absolute; - width: 100%; - height: 100%; - border-radius: 50%; - background: inherit; - top: 0; - left: 0; - z-index: -1; - opacity: 0; - transform: scale(1.2); - animation: pulse 2s infinite; -} - -@keyframes pulse { - 0% { opacity: 0; transform: scale(1); } - 50% { opacity: 0.3; transform: scale(1.2); } - 100% { opacity: 0; transform: scale(1.4); } -} - -/* Collapsible sections */ -details { - border: 1px solid var(--border-color); - border-radius: 8px; - padding: 0; - margin: 1rem 0; - overflow: hidden; -} - -summary { - background: linear-gradient(135deg, #f8f9fa 0%, #e9ecef 100%); - padding: 1rem 1.5rem; - cursor: pointer; - font-weight: 600; - border-bottom: 1px solid var(--border-color); - position: relative; -} - -summary:hover { - 
background: linear-gradient(135deg, #e9ecef 0%, #dee2e6 100%); -} - -summary:after { - content: 'β–Ό'; - position: absolute; - right: 1.5rem; - top: 50%; - transform: translateY(-50%); - transition: transform 0.3s ease; -} - -details[open] summary:after { - transform: translateY(-50%) rotate(180deg); -} - -details div { - padding: 1.5rem; - background: white; -} - -/* Tooltips */ -.tooltip { - position: relative; - display: inline-block; - border-bottom: 1px dotted var(--primary-color); - cursor: help; -} - -.tooltip .tooltiptext { - visibility: hidden; - width: 200px; - background-color: #333; - color: #fff; - text-align: center; - border-radius: 6px; - padding: 8px; - position: absolute; - z-index: 1; - bottom: 125%; - left: 50%; - margin-left: -100px; - opacity: 0; - transition: opacity 0.3s; - font-size: 0.9em; -} - -.tooltip:hover .tooltiptext { - visibility: visible; - opacity: 1; -} - -/* Status indicators */ -.status { - display: inline-flex; - align-items: center; - gap: 0.5rem; - padding: 0.5rem 1rem; - border-radius: 20px; - font-size: 0.9rem; - font-weight: 500; -} - -.status-complete { - background-color: #d4edda; - color: #155724; - border: 1px solid #c3e6cb; -} - -.status-pending { - background-color: #fff3cd; - color: #856404; - border: 1px solid #ffeaa7; -} - -.status-error { - background-color: #f8d7da; - color: #721c24; - border: 1px solid #f1b0b7; -} - -/* Responsive design */ -@media (max-width: 768px) { - .card { - margin: 1rem 0; - padding: 1.5rem; - } - - .btn { - width: 100%; - margin: 0.5rem 0; - } - - table { - font-size: 0.9rem; - } - - .step { - width: 2rem; - height: 2rem; - margin-right: 0.75rem; - } -} - -/* Dark mode support */ -@media (prefers-color-scheme: dark) { - .card { - background: #2d3748; - border-color: #4a5568; - color: #e2e8f0; - } - - .alert-info { - background-color: #2b4c5a; - color: #bee3f8; - } - - .alert-success { - background-color: #22543d; - color: #9ae6b4; - } - - .alert-warning { - background-color: #744210; - color: #faf089; - } - - .alert-danger { - background-color: #63171b; - color: #feb2b2; - } -} \ No newline at end of file From feb1a984589de1601ac903a8b350d2d131f0741e Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:21:43 -0600 Subject: [PATCH 44/58] Refactor Challenge 00 lab documentation; remove unnecessary badges and streamline content for clarity. --- .../Student/Challenge-00-lab.md | 256 ++++-------------- .../Student/Challenge-00-nolab.md | 1 - 2 files changed, 53 insertions(+), 204 deletions(-) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-lab.md b/068-AzureOpenAIApps/Student/Challenge-00-lab.md index 4422abd463..6888c28944 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-lab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-lab.md @@ -1,106 +1,44 @@ -# πŸš€ Challenge 00 - Prerequisites - Ready, Set, GO! (Lab Provided) +# Challenge 00 - Prerequisites - Ready, Set, GO! (Lab Provided) -**[🏠 Home](../README.md)** - [Next Challenge > πŸ“‹](./Challenge-01.md) +**[Home](../README.md)** - [Next Challenge >](./Challenge-01.md) -
Challenge 00 - Setup Phase
+## Introduction -![Azure](https://img.shields.io/badge/Azure-0078D4?style=for-the-badge&logo=microsoft-azure&logoColor=white) -![OpenAI](https://img.shields.io/badge/OpenAI-412991?style=for-the-badge&logo=openai&logoColor=white) -![TypeScript](https://img.shields.io/badge/TypeScript-007ACC?style=for-the-badge&logo=typescript&logoColor=white) -![Angular](https://img.shields.io/badge/Angular-DD0031?style=for-the-badge&logo=angular&logoColor=white) -![Python](https://img.shields.io/badge/Python-3776AB?style=for-the-badge&logo=python&logoColor=white) +Thank you for participating in the Azure Open AI Apps What The Hack. An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. Before you can hack, you will still need to set up some prerequisites. ---- +## Description -## πŸ‘‹ Introduction +In this challenge, you will setup the necessary pre-requisites and environment to complete the rest of the hack, including: -
-πŸŽ‰ Welcome to the Azure OpenAI Apps What The Hack!
-An Azure lab environment will be provided to you with the sample application resources pre-deployed into Azure. Before you can hack, you will still need to set up some prerequisites. -
+- [Access Azure Subscription](#access-azure-subscription) +- [Setup Development Environment](#setup-development-environment) + - [Use GitHub Codespaces](#use-github-codespaces) + - [Use Local Workstation](#use-local-workstation) +- [Setup Citrus Bus Application](#setup-citrus-bus-application) + - [Get Azure Resource Settings](#get-azure-resource-settings) + - [Setup App Backend and Frontend](#setup-app-backend-and-frontend) + - [Setup App Backend](#setup-app-backend) + - [Setup App Frontend](#setup-app-frontend) ---- +### Access Azure Subscription -## πŸ“‹ Description +You will be provided login credentials to an Azure subscription to complete this hack by your coach. When you receive your credentials, make note of them and login to the Azure Portal: +- [Azure Portal](https://portal.azure.com) -
+Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). -### 🎯 Challenge Overview - -This challenge will guide you through setting up your development environment and the Citrus Bus application. Follow the steps below to get everything ready for the upcoming challenges. - -```mermaid -graph TD - A[πŸš€ Start Challenge] --> B[πŸ” Access Azure Subscription] - B --> C[βš™οΈ Setup Development Environment] - C --> D{Choose Environment} - D -->|Recommended| E[☁️ GitHub Codespaces] - D -->|Alternative| F[πŸ’» Local Workstation] - E --> G[πŸ—οΈ Setup Citrus Bus App] - F --> G - G --> H[πŸ§ͺ Test Application] - H --> I[βœ… Success!] - - style A fill:#e1f5fe - style I fill:#e8f5e8 - style E fill:#fff3e0 -``` - -
- -### 🎯 Quick Navigation -- [πŸ” Access Azure Subscription](#access-azure-subscription) -- [βš™οΈ Setup Development Environment](#setup-development-environment) - - [☁️ Use GitHub Codespaces](#use-github-codespaces) - - [πŸ’» Use Local Workstation](#use-local-workstation) -- [πŸ—οΈ Setup Citrus Bus Application](#setup-citrus-bus-application) - - [βš™οΈ Get Azure Resource Settings](#get-azure-resource-settings) - - [πŸ”§ Setup App Backend and Frontend](#setup-app-backend-and-frontend) - - [πŸ”™ Setup App Backend](#setup-app-backend) - - [🎨 Setup App Frontend](#setup-app-frontend) - ---- - -### πŸ” Access Azure Subscription - -
-πŸ“ Important: You will be provided login credentials to an Azure subscription to complete this hack by your coach. -
- -When you receive your credentials, make note of them and login to the Azure Portal: - - - -
-πŸ’‘ Pro Tip: Keep your credentials handy as you will also need them to login to the Azure CLI (command line interface). -
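As a quick reference, signing in to the Azure CLI with the provided lab credentials generally looks like the short sketch below. The subscription placeholder is illustrative; use the value your coach gives you.

```bash
# Sign in to the Azure CLI; a browser or device-code prompt will ask for the lab credentials
az login --use-device-code

# If the lab account can see more than one subscription, select the one for this hack
# ("<lab-subscription-name-or-id>" is a placeholder supplied by your coach)
az account set --subscription "<lab-subscription-name-or-id>"

# Confirm the CLI is pointed at the expected subscription
az account show --output table
```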
- ---- - -### βš™οΈ Setup Development Environment +### Setup Development Environment You will need a set of developer tools to work with the sample application for this hack. -> **πŸš€ Quick Start Options** -> -> Choose your preferred development environment: - -
+You can use GitHub Codespaces where we have a pre-configured development environment set up and ready to go for you, or you can setup the developer tools on your local workstation. -| ☁️ **GitHub Codespaces** | πŸ–₯️ **Dev Containers** | πŸ’» **Local Workstation** | -|:---:|:---:|:---:| -| [![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack?devcontainer_path=.devcontainer%2F068-AzureOpenAIApps%2Fdevcontainer.json) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack) | [πŸ“– Setup Guide](#use-local-workstation) | -| **Recommended** ⭐ | **VS Code Required** | **Manual Setup** | +A GitHub Codespace is a development environment that is hosted in the cloud that you access via a browser. All of the pre-requisite developer tools for this hack are pre-installed and available in the codespace. -
+- [Use GitHub Codespaces](#use-github-codespaces) +- [Use Local Workstation](#use-local-workstation) -> **πŸ’‘ Recommendation:** We highly recommend using GitHub Codespaces to make it easier to complete this hack. - ---- +**NOTE:** We highly recommend using GitHub Codespaces to make it easier to complete this hack. #### Use Github Codespaces @@ -119,39 +57,20 @@ The GitHub Codespace for this hack will host the developer tools, sample applica **NOTE:** Make sure you do not sign in with your enterprise managed Github account. Once you are signed in: -- βœ… Verify that the `Dev container configuration` drop down is set to `068-AzureOpenAIApps` -- βœ… Click on the green "Create Codespace" button - -
⏳ Creating Codespace (3-5 minutes)
+- Verify that the `Dev container configuration` drop down is set to `068-AzureOpenAIApps` +- Click on the green "Create Codespace" button. Your Codespace environment should load in a new browser tab. It will take approximately 3-5 minutes the first time you create the codespace for it to load. -
-πŸŽ‰ Success! When the codespace completes loading, you should find an instance of Visual Studio Code running in your browser with the files needed for this hackathon. -
+- When the codespace completes loading, you should find an instance of Visual Studio Code running in your browser with the files needed for this hackathon. -Your developer environment is ready, hooray! Skip to section: [πŸ—οΈ Setup Citrus Bus Application](#setup-citrus-bus-application) +Your developer environment is ready, hooray! Skip to section: [Setup Citrus Bus Application](#setup-citrus-bus-application) -
-πŸ“‹ Important Codespace Notes -
+**NOTE:** If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. -**Returning to Your Codespace:** -- If you close your Codespace window, or need to return to it later, you can go to [GitHub Codespaces](https://github.com/codespaces) and you should find your existing Codespaces listed with a link to re-launch it. +**NOTE:** GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser. If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds. If you want to have a better experience, you can also update the default timeout value in your personal setting page on Github. Refer to this page for instructions: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) -**Timeout Management:** -- GitHub Codespaces time out after 20 minutes if you are not actively interacting with it in the browser -- If your codespace times out, you can restart it and the developer environment and its files will return with its state intact within seconds -- For a better experience, you can update the default timeout value in your personal setting page on GitHub: [Default-Timeout-Period](https://docs.github.com/en/codespaces/setting-your-user-preferences/setting-your-timeout-period-for-github-codespaces#setting-your-default-timeout-period) - -**Expiration:** -- Codespaces expire after 30 days unless you extend the expiration date -- When a Codespace expires, the state of all files in it will be lost - -
-
+**NOTE:** Codespaces expire after 30 days unless you extend the expiration date. When a Codespace expires, the state of all files in it will be lost. #### Use Local Workstation @@ -176,9 +95,9 @@ You will next be setting up your local workstation so that it can use dev contai **NOTE:** On Windows, Dev Containers run in the Windows Subsystem for Linux (WSL). As of May 2025, WSL on Windows ARM64 does not currently support running the Azure Function Core Tools needed for this hackathon in x86_64 emulation using QEMU. IF you are using a Windows on ARM device, you will need to use a GitHub Codespace instead. -On Windows and macOS (**NOTE:** only tested on Apple Silicon): +On Windows and Mac OS (**NOTE:** only tested on Apple Silicon): - Download and install Docker Desktop -- (macOS only) In Docker Desktop settings, choose Apple Virtualization Framework for the Virtual Machine Manager. Also, click the checkbox to use Rosetta for x86_64/amd64 emulation on Apple Silicon +- (Mac OS only) In Docker Desktop settings, choose Apple Virtualization Framework for the Virtual Machine Manager. Also, click the checkbox to use Rosetta for x86_64/amd64 emulation on Apple Silicon - (Windows only) Install the Windows Subsystem for Linux along with a Linux distribution such as Ubuntu. You will need to copy the `Resources.zip` to your Linux home directory and unzip it there. - Open the root folder of the Student resource package in Visual Studio Code - You should get prompted to re-open the folder in a Dev Container. You can do that by clicking the Yes button, but if you miss it or hit no, you can also use the Command Palette in VS Code and select `Dev Containers: Reopen in Container` @@ -200,17 +119,10 @@ There are three major steps to setup the Sample Application: - [Setup App Backend](#setup-app-backend) - [Setup App Frontend](#setup-app-frontend) -<<<<<<< Updated upstream In your codespace, or student `Resources.zip` package, you fill find the following folders containing the frontend and backend API of the sample application to help you get started: - `/ContosoAIAppsBackend` - Contains an Azure function app that provides capabilities of processing data and interacting with Azure AI Services like Azure OpenAI and Azure Document Intelligence. - `/ContosoAIAppsFrontend` - Contains an Angular App that provides a user interface to some example virtual assistants. - `/data` - Contains various artifacts and data sources that will be used by the Citrus Bus application -======= -In your codespace, or student `Resources.zip` package, you will find the following folders containing the frontend and backend API of the sample application to help you get started: -- `/ContosoAIAppsBackend` - Contains an Azure Function app that provides capabilities of processing data and interacting with Azure AI Services like Azure OpenAI and Azure Document Intelligence. -- `/ContosoAIAppsFrontend` - Contains an Angular app that provides a user interface to some example virtual assistants. -- `/artifacts` - Contains various artifacts and data sources that will be used by the Citrus Bus application ->>>>>>> Stashed changes - `/infra` - Contains deployment script and Bicep templates to deploy Azure resources for hosting the Citrus Bus application in Azure. The apps also contain helper utilities, functions and tools to help you speed up development as well as hints to the challenges you will be taking on. @@ -275,91 +187,29 @@ npm start Open another terminal session in VSCode so that you can continue the rest of the challenges. 
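Taken together, the two long-running processes typically look like the sketch below. The folder names match the ones listed earlier; the backend command assumes Azure Functions Core Tools (`func`) are installed, and the authoritative steps remain the backend and frontend setup sections above.

```bash
# Terminal 1: start the Azure Functions backend
# (assumes Azure Functions Core Tools are installed and the local settings from the steps above are in place)
cd ContosoAIAppsBackend
func start

# Terminal 2: start the Angular frontend
# (assumes npm dependencies were installed as described in the frontend setup steps)
cd ContosoAIAppsFrontend
npm start
```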
The terminal sessions you opened to run the Frontend and Backend should remain running in the background. ---- - -## βœ… Success Criteria - -
🎯 Challenge 00 - Validation Phase
- -
-🎯 Challenge Complete!
-To complete this challenge successfully, you should be able to accomplish the following: -
- -### πŸ”§ Development Environment Checklist - -
- -| 1 | **Requirement** | **Status** | **Description** | -|:---:|:---|:---:|:---| -| πŸ–₯️ | **Bash Shell + Azure CLI** | ⏳ Pending | Verify command line access | -| ⚑ | **Azure Function Backend** | ⏳ Pending | Backend service running | -| 🌐 | **Frontend Application** | ⏳ Pending | Web app accessible via browser | - -
- -### ☁️ Azure Resources Validation - -
- -Verify that you have the following resources deployed in Azure: - -
-πŸ” Click to expand Azure Resources Checklist -
- -| Service | Status | Purpose | Validation | -|---------|:------:|---------|------------| -| πŸ€– **Azure OpenAI Service** | βœ… Required | AI language models | Check in Azure Portal | -| πŸ” **Azure Cognitive Search** | βœ… Required | Search and indexing | Verify search service | -| πŸ’Ύ **Azure Storage Accounts** (2x) | βœ… Required | Blob storage | Check both accounts | -| πŸ—„οΈ **Azure Cosmos DB** | βœ… Required | Database and containers | Verify DB access | -| πŸ“¨ **Azure Service Bus** | βœ… Required | Message queuing | Check queue setup | -| ⚑ **Azure Redis Cache** | βœ… Required | Caching layer | Verify cache instance | -| πŸ“„ **Azure Document Intelligence** | βœ… Required | Form processing | Check service availability | - -
-
- -
- -### πŸ§ͺ Functional Testing - -
- -πŸ§ͺ **Final Validation Steps:** - -1. **Assistant Response Test**: Ask all assistants for their name from the front-end -2. **Expected Result**: They should respond correctly with the configured names from system prompts - -
-πŸ’‘ Testing Tip: This validates that your entire application stack is working correctly from frontend to backend to AI services. -
- -
- ---- +## Success Criteria -## πŸ“š Learning Resources +To complete this challenge successfully, you should be able to: -> **πŸ’‘ Expand Your Knowledge** -> -> Here are essential resources to deepen your understanding of the technologies used: +- Verify that you have a bash shell with the Azure CLI available. +- Verify that you have deployed the following resources in Azure: -### πŸ€– AI & OpenAI -- πŸ”— [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) - Complete guide to Azure OpenAI -- πŸ”— [Document Intelligence Overview](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/overview?view=doc-intel-4.0.0) - Region and API version details + - Azure OpenAI Service + - Azure Cognitive Search + - Two Azure Storage Accounts with Azure Blob Storage + - Azure Cosmos DB service with databases and containers + - Azure Service Bus with at least one queue set up + - Azure Redis Cache Instance + - Azure Document Intelligence Service (formerly Azure Form Recognizer) + +Your Azure Function Backend and Front End applications should be up and running and reachable via HTTP (Browser) -### πŸ› οΈ Development Tools -- πŸ”— [VS Code with GitHub Copilot](https://code.visualstudio.com/docs/copilot/setup-simplified?wt.md_id=AZ-MVP-5004796) - AI-powered coding assistant +You should also be able to ask all the assistants for their name from the front-end and they should respond correctly with the correct name configured in the app's system prompts. -### πŸ“– Additional Resources -- πŸ”— [Azure Functions Documentation](https://docs.microsoft.com/en-us/azure/azure-functions/) -- πŸ”— [Angular Framework Guide](https://angular.io/docs) -- πŸ”— [GitHub Codespaces Documentation](https://docs.github.com/en/codespaces) +## Learning Resources ---- +Here are some resources that should provide you with background information and educational content on the resources you have just deployed -πŸŽ‰ **Ready for the next challenge?** [Continue to Challenge 01 β†’](./Challenge-01.md) +- [Azure OpenAI Service](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) +- [Document Intelligence Region/API Version Availability](https://learn.microsoft.com/en-us/azure/ai-services/document-intelligence/overview?view=doc-intel-4.0.0) +- [VS Code with Github Copilot](https://code.visualstudio.com/docs/copilot/setup-simplified?wt.md_id=AZ-MVP-5004796) diff --git a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md index 004d9535f9..0ebe875196 100644 --- a/068-AzureOpenAIApps/Student/Challenge-00-nolab.md +++ b/068-AzureOpenAIApps/Student/Challenge-00-nolab.md @@ -40,7 +40,6 @@ A GitHub Codespace is a development environment that is hosted in the cloud that #### Use Github Codespaces -[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/perktime/WhatTheHack/068-AzureOpenAIApps) | [![Open in Dev Containers](https://img.shields.io/static/v1?style=for-the-badge&label=Dev%20Containers&message=Open&color=blue&logo=visualstudiocode)](https://vscode.dev/redirect?url=vscode://ms-vscode-remote.remote-containers/cloneInVolume?url=https://github.com/perktime/WhatTheHack/068-AzureOpenAIApps) You must have a GitHub account to use GitHub Codespaces. If you do not have a GitHub account, you can [Sign Up Here](https://github.com/signup). GitHub Codespaces is available for developers in every organization. All personal GitHub.com accounts include a monthly quota of free usage each month. 
GitHub will provide users in the Free plan 120 core hours, or 60 hours of run time on a 2 core codespace, plus 15 GB of storage each month. From 91680812efdb7990364d7c6f70693f6455118f62 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Wed, 21 Jan 2026 16:22:34 -0600 Subject: [PATCH 45/58] Remove devcontainer.json to streamline the repository and eliminate unused configuration. --- .../.devcontainer/devcontainer.json | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 068-AzureOpenAIApps/.devcontainer/devcontainer.json diff --git a/068-AzureOpenAIApps/.devcontainer/devcontainer.json b/068-AzureOpenAIApps/.devcontainer/devcontainer.json deleted file mode 100644 index 30bdc7d8a0..0000000000 --- a/068-AzureOpenAIApps/.devcontainer/devcontainer.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "WhatTheHack", - "image": "mcr.microsoft.com/devcontainers/universal:2", - "features": { - "ghcr.io/devcontainers/features/azure-cli:1": {}, - "ghcr.io/devcontainers/features/node:1": {}, - "ghcr.io/devcontainers/features/github-cli:1": {} - }, - "customizations": { - "vscode": { - "extensions": [ - "ms-vscode.vscode-json" - ] - } - }, - "remoteUser": "codespace" -} \ No newline at end of file From 4a55ca7c7a24564cef5df2e028f3b54c3ad818e3 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Thu, 22 Jan 2026 14:53:30 -0600 Subject: [PATCH 46/58] Refactor Azure Bicep templates and deployment scripts; remove unused resources and update API versions - Updated `deploy.sh` to derive deployment name from template filename and corrected environment variable for project endpoint. - Removed parameters and modules related to Application Insights and Container Registry from `main.bicep`. - Updated AI Services resource to use the latest API version and added project management properties. - Deleted unused modules for Application Insights, Container Registry, Foundry Project, and Hub. - Added new notebook for building a Research Assistant Agent using the Microsoft Agent Framework. 
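A minimal sketch of the two `deploy.sh` changes called out above: deriving the deployment name from the template filename and writing the corrected project-endpoint variable to the `.env` file. The endpoint value shown is a placeholder; the real script reads it from the Bicep deployment outputs.

```bash
# Derive the deployment name from the Bicep template filename ("main" in this example)
template="main.bicep"
deploymentName=$(basename "$template" .bicep)
echo "Deployment name: $deploymentName"

# Write the corrected environment variable that the notebooks read
# ("<project-endpoint-from-deployment-outputs>" is a placeholder for the value returned by the deployment)
echo "AZURE_AI_PROJECT_ENDPOINT=\"<project-endpoint-from-deployment-outputs>\"" >> .env
```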
--- .../Student/Challenge-06.md | 51 ++- .../Student/Resources/infra/deploy.sh | 4 +- .../Student/Resources/infra/main.bicep | 72 +--- .../Student/Resources/infra/main.bicepparam | 1 - .../Resources/infra/modules/aiServices.bicep | 21 +- .../infra/modules/applicationInsights.bicep | 37 -- .../infra/modules/containerRegistry.bicep | 90 ---- .../Resources/infra/modules/document.bicep | 2 +- .../infra/modules/foundryProject.bicep | 243 ----------- .../Student/Resources/infra/modules/hub.bicep | 202 --------- .../Resources/notebooks/CH-06-AgenticAI.ipynb | 390 ++++++++++++++++++ 11 files changed, 446 insertions(+), 667 deletions(-) delete mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep delete mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/containerRegistry.bicep delete mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep delete mode 100644 066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep create mode 100644 066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb diff --git a/066-OpenAIFundamentals/Student/Challenge-06.md b/066-OpenAIFundamentals/Student/Challenge-06.md index 9388ab2f57..dcceff0913 100644 --- a/066-OpenAIFundamentals/Student/Challenge-06.md +++ b/066-OpenAIFundamentals/Student/Challenge-06.md @@ -4,39 +4,56 @@ ## Introduction -Integrating agents into an application after implementing Retrieval-Augmented Generation (RAG) can significantly enhance user experience by providing personalized interactions and automating repetitive tasks. Additionally, agents can improve decision-making, ensure scalability, and offer real-time responses, making them ideal for complex task management and continuous improvement. In this challenge, you will learn how to use the Azure AI Agent service to build, deploy, and scale enterprise-grade AI agents. +Integrating agents into an application after implementing Retrieval-Augmented Generation (RAG) can significantly enhance user experience by providing personalized interactions and automating repetitive tasks. Additionally, agents can improve decision-making, ensure scalability, and offer real-time responses, making them ideal for complex task management and continuous improvement. + +In this challenge, you will build a **Research Assistant Agent** using the Microsoft Agent Framework. This agent will leverage **Model Context Protocol (MCP)** to connect to live data sources like Microsoft Learn documentation, enabling it to provide accurate, up-to-date answers to technical questions. ## Description -In this challenge, you will create a basic agent. +In this challenge, you will create a code-based agent that can query real-time documentation using MCP tools. + +### Prerequisites +- Ensure you have Python 3.10+ installed +- Have access to a Microsoft Foundry project with a deployed model (e.g., `gpt-4o`) + +### Getting Started + +1. Open the Jupyter notebook for this challenge: + + πŸ““ **[CH-06-AgenticAI.ipynb](./Resources/notebooks/CH-06-AgenticAI.ipynb)** + +2. Work through the notebook sections: + - **Section 1:** Set up your environment and install the Microsoft Agent Framework + - **Section 2:** Create the Research Assistant agent with MCP integration + - **Section 3:** Test single queries and multi-turn conversations + - **Section 4:** Explore extending the agent with custom tools -### Creating the Agent -1. In the left-hand pane, under `Build & Customize`, select `Agents` -2. 
Select your Azure OpenAI resource and hit `Let's go`. -3. Select your model deployment and hit `Next`. -4. You should see an agent under the `Agents` tab at the top. If you select it, you can give it a new name. Enter "`FlightAgent`". -5. You can add instructions as well. Within your codespace, you should see a data folder. That contains the text in the file `FlightAgent.md`. Copy the text from here and add it in instructions. -6. Optional: You can also add a Knowledge Base and Actions to enhance the agent's capabilities. -7. At the top of the agent's `Setup` pane, select `Try in playground`. -8. Here you can interact with your agent in the `Playground` by entering queries in the chat window. For instance, ask the agent to `search for queries from Seattle to New York on the 28th`. Note: The agent may not provide completely accurate responses as it doesn't use real-time data in this example. The purpose is to test its ability to understand and respond to queries. +3. Test your agent with questions like: + - "What is Azure Kubernetes Service and when should I use it?" + - "How do I set up managed identity for an Azure Function?" + - "What are the best practices for Azure OpenAI prompt engineering?" ### Clean-Up -1. Remember to delete your resource group in the Azure portal once you have completed all of the challenges. +1. Remember to delete your resource group in the Azure portal once you have completed all of the challenges. ## Success Criteria To complete this challenge successfully, you should be able to: -- Articulate what an agent is and why it can be used -- Identify tools available to extend an agents capabilities +- Explain what an agent is and how tools extend its capabilities +- Create an agent using the Microsoft Agent Framework in Python +- Integrate MCP tools to connect your agent to live data sources +- Demonstrate a multi-turn conversation with your Research Assistant ## Conclusion -In this Challenge, you explored creating an agent through the Microsoft Foundry portal. This developer friendly experience integrates with several tools, knowledge connections, and systems. As you start or continue to develop your AI applications, think about the coordination needed between different agents and their roles. What would be some important considerations with multi-agent systems when handling complex tasks? +In this challenge, you built a Research Assistant agent using the Microsoft Agent Framework and connected it to live documentation via MCP. This code-first approach gives you full control over your agent's behavior while leveraging powerful integrations. As you continue developing AI applications, consider how agents can be composed togetherβ€”what coordination patterns would you use for multi-agent systems handling complex research or analysis tasks? ## Learning Resources -- [Overview of Microsoft Agents](https://learn.microsoft.com/en-us/azure/ai-services/agents/?view=azure-python-preview) -- These steps are listed here along with many other prompts: [Agents in Microsoft Foundry](https://techcommunity.microsoft.com/blog/educatordeveloperblog/step-by-step-tutorial-building-an-ai-agent-using-azure-ai-foundry/4386122) . 
+- [Microsoft Agent Framework on GitHub](https://github.com/microsoft/agent-framework) +- [Overview of Microsoft Agents](https://learn.microsoft.com/en-us/azure/ai-services/agents/) +- [Model Context Protocol (MCP) Overview](https://modelcontextprotocol.io/) +- [Microsoft Learn MCP Integration](https://learn.microsoft.com/en-us/mcp) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index b13ddc941d..8916d2c343 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -82,6 +82,8 @@ if [[ $validateTemplate == 1 ]]; then fi # Deploy the Bicep template +# Get deployment name from template filename (without path and extension) +deploymentName=$(basename "$template" .bicep) echo "Deploying [$template] Bicep template..." deploymentOutputs=$(az deployment group create \ --resource-group $resourceGroupName \ @@ -131,7 +133,7 @@ echo "Populating .env file..." echo "OPENAI_API_BASE=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesOpenAiEndpoint')\"" >> $environment_file echo "AZURE_AI_SEARCH_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.searchEndpoint')\"" >> $environment_file echo "DOCUMENT_INTELLIGENCE_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.documentEndpoint')\"" >> $environment_file -echo "AZURE_AI_PROJECT_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.aiServicesProjectEndpoint')\"" >> $environment_file +echo "AZURE_AI_PROJECT_ENDPOINT=\"$(echo "$json" | jq -r '.deploymentInfo.value.projectEndpoint')\"" >> $environment_file # Warning: this assumes the first deployed model is the chat model used by the Jupyter notebooks echo "CHAT_MODEL_NAME=\"$(echo "$json" | jq -r '.deploymentInfo.value.deployedModels[0].name')\"" >> $environment_file diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep index 995108e05a..8bc0349635 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicep @@ -35,9 +35,6 @@ param logAnalyticsSku string = 'PerNode' @description('Specifies the workspace data retention in days. -1 means Unlimited retention for the Unlimited Sku. 
730 days is the maximum allowed for all other Skus.') param logAnalyticsRetentionInDays int = 60 -@description('Specifies the name of the Azure Application Insights resource.') -param applicationInsightsName string = '' - @description('Specifies the name of the Azure AI Services resource.') param aiServicesName string = '' @@ -116,23 +113,6 @@ param keyVaultEnableRbacAuthorization bool = true @description('Specifies the soft delete retention in days.') param keyVaultSoftDeleteRetentionInDays int = 7 -@description('Specifies whether creating the Azure Container Registry.') -param acrEnabled bool = false - -@description('Specifies the name of the Azure Container Registry resource.') -param acrName string = '' - -@description('Enable admin user that have push / pull permission to the registry.') -param acrAdminUserEnabled bool = false - -@description('Tier of your Azure Container Registry.') -@allowed([ - 'Basic' - 'Standard' - 'Premium' -]) -param acrSku string = 'Standard' - @description('Specifies the name of the Azure Azure Storage Account resource resource.') param storageAccountName string = '' @@ -208,30 +188,6 @@ module workspace 'modules/logAnalytics.bicep' = { } } -module applicationInsights 'modules/applicationInsights.bicep' = { - name: 'applicationInsights' - params: { - // properties - name: empty(applicationInsightsName) ? toLower('app-insights-${suffix}') : applicationInsightsName - location: location - tags: tags - workspaceId: workspace.outputs.id - } -} - -module containerRegistry 'modules/containerRegistry.bicep' = if (acrEnabled) { - name: 'containerRegistry' - params: { - // properties - name: empty(acrName) ? toLower('acr${suffix}') : acrName - location: location - tags: tags - sku: acrSku - adminUserEnabled: acrAdminUserEnabled - workspaceId: workspace.outputs.id - } -} - module storageAccount 'modules/storageAccount.bicep' = { name: 'storageAccount' params: { @@ -276,32 +232,6 @@ module aiServices 'modules/aiServices.bicep' = { } } -module project 'modules/foundryProject.bicep' = { - name: 'project' - params: { - // workspace organization - name: empty(projectName) ? toLower('project-${suffix}') : projectName - friendlyName: projectFriendlyName - location: location - tags: tags - - // dependent resources - aiServicesName: aiServices.outputs.name - applicationInsightsId: applicationInsights.outputs.id - containerRegistryId: acrEnabled ? 
containerRegistry!.outputs.id : '' - keyVaultId: keyVault.outputs.id - storageAccountId: storageAccount.outputs.id - - // workspace configuration - publicNetworkAccess: projectPublicNetworkAccess - workspaceId: workspace.outputs.id - - // role assignments - userObjectId: userObjectId - aiServicesPrincipalId: aiServices.outputs.principalId - } -} - module networkSecurityPerimeter 'modules/networkSecurityPerimeter.bicep' = if (nspEnabled) { name: 'networkSecurityPerimeter' params: { @@ -336,8 +266,8 @@ output deploymentInfo object = { aiServicesName: aiServices.outputs.name aiServicesEndpoint: aiServices.outputs.endpoint aiServicesOpenAiEndpoint: aiServices.outputs.openAiEndpoint - projectName: project.outputs.name documentEndpoint: document.outputs.endpoint searchEndpoint: search.outputs.endpoint deployedModels: aiServices.outputs.deployedModels + projectEndpoint: aiServices.outputs.projectEndpoint } diff --git a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam index 1e173d12af..04ba2c2fc7 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam +++ b/066-OpenAIFundamentals/Student/Resources/infra/main.bicepparam @@ -3,7 +3,6 @@ using './main.bicep' param userObjectId = '' param keyVaultEnablePurgeProtection = false -param acrEnabled = false param nspEnabled = false //param aiServicesDisableLocalAuth = false param storageAccountAllowSharedKeyAccess = true diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep index 4f44b5017f..bdeb2f4f51 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/aiServices.bicep @@ -61,7 +61,7 @@ var aiServicesMetrics = [ ] // Resources -resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = { +resource aiServices 'Microsoft.CognitiveServices/accounts@2025-09-01' = { name: name location: location sku: sku @@ -71,11 +71,24 @@ resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' = properties: { customSubDomainName: customSubDomainName publicNetworkAccess: publicNetworkAccess + allowProjectManagement: true + defaultProject: '${name}-project' + associatedProjects:[ + '${name}-project' + ] + } +} +resource project 'Microsoft.CognitiveServices/accounts/projects@2025-09-01' = { + name: '${aiServices.name}-project' + parent: aiServices + location: location + identity: identity + properties: { + description: 'Default project for the AI Services account.' 
} } - @batchSize(1) -resource model 'Microsoft.CognitiveServices/accounts/deployments@2023-05-01' = [ +resource model 'Microsoft.CognitiveServices/accounts/deployments@2025-09-01' = [ for deployment in deployments: { name: deployment.model.name parent: aiServices @@ -170,7 +183,7 @@ output name string = aiServices.name output endpoint string = aiServices.properties.endpoint output openAiEndpoint string = aiServices.properties.endpoints['OpenAI Language Model Instance API'] output principalId string = aiServices.identity.principalId - +output projectEndpoint string = project.properties.endpoints['AI Foundry API'] // Output the deployed model names output deployedModels array = [for deployment in deployments: { name: deployment.model.name diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep deleted file mode 100644 index a30fb1ecf2..0000000000 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/applicationInsights.bicep +++ /dev/null @@ -1,37 +0,0 @@ -// Parameters -@description('Specifies the name of the Azure Application Insights.') -param name string - -@description('Specifies the location.') -param location string = resourceGroup().location - -@description('Specifies the Azure Log Analytics workspace ID.') -param workspaceId string - -@description('Specifies the resource tags.') -param tags object - -// Resources -resource applicationInsights 'Microsoft.Insights/components@2020-02-02' = { - name: name - location: location - tags: tags - kind: 'web' - properties: { - Application_Type: 'web' - DisableIpMasking: false - //DisableLocalAuth: false - Flow_Type: 'Bluefield' - ForceCustomerStorageForProfiler: false - ImmediatePurgeDataOn30Days: true - WorkspaceResourceId: workspaceId - IngestionMode: 'LogAnalytics' - publicNetworkAccessForIngestion: 'Enabled' - publicNetworkAccessForQuery: 'Disabled' - Request_Source: 'rest' - } -} - -//Outputs -output id string = applicationInsights.id -output name string = applicationInsights.name diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/containerRegistry.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/containerRegistry.bicep deleted file mode 100644 index 636e8e3c26..0000000000 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/containerRegistry.bicep +++ /dev/null @@ -1,90 +0,0 @@ -// Parameters -@description('Name of your Azure Container Registry') -@minLength(5) -@maxLength(50) -param name string = 'acr${uniqueString(resourceGroup().id)}' - -@description('Enable admin user that have push / pull permission to the registry.') -param adminUserEnabled bool = false - -@description('Whether to allow public network access. 
Defaults to Enabled.') -@allowed([ - 'Disabled' - 'Enabled' -]) -param publicNetworkAccess string = 'Enabled' - -@description('Tier of your Azure Container Registry.') -@allowed([ - 'Basic' - 'Standard' - 'Premium' -]) -param sku string = 'Premium' - -@description('Specifies the resource id of the Log Analytics workspace.') -param workspaceId string - -@description('Specifies the location.') -param location string = resourceGroup().location - -@description('Specifies the resource tags.') -param tags object - -// Variables -var diagnosticSettingsName = 'diagnosticSettings' -var logCategories = [ - 'ContainerRegistryRepositoryEvents' - 'ContainerRegistryLoginEvents' -] -var metricCategories = [ - 'AllMetrics' -] -var logs = [ - for category in logCategories: { - category: category - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] -var metrics = [ - for category in metricCategories: { - category: category - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -// Resources -resource containerRegistry 'Microsoft.ContainerRegistry/registries@2021-12-01-preview' = { - name: name - location: location - tags: tags - sku: { - name: sku - } - properties: { - adminUserEnabled: adminUserEnabled - publicNetworkAccess: publicNetworkAccess - } -} - -resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { - name: diagnosticSettingsName - scope: containerRegistry - properties: { - workspaceId: workspaceId - logs: logs - metrics: metrics - } -} - -// Outputs -output id string = containerRegistry.id -output name string = containerRegistry.name diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep index ce2f51a628..c1284b8b2d 100644 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep +++ b/066-OpenAIFundamentals/Student/Resources/infra/modules/document.bicep @@ -7,7 +7,7 @@ param location string @description('Custom subdomain name for the Azure Document Intelligence.') param customSubDomainName string -resource account 'Microsoft.CognitiveServices/accounts@2024-10-01' = { +resource account 'Microsoft.CognitiveServices/accounts@2025-09-01' = { name: name location: location sku: { diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep deleted file mode 100644 index 8112bef441..0000000000 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/foundryProject.bicep +++ /dev/null @@ -1,243 +0,0 @@ -// Parameters -@description('Specifies the name') -param name string - -@description('Specifies the location.') -param location string - -@description('Specifies the resource tags.') -param tags object - -@description('The SKU name to use for the Microsoft Foundry Project') -param skuName string = 'Basic' - -@description('The SKU tier to use for the Microsoft Foundry Project') -@allowed(['Basic', 'Free', 'Premium', 'Standard']) -param skuTier string = 'Basic' - -@description('Specifies the display name') -param friendlyName string = name - -@description('Specifies the public network access for the Foundry project.') -@allowed([ - 'Disabled' - 'Enabled' -]) -param publicNetworkAccess string = 'Enabled' - -@description('Specifies the resource ID of the application insights resource for storing diagnostics logs') -param applicationInsightsId string - -@description('Specifies the resource ID of the container 
registry resource for storing docker images') -param containerRegistryId string - -@description('Specifies the resource ID of the key vault resource for storing connection strings') -param keyVaultId string - -@description('Specifies the resource ID of the storage account resource for storing experimentation outputs') -param storageAccountId string - -@description('Specifies the name of the Azure AI Services resource') -param aiServicesName string - -@description('Specifies the resource id of the Log Analytics workspace.') -param workspaceId string - -@description('Specifies the object id of a Microsoft Entra ID user. In general, this the object id of the system administrator who deploys the Azure resources.') -param userObjectId string = '' - -@description('Specifies the principal id of the Azure AI Services.') -param aiServicesPrincipalId string = '' - -@description('Optional. The name of logs that will be streamed.') -@allowed([ - 'AmlComputeClusterEvent' - 'AmlComputeClusterNodeEvent' - 'AmlComputeJobEvent' - 'AmlComputeCpuGpuUtilization' - 'AmlRunStatusChangedEvent' - 'ModelsChangeEvent' - 'ModelsReadEvent' - 'ModelsActionEvent' - 'DeploymentReadEvent' - 'DeploymentEventACI' - 'DeploymentEventAKS' - 'InferencingOperationAKS' - 'InferencingOperationACI' - 'EnvironmentChangeEvent' - 'EnvironmentReadEvent' - 'DataLabelChangeEvent' - 'DataLabelReadEvent' - 'DataSetChangeEvent' - 'DataSetReadEvent' - 'PipelineChangeEvent' - 'PipelineReadEvent' - 'RunEvent' - 'RunReadEvent' -]) -param logsToEnable array = [ - 'AmlComputeClusterEvent' - 'AmlComputeClusterNodeEvent' - 'AmlComputeJobEvent' - 'AmlComputeCpuGpuUtilization' - 'AmlRunStatusChangedEvent' - 'ModelsChangeEvent' - 'ModelsReadEvent' - 'ModelsActionEvent' - 'DeploymentReadEvent' - 'DeploymentEventACI' - 'DeploymentEventAKS' - 'InferencingOperationAKS' - 'InferencingOperationACI' - 'EnvironmentChangeEvent' - 'EnvironmentReadEvent' - 'DataLabelChangeEvent' - 'DataLabelReadEvent' - 'DataSetChangeEvent' - 'DataSetReadEvent' - 'PipelineChangeEvent' - 'PipelineReadEvent' - 'RunEvent' - 'RunReadEvent' -] - -@description('Optional. The name of metrics that will be streamed.') -@allowed([ - 'AllMetrics' -]) -param metricsToEnable array = [ - 'AllMetrics' -] - -// Variables -var diagnosticSettingsName = 'diagnosticSettings' -var logs = [ - for log in logsToEnable: { - category: log - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -var metrics = [ - for metric in metricsToEnable: { - category: metric - timeGrain: null - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -// Resources -resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' existing = { - name: aiServicesName -} - -// Standalone Foundry Project (not hub-based) -resource project 'Microsoft.MachineLearningServices/workspaces@2024-04-01-preview' = { - name: name - location: location - tags: tags - sku: { - name: skuName - tier: skuTier - } - // Note: For standalone Foundry projects, kind is NOT set to 'Project' - // Omitting the kind property creates a standalone workspace that works with Foundry - identity: { - type: 'SystemAssigned' - } - properties: { - // organization - friendlyName: friendlyName - hbiWorkspace: false - v1LegacyMode: false - publicNetworkAccess: publicNetworkAccess - - // dependent resources - directly on the project (not inherited from hub) - keyVault: keyVaultId - storageAccount: storageAccountId - applicationInsights: applicationInsightsId - containerRegistry: containerRegistryId == '' ? 
null : containerRegistryId - systemDatastoresAuthMode: 'identity' - } - - // Create AI Services connection directly on the standalone project - resource aiServicesConnection 'connections@2024-01-01-preview' = { - name: toLower('${aiServices.name}-connection') - properties: { - category: 'AIServices' - target: aiServices.properties.endpoint - authType: 'AAD' - isSharedToAll: true - metadata: { - ApiType: 'Azure' - ResourceId: aiServices.id - } - } - } -} - -resource azureAIDeveloperRoleDefinition 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { - name: '64702f94-c441-49e6-a78b-ef80e0188fee' - scope: subscription() -} - -resource azureMLDataScientistRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { - name: 'f6c7c914-8db3-469d-8ca1-694a8f32e121' - scope: subscription() -} - -// This resource defines the Azure AI Developer role, which provides permissions for managing Azure AI resources, including deployments and configurations -resource aiDeveloperRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { - name: guid(project.id, azureAIDeveloperRoleDefinition.id, userObjectId) - scope: project - properties: { - roleDefinitionId: azureAIDeveloperRoleDefinition.id - principalType: 'User' - principalId: userObjectId - } -} - -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Microsoft Foundry -resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { - name: guid(project.id, azureMLDataScientistRole.id, userObjectId) - scope: project - properties: { - roleDefinitionId: azureMLDataScientistRole.id - principalType: 'User' - principalId: userObjectId - } -} - -// This role assignment grants the Azure AI Services managed identity the required permissions to start Prompt Flow in a compute service defined in Microsoft Foundry -resource azureMLDataScientistManagedIdentityRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(aiServicesPrincipalId)) { - name: guid(project.id, azureMLDataScientistRole.id, aiServicesPrincipalId) - scope: project - properties: { - roleDefinitionId: azureMLDataScientistRole.id - principalType: 'ServicePrincipal' - principalId: aiServicesPrincipalId - } -} - -resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { - name: diagnosticSettingsName - scope: project - properties: { - workspaceId: workspaceId - logs: logs - metrics: metrics - } -} - -// Outputs -output name string = project.name -output id string = project.id -output principalId string = project.identity.principalId diff --git a/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep b/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep deleted file mode 100644 index c0cfe4165c..0000000000 --- a/066-OpenAIFundamentals/Student/Resources/infra/modules/hub.bicep +++ /dev/null @@ -1,202 +0,0 @@ -// Parameters -@description('Specifies the name') -param name string - -@description('Specifies the location.') -param location string - -@description('Specifies the resource tags.') -param tags object - -@description('The SKU name to use for the AI Foundry Hub Resource') -param skuName string = 'Basic' - -@description('The SKU tier to use for the AI Foundry Hub Resource') -@allowed(['Basic', 'Free', 'Premium', 'Standard']) -param skuTier string = 'Basic' - -@description('Specifies the display name') -param friendlyName string = 
name - -@description('Specifies the description') -param description_ string - -@description('Specifies the Isolation mode for the managed network of a machine learning workspace.') -@allowed([ - 'AllowInternetOutbound' - 'AllowOnlyApprovedOutbound' - 'Disabled' -]) -param isolationMode string = 'Disabled' - -@description('Specifies the public network access for the machine learning workspace.') -@allowed([ - 'Disabled' - 'Enabled' -]) -param publicNetworkAccess string = 'Enabled' - -@description('Specifies the resource ID of the application insights resource for storing diagnostics logs') -param applicationInsightsId string - -@description('Specifies the resource ID of the container registry resource for storing docker images') -param containerRegistryId string - -@description('Specifies the resource ID of the key vault resource for storing connection strings') -param keyVaultId string - -@description('Specifies the resource ID of the storage account resource for storing experimentation outputs') -param storageAccountId string - -@description('Specifies thename of the Azure AI Services resource') -param aiServicesName string - -@description('Specifies the authentication method for the OpenAI Service connection.') -@allowed([ - 'ApiKey' - 'AAD' - 'ManagedIdentity' - 'None' -]) -param connectionAuthType string = 'AAD' - -@description('Specifies the name for the Azure OpenAI Service connection.') -param aiServicesConnectionName string = '' - -@description('Specifies the resource id of the Log Analytics workspace.') -param workspaceId string - -@description('Specifies the object id of a Miccrosoft Entra ID user. In general, this the object id of the system administrator who deploys the Azure resources.') -param userObjectId string = '' - -@description('Optional. The name of logs that will be streamed.') -@allowed([ - 'ComputeInstanceEvent' -]) -param logsToEnable array = [ - 'ComputeInstanceEvent' -] - -@description('Optional. The name of metrics that will be streamed.') -@allowed([ - 'AllMetrics' -]) -param metricsToEnable array = [ - 'AllMetrics' -] - -@description('Determines whether or not to use credentials for the system datastores of the workspace workspaceblobstore and workspacefilestore. The default value is accessKey, in which case, the workspace will create the system datastores with credentials. 
If set to identity, the workspace will create the system datastores with no credentials.') -@allowed([ - 'identity' - 'accessKey' -]) -param systemDatastoresAuthMode string = 'identity' - -// Variables -var diagnosticSettingsName = 'diagnosticSettings' -var logs = [ - for log in logsToEnable: { - category: log - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -var metrics = [ - for metric in metricsToEnable: { - category: metric - timeGrain: null - enabled: true - retentionPolicy: { - enabled: true - days: 0 - } - } -] - -// Resources -resource aiServices 'Microsoft.CognitiveServices/accounts@2024-04-01-preview' existing = { - name: aiServicesName -} - -resource hub 'Microsoft.MachineLearningServices/workspaces@2024-04-01-preview' = { - name: name - location: location - tags: tags - sku: { - name: skuName - tier: skuTier - } - kind: 'Hub' - identity: { - type: 'SystemAssigned' - } - properties: { - // organization - friendlyName: friendlyName - description: description_ - managedNetwork: { - isolationMode: isolationMode - } - publicNetworkAccess: publicNetworkAccess - - // dependent resources - keyVault: keyVaultId - storageAccount: storageAccountId - applicationInsights: applicationInsightsId - containerRegistry: containerRegistryId == '' ? null : containerRegistryId - systemDatastoresAuthMode: systemDatastoresAuthMode - } - - resource aiServicesConnection 'connections@2024-01-01-preview' = { - name: !empty(aiServicesConnectionName) ? aiServicesConnectionName : toLower('${aiServices.name}-connection') - properties: { - category: 'AIServices' - target: aiServices.properties.endpoint - authType: connectionAuthType - isSharedToAll: true - metadata: { - ApiType: 'Azure' - ResourceId: aiServices.id - } - credentials: connectionAuthType == 'ApiKey' - ? { - key: aiServices.listKeys().key1 - } - : null - } - } -} - -resource azureMLDataScientistRole 'Microsoft.Authorization/roleDefinitions@2022-04-01' existing = { - name: 'f6c7c914-8db3-469d-8ca1-694a8f32e121' - scope: subscription() -} - -// This role assignment grants the user the required permissions to start a Prompt Flow in a compute service within Azure AI Foundry -resource azureMLDataScientistUserRoleAssignment 'Microsoft.Authorization/roleAssignments@2022-04-01' = if (!empty(userObjectId)) { - name: guid(hub.id, azureMLDataScientistRole.id, userObjectId) - scope: hub - properties: { - roleDefinitionId: azureMLDataScientistRole.id - principalType: 'User' - principalId: userObjectId - } -} - -resource diagnosticSettings 'Microsoft.Insights/diagnosticSettings@2021-05-01-preview' = { - name: diagnosticSettingsName - scope: hub - properties: { - workspaceId: workspaceId - logs: logs - metrics: metrics - } -} - -// Outputs -output name string = hub.name -output id string = hub.id diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb new file mode 100644 index 0000000000..45aec6f2eb --- /dev/null +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb @@ -0,0 +1,390 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "243b51a2", + "metadata": {}, + "source": [ + "# Challenge 06 - Agentic AI\n", + "\n", + "In this notebook, you will build a **Research Assistant Agent** using the Microsoft Agent Framework. This agent leverages **Model Context Protocol (MCP)** to connect to live data sources like Microsoft Learn documentation." 
+ ] + }, + { + "cell_type": "markdown", + "id": "96883d46", + "metadata": {}, + "source": [ + "Quick tip! To view the Table of Contents for this Notebook in VS Code or within Codespaces, take a look at the \"Explorer\" tab, expand the \"Outline\" section." + ] + }, + { + "cell_type": "markdown", + "id": "4a5d9005", + "metadata": {}, + "source": [ + "## 1. Setting Up Your Environment\n", + "\n", + "First, install the Microsoft Agent Framework. The `--pre` flag is required while the Agent Framework is in preview." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c0865873", + "metadata": {}, + "outputs": [], + "source": [ + "%pip install agent-framework-azure-ai --pre" + ] + }, + { + "cell_type": "markdown", + "id": "3d6e6900", + "metadata": {}, + "source": [ + "### 1.1 Load Environment Variables\n", + "\n", + "Load your Microsoft Foundry project endpoint and model deployment name from the `.env` file.\n", + "\n", + "**NOTE:** Double check these values in your .env file to ensure the notebook runs seamlessly.\n", + "* AZURE_AI_PROJECT_ENDPOINT must equal your Microsoft Foundry project endpoint\n", + "* CHAT_MODEL_NAME must equal the deployed model's name (e.g., `gpt-4o`)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "79e84127", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from dotenv import load_dotenv, find_dotenv\n", + "load_dotenv(find_dotenv())\n", + "\n", + "# Note: We use the async version of DefaultAzureCredential for the Agent Framework\n", + "from azure.identity.aio import DefaultAzureCredential" + ] + }, + { + "cell_type": "markdown", + "id": "2d5eec04", + "metadata": {}, + "source": [ + "## 2. Creating the Research Assistant Agent\n", + "\n", + "### 2.1 Import Required Libraries\n", + "\n", + "Import the Agent Framework components and Azure Identity for authentication." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "4a994927", + "metadata": {}, + "outputs": [], + "source": [ + "from agent_framework.azure import AzureAIClient\n", + "from agent_framework import MCPStreamableHTTPTool" + ] + }, + { + "cell_type": "markdown", + "id": "b3394b76", + "metadata": {}, + "source": [ + "### 2.2 Define the MCP Tool\n", + "\n", + "Create a function that returns the MCP tool configuration for Microsoft Learn documentation. This allows your agent to query live, up-to-date documentation." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9bd08b8d", + "metadata": {}, + "outputs": [], + "source": [ + "def create_mcp_tools():\n", + " \"\"\"Create MCP tools for the Research Assistant agent.\"\"\"\n", + " return [\n", + " MCPStreamableHTTPTool(\n", + " name=\"Microsoft Learn MCP\",\n", + " description=\"Provides trusted, up-to-date information from Microsoft's official documentation\",\n", + " url=\"https://learn.microsoft.com/api/mcp\",\n", + " )\n", + " ]" + ] + }, + { + "cell_type": "markdown", + "id": "ce749f50", + "metadata": {}, + "source": [ + "### 2.3 Define the Agent Instructions\n", + "\n", + "Create the system instructions that define how the Research Assistant should behave." + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "b42304d3", + "metadata": {}, + "outputs": [], + "source": [ + "AGENT_INSTRUCTIONS = \"\"\"\n", + "You are a helpful research assistant that specializes in Azure and Microsoft technologies. \n", + "\n", + "Your responsibilities:\n", + "1. 
Use the Microsoft Learn MCP tool to find accurate, up-to-date documentation when answering questions\n", + "2. Always cite your sources by providing links to the documentation\n", + "3. If you're unsure about something, acknowledge it and suggest where the user might find more information\n", + "4. Provide clear, concise explanations suitable for developers of varying experience levels\n", + "\n", + "When responding:\n", + "- Start with a direct answer to the question\n", + "- Provide relevant code examples when appropriate\n", + "- Include links to official documentation for further reading\n", + "\"\"\"" + ] + }, + { + "cell_type": "markdown", + "id": "8b414374", + "metadata": {}, + "source": [ + "### 2.4 Set Up Environment Variables\n", + "\n", + "Load the project endpoint and model deployment from your `.env` file." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ea960cc0", + "metadata": {}, + "outputs": [], + "source": [ + "PROJECT_ENDPOINT = os.getenv(\"AZURE_AI_PROJECT_ENDPOINT\", \"\").strip()\n", + "assert PROJECT_ENDPOINT, \"ERROR: AZURE_AI_PROJECT_ENDPOINT is missing\"\n", + "\n", + "MODEL_DEPLOYMENT = os.getenv(\"CHAT_MODEL_NAME\", \"\").strip()\n", + "assert MODEL_DEPLOYMENT, \"ERROR: CHAT_MODEL_NAME is missing\"\n", + "\n", + "print(f\"Project Endpoint: {PROJECT_ENDPOINT}\")\n", + "print(f\"Model Deployment: {MODEL_DEPLOYMENT}\")" + ] + }, + { + "cell_type": "markdown", + "id": "968fcd8f", + "metadata": {}, + "source": [ + "## 3. Testing the Research Assistant\n", + "\n", + "### 3.1 Single Query Test\n", + "\n", + "Let's test the agent with a single question about Azure services." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eeee9085", + "metadata": {}, + "outputs": [], + "source": [ + "async def ask_agent(question: str):\n", + " \"\"\"Send a single question to the Research Assistant agent.\"\"\"\n", + " async with (\n", + " DefaultAzureCredential() as credential,\n", + " AzureAIClient(\n", + " project_endpoint=PROJECT_ENDPOINT,\n", + " model_deployment_name=MODEL_DEPLOYMENT,\n", + " credential=credential,\n", + " ).as_agent(\n", + " name=\"ResearchAssistant\",\n", + " instructions=AGENT_INSTRUCTIONS,\n", + " tools=create_mcp_tools(),\n", + " ) as agent,\n", + " ):\n", + " print(f\"Question: {question}\\n\")\n", + " print(\"Assistant: \", end=\"\", flush=True)\n", + " \n", + " async for chunk in agent.run_stream(question):\n", + " if chunk.text:\n", + " print(chunk.text, end=\"\", flush=True)\n", + " print(\"\\n\")\n", + "\n", + "# Test with a sample question\n", + "await ask_agent(\"What is Azure Kubernetes Service and when should I use it?\")" + ] + }, + { + "cell_type": "markdown", + "id": "94073cb2", + "metadata": {}, + "source": [ + "### 3.2 Multi-Turn Conversation with Thread\n", + "\n", + "One of the powerful features of the Agent Framework is thread persistence, which maintains context across multiple conversation turns." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "a8710a29",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "async def multi_turn_conversation(questions: list):\n",
+    "    \"\"\"Demonstrate multi-turn conversation with context retention.\"\"\"\n",
+    "    async with (\n",
+    "        DefaultAzureCredential() as credential,\n",
+    "        AzureAIClient(\n",
+    "            project_endpoint=PROJECT_ENDPOINT,\n",
+    "            model_deployment_name=MODEL_DEPLOYMENT,\n",
+    "            credential=credential,\n",
+    "        ).as_agent(\n",
+    "            name=\"ResearchAssistant\",\n",
+    "            instructions=AGENT_INSTRUCTIONS,\n",
+    "            tools=create_mcp_tools(),\n",
+    "        ) as agent,\n",
+    "    ):\n",
+    "        # Create a thread for multi-turn conversation\n",
+    "        thread = agent.get_new_thread()\n",
+    "        \n",
+    "        for i, question in enumerate(questions, 1):\n",
+    "            print(f\"--- Turn {i} ---\")\n",
+    "            print(f\"You: {question}\\n\")\n",
+    "            print(\"Assistant: \", end=\"\", flush=True)\n",
+    "            \n",
+    "            async for chunk in agent.run_stream(question, thread=thread):\n",
+    "                if chunk.text:\n",
+    "                    print(chunk.text, end=\"\", flush=True)\n",
+    "            print(\"\\n\")\n",
+    "\n",
+    "# Test multi-turn conversation\n",
+    "questions = [\n",
+    "    \"How do I set up managed identity for an Azure Function?\",\n",
+    "    \"Can you show me a code example for that?\",\n",
+    "    \"What are the security benefits of using managed identity instead of connection strings?\"\n",
+    "]\n",
+    "\n",
+    "await multi_turn_conversation(questions)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b65362b9",
+   "metadata": {},
+   "source": [
+    "## 4. Exploring Agent Capabilities\n",
+    "\n",
+    "### 4.1 Adding Custom Tools\n",
+    "\n",
+    "In addition to MCP tools, you can create custom Python functions as tools. Here's an example of adding a simple calculation tool."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 12,
+   "id": "47556caa",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import Annotated\n",
+    "\n",
+    "def calculate_azure_storage_cost(\n",
+    "    storage_gb: Annotated[float, \"Amount of storage in GB\"],\n",
+    "    tier: Annotated[str, \"Storage tier: 'hot', 'cool', or 'archive'\"] = \"hot\"\n",
+    ") -> str:\n",
+    "    \"\"\"Calculate estimated monthly cost for Azure Blob Storage.\"\"\"\n",
+    "    # Simplified pricing (actual prices vary by region)\n",
+    "    prices = {\n",
+    "        \"hot\": 0.0184,\n",
+    "        \"cool\": 0.01,\n",
+    "        \"archive\": 0.00099\n",
+    "    }\n",
+    "    price_per_gb = prices.get(tier.lower(), prices[\"hot\"])\n",
+    "    monthly_cost = storage_gb * price_per_gb\n",
+    "    return f\"Estimated monthly cost for {storage_gb} GB on {tier} tier: ${monthly_cost:.2f}\"\n",
+    "\n",
+    "# You can add this tool to your agent like this:\n",
+    "# tools=[create_mcp_tools()[0], calculate_azure_storage_cost]"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "42ab6430",
+   "metadata": {},
+   "source": [
+    "### 4.2 Try It Yourself!\n",
+    "\n",
+    "Use the cell below to ask your own questions to the Research Assistant. Modify the question and run the cell to see the response."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "56a14bd3",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Try your own question!\n",
+    "your_question = \"What are the best practices for Azure OpenAI prompt engineering?\"\n",
+    "\n",
+    "await ask_agent(your_question)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "ebf0f473",
+   "metadata": {},
+   "source": [
+    "## 5. Summary\n",
+    "\n",
+    "In this notebook, you learned how to:\n",
+    "\n",
+    "1. 
**Set up the Microsoft Agent Framework** with the `agent-framework-azure-ai` package\n", + "2. **Create MCP tools** to connect your agent to live data sources (Microsoft Learn)\n", + "3. **Build a Research Assistant agent** with custom instructions\n", + "4. **Use thread persistence** for multi-turn conversations\n", + "5. **Extend agents with custom tools** using Python functions\n", + "\n", + "### Next Steps\n", + "\n", + "Consider exploring:\n", + "- Adding more MCP tools (e.g., GitHub, databases)\n", + "- Creating multi-agent systems for complex workflows\n", + "- Implementing agent handoffs for specialized tasks\n", + "- Adding memory and state management for long-running agents" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv (3.13.11)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.13.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} From 22f3fb30f1679255b25f429335b970975471919b Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Thu, 22 Jan 2026 14:55:08 -0600 Subject: [PATCH 47/58] Update note in CH-06-AgenticAI notebook for clarity on .env file requirements --- .../Student/Resources/notebooks/CH-06-AgenticAI.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb index 45aec6f2eb..36e4f99a18 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb @@ -47,7 +47,7 @@ "\n", "Load your Microsoft Foundry project endpoint and model deployment name from the `.env` file.\n", "\n", - "**NOTE:** Double check these values in your .env file to ensure the notebook runs seamlessly.\n", + "**NOTE:** These values in your .env file are required to ensure the notebook runs seamlessly. They should already be there if you deployed using the deployment script in Challenge 0.\n", "* AZURE_AI_PROJECT_ENDPOINT must equal your Microsoft Foundry project endpoint\n", "* CHAT_MODEL_NAME must equal the deployed model's name (e.g., `gpt-4o`)" ] From abdec0848606b81482a68f9a7693c664dd85f09a Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Thu, 22 Jan 2026 15:52:23 -0600 Subject: [PATCH 48/58] Add optional section for exploring the Microsoft Foundry portal in CH-06-AgenticAI notebook --- .../Resources/notebooks/CH-06-AgenticAI.ipynb | 21 +++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb index 36e4f99a18..af139258bc 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb @@ -341,6 +341,27 @@ "await ask_agent(your_question)" ] }, + { + "cell_type": "markdown", + "id": "5e733c45", + "metadata": {}, + "source": [ + "### 4.3 Try this in the new Foundry Portal (optional)\n", + "\n", + "The Microsoft Foundry portal also provides a no-code experience for creating and testing agents. If you'd like to explore the portal-based approach:\n", + "\n", + "1. 
Navigate to [Microsoft Foundry](https://ai.azure.com) and open your project using the New Foundry portal. \n", + "2. Click **Build** in the top right.\n", + "3. If you already did the steps above, you should already see a ResearchAssistant and you can click that. Otherwise, Create a new agent and give it a name like \"ResearchAssistant\"\n", + "4. Add instructions similar to what we defined in `AGENT_INSTRUCTIONS` above\n", + "5. Under **Tools**, add the Microsoft Learn MCP tool to give your agent access to documentation\n", + "6. Use the **Playground** to test your agent with the same questions you tried in this notebook\n", + "\n", + "Compare the portal experience with the code-first approach you used here. Consider:\n", + "- When would you prefer the portal vs. code?\n", + "- How might you use both together in a development workflow?" + ] + }, { "cell_type": "markdown", "id": "ebf0f473", From 347b70bef144e549f2567a1b804d181bf01e224e Mon Sep 17 00:00:00 2001 From: "Peter C. Laudati" Date: Fri, 23 Jan 2026 14:36:17 -0500 Subject: [PATCH 49/58] Fix typo in Challenge-00.md regarding constraints --- 066-OpenAIFundamentals/Student/Challenge-00.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index 3523855f1b..d6d49cbc06 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -195,7 +195,7 @@ Navigate to [Microsoft Foundry](https://ai.azure.com) to create your Microsoft F - Click on the **+ Create New** button. - Choose Microsoft Foundry resource for the resource type. Click the **Next** button - - Fill out a name for your project. **Note:** You should not need to specify Advanced Options unless you need or want to change the region because of capacity contraints. Click the **Create** button + - Fill out a name for your project. **Note:** You should not need to specify Advanced Options unless you need or want to change the region because of capacity constraints. Click the **Create** button - From the Azure portal (or you can use an Infrastructure as Code approach if you prefer using Bicep/Terraform/ARM/CLI) - Create an Azure AI Search service - Specify a service name for your Azure AI Search. You can use the same resource group and location as the Microsoft Foundry resource. **Note:** Make sure you set the Pricing Tier to Standard (Basic/Free is not supported) From 7d454b8c542d00bf3c9c9c84fe34ac5bcb258f45 Mon Sep 17 00:00:00 2001 From: "Peter C. 
Laudati" Date: Fri, 23 Jan 2026 14:39:38 -0500 Subject: [PATCH 50/58] Add 'Gally' to the wordlist --- 066-OpenAIFundamentals/.wordlist.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/066-OpenAIFundamentals/.wordlist.txt b/066-OpenAIFundamentals/.wordlist.txt index b3b2cc482f..c080eb5109 100644 --- a/066-OpenAIFundamentals/.wordlist.txt +++ b/066-OpenAIFundamentals/.wordlist.txt @@ -42,3 +42,4 @@ multimodal Agentic MCP Leaderboards +Gally From 9c464fd8bfe7a842169f502da647b53654fb8fc0 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 23 Jan 2026 13:37:25 -0600 Subject: [PATCH 51/58] Fix formatting and punctuation in instructions for using the Microsoft Foundry portal in CH-06-AgenticAI notebook --- .../Student/Resources/notebooks/CH-06-AgenticAI.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb index af139258bc..4a50e7be3f 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb @@ -350,9 +350,9 @@ "\n", "The Microsoft Foundry portal also provides a no-code experience for creating and testing agents. If you'd like to explore the portal-based approach:\n", "\n", - "1. Navigate to [Microsoft Foundry](https://ai.azure.com) and open your project using the New Foundry portal. \n", - "2. Click **Build** in the top right.\n", - "3. If you already did the steps above, you should already see a ResearchAssistant and you can click that. Otherwise, Create a new agent and give it a name like \"ResearchAssistant\"\n", + "1. Navigate to [Microsoft Foundry](https://ai.azure.com) and open your project using the New Foundry portal \n", + "2. Click **Build** in the top right\n", + "3. If you already did the steps above, you should already see a ResearchAssistant and you can click that. Otherwise, create a new agent and give it a name like \"ResearchAssistant\"\n", "4. Add instructions similar to what we defined in `AGENT_INSTRUCTIONS` above\n", "5. Under **Tools**, add the Microsoft Learn MCP tool to give your agent access to documentation\n", "6. 
Use the **Playground** to test your agent with the same questions you tried in this notebook\n", From 37b967a243952fcb7cb87dc7818f40ce4948382a Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 23 Jan 2026 13:39:36 -0600 Subject: [PATCH 52/58] Remove output logs from code cell and reset execution count in CH-04-A-RAG_for_structured_data notebook --- .../CH-04-A-RAG_for_structured_data.ipynb | 95 +------------------ 1 file changed, 2 insertions(+), 93 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb index 67fc0b57d3..9126e9e20b 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-04-A-RAG_for_structured_data.ipynb @@ -578,7 +578,7 @@ }, { "cell_type": "code", - "execution_count": 29, + "execution_count": null, "id": "56354758-427f-4af9-94b9-96a25946e9a5", "metadata": { "gather": { @@ -594,98 +594,7 @@ } } }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Generated embeddings for 11 chunks\n", - "\n", - "Query: What did Laurene Jobs say about Hillary Clinton?\n", - "\n", - "Result 1 (Score: 0.913):\n", - "She is one of America’s greatest modern creations. Laurene Jobs, pictured, widow of Apple's Steve, has strongly backed Hillary Clinton for president . Laurene Jobs said that Hillary Clinton, right, ha...\n", - "\n", - "Result 2 (Score: 0.904):\n", - "Apple founder Steve Jobs' widow Laurene has told of her admiration for Democratic White House front-runner Hillary Clinton. Ms Jobs, 51, called former First Lady Hillary a 'revolutionary' woman, and a...\n", - "\n", - "Result 3 (Score: 0.829):\n", - "'It matters, of course, that Hillary is a woman. But what matters more is what kind of woman she is.' 
Mrs Clinton announced her intention to seek the Democratic nomination on Sunday - and set upon the...\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered 
in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: divide by zero encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: overflow encountered in matmul\n", - " ret = a @ b\n", - "/Users/pete/WTH/WhatTheHack/.venv/lib/python3.13/site-packages/sklearn/utils/extmath.py:227: RuntimeWarning: invalid value encountered in matmul\n", - " ret = a @ b\n" - ] - } - ], + "outputs": [], "source": [ "# Create embeddings for document chunks\n", "embeddings = []\n", From 02de28d08c5f7550dd0b20567a08d35a1c59bff4 Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 23 Jan 2026 13:48:45 -0600 Subject: [PATCH 53/58] Refactor Challenge 06 documentation: reorganize prerequisites, enhance clarity, and update success criteria --- .../Student/Challenge-06.md | 51 ++++++++++--------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-06.md b/066-OpenAIFundamentals/Student/Challenge-06.md index dcceff0913..9303e29f40 100644 --- a/066-OpenAIFundamentals/Student/Challenge-06.md +++ b/066-OpenAIFundamentals/Student/Challenge-06.md @@ -2,6 +2,11 @@ [< Previous Challenge](./Challenge-05.md) - **[Home](../README.md)** +## Pre-requisites + +- Python 3.10+ installed +- Access to a Microsoft Foundry project with a deployed model (e.g., `gpt-4o`) + ## Introduction Integrating agents into an application after implementing Retrieval-Augmented Generation (RAG) can significantly enhance user experience by providing personalized interactions and automating repetitive tasks. 
Additionally, agents can improve decision-making, ensure scalability, and offer real-time responses, making them ideal for complex task management and continuous improvement. @@ -12,41 +17,37 @@ In this challenge, you will build a **Research Assistant Agent** using the Micro In this challenge, you will create a code-based agent that can query real-time documentation using MCP tools. -### Prerequisites -- Ensure you have Python 3.10+ installed -- Have access to a Microsoft Foundry project with a deployed model (e.g., `gpt-4o`) - -### Getting Started +You will run the following Jupyter notebook to complete the tasks for this challenge: +- `CH-06-AgenticAI.ipynb` -1. Open the Jupyter notebook for this challenge: - - πŸ““ **[CH-06-AgenticAI.ipynb](./Resources/notebooks/CH-06-AgenticAI.ipynb)** +The file can be found in your Codespace under the `/notebooks` folder. +If you are working locally or in the Cloud, you can find it in the `/notebooks` folder of `Resources.zip` file. -2. Work through the notebook sections: - - **Section 1:** Set up your environment and install the Microsoft Agent Framework - - **Section 2:** Create the Research Assistant agent with MCP integration - - **Section 3:** Test single queries and multi-turn conversations - - **Section 4:** Explore extending the agent with custom tools -3. Test your agent with questions like: - - "What is Azure Kubernetes Service and when should I use it?" - - "How do I set up managed identity for an Azure Function?" - - "What are the best practices for Azure OpenAI prompt engineering?" - -### Clean-Up -1. Remember to delete your resource group in the Azure portal once you have completed all of the challenges. +The notebook covers the following areas: +- Setting up your environment and installing the Microsoft Agent Framework +- Creating the Research Assistant agent with MCP integration +- Testing single queries and multi-turn conversations +- Exploring how to extend the agent with custom tools +Test your agent with questions like: +- "What is Azure Kubernetes Service and when should I use it?" +- "How do I set up managed identity for Azure Functions?" +- "What are the best practices for Azure OpenAI prompt engineering?" ## Success Criteria To complete this challenge successfully, you should be able to: -- Explain what an agent is and how tools extend its capabilities -- Create an agent using the Microsoft Agent Framework in Python -- Integrate MCP tools to connect your agent to live data sources +- Demonstrate your understanding of what an agent is and how tools extend its capabilities +- Verify that your agent is created using the Microsoft Agent Framework in Python +- Verify that MCP tools are integrated to connect your agent to live data sources - Demonstrate a multi-turn conversation with your Research Assistant -## Conclusion -In this challenge, you built a Research Assistant agent using the Microsoft Agent Framework and connected it to live documentation via MCP. This code-first approach gives you full control over your agent's behavior while leveraging powerful integrations. As you continue developing AI applications, consider how agents can be composed togetherβ€”what coordination patterns would you use for multi-agent systems handling complex research or analysis tasks? +## Tips + +As you continue developing AI applications, consider how agents can be composed togetherβ€”what coordination patterns would you use for multi-agent systems handling complex research or analysis tasks? 
+ +**Clean-Up:** Remember to delete your resource group in the Azure portal once you have completed all of the challenges. ## Learning Resources From 897ed58cd9081d83229d61c77fcf2ef962c2999a Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 23 Jan 2026 14:22:16 -0600 Subject: [PATCH 54/58] Update section numbering in CH-06-AgenticAI notebook for consistency --- .../Resources/notebooks/CH-06-AgenticAI.ipynb | 30 +++++++++---------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb index 4a50e7be3f..eefce0803f 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-06-AgenticAI.ipynb @@ -23,7 +23,7 @@ "id": "4a5d9005", "metadata": {}, "source": [ - "## 1. Setting Up Your Environment\n", + "## 6.1. Setting Up Your Environment\n", "\n", "First, install the Microsoft Agent Framework. The `--pre` flag is required while the Agent Framework is in preview." ] @@ -43,7 +43,7 @@ "id": "3d6e6900", "metadata": {}, "source": [ - "### 1.1 Load Environment Variables\n", + "### 6.1.1 Load Environment Variables\n", "\n", "Load your Microsoft Foundry project endpoint and model deployment name from the `.env` file.\n", "\n", @@ -73,9 +73,9 @@ "id": "2d5eec04", "metadata": {}, "source": [ - "## 2. Creating the Research Assistant Agent\n", + "## 6.2. Creating the Research Assistant Agent\n", "\n", - "### 2.1 Import Required Libraries\n", + "### 6.2.1 Import Required Libraries\n", "\n", "Import the Agent Framework components and Azure Identity for authentication." ] @@ -96,7 +96,7 @@ "id": "b3394b76", "metadata": {}, "source": [ - "### 2.2 Define the MCP Tool\n", + "### 6.2.2 Define the MCP Tool\n", "\n", "Create a function that returns the MCP tool configuration for Microsoft Learn documentation. This allows your agent to query live, up-to-date documentation." ] @@ -124,7 +124,7 @@ "id": "ce749f50", "metadata": {}, "source": [ - "### 2.3 Define the Agent Instructions\n", + "### 6.2.3 Define the Agent Instructions\n", "\n", "Create the system instructions that define how the Research Assistant should behave." ] @@ -157,7 +157,7 @@ "id": "8b414374", "metadata": {}, "source": [ - "### 2.4 Set Up Environment Variables\n", + "### 6.2.4 Set Up Environment Variables\n", "\n", "Load the project endpoint and model deployment from your `.env` file." ] @@ -184,9 +184,9 @@ "id": "968fcd8f", "metadata": {}, "source": [ - "## 3. Testing the Research Assistant\n", + "## 6.3. Testing the Research Assistant\n", "\n", - "### 3.1 Single Query Test\n", + "### 6.3.1 Single Query Test\n", "\n", "Let's test the agent with a single question about Azure services." ] @@ -229,7 +229,7 @@ "id": "94073cb2", "metadata": {}, "source": [ - "### 3.2 Multi-Turn Conversation with Thread\n", + "### 6.3.2 Multi-Turn Conversation with Thread\n", "\n", "One of the powerful features of the Agent Framework is thread persistence, which maintains context across multiple conversation turns." ] @@ -283,9 +283,9 @@ "id": "b65362b9", "metadata": {}, "source": [ - "## 4. Exploring Agent Capabilities\n", + "## 6.4. Exploring Agent Capabilities\n", "\n", - "### 4.1 Adding Custom Tools\n", + "### 6.4.1 Adding Custom Tools\n", "\n", "In addition to MCP tools, you can create custom Python functions as tools. Here's an example of adding a simple calculation tool." 
] @@ -323,7 +323,7 @@ "id": "42ab6430", "metadata": {}, "source": [ - "### 4.2 Try It Yourself!\n", + "### 6.4.2 Try It Yourself!\n", "\n", "Use the cell below to ask your own questions to the Research Assistant. Modify the question and run the cell to see the response." ] @@ -346,7 +346,7 @@ "id": "5e733c45", "metadata": {}, "source": [ - "### 4.3 Try this in the new Foundry Portal (optional)\n", + "### 6.4.3 Try this in the new Foundry Portal (optional)\n", "\n", "The Microsoft Foundry portal also provides a no-code experience for creating and testing agents. If you'd like to explore the portal-based approach:\n", "\n", @@ -367,7 +367,7 @@ "id": "ebf0f473", "metadata": {}, "source": [ - "## 5. Summary\n", + "## 6.5. Summary\n", "\n", "In this notebook, you learned how to:\n", "\n", From f761e0dced4e5d5e762552706c0be2b99543f75b Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Mon, 26 Jan 2026 11:11:36 -0600 Subject: [PATCH 55/58] Fix formatting in Challenge-02.md --- 066-OpenAIFundamentals/Student/Challenge-02.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-02.md b/066-OpenAIFundamentals/Student/Challenge-02.md index 77cd2e780a..3d2b55e943 100644 --- a/066-OpenAIFundamentals/Student/Challenge-02.md +++ b/066-OpenAIFundamentals/Student/Challenge-02.md @@ -36,7 +36,7 @@ This challenge is divided into the following sections: Scenario: You are building a chatbot for a retail company that needs fast responses and safe outputs. Your goal is to explore the Model Catalog and identify models for this use case. There is no right or wrong answer here. #### Student Task 2.1 -- Go into the [Microsoft Foundry](https://ai.azure.com). +- Go into [Microsoft Foundry](https://ai.azure.com). - Navigate to the Model Catalog and explore different models using the correct filters. - Identify which model can potentially help with the task at hand. - Share your findings with a peer and compare your choices. Did you pick the same models? Why or why not? From 5b6131f919d50a1956a4f5f5a7e69cf6f92e402f Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Thu, 29 Jan 2026 15:56:33 -0600 Subject: [PATCH 56/58] Refactor notebook execution counts and clear outputs for CH-01 and CH-03 notebooks to improve clarity and maintainability. --- .../notebooks/CH-01-PromptEngineering.ipynb | 45 +++---------------- .../notebooks/CH-03-C-Embeddings.ipynb | 15 +------ 2 files changed, 8 insertions(+), 52 deletions(-) diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb index 242fce4795..e7ba963f42 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-01-PromptEngineering.ipynb @@ -1519,7 +1519,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": null, "metadata": { "gather": { "logged": 1685051978623 @@ -1534,18 +1534,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To determine the most decorated individual athlete at the Sydney 2000 Olympic Games, we need to follow a step-by-step approach:\\n\\n1. **Identify the Event**: The Sydney 2000 Olympic Games were held from September 15 to October 1, 2000.\\n\\n2. **Research the Medalists**: We need to look into the medalists from the Sydney 2000 Olympics to find out who won the most medals.\\n\\n3. 
**Focus on Individual Athletes**: We are interested in individual athletes, not teams or countries.\\n\\n4. **Consult Reliable Sources**: Use reliable sources such as the official Olympic website, sports databases, and historical records.\\n\\n5. **Analyze the Data**: Compare the number of medals won by individual athletes.\\n\\n### Step-by-Step Analysis:\\n\\n- **Research**: According to the official Olympic records and sports databases, the Sydney 2000 Olympics featured many outstanding performances.\\n\\n- **Identify Top Performers**: Swimmer Ian Thorpe from'" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "text = f\"\"\"\n", " The 2020 Summer Olympics, officially the Games of the XXXII Olympiad and also known as Tokyo 2020, was an international multi-sport event held from 23 July to 8 August 2021 in Tokyo, Japan, with some preliminary events that began on 21 July 2021. Tokyo was selected as the host city during the 125th IOC Session in Buenos Aires, Argentina, on 7 September 2013.Originally scheduled to take place from 24 July to 9 August 2020, the event was postponed to 2021 on 24 March 2020 due to the global COVID-19 pandemic, the first such instance in the history of the Olympic Games (previous games had been cancelled but not rescheduled). However, the event retained the Tokyo 2020 branding for marketing purposes. It was largely held behind closed doors with no public spectators permitted due to the declaration of a state of emergency in the Greater Tokyo Area in response to the pandemic, the first and only Olympic Games to be held without official spectators. The Games were the most expensive ever, with total spending of over $20 billion.The Games were the fourth Olympic Games to be held in Japan, following the 1964 Summer Olympics (Tokyo), 1972 Winter Olympics (Sapporo), and 1998 Winter Olympics (Nagano). Tokyo became the first city in Asia to hold the Summer Olympic Games twice. The 2020 Games were the second of three consecutive Olympics to be held in East Asia, following the 2018 Winter Olympics in Pyeongchang, South Korea and preceding the 2022 Winter Olympics in Beijing, China. Due to the one-year postponement, Tokyo 2020 was the first and only Olympic Games to have been held in an odd-numbered year and the first Summer Olympics since 1900 to be held in a non-leap year.\\nNew events were introduced in existing sports, including 3x3 basketball, freestyle BMX and mixed gender team events in a number of existing sports, as well as the return of madison cycling for men and an introduction of the same event for women. New IOC policies also allowed the host organizing committee to add new sports to the Olympic program for just one Games. The disciplines added by the Japanese Olympic Committee were baseball and softball, karate, sport climbing, surfing and skateboarding, the last four of which made their Olympic debuts, and the last three of which will remain on the Olympic program.The United States topped the medal count by both total golds (39) and total medals (113), with China finishing second by both respects (38 and 89). Host nation Japan finished third, setting a record for the most gold medals and total medals ever won by their delegation at an Olympic Games with 27 and 58. Great Britain finished fourth, with a total of 22 gold and 64 medals. The Russian delegation competing as the ROC finished fifth with 20 gold medals and third in the overall medal count, with 71 medals. 
Bermuda, the Philippines and Qatar won their first-ever Olympic gold medals. Burkina Faso, San Marino and Turkmenistan also won their first-ever Olympic medals.'\n", @@ -1558,7 +1547,7 @@ }, { "cell_type": "code", - "execution_count": 20, + "execution_count": null, "metadata": { "jupyter": { "outputs_hidden": false, @@ -1570,18 +1559,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To determine the largest time zone difference between the top two countries that won the most gold medals in the 2020 Tokyo Olympics, we need to identify these countries and their respective time zones.\\n\\n1. **Identify the top two countries by gold medals:**\\n - The United States won the most gold medals with 39.\\n - China finished second with 38 gold medals.\\n\\n2. **Determine the time zones for each country:**\\n - The United States spans multiple time zones, but the primary time zones are Eastern Standard Time (EST, UTC-5), Central Standard Time (CST, UTC-6), Mountain Standard Time (MST, UTC-7), and Pacific Standard Time (PST, UTC-8). For simplicity, we can consider the Eastern Standard Time (EST, UTC-5) as a representative time zone for the U.S.\\n - China operates on China Standard Time (CST, UTC+8), which is used nationwide.\\n\\n3. **Calculate'" - ] - }, - "execution_count": 20, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Another example\n", "prompt = f\"\"\"\n", @@ -1611,7 +1589,7 @@ }, { "cell_type": "code", - "execution_count": 21, + "execution_count": null, "metadata": { "gather": { "logged": 1685053144682 @@ -1626,18 +1604,7 @@ } } }, - "outputs": [ - { - "data": { - "text/plain": [ - "'To find out how many more silver and bronze medals the United States has over Great Britain, we need to calculate the number of silver and bronze medals each country has and then find the difference.\\n\\nFirst, calculate the number of silver and bronze medals for each country:\\n\\n1. **United States:**\\n - Total medals: 113\\n - Gold medals: 39\\n - Silver and bronze medals: 113 - 39 = 74\\n\\n2. 
**Great Britain:**\\n - Total medals: 64\\n - Gold medals: 22\\n - Silver and bronze medals: 64 - 22 = 42\\n\\nNow, find the difference in the number of silver and bronze medals between the United States and Great Britain:\\n\\n74 (United States) - 42 (Great Britain) = 32\\n\\nThe United States has 32 more silver and bronze medals than Great Britain.'" - ] - }, - "execution_count": 21, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "# Notice how this response may not be ideal, or the most accurate.\n", "prompt = f\"\"\"\n", diff --git a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb index de1b9484a3..11d6e59401 100644 --- a/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb +++ b/066-OpenAIFundamentals/Student/Resources/notebooks/CH-03-C-Embeddings.ipynb @@ -184,20 +184,9 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "398" - ] - }, - "execution_count": 14, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n", "shortened_df['n_tokens'] = shortened_df[\"name\"].apply(lambda x: len(tokenizer.encode(x)))\n", From c46d848235b1a04164bde24a5a4ce791ec2b75fc Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 30 Jan 2026 08:52:58 -0600 Subject: [PATCH 57/58] Update resource group name and location in deploy script for OpenAI fundamentals --- .../Student/Challenge-02-Old.md | 92 ------------------- .../Student/Resources/infra/deploy.sh | 4 +- 2 files changed, 2 insertions(+), 94 deletions(-) delete mode 100644 066-OpenAIFundamentals/Student/Challenge-02-Old.md diff --git a/066-OpenAIFundamentals/Student/Challenge-02-Old.md b/066-OpenAIFundamentals/Student/Challenge-02-Old.md deleted file mode 100644 index 31a4b24b0d..0000000000 --- a/066-OpenAIFundamentals/Student/Challenge-02-Old.md +++ /dev/null @@ -1,92 +0,0 @@ -# Challenge 02 - OpenAI Models & Capabilities - -[< Previous Challenge](./Challenge-01.md) - **[Home](../README.md)** - [Next Challenge >](./Challenge-03.md) - -## Introduction - -In this challenge, you will learn about the different capabilities of OpenAI models and learn how to choose the best model for your use case. - -There are a lot of different models available in the Azure AI Model Catalog. These include models from OpenAI and other open source large language models from Meta, Hugging Face, and more. You are going to explore various LLMs and compare gpt3.5 to gpt4 model in this challenge. - -In a world where the availability and development of models are always changing, the models we compare may change over time. But we encourage you to understand the general concepts and material in this Challenge because the comparison techniques utilized can be applicable to scenarios where you are comparing Large and/or Small Language Models. For more information on legacy models and additional models, reference the [documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/legacy-models) and [Azure model catalog](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/model-catalog-overview) for more details. - -## Description -Questions you should be able to answer by the end of this challenge: -- What are the capacities of each Azure OpenAI model? 
-- How to select the right model for your application? -- What model would you select to perform complex problem solving? -- What model would you select to generate new names? - -You will work in the Azure Microsoft Foundry for this challenge. We recommend keeping the student guide and the Azure Microsoft Foundry in two windows side by side as you work. This will also help to validate you have met the [success criteria](#success-criteria) below for this challenge. - -This challenge is divided into the following sections: - -- [2.1 Model Discovery](#21-model-discovery) -- [2.2 Model Benchmarking](#22-model-benchmarking) -- [2.3 Model Comparison](#23-model-comparison) -- [2.4 Prompt Flow](#24-prompt-flow) - - 2.4.1 Complex Problem Solving - - 2.4.2 Creative and Technical Writing - - 2.4.3 Long Form Content Understanding - -### 2.1 Model Discovery -Scenario: You are part of a research team working on getting information from biotech news articles. Your goal is to explore the Model Catalog and identify some suitable models for accurate question answering. There is no right or wrong answer here. - -#### Student Task 2.1 -- Go into the [Microsoft Foundry](https://ai.azure.com). -- Navigate to the Model Catalog and explore different models using the correct filters. -- Identify which models can potentially improve the accuracy of the task at hand. - -**HINT:** Take a look at the model cards for each model by clicking into them. Evaluate the models based on their capabilities, limitations, and fit for the use case. Which models seem to be good options for question answering? - -### 2.2 Model Benchmarking -#### Student Task 2.2 -- Use the benchmarking tool and **Compare models** in Foundry to compare the performance of all the selected models you chose from the previous challenge, on industry standard datasets now. -- Leverage the metrics such as accuracy, coherence, and more. -- Recommend the best-performing model for biotech news Q&A. - -### 2.3 Model Comparison -#### Student Task 2.3 -- Navigate to [Github's Model Marketplace](https://github.com/marketplace/models) -- Choose two models to compare. What are your observations? - -### 2.4 Prompt Flow -Scenario: You are a product manager at a multinational tech company, and your team is developing an advanced AI-powered virtual assistant to provide real-time customer support. The company is deciding between GPT-3.5 Turbo and GPT-4 to power the virtual assistant. Your task is to evaluate both models to determine which one best meets the company's needs for handling diverse customer inquiries efficiently and effectively. - -Navigate to the Microsoft Foundry and click on your project. You should be able to see **Prompt flow** under Tools in the navigation bar. Create a new **standard flow** to solve the tasks below and compare the responses from different models. For each task, you will see the provided prompts that you can test against the deployed models. - -**NOTE:** If you get this **User Error: This request is not authorized to perform this operation using this permission. Please grant workspace/registry read access to the source storage account.** when you create a new **standard** flow using the default name, then please append some random characters to the name or create a unique name for your flow. - -**HINT:** Click on the **Prompt Flow** and **Create a Flow** under the [Learning Resources](#learning-resources) for a good overview on Prompt Flow. 
- -#### Student Task 2.4.1: Complex Problem Solving - Compare the models' abilities to navigate complex customer complaints and provide satisfactory solutions. - - Prompt: "A customer is unhappy with their recent purchase due to a missing feature. Outline a step-by-step resolution process that addresses their concern and offers a satisfactory solution." - - Prompt: "Develop a multi-step troubleshooting guide for customers experiencing issues with their smart home devices, integrating potential scenarios and solutions." - -#### Student Task 2.4.2: Creative and Technical Writing - Assess the models' capabilities in technical writing, such as creating detailed product manuals or help articles. - - Prompt: "Write a product description for a new smartphone that highlights its innovative features in a creative and engaging manner." - - Prompt: "Create a comprehensive FAQ section for a complex software application, ensuring clarity and technical accuracy." - -#### Student Task 2.4.3: Long Form Content Understanding - Provide both models with extensive customer feedback or product reviews and ask them to summarize the key points. - - We have provided a `ch2_1.5_product_review.txt` file that contains a product review for you to use with the given prompt below. You will find the `ch2_1.5_product_review.txt` file in the `/data` folder of the codespace. If you are working on your local workstation, you will find the `ch2_1.5_product_review.txt` file in the `/data` folder of the `Resources.zip` file. Please copy & paste the contents of this file within your prompt. - - Prompt: "Analyze a detailed product review and extract actionable insights that can inform future product development." -## Success Criteria - -To complete this challenge successfully, you should be able to: -- Show an understanding of each model and its suitable use cases -- Show an understanding of differences between models -- Select the most suitable model to apply under different scenarios - -## Learning Resources - -- [Overview of Azure OpenAI Models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/concepts/models) -- [Prompt Flow](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/prompt-flow) -- [Create a Flow](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/flow-develop) -- [Tune Variants](https://learn.microsoft.com/en-us/azure/ai-studio/how-to/flow-tune-prompts-using-variants) -- [Azure OpenAI Pricing Page](https://azure.microsoft.com/en-us/pricing/details/cognitive-services/openai-service/) -- [Request for Quota Increase](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR4xPXO648sJKt4GoXAed-0pURVJWRU4yRTMxRkszU0NXRFFTTEhaT1g1NyQlQCN0PWcu) -- [Customize Models](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/fine-tuning?pivots=programming-language-studio) diff --git a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh index 8916d2c343..da90473dff 100755 --- a/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh +++ b/066-OpenAIFundamentals/Student/Resources/infra/deploy.sh @@ -7,8 +7,8 @@ source ./functions.sh declare -A variables=( [template]="main.bicep" [parameters]="main.bicepparam" - [resourceGroupName]="rg-microsoft-foundry-secure" - [location]="eastus" + [resourceGroupName]="rg-openai-fundamentals" + [location]="westus" [validateTemplate]=0 [useWhatIf]=0 ) From b8994e40274ac683f55e30eb0cc78fed131b492b Mon Sep 17 00:00:00 2001 From: Pete Rodriguez Date: Fri, 30 
Jan 2026 08:59:05 -0600 Subject: [PATCH 58/58] Fix link reference for Microsoft Foundry Resources in Challenge 00 documentation --- 066-OpenAIFundamentals/Student/Challenge-00.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/066-OpenAIFundamentals/Student/Challenge-00.md b/066-OpenAIFundamentals/Student/Challenge-00.md index d6d49cbc06..825345077e 100644 --- a/066-OpenAIFundamentals/Student/Challenge-00.md +++ b/066-OpenAIFundamentals/Student/Challenge-00.md @@ -12,7 +12,7 @@ In this challenge, you will set up the necessary prerequisites and environment t - [Setup Jupyter Notebook Environment](#setup-jupyter-notebook-environment) - [GitHub Codespaces](#setup-github-codespace) - [Local Workstation](#setup-local-workstation) -- [Deploy Microsoft Foundry Resources](#deploy-azure-ai-foundry-resources) +- [Deploy Microsoft Foundry Resources](#deploy-microsoft-foundry-resources) ### Azure Subscription
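A quick way to sanity-check the new deploy.sh defaults from PATCH 57 before running the script is sketched below. This is a minimal, hypothetical helper rather than part of the hack itself: it assumes the Azure CLI is installed and `az login` has already been run, and it simply mirrors the `resourceGroupName` and `location` values now declared in deploy.sh.

```bash
#!/usr/bin/env bash
# Minimal sketch (assumption: an authenticated Azure CLI session).
# RG_NAME and LOCATION mirror the defaults declared in deploy.sh above.
set -euo pipefail

RG_NAME="rg-openai-fundamentals"
LOCATION="westus"

# Look up the resource group's current region; empty output means it does not exist yet.
existing_location=$(az group show --name "$RG_NAME" --query location -o tsv 2>/dev/null || true)

if [[ -z "$existing_location" ]]; then
  echo "Resource group $RG_NAME not found; creating it in $LOCATION."
  az group create --name "$RG_NAME" --location "$LOCATION" --output none
elif [[ "$existing_location" != "$LOCATION" ]]; then
  echo "Warning: $RG_NAME already exists in $existing_location, not $LOCATION."
else
  echo "Resource group $RG_NAME already exists in $LOCATION."
fi
```

If the group already exists in a different region, adjust the `location` entry in deploy.sh (or remove and recreate the group) before deploying.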