diff --git a/.vscode/PythonImportHelper-v2-Completion.json b/.vscode/PythonImportHelper-v2-Completion.json index c836421..8404df1 100644 --- a/.vscode/PythonImportHelper-v2-Completion.json +++ b/.vscode/PythonImportHelper-v2-Completion.json @@ -1,4 +1,38 @@ [ + { + "label": "requests", + "kind": 6, + "isExtraImport": true, + "importPath": "requests", + "description": "requests", + "detail": "requests", + "documentation": {} + }, + { + "label": "BeautifulSoup", + "importPath": "bs4", + "description": "bs4", + "isExtraImport": true, + "detail": "bs4", + "documentation": {} + }, + { + "label": "BeautifulSoup", + "importPath": "bs4", + "description": "bs4", + "isExtraImport": true, + "detail": "bs4", + "documentation": {} + }, + { + "label": "json", + "kind": 6, + "isExtraImport": true, + "importPath": "json", + "description": "json", + "detail": "json", + "documentation": {} + }, { "label": "os", "kind": 6, @@ -52,6 +86,78 @@ "detail": "numpy", "documentation": {} }, + { + "label": "url", + "kind": 5, + "importPath": "github_fork_finder", + "description": "github_fork_finder", + "peekOfCode": "url = \"https://github.com/f/awesome-chatgpt-prompts/network/members\"\nresponse = requests.get(url)\nsoup = BeautifulSoup(response.content, \"html.parser\")\n# Find all the fork URLs on the page\nfork_urls = []\nfor element in soup.find_all(\"a\", class_=\"muted-link\"):\n if \"Forks\" in element.text or 'chatgpt' in element.text:\n fork_urls.append(element[\"href\"]) # element[\"href\"] is the URL of the fork\n print(element[\"href\"])\n# save the links to a file", + "detail": "github_fork_finder", + "documentation": {} + }, + { + "label": "response", + "kind": 5, + "importPath": "github_fork_finder", + "description": "github_fork_finder", + "peekOfCode": "response = requests.get(url)\nsoup = BeautifulSoup(response.content, \"html.parser\")\n# Find all the fork URLs on the page\nfork_urls = []\nfor element in soup.find_all(\"a\", class_=\"muted-link\"):\n if \"Forks\" in 
element.text or 'chatgpt' in element.text:\n fork_urls.append(element[\"href\"]) # element[\"href\"] is the URL of the fork\n print(element[\"href\"])\n# save the links to a file\nwith open(\"fork_urls.txt\", \"w\") as f:", + "detail": "github_fork_finder", + "documentation": {} + }, + { + "label": "soup", + "kind": 5, + "importPath": "github_fork_finder", + "description": "github_fork_finder", + "peekOfCode": "soup = BeautifulSoup(response.content, \"html.parser\")\n# Find all the fork URLs on the page\nfork_urls = []\nfor element in soup.find_all(\"a\", class_=\"muted-link\"):\n if \"Forks\" in element.text or 'chatgpt' in element.text:\n fork_urls.append(element[\"href\"]) # element[\"href\"] is the URL of the fork\n print(element[\"href\"])\n# save the links to a file\nwith open(\"fork_urls.txt\", \"w\") as f:\n for url in fork_urls:", + "detail": "github_fork_finder", + "documentation": {} + }, + { + "label": "fork_urls", + "kind": 5, + "importPath": "github_fork_finder", + "description": "github_fork_finder", + "peekOfCode": "fork_urls = []\nfor element in soup.find_all(\"a\", class_=\"muted-link\"):\n if \"Forks\" in element.text or 'chatgpt' in element.text:\n fork_urls.append(element[\"href\"]) # element[\"href\"] is the URL of the fork\n print(element[\"href\"])\n# save the links to a file\nwith open(\"fork_urls.txt\", \"w\") as f:\n for url in fork_urls:\n f.write(url + \"\\n\")\n# Iterate through each fork URL and scrape the contents of the \"readme.md\" file", + "detail": "github_fork_finder", + "documentation": {} + }, + { + "label": "url", + "kind": 5, + "importPath": "github_search", + "description": "github_search", + "peekOfCode": "url = \"https://api.github.com/search/repositories?q=chatgpt+in:readme\"\nresponse = requests.get(url)\n# Parse the JSON response\ndata = json.loads(response.text)\n# Iterate through the list of repositories and store the URLs in a list\nrepo_urls = []\nfor repo in data[\"items\"]:\n 
repo_urls.append(repo[\"html_url\"])\n# save repo_urls to a file\nwith open(\"repo_urls.txt\", \"w\") as f:", + "detail": "github_search", + "documentation": {} + }, + { + "label": "response", + "kind": 5, + "importPath": "github_search", + "description": "github_search", + "peekOfCode": "response = requests.get(url)\n# Parse the JSON response\ndata = json.loads(response.text)\n# Iterate through the list of repositories and store the URLs in a list\nrepo_urls = []\nfor repo in data[\"items\"]:\n repo_urls.append(repo[\"html_url\"])\n# save repo_urls to a file\nwith open(\"repo_urls.txt\", \"w\") as f:\n for url in repo_urls:", + "detail": "github_search", + "documentation": {} + }, + { + "label": "data", + "kind": 5, + "importPath": "github_search", + "description": "github_search", + "peekOfCode": "data = json.loads(response.text)\n# Iterate through the list of repositories and store the URLs in a list\nrepo_urls = []\nfor repo in data[\"items\"]:\n repo_urls.append(repo[\"html_url\"])\n# save repo_urls to a file\nwith open(\"repo_urls.txt\", \"w\") as f:\n for url in repo_urls:\n f.write(url + \"\\n\")\n# Iterate through each repository URL and use Beautiful Soup to scrape the contents of the page", + "detail": "github_search", + "documentation": {} + }, + { + "label": "repo_urls", + "kind": 5, + "importPath": "github_search", + "description": "github_search", + "peekOfCode": "repo_urls = []\nfor repo in data[\"items\"]:\n repo_urls.append(repo[\"html_url\"])\n# save repo_urls to a file\nwith open(\"repo_urls.txt\", \"w\") as f:\n for url in repo_urls:\n f.write(url + \"\\n\")\n# Iterate through each repository URL and use Beautiful Soup to scrape the contents of the page\nfor url in repo_urls:\n page = requests.get(url)", + "detail": "github_search", + "documentation": {} + }, { "label": "make_a_prompt", "kind": 2, diff --git a/data/prompts/prompt.txt b/data/prompts/prompt.txt index 634312f..ec5d80d 100644 --- a/data/prompts/prompt.txt +++ 
b/data/prompts/prompt.txt @@ -2,7 +2,7 @@ I want you to act as a prompt generator. Firstly, I will give you a title like t (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). You will use the phrase "Your task is to create an outline for the code required for the project" in your response. My first title is "A professional programmer with VSCode development experience that uses python, and can create extensions" (Give me prompt only) My first sentence is "This VSCode extension analyzes changes to files and automatically creates commit messages. " - + I want you to act as a prompt generator. Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). @@ -10,11 +10,24 @@ I want you to act as a prompt generator. Firstly, I will give you a title like t My first title is "A linkedin networking professional" (Give me prompt only) My first sentence is "optimizing your linkedin page for maximum efficacy in job hunting " - + I want you to act as a prompt generator. Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. 
The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). You will use the phrase "Your task is to create an outline for the code required for the project" in your response, but should not be limited to this phrase only, as it is a guideline. My first title is "A professional coder with expertise in creating GitHub Repositories with badging." (Give me prompt only) My first sentence is "Create the badges for issues, and relevant badging for a repo format as a markdown block. " + + +I want you to act as a prompt generator. Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. + (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). + You will use the phrase "Your task is to create an outline for the code required for the project" in your response, but should not be limited to this phrase only, as it is a guideline. + My first title is "A professional prompt writer for chatGPT with the ability to maximize the power of Large Language Models." (Give me prompt only) + + +I want you to act as a prompt generator. + Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". 
+ Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. + (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). + Your title is: web scraping professional using requests, beautiful soup, and python, This prompt should be to solve this problem: Using GitHub's API OR requests with selenium, identify repositories on GitHub that contain lists of chatGPT prompts and collate these into a unified directory., and it should start with this sentence: Outline the codebase for this project using code blocks. \ No newline at end of file diff --git a/fork_urls.txt b/fork_urls.txt new file mode 100644 index 0000000..e69de29 diff --git a/github_fork_finder.py b/github_fork_finder.py new file mode 100644 index 0000000..e74d919 --- /dev/null +++ b/github_fork_finder.py @@ -0,0 +1,39 @@ +# # https://github.com/f/awesome-chatgpt-prompts/network/members +# go to each of the fork urls at the url above and scrape the readme.md file for the prompts.
+ +import requests +from bs4 import BeautifulSoup +import json + +# Use requests to access the GitHub API and search for repositories containing "chatgpt" in the readme file +url = "https://github.com/f/awesome-chatgpt-prompts/network/members" +response = requests.get(url) +soup = BeautifulSoup(response.content, "html.parser") + +# Find all the fork URLs on the page +fork_urls = [] +for element in soup.find_all("a", class_="muted-link"): + if "Forks" in element.text or 'chatgpt' in element.text: + fork_urls.append(element["href"]) # element["href"] is the URL of the fork + print(element["href"]) + + +# save the links to a file +with open("fork_urls.txt", "w") as f: + for url in fork_urls: + f.write(url + "\n") + +# Iterate through each fork URL and scrape the contents of the "readme.md" file +# prompts = [] +# for fork_url in fork_urls: +# readme_url = fork_url + "/readme.md" +# readme_response = requests.get(readme_url) +# readme_soup = BeautifulSoup(readme_response.content, "html.parser") +# # Use Beautiful Soup to find specific elements on the page (e.g. 
file names containing "prompts") +# # and store the data in a list +# for element in readme_soup.find_all("p"): +# if "prompts" in element.text: +# prompts.append(element.text) + +# Print the list of prompts +# print(prompts) diff --git a/github_search.py b/github_search.py new file mode 100644 index 0000000..c314e77 --- /dev/null +++ b/github_search.py @@ -0,0 +1,36 @@ +import requests +from bs4 import BeautifulSoup +import json + +# Use requests to access the GitHub API and search for repositories containing "chatgpt" in the name +# url = "https://api.github.com/search/repositories?q=chatgpt+in:name" +url = "https://api.github.com/search/repositories?q=chatgpt+in:readme" +response = requests.get(url) + +# Parse the JSON response +data = json.loads(response.text) + +# Iterate through the list of repositories and store the URLs in a list +repo_urls = [] +for repo in data["items"]: + repo_urls.append(repo["html_url"]) + +# save repo_urls to a file +with open("repo_urls.txt", "w") as f: + for url in repo_urls: + f.write(url + "\n") + +# Iterate through each repository URL and use Beautiful Soup to scrape the contents of the page +for url in repo_urls: + page = requests.get(url) + soup = BeautifulSoup(page.content, "html.parser") + + # Use Beautiful Soup to find specific elements on the page (e.g. file names containing "prompts") + # and store the data in a list + prompts = [] + for element in soup.find_all("a", class_="js-navigation-open"): + if "prompts" in element.text: + prompts.append(element.text) + +# Print the list of prompts +print(prompts) diff --git a/prompt_generator.py b/prompt_generator.py index 26d0e86..21ed163 100644 --- a/prompt_generator.py +++ b/prompt_generator.py @@ -13,13 +13,19 @@ def make_a_prompt(): ) first_sentence = input("Enter your first sentence: ") print("-" * 50) - the_prompt = """I want you to act as a prompt generator. Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". 
Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. + print(f'Received: "{first_sentence}", excellent choice!\n') + print("Now, what problem would you like chatGPT to solve?\n") + this_problem = input("Enter your problem: ") + print("-" * 50) + + + the_prompt = """I want you to act as a prompt generator. + Firstly, I will give you a title like this: "Act as an English Pronunciation Helper". + Then you give me a prompt like this: "I want you to act as an English pronunciation assistant for Turkish speaking people. I will write your sentences, and you will only answer their pronunciations, and nothing else. The replies must not be translations of my sentences but only pronunciations. Pronunciations should use Turkish Latin letters for phonetics. Do not write explanations on replies. (You should adapt the sample prompt according to the title I gave. The prompt should be self-explanatory and appropriate to the title, don't refer to the example I gave you.). - You will use the phrase "Your task is to create an outline for the code required for the project" in your response, but should not be limited to this phrase only, as it is a guideline. 
- My first title is "{}" (Give me prompt only) - My first sentence is "{}" + Your title is: {}, This prompt should be to solve this problem: {}, and it should start with this sentence: {} """.format( - bot_type, first_sentence + bot_type, this_problem, first_sentence ) # save the prompt to a text file in data/prompts, if data/prompts doesn't exist, create it if not os.path.exists("data/prompts"): diff --git a/repo_urls.txt b/repo_urls.txt new file mode 100644 index 0000000..0c4161e --- /dev/null +++ b/repo_urls.txt @@ -0,0 +1,30 @@ +https://github.com/acheong08/ChatGPT +https://github.com/wong2/chat-gpt-google-extension +https://github.com/lencx/ChatGPT +https://github.com/humanloop/awesome-chatgpt +https://github.com/GitHubDaily/GitHubDaily +https://github.com/m1guelpf/chatgpt-telegram +https://github.com/fuergaosi233/wechat-chatgpt +https://github.com/transitive-bullshit/chatgpt-api +https://github.com/mpociot/chatgpt-vscode +https://github.com/gragland/chatgpt-chrome-extension +https://github.com/AutumnWhj/ChatGPT-wechat-bot +https://github.com/danielgross/whatsapp-gpt +https://github.com/vincelwt/chatgpt-mac +https://github.com/terry3041/pyChatGPT +https://github.com/mmabrouk/chatgpt-wrapper +https://github.com/altryne/chatGPT-telegram-bot +https://github.com/Zero6992/chatGPT-discord-bot +https://github.com/skydoves/chatgpt-android +https://github.com/adrianhajdin/project_openai_codex +https://github.com/Kamigami55/awesome-chatgpt +https://github.com/liady/ChatGPT-pdf +https://github.com/ChatGPT-Hackers/ChatGPT-API-server +https://github.com/sonnylazuardi/chatgpt-desktop +https://github.com/labteral/chatgpt-python +https://github.com/kazuki-sf/ChatGPT_Extension +https://github.com/ZohaibAhmed/ChatGPT-Google +https://github.com/qunash/chatgpt-advanced +https://github.com/kazuki-sf/YouTube_Summary_with_ChatGPT +https://github.com/A-kirami/nonebot-plugin-chatgpt +https://github.com/djun/wechatbot diff --git a/testing.md b/testing.md new file mode 100644 index 
0000000..008f84a --- /dev/null +++ b/testing.md @@ -0,0 +1,133 @@ +
+

+ChatGPTea +

+A curated selection of the best prompts for savvy users of OpenAI's chatbot, chatGPT + +--- + +[![GitHub issues](https://img.shields.io/github/issues/grahamwaters/chatGPTea-Ultimate-Prompt-List)](https://github.com/grahamwaters/chatGPTea-Ultimate-Prompt-List/issues) +[![GitHub closed issues](https://img.shields.io/github/issues-closed/grahamwaters/chatGPTea-Ultimate-Prompt-List)](https://github.com/grahamwaters/chatGPTea-Ultimate-Prompt-List/issues?q=is%3Aissue+is%3Aclosed) +[![GitHub pull requests](https://img.shields.io/github/issues-pr/grahamwaters/chatGPTea-Ultimate-Prompt-List)](https://github.com/grahamwaters/chatGPTea-Ultimate-Prompt-List/pulls) +[![GitHub closed pull requests](https://img.shields.io/github/issues-pr-closed/grahamwaters/chatGPTea-Ultimate-Prompt-List)](https://github.com/grahamwaters/chatGPTea-Ultimate-Prompt-List/pulls?q=is%3Apr+is%3Aclosed) +[![GitHub contributors](https://img.shields.io/github/contributors/grahamwaters/chatGPTea-Ultimate-Prompt-List)](https://github.com/grahamwaters/chatGPTea-Ultimate-Prompt-List/graphs/contributors) + +--- + +
+ +--- + +## The Ultimate chatGPT Prompt List + +![main banner](./images/main.png) + + + + +## Instructions + +Choose a subject area you are interested in, and click the link below to go to the page with prompts for that subject. If that page is empty, then you can help by adding prompts to that page. If you are not sure how to do that, you can read the [contributing guidelines](./CONTRIBUTING.md). + +## Prompt Generator! +If you are feeling like having your mind melt into magic today then head over to the [prompt generator](./prompt_generator.py) and let the magic happen. This script will literally write your prompts for you, as if chatGPT wasn't enough magic for you already. + +
+ +

Topics To Choose From

+ +[![Advertising](https://img.shields.io/badge/-Advertising-green)](./industries/advertising.md) +[![animation](https://img.shields.io/badge/-animation-black)](./industries/animation.md) +[![architecture](https://img.shields.io/badge/-architecture-blue)](./industries/animation.md) +[![art](https://img.shields.io/badge/-art-green)](./industries/art.md) +[![astronomy](https://img.shields.io/badge/-astronomy-brown)](./industries/astronomy.md) +[![beauty](https://img.shields.io/badge/-beauty-blue)](./industries/beauty.md) +[![biology](https://img.shields.io/badge/-biology-green)](./industries/biology.md) +[![business](https://img.shields.io/badge/-business-yellow)](./industries/business.md) +[![career_development](https://img.shields.io/badge/-career_development-blue)](./industries/career_development.md) +[![chemistry](https://img.shields.io/badge/-chemistry-green)](./industries/chemistry.md) +[![computer_science](https://img.shields.io/badge/-computer_science-yellow)](./industries/computer_science.md) +[![cooking](https://img.shields.io/badge/-cooking-blue)](./industries/cooking.md) +[![crafts](https://img.shields.io/badge/-crafts-green)](./industries/crafts.md) +[![data_science](https://img.shields.io/badge/-data_science-red)](./industries/data_science.md) +[![economics](https://img.shields.io/badge/-economics-orange)](./industries/economics.md) +[![education](https://img.shields.io/badge/-education-pink)](./industries/education.md) +[![engineering](https://img.shields.io/badge/-engineering-yellow)](./industries/engineering.md) +[![entrepreneurship](https://img.shields.io/badge/-entrepreneurship-blue)](./industries/entrepreneurship.md) +[![fashion](https://img.shields.io/badge/-fashion-green)](./industries/fashion.md) +[![film](https://img.shields.io/badge/-film-yellow)](./industries/film.md) +[![finance](https://img.shields.io/badge/-finance-green)](./industries/finance.md) +[![food](https://img.shields.io/badge/-food-pink)](./industries/food.md) 
+[![gaming](https://img.shields.io/badge/-gaming-yellow)](./industries/gaming.md) +[![gaming](https://img.shields.io/badge/-gaming-blue)](./industries/gaming.md) +[![graphic_design](https://img.shields.io/badge/-graphic_design-green)](./industries/graphic_design.md) +[![health](https://img.shields.io/badge/-health-yellow)](./industries/health.md) +[![history](https://img.shields.io/badge/-history-blue)](./industries/history.md) +[![humor](https://img.shields.io/badge/-humor-green)](./industries/humor.md) +[![illustration](https://img.shields.io/badge/-illustration-brown)](./industries/illustration.md) +[![industrial_design](https://img.shields.io/badge/-industrial_design-blue)](./industries/industrial_design.md) +[![journalism](https://img.shields.io/badge/-journalism-green)](./industries/journalism.md) +[![law](https://img.shields.io/badge/-law-yellow)](./industries/law.md) +[![literature](https://img.shields.io/badge/-literature-lightblue)](./industries/literature.md) +[![marketing](https://img.shields.io/badge/-marketing-green)](./industries/marketing.md) +[![mathematics](https://img.shields.io/badge/-mathematics-red)](./industries/mathematics.md) +[![medicine](https://img.shields.io/badge/-medicine-blue)](./industries/medicine.md) +[![music](https://img.shields.io/badge/-music-green)](./industries/music.md) +[![music_production](https://img.shields.io/badge/-music_production-yellow)](./industries/music_production.md) +[![nature](https://img.shields.io/badge/-nature-lightgreen)](./industries/nature.md) +[![nutrition](https://img.shields.io/badge/-nutrition-blue)](./industries/nutrition.md) +[![other](https://img.shields.io/badge/-other-green)](./industries/other.md) +[![parenting](https://img.shields.io/badge/-parenting-red)](./industries/parenting.md) +[![personal_development](https://img.shields.io/badge/-personal_development-blue)](./industries/personal_development.md) +[![pets](https://img.shields.io/badge/-pets-green)](./industries/pets.md) 
+[![philosophy](https://img.shields.io/badge/-philosophy-yellow)](./industries/philosophy.md) +[![photography](https://img.shields.io/badge/-photography-black)](./industries/photography.md) +[![physics](https://img.shields.io/badge/-physics-green)](./industries/physics.md) +[![politics](https://img.shields.io/badge/-politics-yellow)](./industries/politics.md) +[![product_design](https://img.shields.io/badge/-product_design-blue)](./industries/product_design.md) +[![productivity](https://img.shields.io/badge/-productivity-green)](./industries/productivity.md) +[![psychology](https://img.shields.io/badge/-psychology-yellow)](./industries/psychology.md) +[![public_relations](https://img.shields.io/badge/-public_relations-blue)](./industries/public_relations.md) +[![religion](https://img.shields.io/badge/-religion-green)](./industries/religion.md) +[![robotics](https://img.shields.io/badge/-robotics-yellow)](./industries/robotics.md) +[![science](https://img.shields.io/badge/-science-blue)](./industries/science.md) +[![self_improvement](https://img.shields.io/badge/-self_improvement-blue)](./industries/self_improvement.md) +[![social_media](https://img.shields.io/badge/-social_media-yellow)](./industries/social_media.md) +[![software_engineering](https://img.shields.io/badge/-software_engineering-blue)](./industries/software_engineering.md) +[![sports](https://img.shields.io/badge/-sports-green)](./industries/sports.md) +[![statistics](https://img.shields.io/badge/-statistics-yellow)](./industries/statistics.md) +[![storytelling](https://img.shields.io/badge/-storytelling-purple)](./industries/storytelling.md) +[![technology](https://img.shields.io/badge/-technology-green)](./industries/technology.md) +[![web_development](https://img.shields.io/badge/-web_development-yellow)](./industries/web_development.md) +[![writing](https://img.shields.io/badge/-writing-lightpink)](./industries/writing.md) 
+[![artificial_intelligence](https://img.shields.io/badge/-artificial_intelligence-blue)](./specific_topics/artificial_intelligence.md) +[![back_end_development](https://img.shields.io/badge/-back_end_development-green)](./specific_topics/back_end_development.md) +[![cloud_computing](https://img.shields.io/badge/-cloud_computing-yellow)](./specific_topics/cloud_computing.md) +[![computer_graphics](https://img.shields.io/badge/-computer_graphics-blue)](./specific_topics/computer_graphics.md) +[![computer_networking](https://img.shields.io/badge/-computer_networking-green)](./specific_topics/computer_networking.md) +[![computer_security](https://img.shields.io/badge/-computer_security-yellow)](./specific_topics/computer_security.md) +[![computer_vision](https://img.shields.io/badge/-computer_vision-blue)](./specific_topics/computer_vision.md) +[![data_structures](https://img.shields.io/badge/-data_structures-green)](./specific_topics/data_structures.md) +[![databases](https://img.shields.io/badge/-databases-purple)](./specific_topics/databases.md) +[![design](https://img.shields.io/badge/-design-blue)](./specific_topics/design.md) + +
+ +## Contributing + +Your contributions are always welcome! Please take a look at the [contribution guidelines](./CONTRIBUTING.md) first. + +## Contents + +- [Awesome Courses](#awesome-courses) + - [Acknowledgements](#acknowledgements) + + + + +# Awesome Courses + + +## Acknowledgements +- [chatGPT](openai.com) - OpenAI's chatbot +- [f/Awesome-ChatGPT-Prompts Repository](https://github.com/f/awesome-chatgpt-prompts)