From d6927931410de124321f0a014e1d1cd6d05f5830 Mon Sep 17 00:00:00 2001
From: Sohail-pr-git
Date: Thu, 16 Mar 2023 16:03:05 +0300
Subject: [PATCH] first commit

---
 CHAT-GPT-C++.cpp       | 69 ++++++++++++++++++++++++++++++++
 CHAT-GPT-CSharp.cs     | 69 ++++++++++++++++++++++++++++++++
 CHAT-GPT-GO.go         | 91 ++++++++++++++++++++++++++++++++++++++++++
 CHAT-GPT-JAVA.java     | 62 ++++++++++++++++++++++++++++
 CHAT-GPT-KOTLIN.kt     | 61 ++++++++++++++++++++++++++++
 CHAT-GPT-PERL.pl       | 38 ++++++++++++++++++
 CHAT-GPT-PHP.php       | 42 +++++++++++++++++++
 CHAT-GPT-PYTHON.py     | 29 ++++++++++++++
 CHAT-GPT-R.r           | 51 +++++++++++++++++++++++
 CHAT-GPT-RUBY.rb       | 38 ++++++++++++++++++
 CHAT-GPT-RUST.rs       | 66 ++++++++++++++++++++++++++++++
 CHAT-GPT-SCALA.sc      | 28 +++++++++++++
 CHAT-GPT-SWIFT.swift   | 70 ++++++++++++++++++++++++++++++++
 CHAT-GPT-TYPESCRIPT.ts | 38 ++++++++++++++++++
 CHATGPT-NODEJS.js      | 48 ++++++++++++++++++++++
 LICENSE                | 21 ++++++++++
 README.md              | 37 +++++++++++++++++
 17 files changed, 858 insertions(+)
 create mode 100644 CHAT-GPT-C++.cpp
 create mode 100644 CHAT-GPT-CSharp.cs
 create mode 100644 CHAT-GPT-GO.go
 create mode 100644 CHAT-GPT-JAVA.java
 create mode 100644 CHAT-GPT-KOTLIN.kt
 create mode 100644 CHAT-GPT-PERL.pl
 create mode 100644 CHAT-GPT-PHP.php
 create mode 100644 CHAT-GPT-PYTHON.py
 create mode 100644 CHAT-GPT-R.r
 create mode 100644 CHAT-GPT-RUBY.rb
 create mode 100644 CHAT-GPT-RUST.rs
 create mode 100644 CHAT-GPT-SCALA.sc
 create mode 100644 CHAT-GPT-SWIFT.swift
 create mode 100644 CHAT-GPT-TYPESCRIPT.ts
 create mode 100644 CHATGPT-NODEJS.js
 create mode 100644 LICENSE
 create mode 100644 README.md

diff --git a/CHAT-GPT-C++.cpp b/CHAT-GPT-C++.cpp
new file mode 100644
index 0000000..86ad91d
--- /dev/null
+++ b/CHAT-GPT-C++.cpp
@@ -0,0 +1,69 @@
+#include <iostream>
+#include <string>
+#include <curl/curl.h>
+
+// Define the API endpoint and authorization token
+const std::string API_ENDPOINT = "https://api.openai.com/v1/engines/davinci-codex/completions";
+const std::string AUTH_TOKEN = "";
+
+// Define a function that sends a question to ChatGPT and receives an answer
+std::string ask_question(const std::string& question) {
+    // Create a cURL handle
+    CURL* curl = curl_easy_init();
+
+    // Set the API endpoint URL
+    curl_easy_setopt(curl, CURLOPT_URL, API_ENDPOINT.c_str());
+
+    // Set the request headers
+    struct curl_slist* headers = NULL;
+    headers = curl_slist_append(headers, "Content-Type: application/json");
+    headers = curl_slist_append(headers, ("Authorization: Bearer " + AUTH_TOKEN).c_str());
+    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+
+    // Set the request data
+    std::string request_data = "{ \"prompt\": \"" + question + "\", \"max_tokens\": 100, \"temperature\": 0.7 }";
+    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, request_data.c_str());
+
+    // Set the response buffer
+    std::string response_buffer;
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, [](char* ptr, size_t size, size_t nmemb, void* userdata) -> size_t {
+        size_t bytes = size * nmemb;
+        std::string* buffer = static_cast<std::string*>(userdata);
+        buffer->append(ptr, bytes);
+        return bytes;
+    });
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_buffer);
+
+    // Send the HTTP request
+    CURLcode result = curl_easy_perform(curl);
+
+    // Clean up the cURL handle and headers
+    curl_easy_cleanup(curl);
+    curl_slist_free_all(headers);
+
+    // Check if the request was successful
+    if (result != CURLE_OK) {
+        std::cerr << "Error sending HTTP request: " << curl_easy_strerror(result) << std::endl;
+        return "";
+    }
+
+    // Parse the response JSON to extract the answer
+    std::string answer;
+    size_t answer_start_pos = response_buffer.find("\"text\": \"") + 9;
+    size_t answer_end_pos = response_buffer.find("\"", answer_start_pos);
+    if (answer_start_pos != std::string::npos && answer_end_pos != std::string::npos) {
+        answer = response_buffer.substr(answer_start_pos, answer_end_pos - answer_start_pos);
+    }
+
+    return answer;
+}
+
+int main() {
+    // Ask a question and print the answer
+    std::string question = "What is the capital of France?";
+    std::string answer = ask_question(question);
+    std::cout << "Question: " << question << std::endl;
+    std::cout << "Answer: " << answer << std::endl;
+
+    return 0;
+}
diff --git a/CHAT-GPT-CSharp.cs b/CHAT-GPT-CSharp.cs
new file mode 100644
index 0000000..a151cc6
--- /dev/null
+++ b/CHAT-GPT-CSharp.cs
@@ -0,0 +1,69 @@
+using System;
+using System.Net.Http;
+using System.Text;
+using System.Threading.Tasks;
+
+namespace ChatGptDemo
+{
+    class Program
+    {
+        // Set up your OpenAI API key
+        private const string API_KEY = "YOUR_OPENAI_API_KEY";
+
+        // Define the URL for the OpenAI API
+        private const string OPENAI_URL = "https://api.openai.com/v1/engines/text-davinci-002/completions";
+
+        // Define the prompt for the conversation
+        private const string PROMPT = "Hello, I'm ChatGPT. How can I help you today?";
+
+        // Define a function to get a response from ChatGPT
+        static async Task<string> GetResponse(string prompt)
+        {
+            // Create an HTTP client
+            var client = new HttpClient();
+
+            // Create a JSON payload
+            var payload = $"{{\"prompt\": \"{prompt}\", \"temperature\": 0.5, \"max_tokens\": 1024}}";
+
+            // Create an HTTP request
+            var request = new HttpRequestMessage
+            {
+                RequestUri = new Uri(OPENAI_URL),
+                Method = HttpMethod.Post,
+                Headers =
+                {
+                    // Content-Type is set by the StringContent below; adding it here would throw
+                    { "Authorization", $"Bearer {API_KEY}" }
+                },
+                Content = new StringContent(payload, Encoding.UTF8, "application/json")
+            };
+
+            // Send the HTTP request and get the response
+            var response = await client.SendAsync(request);
+            var responseContent = await response.Content.ReadAsStringAsync();
+
+            // Decode the response JSON to get the response text
+            var responseText = responseContent.Split("\"text\": \"")[1].Split("\"}")[0];
+
+            // Return the response text
+            return responseText;
+        }
+
+        static void Main(string[] args)
+        {
+            // Start the conversation
+            while (true)
+            {
+                // Prompt the user for input
+                Console.Write("You: ");
+                var userInput = Console.ReadLine();
+
+                // Generate a response from ChatGPT
+                var response = GetResponse($"{PROMPT}\n\nUser: {userInput}").Result;
+
+                // Print the response
+                Console.WriteLine($"ChatGPT: {response.Trim()}");
+            }
+        }
+    }
+}
diff --git a/CHAT-GPT-GO.go b/CHAT-GPT-GO.go
new file mode 100644
index 0000000..8338c38
--- /dev/null
+++ b/CHAT-GPT-GO.go
@@ -0,0 +1,91 @@
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"os"
+	"strings"
+)
+
+// Set up your OpenAI API key
+const apiKey = "YOUR_OPENAI_API_KEY"
+
+// Define the URL for the OpenAI API
+const openaiUrl = "https://api.openai.com/v1/engines/text-davinci-002/completions"
+
+// Define the prompt for the conversation
+const prompt = "Hello, I'm ChatGPT. How can I help you today?"
+ +// Define a struct to hold the response from ChatGPT +type ChatGptResponse struct { + Text string `json:"text"` +} + +// Define a function to get a response from ChatGPT +func getResponse(prompt string) (string, error) { + // Create an HTTP client + client := &http.Client{} + + // Create a JSON payload + payload := map[string]interface{}{ + "prompt": prompt, + "temperature": 0.5, + "max_tokens": 1024, + } + + // Encode the payload as JSON + payloadJson, err := json.Marshal(payload) + if err != nil { + return "", err + } + + // Create an HTTP request + req, err := http.NewRequest("POST", openaiUrl, bytes.NewBuffer(payloadJson)) + if err != nil { + return "", err + } + + // Set the HTTP headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", "Bearer "+apiKey) + + // Send the HTTP request + res, err := client.Do(req) + if err != nil { + return "", err + } + defer res.Body.Close() + + // Decode the HTTP response + var response struct { + Choices []ChatGptResponse `json:"choices"` + } + if err := json.NewDecoder(res.Body).Decode(&response); err != nil { + return "", err + } + + // Return the response text + return response.Choices[0].Text, nil +} + +func main() { + // Start the conversation + for { + // Prompt the user for input + fmt.Print("You: ") + var userInput string + fmt.Scanln(&userInput) + + // Generate a response from ChatGPT + response, err := getResponse(prompt + "\n\nUser: " + userInput) + if err != nil { + fmt.Fprintln(os.Stderr, "Error:", err) + continue + } + + // Print the response + fmt.Println("ChatGPT:", strings.TrimSpace(response)) + } +} diff --git a/CHAT-GPT-JAVA.java b/CHAT-GPT-JAVA.java new file mode 100644 index 0000000..e3e083a --- /dev/null +++ b/CHAT-GPT-JAVA.java @@ -0,0 +1,62 @@ +import java.io.IOException; +import java.net.URI; +import java.net.http.HttpClient; +import java.net.http.HttpRequest; +import java.net.http.HttpResponse; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +public class ChatGptDemo { + + // Set up your OpenAI API key + private static final String API_KEY = "YOUR_OPENAI_API_KEY"; + + // Define the URL for the OpenAI API + private static final String OPENAI_URL = "https://api.openai.com/v1/engines/text-davinci-002/completions"; + + // Define the prompt for the conversation + private static final String PROMPT = "Hello, I'm ChatGPT. 
How can I help you today?"; + + // Define a function to get a response from ChatGPT + public static String getResponse(String prompt) throws IOException, InterruptedException { + // Create an HTTP client + HttpClient client = HttpClient.newHttpClient(); + + // Create a JSON payload + String payload = String.format("{\"prompt\": \"%s\", \"temperature\": 0.5, \"max_tokens\": 1024}", prompt); + + // Create an HTTP request + HttpRequest request = HttpRequest.newBuilder() + .uri(URI.create(OPENAI_URL)) + .header("Content-Type", "application/json") + .header("Authorization", "Bearer " + API_KEY) + .POST(HttpRequest.BodyPublishers.ofString(payload, StandardCharsets.UTF_8)) + .build(); + + // Send the HTTP request + HttpResponse response = client.send(request, HttpResponse.BodyHandlers.ofString()); + + // Decode the HTTP response + String[] responseParts = response.body().split("\"text\": \""); + String[] responseParts2 = responseParts[1].split("\"}"); + String responseText = responseParts2[0]; + + // Return the response text + return responseText; + } + + public static void main(String[] args) throws IOException, InterruptedException { + // Start the conversation + while (true) { + // Prompt the user for input + System.out.print("You: "); + String userInput = System.console().readLine(); + + // Generate a response from ChatGPT + String response = getResponse(PROMPT + "\n\nUser: " + userInput); + + // Print the response + System.out.println("ChatGPT: " + response.trim()); + } + } +} diff --git a/CHAT-GPT-KOTLIN.kt b/CHAT-GPT-KOTLIN.kt new file mode 100644 index 0000000..dcee279 --- /dev/null +++ b/CHAT-GPT-KOTLIN.kt @@ -0,0 +1,61 @@ +import java.net.URI +import java.net.http.HttpClient +import java.net.http.HttpRequest +import java.net.http.HttpResponse +import java.nio.charset.StandardCharsets + +object ChatGptDemo { + + // Set up your OpenAI API key + private const val API_KEY = "YOUR_OPENAI_API_KEY" + + // Define the URL for the OpenAI API + private const val OPENAI_URL = "https://api.openai.com/v1/engines/text-davinci-002/completions" + + // Define the prompt for the conversation + private const val PROMPT = "Hello, I'm ChatGPT. How can I help you today?" + + // Define a function to get a response from ChatGPT + fun getResponse(prompt: String): String { + // Create an HTTP client + val client = HttpClient.newBuilder().build() + + // Create a JSON payload + val payload = "{\"prompt\": \"$prompt\", \"temperature\": 0.5, \"max_tokens\": 1024}" + + // Create an HTTP request + val request = HttpRequest.newBuilder() + .uri(URI.create(OPENAI_URL)) + .header("Content-Type", "application/json") + .header("Authorization", "Bearer $API_KEY") + .POST(HttpRequest.BodyPublishers.ofString(payload, StandardCharsets.UTF_8)) + .build() + + // Send the HTTP request + val response = client.send(request, HttpResponse.BodyHandlers.ofString()) + + // Decode the HTTP response + val responseParts = response.body().split("\"text\": \"") + val responseParts2 = responseParts[1].split("\"}") + val responseText = responseParts2[0] + + // Return the response text + return responseText + } + + @JvmStatic + fun main(args: Array) { + // Start the conversation + while (true) { + // Prompt the user for input + print("You: ") + val userInput = readLine()!! 
+
+            // Generate a response from ChatGPT
+            val response = getResponse("$PROMPT\n\nUser: $userInput")
+
+            // Print the response
+            println("ChatGPT: ${response.trim()}")
+        }
+    }
+}
diff --git a/CHAT-GPT-PERL.pl b/CHAT-GPT-PERL.pl
new file mode 100644
index 0000000..cf64576
--- /dev/null
+++ b/CHAT-GPT-PERL.pl
@@ -0,0 +1,38 @@
+use LWP::UserAgent;
+use JSON::XS;
+
+# Define the API endpoint and authorization token
+my $apiEndpoint = 'https://api.openai.com/v1/engines/davinci-codex/completions';
+my $authToken = '';
+
+# Define a function that sends a question to ChatGPT and receives an answer
+sub askQuestion {
+    my ($question) = @_;
+
+    my $ua = LWP::UserAgent->new;
+    my $response = $ua->post(
+        $apiEndpoint,
+        Content_Type => 'application/json',
+        Authorization => "Bearer $authToken",
+        Content => encode_json({
+            prompt => $question,
+            max_tokens => 100,
+            temperature => 0.7
+        })
+    );
+
+    if ($response->is_error) {
+        print "Error asking question: " . $response->status_line . "\n";
+        return '';
+    } else {
+        my $answer = decode_json($response->decoded_content)->{choices}->[0]->{text};
+        $answer =~ s/\s+$//;  # Remove any trailing whitespace
+        return $answer;
+    }
+}
+
+# Example usage
+my $question = 'What is the capital of France?';
+my $answer = askQuestion($question);
+print "Question: $question\n";
+print "Answer: $answer\n";
diff --git a/CHAT-GPT-PHP.php b/CHAT-GPT-PHP.php
new file mode 100644
index 0000000..1b55074
--- /dev/null
+++ b/CHAT-GPT-PHP.php
@@ -0,0 +1,42 @@
+<?php
+
+// Set up your OpenAI API key
+$api_key = "YOUR_OPENAI_API_KEY";
+$model = "text-davinci-002";
+
+// Define the prompt for the conversation
+$prompt = "Hello, I'm ChatGPT. How can I help you today?";
+
+// Define a function to get a response from ChatGPT
+function get_response($prompt, $model, $api_key, $temperature = 0.5) {
+    $url = "https://api.openai.com/v1/engines/" . $model . "/completions";
+    $headers = [
+        "Content-Type: application/json",
+        "Authorization: Bearer " . $api_key,
+    ];
+    $data = [
+        "prompt" => $prompt,
+        "temperature" => $temperature,
+        "max_tokens" => 1024,
+    ];
+    $curl = curl_init($url);
+    curl_setopt($curl, CURLOPT_POST, true);
+    curl_setopt($curl, CURLOPT_POSTFIELDS, json_encode($data));
+    curl_setopt($curl, CURLOPT_HTTPHEADER, $headers);
+    curl_setopt($curl, CURLOPT_RETURNTRANSFER, true);
+    $response = curl_exec($curl);
+    curl_close($curl);
+    return json_decode($response, true)["choices"][0]["text"];
+}
+
+// Start the conversation
+while (true) {
+    // Get user input
+    $user_input = readline("You: ");
+
+    // Generate a response from ChatGPT
+    $response = get_response($prompt . "\n\nUser: " . $user_input, $model, $api_key);
+
+    // Print the response
+    echo "ChatGPT: " . $response . "\n";
+}
diff --git a/CHAT-GPT-PYTHON.py b/CHAT-GPT-PYTHON.py
new file mode 100644
index 0000000..5283379
--- /dev/null
+++ b/CHAT-GPT-PYTHON.py
@@ -0,0 +1,29 @@
+import openai
+import os
+
+# Set up your OpenAI API key
+openai.api_key = os.environ["OPENAI_API_KEY"]
+
+# Define the prompt for the conversation
+prompt = "Hello, I'm ChatGPT. How can I help you today?"
+
+# Define a function to get a response from ChatGPT
+def get_response(prompt, model, temperature=0.5):
+    response = openai.Completion.create(
+        engine=model,
+        prompt=prompt,
+        max_tokens=1024,
+        temperature=temperature,
+    )
+    return response.choices[0].text.strip()
+
+# Start the conversation
+while True:
+    # Get user input
+    user_input = input("You: ")
+
+    # Generate a response from ChatGPT
+    response = get_response(prompt + "\n\nUser: " + user_input, "text-davinci-002")
+
+    # Print the response
+    print("ChatGPT:", response)
diff --git a/CHAT-GPT-R.r b/CHAT-GPT-R.r
new file mode 100644
index 0000000..3751953
--- /dev/null
+++ b/CHAT-GPT-R.r
@@ -0,0 +1,51 @@
+library(httr)
+library(jsonlite)
+# Set up your OpenAI API key
+apiKey <- "YOUR_OPENAI_API_KEY"
+
+# Define the URL for the OpenAI API
+openaiURL <- "https://api.openai.com/v1/engines/text-davinci-002/completions"
+
+# Define the prompt for the conversation
+prompt <- "Hello, I'm ChatGPT. How can I help you today?"
+ +# Define a function to get a response from ChatGPT +getResponse <- function(prompt) { + # Create a JSON payload + payload <- list( + prompt = paste(prompt, "\n\nUser:"), + temperature = 0.5, + max_tokens = 1024 + ) + + # Create an HTTP request + response <- POST( + openaiURL, + add_headers(Authorization = paste("Bearer", apiKey)), + body = toJSON(payload), + encode = "json" + ) + + # Handle any errors + stop_for_status(response) + + # Parse the response JSON to get the response text + responseJSON <- content(response, "text") + responseDict <- fromJSON(responseJSON, simplifyVector = TRUE) + choices <- responseDict$choices[[1]] + text <- choices$text + + return(text) +} + +# Start the conversation +while (TRUE) { + # Prompt the user for input + userInput <- readline(prompt = "You: ") + + # Generate a response from ChatGPT + response <- getResponse(paste(prompt, userInput)) + + # Print the response + cat("ChatGPT: ", trimws(response), "\n") +} diff --git a/CHAT-GPT-RUBY.rb b/CHAT-GPT-RUBY.rb new file mode 100644 index 0000000..a77106a --- /dev/null +++ b/CHAT-GPT-RUBY.rb @@ -0,0 +1,38 @@ +require "json" +require "net/http" + +# Set up your OpenAI API key +API_KEY = "YOUR_OPENAI_API_KEY" +MODEL = "text-davinci-002" + +# Define the prompt for the conversation +PROMPT = "Hello, I'm ChatGPT. How can I help you today?" + +# Define a function to get a response from ChatGPT +def get_response(prompt, model, api_key, temperature = 0.5) + uri = URI("https://api.openai.com/v1/engines/#{model}/completions") + headers = { + "Content-Type" => "application/json", + "Authorization" => "Bearer #{api_key}" + } + data = { + prompt: prompt, + temperature: temperature, + max_tokens: 1024 + } + response = Net::HTTP.post(uri, data.to_json, headers) + JSON.parse(response.body)["choices"][0]["text"].strip +end + +# Start the conversation +loop do + # Prompt the user for input + print "You: " + user_input = gets.chomp + + # Generate a response from ChatGPT + response = get_response(PROMPT + "\n\nUser: #{user_input}", MODEL, API_KEY) + + # Print the response + puts "ChatGPT: #{response}" +end diff --git a/CHAT-GPT-RUST.rs b/CHAT-GPT-RUST.rs new file mode 100644 index 0000000..67d6032 --- /dev/null +++ b/CHAT-GPT-RUST.rs @@ -0,0 +1,66 @@ +use reqwest::header::{HeaderMap, AUTHORIZATION, CONTENT_TYPE}; +use serde::{Deserialize, Serialize}; +use std::io::{stdin, stdout, Write}; + +// Set up your OpenAI API key +const API_KEY: &str = "YOUR_OPENAI_API_KEY"; +const MODEL: &str = "text-davinci-002"; + +// Define the prompt for the conversation +const PROMPT: &str = "Hello, I'm ChatGPT. 
How can I help you today?"; + +// Define a struct to hold the response from ChatGPT +#[derive(Debug, Deserialize)] +struct ChatGptResponse { + text: String, +} + +// Define a function to get a response from ChatGPT +async fn get_response(prompt: &str, model: &str, api_key: &str, temperature: f64) -> String { + let client = reqwest::Client::new(); + let mut headers = HeaderMap::new(); + headers.insert(CONTENT_TYPE, "application/json".parse().unwrap()); + headers.insert(AUTHORIZATION, format!("Bearer {}", api_key).parse().unwrap()); + let data = format!( + r#"{{"prompt":"{}","temperature":{},"max_tokens":1024}}"#, + prompt, temperature + ); + let response = client + .post(&format!( + "https://api.openai.com/v1/engines/{}/completions", + model + )) + .headers(headers) + .body(data) + .send() + .await + .unwrap() + .json::>() + .await + .unwrap(); + response[0].text.trim().to_string() +} + +#[tokio::main] +async fn main() { + // Start the conversation + loop { + // Prompt the user for input + print!("You: "); + stdout().flush().unwrap(); + let mut user_input = String::new(); + stdin().read_line(&mut user_input).unwrap(); + + // Generate a response from ChatGPT + let response = get_response( + &(PROMPT.to_owned() + "\n\nUser: " + &user_input), + MODEL, + API_KEY, + 0.5, + ) + .await; + + // Print the response + println!("ChatGPT: {}", response); + } +} diff --git a/CHAT-GPT-SCALA.sc b/CHAT-GPT-SCALA.sc new file mode 100644 index 0000000..26f8678 --- /dev/null +++ b/CHAT-GPT-SCALA.sc @@ -0,0 +1,28 @@ +import scalaj.http._ + +// Define the API endpoint and authorization token +val apiEndpoint = "https://api.openai.com/v1/engines/davinci-codex/completions" +val authToken = "" + +// Define a function that sends a question to ChatGPT and receives an answer +def askQuestion(question: String): String = { + val request = Http(apiEndpoint) + .header("Content-Type", "application/json") + .header("Authorization", s"Bearer $authToken") + .postData(s"""{"prompt": "$question", "max_tokens": 100, "temperature": 0.7}""") + val response = request.asString + + if (response.isError) { + println(s"Error asking question: ${response.body}") + "" + } else { + val answer = response.body.replaceAll("\\s+$", "") + answer + } +} + +// Example usage +val question = "What is the capital of France?" +val answer = askQuestion(question) +println(s"Question: $question") +println(s"Answer: $answer") diff --git a/CHAT-GPT-SWIFT.swift b/CHAT-GPT-SWIFT.swift new file mode 100644 index 0000000..f4fa36d --- /dev/null +++ b/CHAT-GPT-SWIFT.swift @@ -0,0 +1,70 @@ +import Foundation + +// Set up your OpenAI API key +let apiKey = "YOUR_OPENAI_API_KEY" + +// Define the URL for the OpenAI API +let openaiURL = URL(string: "https://api.openai.com/v1/engines/text-davinci-002/completions")! + +// Define the prompt for the conversation +let prompt = "Hello, I'm ChatGPT. How can I help you today?" + +// Define a function to get a response from ChatGPT +func getResponse(prompt: String, completionHandler: @escaping (String?, Error?) 
+    // Create a JSON payload
+    let payload = """
+    {
+        "prompt": "\(prompt)",
+        "temperature": 0.5,
+        "max_tokens": 1024
+    }
+    """
+
+    // Create an HTTP request
+    var request = URLRequest(url: openaiURL)
+    request.httpMethod = "POST"
+    request.addValue("application/json", forHTTPHeaderField: "Content-Type")
+    request.addValue("Bearer \(apiKey)", forHTTPHeaderField: "Authorization")
+    request.httpBody = payload.data(using: .utf8)
+
+    // Send the HTTP request and get the response
+    let session = URLSession.shared
+    let task = session.dataTask(with: request) { data, response, error in
+        // Handle any errors
+        guard error == nil else {
+            completionHandler(nil, error)
+            return
+        }
+
+        // Parse the response JSON to get the response text
+        if let data = data,
+           let responseJSON = try? JSONSerialization.jsonObject(with: data, options: []),
+           let responseDict = responseJSON as? [String: Any],
+           let choices = responseDict["choices"] as? [[String: Any]],
+           let text = choices.first?["text"] as? String {
+            completionHandler(text, nil)
+        } else {
+            completionHandler(nil, NSError(domain: "ChatGPTError", code: 0, userInfo: [NSLocalizedDescriptionKey: "Failed to parse response."]))
+        }
+    }
+    task.resume()
+}
+
+// Start the conversation
+while true {
+    // Prompt the user for input
+    print("You: ", terminator: "")
+    let userInput = readLine() ?? ""
+
+    // Generate a response from ChatGPT
+    getResponse(prompt: "\(prompt)\n\nUser: \(userInput)") { response, error in
+        // Handle any errors
+        if let error = error {
+            print("Error: \(error.localizedDescription)")
+            return
+        }
+
+        // Print the response
+        print("ChatGPT: \(response?.trimmingCharacters(in: .whitespacesAndNewlines) ?? "")")
+    }
+}
diff --git a/CHAT-GPT-TYPESCRIPT.ts b/CHAT-GPT-TYPESCRIPT.ts
new file mode 100644
index 0000000..01a763b
--- /dev/null
+++ b/CHAT-GPT-TYPESCRIPT.ts
@@ -0,0 +1,38 @@
+import axios from 'axios';
+
+// Define the API endpoint and authorization token
+const API_ENDPOINT = 'https://api.openai.com/v1/engines/davinci-codex/completions';
+const AUTH_TOKEN = '';
+
+// Define a function that sends a question to ChatGPT and receives an answer
+async function askQuestion(question: string): Promise<string> {
+  try {
+    // Send the HTTP request to the OpenAI API
+    const response = await axios.post(API_ENDPOINT, {
+      prompt: question,
+      max_tokens: 100,
+      temperature: 0.7
+    }, {
+      headers: {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${AUTH_TOKEN}`
+      }
+    });
+
+    // Extract the answer from the response data
+    const answer = response.data.choices[0].text.trim();
+
+    return answer;
+  } catch (error) {
+    console.error(`Error asking question: ${error.message}`);
+    return '';
+  }
+}
+
+// Example usage
+(async () => {
+  const question = 'What is the capital of France?';
+  const answer = await askQuestion(question);
+  console.log(`Question: ${question}`);
+  console.log(`Answer: ${answer}`);
+})();
diff --git a/CHATGPT-NODEJS.js b/CHATGPT-NODEJS.js
new file mode 100644
index 0000000..5b62344
--- /dev/null
+++ b/CHATGPT-NODEJS.js
@@ -0,0 +1,48 @@
+const readline = require("readline");
+const axios = require("axios");
+
+// Set up your OpenAI API key
+const API_KEY = "YOUR_OPENAI_API_KEY";
+const MODEL = "text-davinci-002";
+
+// Define the prompt for the conversation
+const prompt = "Hello, I'm ChatGPT. How can I help you today?";
How can I help you today?"; + +// Define a function to get a response from ChatGPT +async function getResponse(prompt, model, apiKey, temperature = 0.5) { + const response = await axios({ + method: "post", + url: `https://api.openai.com/v1/engines/${model}/completions`, + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${apiKey}`, + }, + data: { + prompt, + temperature, + max_tokens: 1024, + }, + }); + return response.data.choices[0].text.trim(); +} + +// Start the conversation +const rl = readline.createInterface({ + input: process.stdin, + output: process.stdout, +}); + +rl.on("line", async (input) => { + // Generate a response from ChatGPT + const response = await getResponse( + prompt + "\n\nUser: " + input, + MODEL, + API_KEY + ); + + // Print the response + console.log("ChatGPT:", response); +}); + +// Prompt the user for input +rl.prompt(); diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..b063c74 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Laqira Protocol + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/README.md b/README.md new file mode 100644 index 0000000..9217597 --- /dev/null +++ b/README.md @@ -0,0 +1,37 @@ +# ChatGPT-with-15-language + +Development of artificial intelligence infrastructure for all people. Collective cooperation. + +The initial version of GPT chat artificial intelligence in 15 programming languages + +- Go + +- C# + +- C++ + +- Java + +- Kotlin + +- Perl + +- PHP + +- NodeJS + +- TypeScript + +- Ruby + +- R + +- Rust + +- Python + +- Swift + +- Scala + +### Support us and tell us which language to develop more and add new parts to it.