	Update GPT ChatterBot
* Update the endpoint from v1/completions to v1/chat/completions.
* Add SharpToken as a library to calculate input token usage.
* Subtract input tokens from max_tokens so the API request never exceeds the configured maximum.
* Add chat history support, since this API supports it.
* Add a personality prompt to tweak the way the bot behaves.
* Add a min_tokens config option to increase the quality of chat messages when history is enabled.
* Adjust the response function to throw an exception so that a null message isn't added to the message list.
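For context, here is a minimal sketch of the token-budgeting idea described above, assuming SharpToken's GptEncoding API; the helper class and method names below are illustrative and not part of this commit:

    using System;
    using System.Collections.Generic;
    using System.Linq;
    using SharpToken;

    // Illustrative helper (not part of the commit): count the tokens already used
    // by the prompt/history and reserve whatever is left of max_tokens for the reply.
    public static class TokenBudget
    {
        public static int RemainingCompletionTokens(IEnumerable<string> history, int maxTokens)
        {
            var encoding = GptEncoding.GetEncodingForModel("gpt-3.5-turbo");
            var used = history.Sum(msg => encoding.Encode(msg).Count);
            return Math.Max(0, maxTokens - used);
        }
    }

The actual session below applies the same idea directly to its message list and additionally enforces a min_tokens floor by dropping the oldest history entries.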
@@ -79,8 +79,12 @@ public class ChatterBotService : IExecOnMessage
             case ChatBotImplementation.Gpt3:
                 if (!string.IsNullOrWhiteSpace(_creds.Gpt3ApiKey))
                     return new OfficialGpt3Session(_creds.Gpt3ApiKey,
-                        _gcs.Data.ChatGpt.Model,
+                        _gcs.Data.ChatGpt.ModelName,
+                        _gcs.Data.ChatGpt.ChatHistory,
                         _gcs.Data.ChatGpt.MaxTokens,
+                        _gcs.Data.ChatGpt.MinTokens,
+                        _gcs.Data.ChatGpt.PersonalityPrompt,
+                        _client.CurrentUser.Username,
                         _httpFactory);

                 Log.Information("Gpt3 will not work as the api key is missing.");
@@ -199,7 +203,7 @@ public class ChatterBotService : IExecOnMessage
             }

             _ = channel.TriggerTypingAsync();
-            var response = await cbs.Think(message);
+            var response = await cbs.Think(message, usrMsg.Author.ToString());
             await channel.SendConfirmAsync(_eb,
                 title: null,
                 response.SanitizeMentions(true)
@@ -11,7 +11,13 @@ public class Gpt3Response

 public class Choice
 {
-    public string Text { get; set; }
+    [JsonPropertyName("message")]
+    public Message Message { get; init; }
 }

+public class Message {
+    [JsonPropertyName("content")]
+    public string Content { get; init; }
+}
+
 public class Gpt3ApiRequest
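For reference, these DTOs mirror the chat/completions response shape, which (simplified) looks like {"choices":[{"message":{"role":"assistant","content":"..."}}]}. The old completions endpoint instead returned choices[].text, which is why Choice.Text gives way to Choice.Message here.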
@@ -19,12 +25,22 @@ public class Gpt3ApiRequest
     [JsonPropertyName("model")]
     public string Model { get; init; }

-    [JsonPropertyName("prompt")]
-    public string Prompt { get; init; }
+    [JsonPropertyName("messages")]
+    public List<GPTMessage> Messages { get; init; }

     [JsonPropertyName("temperature")]
     public int Temperature { get; init; }

     [JsonPropertyName("max_tokens")]
     public int MaxTokens { get; init; }
 }
+
+public class GPTMessage
+{
+    [JsonPropertyName("role")]
+    public string Role {get; init;}
+    [JsonPropertyName("content")]
+    public string Content {get; init;}
+    [JsonPropertyName("name")]
+    public string Name {get; init;}
+}
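A rough usage sketch of the request DTOs above (values are placeholders). PostAsJsonAsync serializes with System.Text.Json, so the [JsonPropertyName] attributes control the wire format:

    using System;
    using System.Collections.Generic;
    using System.Text.Json;

    var request = new Gpt3ApiRequest
    {
        Model = "gpt-3.5-turbo",
        Messages = new List<GPTMessage>
        {
            new GPTMessage { Role = "user", Content = "Hello!", Name = "SomeUser" }
        },
        Temperature = 1,
        MaxTokens = 70
    };

    // Produces (roughly):
    // {"model":"gpt-3.5-turbo","messages":[{"role":"user","content":"Hello!","name":"SomeUser"}],"temperature":1,"max_tokens":70}
    Console.WriteLine(JsonSerializer.Serialize(request));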
@@ -3,5 +3,5 @@ namespace NadekoBot.Modules.Games.Common.ChatterBot;

 public interface IChatterBotSession
 {
-    Task<string> Think(string input);
+    Task<string> Think(string input, string username);
 }
@@ -18,7 +18,7 @@ public class OfficialCleverbotSession : IChatterBotSession
         _httpFactory = factory;
     }

-    public async Task<string> Think(string input)
+    public async Task<string> Think(string input, string username)
     {
         using var http = _httpFactory.CreateClient();
         var dataString = await http.GetStringAsync(string.Format(QueryString, input, cs ?? ""));
@@ -1,63 +1,101 @@
 #nullable disable
 using Newtonsoft.Json;
 using System.Net.Http.Json;
+using SharpToken;
+using Antlr.Runtime;
+using Microsoft.CodeAnalysis.CSharp.Syntax;

 namespace NadekoBot.Modules.Games.Common.ChatterBot;

 public class OfficialGpt3Session : IChatterBotSession
 {
     private string Uri
-        => $"https://api.openai.com/v1/completions";
+        => $"https://api.openai.com/v1/chat/completions";

     private readonly string _apiKey;
     private readonly string _model;
+    private readonly int _maxHistory;
     private readonly int _maxTokens;
+    private readonly int _minTokens;
+    private readonly string _nadekoUsername;
+    private readonly GptEncoding _encoding;
+    private List<GPTMessage> messages = new();
     private readonly IHttpClientFactory _httpFactory;

+
     public OfficialGpt3Session(
         string apiKey,
-        Gpt3Model model,
+        ChatGptModel model,
+        int chatHistory,
         int maxTokens,
+        int minTokens,
+        string personality,
+        string nadekoUsername,
         IHttpClientFactory factory)
     {
         _apiKey = apiKey;
         _httpFactory = factory;
         switch (model)
         {
-            case Gpt3Model.Ada001:
-                _model = "text-ada-001";
+            case ChatGptModel.Gpt35Turbo:
+                _model = "gpt-3.5-turbo";
                 break;
-            case Gpt3Model.Babbage001:
-                _model = "text-babbage-001";
+            case ChatGptModel.Gpt4:
+                _model = "gpt-4";
                 break;
-            case Gpt3Model.Curie001:
-                _model = "text-curie-001";
-                break;
-            case Gpt3Model.Davinci003:
-                _model = "text-davinci-003";
+            case ChatGptModel.Gpt432k:
+                _model = "gpt-4-32k";
                 break;
         }

+        _maxHistory = chatHistory;
         _maxTokens = maxTokens;
+        _minTokens = minTokens;
+        _nadekoUsername = nadekoUsername;
+        _encoding = GptEncoding.GetEncodingForModel(_model);
+        messages.Add(new GPTMessage(){Role = "user", Content = personality, Name = _nadekoUsername});
     }

-    public async Task<string> Think(string input)
+    public async Task<string> Think(string input, string username)
     {
+        messages.Add(new GPTMessage(){Role = "user", Content = input, Name = username});
+        while(messages.Count > _maxHistory + 2){
+            messages.RemoveAt(1);
+        }
+        int tokensUsed = 0;
+        foreach(GPTMessage message in messages){
+            tokensUsed += _encoding.Encode(message.Content).Count;
+        }
+        tokensUsed *= 2; // Unsure why this is the case, but the token count ChatGPT reports back is double what I calculate.
+        // Check that the minimum number of tokens is available; remove messages until there is room, otherwise exit and inform the user why.
+        while(_maxTokens - tokensUsed <= _minTokens){
+            if(messages.Count > 2){
+                int tokens = _encoding.Encode(messages[1].Content).Count * 2;
+                tokensUsed -= tokens;
+                messages.RemoveAt(1);
+            }
+            else{
+                return "Token count exceeded, please increase the number of tokens in the bot config and restart.";
+            }
+        }
         using var http = _httpFactory.CreateClient();
         http.DefaultRequestHeaders.Authorization = new("Bearer", _apiKey);
         var data = await http.PostAsJsonAsync(Uri, new Gpt3ApiRequest()
         {
             Model = _model,
-            Prompt = input,
-            MaxTokens = _maxTokens,
+            Messages = messages,
+            MaxTokens = _maxTokens - tokensUsed,
             Temperature = 1,
         });
         var dataString = await data.Content.ReadAsStringAsync();
         try
         {
             var response = JsonConvert.DeserializeObject<Gpt3Response>(dataString);

-            return response?.Choices[0]?.Text;
+            string message = response?.Choices[0]?.Message?.Content;
+            // Can't rely on the return to throw, now that the reply also needs to be added to the messages list.
+            _ = message ?? throw new ArgumentNullException(nameof(message));
+            messages.Add(new GPTMessage(){Role = "assistant", Content = message, Name = _nadekoUsername});
+            return message;
         }
         catch
         {
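A worked example of the budgeting logic above, using the default config values and assumed numbers: with maxTokens = 100 and minTokens = 30, a history that encodes to 40 tokens is doubled to 80 by the heuristic, leaving 100 - 80 = 20 tokens for the reply. Since 20 <= 30, the oldest non-personality message (index 1) is dropped and the check repeats; if only the personality prompt and the current message remain and the budget still falls short, the method returns the "Token count exceeded" notice instead of calling the API.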
@@ -8,7 +8,7 @@ namespace NadekoBot.Modules.Games.Common;
 public sealed partial class GamesConfig : ICloneable<GamesConfig>
 {
     [Comment("DO NOT CHANGE")]
-    public int Version { get; set; } = 2;
+    public int Version { get; set; } = 3;

     [Comment("Hangman related settings (.hangman command)")]
     public HangmanConfig Hangman { get; set; } = new()
@@ -108,14 +108,22 @@ public sealed partial class GamesConfig : ICloneable<GamesConfig>
 public sealed partial class ChatGptConfig
 {
     [Comment(@"Which GPT-3 Model should bot use.
-'ada001' - cheapest and fastest
-'babbage001' - 2nd option
-'curie001' - 3rd option
-'davinci003' - Most expensive, slowest")]
-    public Gpt3Model Model { get; set; } = Gpt3Model.Ada001;
+    gpt35turbo - cheapest
+    gpt4 - 30x more expensive, higher quality
+    gpt432k - same model as above, but with a 32k token limit")]
+    public ChatGptModel ModelName { get; set; } = ChatGptModel.Gpt35Turbo;
+
+    [Comment(@"How should the chat bot behave, what's its personality? (Usage of this counts towards the max tokens)")]
+    public string PersonalityPrompt { get; set; } = "You are a chat bot willing to have a conversation with anyone about anything.";
+
+    [Comment(@"The maximum number of messages in a conversation that can be remembered. (This will increase the number of tokens used)")]
+    public int ChatHistory { get; set; } = 5;

     [Comment(@"The maximum number of tokens to use per GPT-3 API call")]
     public int MaxTokens { get; set; } = 100;
+
+    [Comment(@"The minimum number of tokens to use per GPT-3 API call, such that chat history is removed to make room.")]
+    public int MinTokens { get; set; } = 30;
 }

 [Cloneable]
@@ -149,10 +157,9 @@ public enum ChatBotImplementation
     Gpt3
 }

-public enum Gpt3Model
+public enum ChatGptModel
 {
-    Ada001,
-    Babbage001,
-    Curie001,
-    Davinci003
+    Gpt35Turbo,
+    Gpt4,
+    Gpt432k
 }
@@ -28,20 +28,33 @@ public sealed class GamesConfigService : ConfigServiceBase<GamesConfig>
             long.TryParse,
             ConfigPrinters.ToString,
             val => val >= 0);

         AddParsedProp("chatbot",
             gs => gs.ChatBot,
             ConfigParsers.InsensitiveEnum,
             ConfigPrinters.ToString);
-        AddParsedProp("gpt.model",
-            gs => gs.ChatGpt.Model,
+        AddParsedProp("gpt.modelName",
+            gs => gs.ChatGpt.ModelName,
             ConfigParsers.InsensitiveEnum,
             ConfigPrinters.ToString);
+        AddParsedProp("gpt.personality",
+            gs => gs.ChatGpt.PersonalityPrompt,
+            ConfigParsers.String,
+            ConfigPrinters.ToString);
+        AddParsedProp("gpt.chathistory",
+            gs => gs.ChatGpt.ChatHistory,
+            int.TryParse,
+            ConfigPrinters.ToString,
+            val => val > 0);
         AddParsedProp("gpt.max_tokens",
             gs => gs.ChatGpt.MaxTokens,
             int.TryParse,
             ConfigPrinters.ToString,
             val => val > 0);
+        AddParsedProp("gpt.min_tokens",
+            gs => gs.ChatGpt.MinTokens,
+            int.TryParse,
+            ConfigPrinters.ToString,
+            val => val > 0);

         Migrate();
     }
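Once registered this way, these keys should be adjustable at runtime through the bot's games config command (exact syntax depends on the bot version; something along the lines of `.config games gpt.chathistory 10` or `.config games gpt.modelname gpt4`), with the `val => val > 0` predicates rejecting non-positive history and token values.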
@@ -65,7 +78,16 @@ public sealed class GamesConfigService : ConfigServiceBase<GamesConfig>
             ModifyConfig(c =>
             {
                 c.Version = 2;
-                c.ChatBot = ChatBotImplementation.Cleverbot;  
+                c.ChatBot = ChatBotImplementation.Cleverbot;
             });
         }

         if (data.Version < 3)
         {
             ModifyConfig(c =>
             {
                 c.Version = 3;
                 c.ChatGpt.ModelName = ChatGptModel.Gpt35Turbo;
             });
         }
     }
@@ -58,6 +58,7 @@
         <PackageReference Include="Scrutor" Version="4.2.0" />
         <PackageReference Include="Serilog.Sinks.Console" Version="4.0.1" />
         <PackageReference Include="Serilog.Sinks.Seq" Version="5.1.1" />
+        <PackageReference Include="SharpToken" Version="1.2.14" />
         <PackageReference Include="SixLabors.Fonts" Version="1.0.0-beta17" />
         <PackageReference Include="SixLabors.ImageSharp" Version="2.1.3" />
         <PackageReference Include="SixLabors.ImageSharp.Drawing" Version="1.0.0-beta14" />
@@ -1,5 +1,5 @@
 # DO NOT CHANGE
-version: 2
+version: 3
 # Hangman related settings (.hangman command)
 hangman:
 # The amount of currency awarded to the winner of a hangman game
@@ -57,14 +57,19 @@ raceAnimals:
 # Which chatbot API should bot use.
 # 'cleverbot' - bot will use Cleverbot API.
 # 'gpt3' - bot will use GPT-3 API
-chatBot: gpt3
+chatBot: Gpt3

 chatGpt:
-  # Which GPT-3 Model should bot use.
-  # 'ada001' - cheapest and fastest
-  # 'babbage001' - 2nd option
-  # 'curie001' - 3rd option
-  # 'davinci003' - Most expensive, slowest
-  model: davinci003
+  # Which GPT-3 Model should bot use.
+  # gpt35turbo - cheapest
+  # gpt4 - 30x more expensive, higher quality
+  # gpt432k - same model as above, but with a 32k token limit
+  modelName: Gpt35Turbo
+  # How should the chat bot behave, what's its personality? (Usage of this counts towards the max tokens)
+  personalityPrompt: You are a chat bot willing to have a conversation with anyone about anything.
+  # The maximum number of messages in a conversation that can be remembered. (This will increase the number of tokens used)
+  chatHistory: 5
   # The maximum number of tokens to use per GPT-3 API call
   maxTokens: 100
+  # The minimum number of tokens to use per GPT-3 API call, such that chat history is removed to make room.
+  minTokens: 30