Compare commits


5 Commits

Author SHA1 Message Date
Kwoth
b0ac35b82e Updated changelog. Version upped to 4.3.19 2024-01-20 14:15:30 +00:00
Kwoth
367135be6a Merge branch 'v4' of https://gitlab.com/kwoth/nadekobot into v4 2024-01-20 14:06:59 +00:00
Kwoth
f69f8548b0 Added followedStreams.maxCount to searches config 2024-01-20 14:05:20 +00:00
Kwoth
449dbafff7 Merge branch 'v4' into 'v4'
Update GPT ChatterBot

See merge request Kwoth/nadekobot!313
2024-01-16 09:12:14 +00:00
Alexandra
afba004d85 Update GPT ChatterBot
* Updates endpoint from v1/completions to v1/chat/completions
* Add SharpTokens as a library to calculate input token usage
* Subtract input tokens from max_tokens to ensure the API tokens don't exceed the max specified
* Add Chat history support since this API supports it
* Add a personality prompt to tweak the way the bot behaves
* Add a min_tokens config to increase the quality of chat messages when history is enabled
* Adjust the response function to throw an exception so that a null message isn't added to the list.
2024-01-16 09:12:14 +00:00
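
In short, the move from `v1/completions` to `v1/chat/completions` replaces the single prompt string with a role-tagged message list, which is what makes the chat history and personality prompt possible. A minimal sketch of the new request shape, built from the `Gpt3ApiRequest` and `GPTMessage` types added in the diff below (the `Name` values here are illustrative, not from the source):

```csharp
using System.Collections.Generic;

// Illustrative only: the shape of the new chat-completions request body.
// The old endpoint took a single "prompt" string; the chat endpoint takes
// a list of role-tagged messages.
var request = new Gpt3ApiRequest
{
    Model = "gpt-3.5-turbo",
    Messages = new List<GPTMessage>
    {
        // personality prompt, seeded once when the session is created
        new() { Role = "user", Content = "You are a chat bot willing to have a conversation with anyone about anything.", Name = "Nadeko" },
        // latest user input
        new() { Role = "user", Content = "Hello!", Name = "SomeUser" }
    },
    MaxTokens = 100,   // the session subtracts tokens already used by the messages above
    Temperature = 1
};
```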
15 changed files with 263 additions and 127 deletions

View File

@@ -2,6 +2,19 @@
 Experimental changelog. Mostly based on [keepachangelog](https://keepachangelog.com/en/1.0.0/) except date format. a-c-f-r-o
+## [4.3.19] - 20.01.2024
+### Added
+- Added `followedStreams.maxCount` to `searches.yml` which lets bot owners change the default of 10 per server
+### Changed
+- Improvements to GPT ChatterBot (thx alexandra)
+    - Add a personality prompt to tweak the way chatgpt bot behaves
+    - Added Chat history support to chatgpt ChatterBot
+    - Chatgpt token usage now correctly calculated
+    - More chatgpt configs in `games.yml`
 ## [4.3.18] - 26.12.2023
 ### Added

View File

@@ -79,8 +79,12 @@ public class ChatterBotService : IExecOnMessage
             case ChatBotImplementation.Gpt3:
                 if (!string.IsNullOrWhiteSpace(_creds.Gpt3ApiKey))
                     return new OfficialGpt3Session(_creds.Gpt3ApiKey,
-                        _gcs.Data.ChatGpt.Model,
+                        _gcs.Data.ChatGpt.ModelName,
+                        _gcs.Data.ChatGpt.ChatHistory,
                         _gcs.Data.ChatGpt.MaxTokens,
+                        _gcs.Data.ChatGpt.MinTokens,
+                        _gcs.Data.ChatGpt.PersonalityPrompt,
+                        _client.CurrentUser.Username,
                         _httpFactory);

                 Log.Information("Gpt3 will not work as the api key is missing.");
@@ -199,7 +203,7 @@ public class ChatterBotService : IExecOnMessage
             }

             _ = channel.TriggerTypingAsync();
-            var response = await cbs.Think(message);
+            var response = await cbs.Think(message, usrMsg.Author.ToString());
             await channel.SendConfirmAsync(_eb,
                 title: null,
                 response.SanitizeMentions(true)

View File

@@ -11,7 +11,13 @@ public class Gpt3Response
 public class Choice
 {
-    public string Text { get; set; }
+    [JsonPropertyName("message")]
+    public Message Message { get; init; }
+}
+
+public class Message {
+    [JsonPropertyName("content")]
+    public string Content { get; init; }
 }

 public class Gpt3ApiRequest
@@ -19,8 +25,8 @@ public class Gpt3ApiRequest
     [JsonPropertyName("model")]
     public string Model { get; init; }

-    [JsonPropertyName("prompt")]
-    public string Prompt { get; init; }
+    [JsonPropertyName("messages")]
+    public List<GPTMessage> Messages { get; init; }

     [JsonPropertyName("temperature")]
     public int Temperature { get; init; }
@@ -28,3 +34,13 @@ public class Gpt3ApiRequest
     [JsonPropertyName("max_tokens")]
     public int MaxTokens { get; init; }
 }
+
+public class GPTMessage
+{
+    [JsonPropertyName("role")]
+    public string Role {get; init;}
+    [JsonPropertyName("content")]
+    public string Content {get; init;}
+    [JsonPropertyName("name")]
+    public string Name {get; init;}
+}

View File

@@ -3,5 +3,5 @@ namespace NadekoBot.Modules.Games.Common.ChatterBot;
 public interface IChatterBotSession
 {
-    Task<string> Think(string input);
+    Task<string> Think(string input, string username);
 }

View File

@@ -18,7 +18,7 @@ public class OfficialCleverbotSession : IChatterBotSession
         _httpFactory = factory;
     }

-    public async Task<string> Think(string input)
+    public async Task<string> Think(string input, string username)
     {
         using var http = _httpFactory.CreateClient();
         var dataString = await http.GetStringAsync(string.Format(QueryString, input, cs ?? ""));

View File

@@ -1,63 +1,101 @@
 #nullable disable
 using Newtonsoft.Json;
 using System.Net.Http.Json;
+using SharpToken;
+using Antlr.Runtime;
+using Microsoft.CodeAnalysis.CSharp.Syntax;

 namespace NadekoBot.Modules.Games.Common.ChatterBot;

 public class OfficialGpt3Session : IChatterBotSession
 {
     private string Uri
-        => $"https://api.openai.com/v1/completions";
+        => $"https://api.openai.com/v1/chat/completions";

     private readonly string _apiKey;
     private readonly string _model;
+    private readonly int _maxHistory;
     private readonly int _maxTokens;
+    private readonly int _minTokens;
+    private readonly string _nadekoUsername;
+    private readonly GptEncoding _encoding;
+    private List<GPTMessage> messages = new();
     private readonly IHttpClientFactory _httpFactory;

     public OfficialGpt3Session(
         string apiKey,
-        Gpt3Model model,
+        ChatGptModel model,
+        int chatHistory,
         int maxTokens,
+        int minTokens,
+        string personality,
+        string nadekoUsername,
         IHttpClientFactory factory)
     {
         _apiKey = apiKey;
         _httpFactory = factory;
         switch (model)
         {
-            case Gpt3Model.Ada001:
-                _model = "text-ada-001";
+            case ChatGptModel.Gpt35Turbo:
+                _model = "gpt-3.5-turbo";
                 break;
-            case Gpt3Model.Babbage001:
-                _model = "text-babbage-001";
+            case ChatGptModel.Gpt4:
+                _model = "gpt-4";
                 break;
-            case Gpt3Model.Curie001:
-                _model = "text-curie-001";
-                break;
-            case Gpt3Model.Davinci003:
-                _model = "text-davinci-003";
+            case ChatGptModel.Gpt432k:
+                _model = "gpt-4-32k";
                 break;
         }

+        _maxHistory = chatHistory;
         _maxTokens = maxTokens;
+        _minTokens = minTokens;
+        _nadekoUsername = nadekoUsername;
+        _encoding = GptEncoding.GetEncodingForModel(_model);
+        messages.Add(new GPTMessage(){Role = "user", Content = personality, Name = _nadekoUsername});
     }

-    public async Task<string> Think(string input)
+    public async Task<string> Think(string input, string username)
     {
+        messages.Add(new GPTMessage(){Role = "user", Content = input, Name = username});
+        while(messages.Count > _maxHistory + 2){
+            messages.RemoveAt(1);
+        }
+        int tokensUsed = 0;
+        foreach(GPTMessage message in messages){
+            tokensUsed += _encoding.Encode(message.Content).Count;
+        }
+        tokensUsed *= 2; //Unsure why this is the case, but the token count chatgpt reports back is double what I calculate.
+        //check if we have the minimum number of tokens available to use. Remove messages until we have enough, otherwise exit out and inform the user why.
+        while(_maxTokens - tokensUsed <= _minTokens){
+            if(messages.Count > 2){
+                int tokens = _encoding.Encode(messages[1].Content).Count * 2;
+                tokensUsed -= tokens;
+                messages.RemoveAt(1);
+            }
+            else{
+                return "Token count exceeded, please increase the number of tokens in the bot config and restart.";
+            }
+        }
         using var http = _httpFactory.CreateClient();
         http.DefaultRequestHeaders.Authorization = new("Bearer", _apiKey);

         var data = await http.PostAsJsonAsync(Uri, new Gpt3ApiRequest()
         {
             Model = _model,
-            Prompt = input,
-            MaxTokens = _maxTokens,
+            Messages = messages,
+            MaxTokens = _maxTokens - tokensUsed,
             Temperature = 1,
         });

         var dataString = await data.Content.ReadAsStringAsync();
         try
         {
             var response = JsonConvert.DeserializeObject<Gpt3Response>(dataString);
-            return response?.Choices[0]?.Text;
+            string message = response?.Choices[0]?.Message?.Content;
+            //Can't rely on the return to except, now that we need to add it to the messages list.
+            _ = message ?? throw new ArgumentNullException(nameof(message));
+            messages.Add(new GPTMessage(){Role = "assistant", Content = message, Name = _nadekoUsername});
+            return message;
         }
         catch
         {
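
To make the budgeting logic above easier to follow, here is a standalone sketch of the same idea, assuming only the SharpToken package referenced in this merge request (the history contents and variable names are illustrative): count the tokens already committed to the conversation, evict the oldest history entry (keeping the personality prompt at index 0) while fewer than `minTokens` remain, then hand the remainder to `max_tokens`.

```csharp
using System;
using System.Collections.Generic;
using System.Linq;
using SharpToken;

// Token budget sketch. Model name matches the session's gpt35turbo choice.
var encoding = GptEncoding.GetEncodingForModel("gpt-3.5-turbo");

var history = new List<string>
{
    "You are a chat bot willing to have a conversation with anyone about anything.", // personality prompt
    "Hello!",       // user
    "Hi there!"     // assistant
};

const int maxTokens = 100; // chatGpt.maxTokens in games.yml
const int minTokens = 30;  // chatGpt.minTokens in games.yml

// The session doubles its estimate to match the usage the API reports back.
int Used() => history.Sum(m => encoding.Encode(m).Count) * 2;

// Evict oldest non-personality entries until enough budget remains for a reply.
while (maxTokens - Used() <= minTokens && history.Count > 2)
    history.RemoveAt(1);

Console.WriteLine($"max_tokens sent to the API: {maxTokens - Used()}");
```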

View File

@@ -8,7 +8,7 @@ namespace NadekoBot.Modules.Games.Common;
 public sealed partial class GamesConfig : ICloneable<GamesConfig>
 {
     [Comment("DO NOT CHANGE")]
-    public int Version { get; set; } = 2;
+    public int Version { get; set; } = 3;

     [Comment("Hangman related settings (.hangman command)")]
     public HangmanConfig Hangman { get; set; } = new()
@@ -108,14 +108,22 @@ public sealed partial class GamesConfig : ICloneable<GamesConfig>
 public sealed partial class ChatGptConfig
 {
     [Comment(@"Which GPT-3 Model should bot use.
-'ada001' - cheapest and fastest
-'babbage001' - 2nd option
-'curie001' - 3rd option
-'davinci003' - Most expensive, slowest")]
-    public Gpt3Model Model { get; set; } = Gpt3Model.Ada001;
+gpt35turbo - cheapest
+gpt4 - 30x more expensive, higher quality
+gp432k - same model as above, but with a 32k token limit")]
+    public ChatGptModel ModelName { get; set; } = ChatGptModel.Gpt35Turbo;
+
+    [Comment(@"How should the chat bot behave, what's its personality? (Usage of this counts towards the max tokens)")]
+    public string PersonalityPrompt { get; set; } = "You are a chat bot willing to have a conversation with anyone about anything.";
+
+    [Comment(@"The maximum number of messages in a conversation that can be remembered. (This will increase the number of tokens used)")]
+    public int ChatHistory { get; set; } = 5;

     [Comment(@"The maximum number of tokens to use per GPT-3 API call")]
     public int MaxTokens { get; set; } = 100;
+
+    [Comment(@"The minimum number of tokens to use per GPT-3 API call, such that chat history is removed to make room.")]
+    public int MinTokens { get; set; } = 30;
 }

 [Cloneable]
@@ -149,10 +157,9 @@ public enum ChatBotImplementation
     Gpt3
 }

-public enum Gpt3Model
+public enum ChatGptModel
 {
-    Ada001,
-    Babbage001,
-    Curie001,
-    Davinci003
+    Gpt35Turbo,
+    Gpt4,
+    Gpt432k
 }

View File

@@ -28,20 +28,33 @@ public sealed class GamesConfigService : ConfigServiceBase<GamesConfig>
             long.TryParse,
             ConfigPrinters.ToString,
             val => val >= 0);

         AddParsedProp("chatbot",
             gs => gs.ChatBot,
             ConfigParsers.InsensitiveEnum,
             ConfigPrinters.ToString);
-        AddParsedProp("gpt.model",
-            gs => gs.ChatGpt.Model,
+        AddParsedProp("gpt.modelName",
+            gs => gs.ChatGpt.ModelName,
             ConfigParsers.InsensitiveEnum,
             ConfigPrinters.ToString);
+        AddParsedProp("gpt.personality",
+            gs => gs.ChatGpt.PersonalityPrompt,
+            ConfigParsers.String,
+            ConfigPrinters.ToString);
+        AddParsedProp("gpt.chathistory",
+            gs => gs.ChatGpt.ChatHistory,
+            int.TryParse,
+            ConfigPrinters.ToString,
+            val => val > 0);
         AddParsedProp("gpt.max_tokens",
             gs => gs.ChatGpt.MaxTokens,
             int.TryParse,
             ConfigPrinters.ToString,
             val => val > 0);
+        AddParsedProp("gpt.min_tokens",
+            gs => gs.ChatGpt.MinTokens,
+            int.TryParse,
+            ConfigPrinters.ToString,
+            val => val > 0);

         Migrate();
     }
@@ -68,5 +81,14 @@ public sealed class GamesConfigService : ConfigServiceBase<GamesConfig>
                 c.ChatBot = ChatBotImplementation.Cleverbot;
             });
         }
+
+        if (data.Version < 3)
+        {
+            ModifyConfig(c =>
+            {
+                c.Version = 3;
+                c.ChatGpt.ModelName = ChatGptModel.Gpt35Turbo;
+            });
+        }
     }
 }

View File

@@ -28,6 +28,7 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
     private readonly IPubSub _pubSub;
     private readonly IEmbedBuilderService _eb;
+    private readonly SearchesConfigService _config;

     public TypedKey<List<StreamData>> StreamsOnlineKey { get; }
     public TypedKey<List<StreamData>> StreamsOfflineKey { get; }
@@ -49,13 +50,15 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
         IHttpClientFactory httpFactory,
         Bot bot,
         IPubSub pubSub,
-        IEmbedBuilderService eb)
+        IEmbedBuilderService eb,
+        SearchesConfigService config)
     {
         _db = db;
         _client = client;
         _strings = strings;
         _pubSub = pubSub;
         _eb = eb;
+        _config = config;

         _streamTracker = new(httpFactory, creds);
@@ -69,34 +72,34 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
         {
             var ids = client.GetGuildIds();
             var guildConfigs = uow.Set<GuildConfig>()
                 .AsQueryable()
                 .Include(x => x.FollowedStreams)
                 .Where(x => ids.Contains(x.GuildId))
                 .ToList();

             _offlineNotificationServers = new(guildConfigs
                 .Where(gc => gc.NotifyStreamOffline)
                 .Select(x => x.GuildId)
                 .ToList());

             _deleteOnOfflineServers = new(guildConfigs
                 .Where(gc => gc.DeleteStreamOnlineMessage)
                 .Select(x => x.GuildId)
                 .ToList());

             var followedStreams = guildConfigs.SelectMany(x => x.FollowedStreams).ToList();

             _shardTrackedStreams = followedStreams.GroupBy(x => new
                 {
                     x.Type,
                     Name = x.Username.ToLower()
                 })
                 .ToList()
                 .ToDictionary(
                     x => new StreamDataKey(x.Key.Type, x.Key.Name.ToLower()),
                     x => x.GroupBy(y => y.GuildId)
                         .ToDictionary(y => y.Key,
                             y => y.AsEnumerable().ToHashSet()));

         // shard 0 will keep track of when there are no more guilds which track a stream
         if (client.ShardId == 0)
@@ -107,12 +110,12 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
                 _streamTracker.AddLastData(fs.CreateKey(), null, false);

             _trackCounter = allFollowedStreams.GroupBy(x => new
                 {
                     x.Type,
                     Name = x.Username.ToLower()
                 })
                 .ToDictionary(x => new StreamDataKey(x.Key.Type, x.Key.Name),
                     x => x.Select(fs => fs.GuildId).ToHashSet());
         }
     }
@@ -152,7 +155,7 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
                 continue;

             var deleteGroups = failingStreams.GroupBy(x => x.Type)
                 .ToDictionary(x => x.Key, x => x.Select(y => y.Name).ToList());

             await using var uow = _db.GetDbContext();
             foreach (var kvp in deleteGroups)
@@ -165,9 +168,9 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
                     string.Join(", ", kvp.Value));

                 var toDelete = uow.Set<FollowedStream>()
                     .AsQueryable()
                     .Where(x => x.Type == kvp.Key && kvp.Value.Contains(x.Username))
                     .ToList();

                 uow.RemoveRange(toDelete);
                 await uow.SaveChangesAsync();
@@ -246,13 +249,13 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
             if (_shardTrackedStreams.TryGetValue(key, out var fss))
             {
                 await fss
                     // send offline stream notifications only to guilds which enable it with .stoff
                     .SelectMany(x => x.Value)
                     .Where(x => _offlineNotificationServers.Contains(x.GuildId))
                     .Select(fs => _client.GetGuild(fs.GuildId)
                         ?.GetTextChannel(fs.ChannelId)
                         ?.EmbedAsync(GetEmbed(fs.GuildId, stream)))
                     .WhenAll();
             }
         }
     }
@@ -266,28 +269,28 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
             if (_shardTrackedStreams.TryGetValue(key, out var fss))
             {
                 var messages = await fss.SelectMany(x => x.Value)
                     .Select(async fs =>
                     {
                         var textChannel = _client.GetGuild(fs.GuildId)?.GetTextChannel(fs.ChannelId);
                         if (textChannel is null)
                             return default;

                         var rep = new ReplacementBuilder().WithOverride("%user%", () => fs.Username)
                             .WithOverride("%platform%", () => fs.Type.ToString())
                             .Build();

                         var message = string.IsNullOrWhiteSpace(fs.Message) ? "" : rep.Replace(fs.Message);

                         var msg = await textChannel.EmbedAsync(GetEmbed(fs.GuildId, stream, false), message);

                         // only cache the ids of channel/message pairs
-                        if(_deleteOnOfflineServers.Contains(fs.GuildId))
+                        if (_deleteOnOfflineServers.Contains(fs.GuildId))
                             return (textChannel.Id, msg.Id);
                         else
                             return default;
                     })
                     .WhenAll();

                 // push online stream messages to redis
@@ -297,16 +300,15 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
                 try
                 {
                     var pairs = messages
                         .Where(x => x != default)
                         .Select(x => (x.Item1, x.Item2))
                         .ToList();

                     if (pairs.Count > 0)
                         await OnlineMessagesSent(key.Type, key.Name, pairs);
                 }
                 catch
                 {
                 }
             }
         }
@@ -384,10 +386,10 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
         await using (var uow = _db.GetDbContext())
         {
             var fss = uow.Set<FollowedStream>()
                 .AsQueryable()
                 .Where(x => x.GuildId == guildId)
                 .OrderBy(x => x.Id)
                 .ToList();

             // out of range
             if (fss.Count <= index)
@@ -450,7 +452,9 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
                 GuildId = guildId
             };

-            if (gc.FollowedStreams.Count >= 10)
+            var config = _config.Data;
+            if (config.FollowedStreams.MaxCount is not -1
+                && gc.FollowedStreams.Count >= config.FollowedStreams.MaxCount)
                 return null;

             gc.FollowedStreams.Add(fs);
@@ -475,10 +479,10 @@ public sealed class StreamNotificationService : INService, IReadyExecutor
     public IEmbedBuilder GetEmbed(ulong guildId, StreamData status, bool showViewers = true)
     {
         var embed = _eb.Create()
             .WithTitle(status.Name)
             .WithUrl(status.StreamUrl)
             .WithDescription(status.StreamUrl)
             .AddField(GetText(guildId, strs.status), status.IsLive ? "🟢 Online" : "🔴 Offline", true);

         if (showViewers)
         {

View File

@@ -55,6 +55,15 @@ Use a fully qualified url. Example: https://my-invidious-instance.mydomain.com
 Instances specified must have api available.
 You check that by opening an api endpoint in your browser. For example: https://my-invidious-instance.mydomain.com/api/v1/trending")]
     public List<string> InvidiousInstances { get; set; } = new List<string>();
+
+    [Comment("Maximum number of followed streams per server")]
+    public FollowedStreamConfig FollowedStreams { get; set; } = new FollowedStreamConfig();
+}
+
+public sealed class FollowedStreamConfig
+{
+    [Comment("Maximum number of streams that each server can follow. -1 for infinite")]
+    public int MaxCount { get; set; } = 10;
 }

 public enum YoutubeSearcher

View File

@@ -28,6 +28,11 @@ public class SearchesConfigService : ConfigServiceBase<SearchesConfig>
             ConfigParsers.InsensitiveEnum,
             ConfigPrinters.ToString);

+        AddParsedProp("followedStreams.maxCount",
+            sc => sc.FollowedStreams.MaxCount,
+            ConfigParsers.InsensitiveEnum,
+            ConfigPrinters.ToString);
+
         Migrate();
     }
@@ -41,5 +46,13 @@ public class SearchesConfigService : ConfigServiceBase<SearchesConfig>
                 c.WebSearchEngine = WebSearchEngine.Google_Scrape;
             });
         }
+
+        if (data.Version < 2)
+        {
+            ModifyConfig(c =>
+            {
+                c.Version = 2;
+            });
+        }
     }
 }

View File

@@ -58,6 +58,7 @@
<PackageReference Include="Scrutor" Version="4.2.0" /> <PackageReference Include="Scrutor" Version="4.2.0" />
<PackageReference Include="Serilog.Sinks.Console" Version="4.0.1" /> <PackageReference Include="Serilog.Sinks.Console" Version="4.0.1" />
<PackageReference Include="Serilog.Sinks.Seq" Version="5.1.1" /> <PackageReference Include="Serilog.Sinks.Seq" Version="5.1.1" />
<PackageReference Include="SharpToken" Version="1.2.14" />
<PackageReference Include="SixLabors.Fonts" Version="1.0.0-beta17" /> <PackageReference Include="SixLabors.Fonts" Version="1.0.0-beta17" />
<PackageReference Include="SixLabors.ImageSharp" Version="2.1.3" /> <PackageReference Include="SixLabors.ImageSharp" Version="2.1.3" />
<PackageReference Include="SixLabors.ImageSharp.Drawing" Version="1.0.0-beta14" /> <PackageReference Include="SixLabors.ImageSharp.Drawing" Version="1.0.0-beta14" />

View File

@@ -7,7 +7,7 @@ namespace NadekoBot.Services;
 public sealed class StatsService : IStatsService, IReadyExecutor, INService
 {
-    public const string BOT_VERSION = "4.3.18";
+    public const string BOT_VERSION = "4.3.19";

     public string Author
         => "Kwoth#2452";

View File

@@ -1,5 +1,5 @@
 # DO NOT CHANGE
-version: 2
+version: 3
 # Hangman related settings (.hangman command)
 hangman:
   # The amount of currency awarded to the winner of a hangman game
@@ -57,14 +57,19 @@ raceAnimals:
 # Which chatbot API should bot use.
 # 'cleverbot' - bot will use Cleverbot API.
 # 'gpt3' - bot will use GPT-3 API
-chatBot: gpt3
+chatBot: Gpt3
 chatGpt:
   # Which GPT-3 Model should bot use.
-  # 'ada001' - cheapest and fastest
-  # 'babbage001' - 2nd option
-  # 'curie001' - 3rd option
-  # 'davinci003' - Most expensive, slowest
-  model: davinci003
+  # gpt35turbo - cheapest
+  # gpt4 - 30x more expensive, higher quality
+  # gp432k - same model as above, but with a 32k token limit
+  modelName: Gpt35Turbo
+  # How should the chat bot behave, whats its personality? (Usage of this counts towards the max tokens)
+  personalityPrompt: You are a chat bot willing to have a conversation with anyone about anything.
+  # The maximum number of messages in a conversation that can be remembered. (This will increase the number of tokens used)
+  chatHistory: 5
   # The maximum number of tokens to use per GPT-3 API call
   maxTokens: 100
+  # The minimum number of tokens to use per GPT-3 API call, such that chat history is removed to make room.
+  minTokens: 30

View File

@@ -1,5 +1,5 @@
 # DO NOT CHANGE
-version: 1
+version: 2
 # Which engine should .search command
 # 'google_scrape' - default. Scrapes the webpage for results. May break. Requires no api keys.
 # 'google' - official google api. Requires googleApiKey and google.searchId set in creds.yml
@@ -41,3 +41,7 @@ searxInstances: []
 # Instances specified must have api available.
 # You check that by opening an api endpoint in your browser. For example: https://my-invidious-instance.mydomain.com/api/v1/trending
 invidiousInstances: []
+# Maximum number of followed streams per server
+followedStreams:
+  # Maximum number of streams that each server can follow. -1 for infinite
+  maxCount: 10
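
Besides editing `games.yml` and `searches.yml` directly, the new settings should also be adjustable at runtime through the bot's owner-only `.config` command. The keys below are exactly the ones registered in GamesConfigService and SearchesConfigService above, and enum values parse case-insensitively; the `.config <group> <key> <value>` shape and the `games`/`searches` group names are assumptions based on the bot's usual config pattern, not part of this diff:

```
.config games gpt.modelName gpt4
.config games gpt.chathistory 10
.config games gpt.min_tokens 50
.config searches followedStreams.maxCount -1
```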