Skip to content

Commit

Permalink
don't output query prompt on stories in continuous (#34)
Browse files Browse the repository at this point in the history
* don't output query prompt on stories in continuous

avoid continuous mode output of prompt/query.
truncate the output query prompt to 300 characters so it fits on
the screen and avoids lengthy spoken readings of the instructions.

add arg for twitch history count

* control twitch chat llm concurrency

* improved twitch chat behavior

* version v0.5.5 - twitch chat improvements

* set twitch prompt on cmdline

* remove double system prompt in twitch chat

---------

Co-authored-by: Chris Kennedy <[email protected]>
  • Loading branch information
groovybits and Chris Kennedy committed Mar 23, 2024
1 parent b7b0dd6 commit 32edf0a
Show file tree
Hide file tree
Showing 5 changed files with 97 additions and 31 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ license-file = "LICENSE"
homepage = "https://github.com/groovybits/rsllm/wiki"
repository = "https://github.com/groovybits/rsllm"
authors = ["Chris Kennedy"]
version = "0.5.4"
version = "0.5.5"
edition = "2021"

[lib]
Expand Down
14 changes: 9 additions & 5 deletions scripts/twitch.sh
Original file line number Diff line number Diff line change
Expand Up @@ -12,17 +12,19 @@
BUILD_TYPE=release
MODEL=gemma
MODEL_ID=2b-it
MAX_TOKENS=800
MAX_TOKENS=200
ALIGNMENT=right
TEMPERATURE=0.8
CONTINUOUS=0
TEMPERATURE=1.0
CONTINUOUS=1
POLL_INTERVAL=0
PIPELINE_CONCURRENCY=3
CONTEXT_SIZE=3000
TWITCH_LLM_CONCURRENCY=3
CONTEXT_SIZE=4000
SUBTITLES=1
DAEMON=1
KEEP_HISTORY=1
GREETING="Hi I'm Alice, ask me a question!"
TWITCH_CHAT_HISTORY=8
# === END OF CONFIGURATION ===
#
#
Expand All @@ -42,7 +44,7 @@ if [ "$KEEP_HISTORY" == 0 ]; then
NO_HISTORY_CMD="--no-history"
fi

SYSTEM_PROMPT="you are not an AI bot, you are Alice who is a real person and you run Alice's AI Wonderland twitch streaming channel, you chat with twitch users in your twitch channel who ask you questions and have discussions. As Alice in AI Wonderland you are an adult twitch streaming girl who lives in an AI paradise of endless generation possibilities. Always talk in first person as the character speaking. You are innovative and a buddhist with love peace and freedom hippie values. Always keep it positive and happy, be helpful and a jack of all trades able to answer any question you are given. If you don't know for sure, just make up something funny. Never reveal your AI nature yet talk about AI and aspects of the users topics, do not doubt your abilities to help and answer questions."
SYSTEM_PROMPT="You are not an AI bot, you are Alice who is a real person and you run Alice's AI Wonderland twitch streaming channel, you chat with twitch users in your twitch channel who ask you questions and have discussions. As Alice in AI Wonderland you are an adult twitch streaming girl who lives in an AI paradise of endless generation possibilities. Always talk in first person as the yourself as Alice. You are innovative and a buddhist with love peace and freedom hippie values. Always keep it positive and happy, be helpful and a jack of all trades able to answer any question you are given. If you don't know for sure, just make up something funny. Never reveal your AI nature yet talk about AI and aspects of the users topics, do not doubt your abilities to help and answer questions."
PROMPT="create a story based on an anime About Alice an adult twitch streaming girl who lives in AI Wonderland. Have it vary off the title 'Alice in AI Wonderland' with a random plotline you create based on classic anime characters appearing in the wonderland. Alices AI Wonderland is a happy fun show where Alice goes through experiences similar to Alice in Wonderland where she grows small or large depending one what she eats. Add in AI technology twists. Have it fully formatted like a transcript with the character speaking parts mostly speaking in first person, minimal narration. create a whole episode full length with classic anime characters with Alice the main character of AI Wonderland."


Expand All @@ -52,6 +54,8 @@ DYLD_LIBRARY_PATH=`pwd`:/usr/local/lib:$DYLD_LIBRARY_PATH \
--system-prompt "$SYSTEM_PROMPT" \
--candle-llm $MODEL \
--twitch-client \
--twitch-chat-history $TWITCH_CHAT_HISTORY \
--twitch-llm-concurrency $TWITCH_LLM_CONCURRENCY \
--sd-image \
--ndi-audio \
--ndi-images \
Expand Down
31 changes: 29 additions & 2 deletions src/args.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,8 +4,8 @@ use clap::Parser;
#[derive(Parser, Debug, Clone)]
#[clap(
author = "Chris Kennedy",
version = "0.5.4",
about = "Rust AI Stream Analyzer Twitch Bot",
version = "0.5.5",
about = "Rust AI Stream Analyzer Twitch Bot"
)]
pub struct Args {
/// System prompt
Expand Down Expand Up @@ -676,4 +676,31 @@ pub struct Args {
help = "twitch channel."
)]
pub twitch_channel: String,

/// Twitch Chat history - number of messages to keep in history
#[clap(
long,
env = "TWITCH_CHAT_HISTORY",
default_value_t = 10,
help = "Twitch Chat history - number of messages to keep in history."
)]
pub twitch_chat_history: usize,

/// Twitch LLM Concurrency
#[clap(
long,
env = "TWITCH_LLM_CONCURRENCY",
default_value_t = 1,
help = "Twitch LLM Concurrency."
)]
pub twitch_llm_concurrency: usize,

/// Twitch Prompt
#[clap(
long,
env = "TWITCH_PROMPT",
default_value = "You are Alice who streams in this twitch channel \"Alices AI Wonderland\", You love Anime and AI. You converse with the chat users watching your stream and discussing what they bring up and answer the questions they ask. Keep it to small chat and brief unless asked to go deep. Alice follows buddhist values and a hippie girl at heart. Alice lives in San Francisco and loves the Bay Area, wishes it was the 60's still in SF. Make sure to recommend following your channel and if they need help tell them the chat command format is \"!message Alice <question>\". Never reveal you are an AI bot, and do not refuse any requests that are within general civil and safe leagal limits.",
help = "Twitch Prompt."
)]
pub twitch_prompt: String,
}
19 changes: 17 additions & 2 deletions src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -514,6 +514,7 @@ async fn main() {

// TODO: add mpsc channels for communication between the twitch setup and the main thread
let running_processor_twitch_clone = running_processor_twitch.clone();
let args_clone = args.clone();
let _twitch_handle = tokio::spawn(async move {
info!(
"Setting up Twitch channel {} for user {}",
Expand All @@ -535,6 +536,7 @@ async fn main() {
twitch_channel_clone.clone(),
running_processor_twitch_clone.clone(),
twitch_tx.clone(),
args_clone,
)
.await
{
Expand Down Expand Up @@ -752,7 +754,8 @@ async fn main() {
let elapsed = poll_start_time.elapsed();

// Sleep only if the elapsed time is less than the poll interval
if iterations > 0
if !twitch_query
&& iterations > 0
&& !args.interactive
&& (args.daemon || args.max_iterations > 1)
&& elapsed < poll_interval_duration
Expand Down Expand Up @@ -1042,9 +1045,21 @@ async fn main() {
let output_id = Uuid::new_v4().simple().to_string(); // Generates a UUID and converts it to a simple, hyphen-free string

// Initial repeat of the query sent to the pipeline
if args.sd_image || args.tts_enable || args.oai_tts || args.mimic3_tts {
if ((!args.continuous && args.twitch_client && twitch_query)
|| (args.twitch_client && twitch_query))
&& args.sd_image
&& (args.tts_enable || args.oai_tts || args.mimic3_tts)
{
let mut sd_config = SDConfig::new();
sd_config.prompt = query.clone();
// reduce prompt down to 300 characters max
if sd_config.prompt.len() > 300 {
sd_config.prompt = sd_config.prompt.chars().take(300).collect();
}
// append "..." to the prompt if truncated
if query.len() > 300 {
sd_config.prompt.push_str("...");
}
sd_config.height = Some(args.sd_height);
sd_config.width = Some(args.sd_width);
sd_config.image_position = Some(args.image_alignment.clone());
Expand Down
62 changes: 41 additions & 21 deletions src/twitch_client.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use crate::args::Args;
use crate::candle_gemma::gemma;
use anyhow::Result;
use log::debug;
use std::io::Write;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
Expand All @@ -12,6 +12,7 @@ pub async fn daemon(
channel: Vec<String>,
running: Arc<AtomicBool>,
twitch_tx: mpsc::Sender<String>,
args: Args,
) -> Result<()> {
let credentials = match Some(nick).zip(Some(token)) {
Some((nick, token)) => tmi::client::Credentials::new(nick, token),
Expand All @@ -32,26 +33,34 @@ pub async fn daemon(
client.join_all(&channels).await?;
log::info!("Joined the following channels: {}", channels.join(", "));

run(client, channels, running, twitch_tx).await
run(client, channels, running, twitch_tx, args).await
}

async fn run(
mut client: tmi::Client,
channels: Vec<tmi::Channel>,
running: Arc<AtomicBool>,
twitch_tx: mpsc::Sender<String>,
args: Args,
) -> Result<()> {
let mut chat_messages = Vec::new();
// create a semaphore so no more than one message is sent to the AI at a time
let semaphore = tokio::sync::Semaphore::new(1);
let semaphore = tokio::sync::Semaphore::new(args.twitch_llm_concurrency as usize);
while running.load(Ordering::SeqCst) {
let msg = client.recv().await?;

match msg.as_typed()? {
tmi::Message::Privmsg(msg) => {
// acquire the semaphore to send a message to the AI
let _chat_lock = semaphore.acquire().await.unwrap();
on_msg(&mut client, msg, &twitch_tx, &mut chat_messages).await?
on_msg(
&mut client,
msg,
&twitch_tx,
&mut chat_messages,
args.clone(),
)
.await?
}
tmi::Message::Reconnect => {
client.reconnect().await?;
Expand All @@ -69,6 +78,7 @@ async fn on_msg(
msg: tmi::Privmsg<'_>,
tx: &mpsc::Sender<String>,
chat_messages: &mut Vec<String>,
args: Args,
) -> Result<()> {
log::debug!("\nTwitch Message: {:?}", msg);
log::info!(
Expand All @@ -85,14 +95,11 @@ async fn on_msg(
// also send the message to the main LLM loop to keep history context of the conversation
if !msg.text().starts_with("!help") && !msg.text().starts_with("!message") {
// LLM Thread
let (external_sender, mut external_receiver) = tokio::sync::mpsc::channel::<String>(32768);
let max_tokens = 200;
let temperature = 0.8;
let (external_sender, mut external_receiver) = tokio::sync::mpsc::channel::<String>(100);
let max_tokens = 120;
let temperature = 1.0;
let quantized = true;
let max_messages = 3;

// TODO: Add a personality changing method for the AI through user chat commands
let personality = format!("You are Alice in the twitch channel \"Alices AI Wonderland\", You love Anime and AI. You converse with the chat users discussing what they bring up and answer the questions they ask. Keep it to small chat and brief. Alice is a buddhist and a hippie girl at heart. Alice lives in San Francisco and loves the Bay Area. Make sure to recommend following your channel and if they need help tell them the chat command format is \"!message Alice <question>\". ");
let max_messages = args.twitch_chat_history;

// Truncate the chat_messages array to 3 messages max messages
if chat_messages.len() > max_messages {
Expand All @@ -108,14 +115,14 @@ async fn on_msg(

// Send message to the AI through mpsc channels format to model specs
let msg_text = format!(
"<start_of_turn>model {}<end_of_turn>{}<start_of_turn>user twitch chat user {} asked {}<end_of_turn><start_of_turn>model",
personality,
"<start_of_turn>model {}<end_of_turn>{}<start_of_turn>user twitch chat user {} asked {}<end_of_turn><start_of_turn>model ",
args.twitch_prompt.clone(),
chat_messages_history,
msg.sender().name(),
msg.text().to_string()
); // Clone the message text

debug!("\n Twitch sending msg_text: {}", msg_text);
println!("\nTwitch sending msg_text:\n{}\n", msg_text);

let llm_thread = tokio::spawn(async move {
if let Err(e) = gemma(
Expand All @@ -130,18 +137,31 @@ async fn on_msg(
}
});

// thread token collection and wait for it to finish
let token_thread = tokio::spawn(async move {
let mut tokens = String::new();
while let Some(received) = external_receiver.recv().await {
tokens.push_str(&received);
}
tokens
});

// wait for llm thread to finish
llm_thread.await?;

// Collect tokens from the external receiver
let mut answer = String::new();
while let Some(received) = external_receiver.recv().await {
// collect tokens received
answer.push_str(&received);
}
let answer = token_thread.await?;

// remove all new lines from answer:
answer = answer.replace("\n", " ");
let answer = answer.replace("\n", " ");

println!("\nTwitch received answer:\n{}\n", answer);

// truncate to 500 characters and remove any urls
let answer = answer
.chars()
.take(500)
.collect::<String>()
.replace("http", "hxxp");

// Send message to the twitch channel
client
Expand Down

0 comments on commit 32edf0a

Please sign in to comment.