####################################
#### Server & DB Configurations ####
####################################

# Cache Configs
CACHE_STORE=database # Defaults to database. Other available cache store: redis and filesystem
REDIS_URL= # Redis URL - could be a local redis instance or cloud hosted redis. Also support rediss:// URLs
PGLITE_DATA_DIR= # ../pgLite/ if selecting a directory --- or memory:// if selecting in memory

# Eliza Port Config
SERVER_PORT=3000
VITE_SERVER_PORT=${SERVER_PORT}

# Supabase Configuration
SUPABASE_URL=
SUPABASE_ANON_KEY=

# Comma separated list of remote character urls (optional)
REMOTE_CHARACTER_URLS=

# Logging
DEFAULT_LOG_LEVEL=warn
LOG_JSON_FORMAT=false # Print everything in logger as json; false by default

###############################
#### Client Configurations ####
###############################

# Discord Configuration
DISCORD_APPLICATION_ID=
DISCORD_API_TOKEN= # Bot token
DISCORD_VOICE_CHANNEL_ID= # The ID of the voice channel the bot should join (optional)

# Farcaster Neynar Configuration
FARCASTER_FID= # The FID associated with the account you are sending casts from
FARCASTER_NEYNAR_API_KEY= # Neynar API key: https://neynar.com/
FARCASTER_NEYNAR_SIGNER_UUID= # Signer for the account you are sending casts from. Create a signer here: https://dev.neynar.com/app
FARCASTER_DRY_RUN=false # Set to true if you want to run the bot without actually publishing casts
FARCASTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for farcaster interactions (replies and mentions)

# Telegram Configuration
TELEGRAM_BOT_TOKEN=

# Twitter/X Configuration
TWITTER_DRY_RUN=false
TWITTER_USERNAME= # Account username
TWITTER_PASSWORD= # Account password
TWITTER_EMAIL= # Account email
TWITTER_2FA_SECRET=
TWITTER_POLL_INTERVAL=120 # How often (in seconds) the bot should check for interactions
TWITTER_SEARCH_ENABLE=false # Enable timeline search, WARNING this greatly increases your chance of getting banned
TWITTER_TARGET_USERS= # Comma separated list of Twitter user names to interact with
TWITTER_RETRY_LIMIT= # Maximum retry attempts for Twitter login
TWITTER_SPACES_ENABLE=false # Enable or disable Twitter Spaces logic

# Post Interval Settings (in minutes)
POST_INTERVAL_MIN= # Default: 90
POST_INTERVAL_MAX= # Default: 180
POST_IMMEDIATELY= # Default: false

# Twitter action processing configuration
ACTION_INTERVAL= # Interval in minutes between action processing runs (default: 5 minutes)
ENABLE_ACTION_PROCESSING=false # Set to true to enable the action processing loop
MAX_ACTIONS_PROCESSING=1 # Maximum number of actions (e.g., retweets, likes) to process in a single cycle. Helps prevent excessive or uncontrolled actions.
ACTION_TIMELINE_TYPE=foryou # Type of timeline to interact with. Options: "foryou" or "following". Default: "foryou"

# CONFIGURATION FOR APPROVING TWEETS BEFORE IT GETS POSTED
TWITTER_APPROVAL_DISCORD_CHANNEL_ID= # Channel ID for the Discord bot to listen and send approval messages
TWITTER_APPROVAL_DISCORD_BOT_TOKEN= # Discord bot token (this could be a different bot token from DISCORD_API_TOKEN)
TWITTER_APPROVAL_ENABLED= # Enable or disable Twitter approval logic. Default: false
TWITTER_APPROVAL_CHECK_INTERVAL=60000 # Default: 60 seconds

# WhatsApp Cloud API Configuration
WHATSAPP_ACCESS_TOKEN= # Permanent access token from Facebook Developer Console
WHATSAPP_PHONE_NUMBER_ID= # Phone number ID from WhatsApp Business API
WHATSAPP_BUSINESS_ACCOUNT_ID= # Business Account ID from Facebook Business Manager
WHATSAPP_WEBHOOK_VERIFY_TOKEN= # Custom string for webhook verification
WHATSAPP_API_VERSION=v17.0 # WhatsApp API version (default: v17.0)

# Direct Client Setting
EXPRESS_MAX_PAYLOAD= # Default: 100kb

#######################################
#### Model Provider Configurations ####
#######################################

# OpenAI Configuration
OPENAI_API_KEY= # OpenAI API key, starting with sk-
OPENAI_API_URL= # OpenAI API Endpoint (optional), Default: https://api.openai.com/v1
SMALL_OPENAI_MODEL= # Default: gpt-4o-mini
MEDIUM_OPENAI_MODEL= # Default: gpt-4o
LARGE_OPENAI_MODEL= # Default: gpt-4o
EMBEDDING_OPENAI_MODEL= # Default: text-embedding-3-small
IMAGE_OPENAI_MODEL= # Default: dall-e-3
USE_OPENAI_EMBEDDING= # Set to TRUE for OpenAI/1536, leave blank for local

# Community Plugin for OpenAI Configuration
ENABLE_OPEN_AI_COMMUNITY_PLUGIN=false
OPENAI_DEFAULT_MODEL=
OPENAI_MAX_TOKENS=
OPENAI_TEMPERATURE=

# Atoma SDK Configuration
ATOMASDK_BEARER_AUTH= # Atoma SDK Bearer Auth token
ATOMA_API_URL= # Default: https://api.atoma.network/v1
SMALL_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct
MEDIUM_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct
LARGE_ATOMA_MODEL= # Default: meta-llama/Llama-3.3-70B-Instruct

# Eternal AI's Decentralized Inference API
ETERNALAI_URL=
ETERNALAI_MODEL= # Default: "NousResearch/Hermes-3-Llama-3.1-70B-FP8"
ETERNALAI_CHAIN_ID=8453 # Default: "8453"
ETERNALAI_RPC_URL= # Ex: https://mainnet.base.org/
ETERNALAI_AGENT_CONTRACT_ADDRESS= # Ex: 0xAed016e060e2fFE3092916b1650Fc558D62e1CCC
ETERNALAI_AGENT_ID= # Ex: 1711
ETERNALAI_API_KEY=
ETERNALAI_LOG=false # Default: false

# Hyperbolic Configuration
HYPERBOLIC_API_KEY= # Hyperbolic API Key
HYPERBOLIC_MODEL=
IMAGE_HYPERBOLIC_MODEL= # Default: FLUX.1-dev
SMALL_HYPERBOLIC_MODEL= # Default: meta-llama/Llama-3.2-3B-Instruct
MEDIUM_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-70B-Instruct
LARGE_HYPERBOLIC_MODEL= # Default: meta-llama/Meta-Llama-3.1-405-Instruct

# Infera Configuration
INFERA_API_KEY= # visit api.infera.org/docs to obtain an API key under /signup_user
INFERA_MODEL= # Default: llama3.2:latest
INFERA_SERVER_URL= # Default: https://api.infera.org/
SMALL_INFERA_MODEL= # Recommended: llama3.2:latest
MEDIUM_INFERA_MODEL= # Recommended: mistral-nemo:latest
LARGE_INFERA_MODEL= # Recommended: mistral-small:latest

# Venice Configuration
VENICE_API_KEY= # generate from venice settings
SMALL_VENICE_MODEL= # Default: llama-3.3-70b
MEDIUM_VENICE_MODEL= # Default: llama-3.3-70b
LARGE_VENICE_MODEL= # Default: llama-3.1-405b
IMAGE_VENICE_MODEL= # Default: fluently-xl

# Nineteen.ai Configuration
NINETEEN_AI_API_KEY= # Get a free api key from https://nineteen.ai/app/api
SMALL_NINETEEN_AI_MODEL= # Default: unsloth/Llama-3.2-3B-Instruct
MEDIUM_NINETEEN_AI_MODEL= # Default: unsloth/Meta-Llama-3.1-8B-Instruct
LARGE_NINETEEN_AI_MODEL= # Default: hugging-quants/Meta-Llama-3.1-70B-Instruct-AWQ-INT4
IMAGE_NINETEEN_AI_MODE= # Default: dataautogpt3/ProteusV0.4-Lightning

# Akash Chat API Configuration docs: https://chatapi.akash.network/documentation
AKASH_CHAT_API_KEY= # Get from https://chatapi.akash.network/
SMALL_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-2-3B-Instruct
MEDIUM_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-3-70B-Instruct
LARGE_AKASH_CHAT_API_MODEL= # Default: Meta-Llama-3-1-405B-Instruct-FP8

# Livepeer configuration
LIVEPEER_GATEWAY_URL=https://dream-gateway.livepeer.cloud # Free inference gateways and docs: https://livepeer-eliza.com/
IMAGE_LIVEPEER_MODEL= # Default: ByteDance/SDXL-Lightning
SMALL_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
MEDIUM_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct
LARGE_LIVEPEER_MODEL= # Default: meta-llama/Meta-Llama-3.1-8B-Instruct

# Speech Synthesis
ELEVENLABS_XI_API_KEY= # API key from elevenlabs

# Transcription Provider
TRANSCRIPTION_PROVIDER= # Default: local (possible values: openai, deepgram, local)

# ElevenLabs Settings
ELEVENLABS_MODEL_ID=eleven_multilingual_v2
ELEVENLABS_VOICE_ID=21m00Tcm4TlvDq8ikWAM
ELEVENLABS_VOICE_STABILITY=0.5
ELEVENLABS_VOICE_SIMILARITY_BOOST=0.9
ELEVENLABS_VOICE_STYLE=0.66
ELEVENLABS_VOICE_USE_SPEAKER_BOOST=false
ELEVENLABS_OPTIMIZE_STREAMING_LATENCY=4
ELEVENLABS_OUTPUT_FORMAT=pcm_16000

# OpenRouter Configuration
OPENROUTER_API_KEY= # OpenRouter API Key
OPENROUTER_MODEL= # Default: uses hermes 70b/405b
SMALL_OPENROUTER_MODEL=
MEDIUM_OPENROUTER_MODEL=
LARGE_OPENROUTER_MODEL=

# REDPILL Configuration (https://docs.red-pill.ai/get-started/supported-models)
REDPILL_API_KEY= # REDPILL API Key
REDPILL_MODEL=
SMALL_REDPILL_MODEL= # Default: gpt-4o-mini
MEDIUM_REDPILL_MODEL= # Default: gpt-4o
LARGE_REDPILL_MODEL= # Default: gpt-4o

# Grok Configuration
GROK_API_KEY= # GROK/xAI API Key
SMALL_GROK_MODEL= # Default: grok-2-1212
MEDIUM_GROK_MODEL= # Default: grok-2-1212
LARGE_GROK_MODEL= # Default: grok-2-1212
EMBEDDING_GROK_MODEL= # Default: grok-2-1212

# Ollama Configuration
OLLAMA_SERVER_URL= # Default: localhost:11434
OLLAMA_MODEL=
USE_OLLAMA_EMBEDDING= # Set to TRUE for OLLAMA/1024, leave blank for local
OLLAMA_EMBEDDING_MODEL= # Default: mxbai-embed-large
SMALL_OLLAMA_MODEL= # Default: llama3.2
MEDIUM_OLLAMA_MODEL= # Default: hermes3
LARGE_OLLAMA_MODEL= # Default: hermes3:70b

# Google Configuration
GOOGLE_MODEL=
SMALL_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
MEDIUM_GOOGLE_MODEL= # Default: gemini-1.5-flash-latest
LARGE_GOOGLE_MODEL= # Default: gemini-1.5-pro-latest
EMBEDDING_GOOGLE_MODEL= # Default: text-embedding-004

# Mistral Configuration
MISTRAL_MODEL=
SMALL_MISTRAL_MODEL= # Default: mistral-small-latest
MEDIUM_MISTRAL_MODEL= # Default: mistral-large-latest
LARGE_MISTRAL_MODEL= # Default: mistral-large-latest

# Groq Configuration
GROQ_API_KEY= # Starts with gsk_
SMALL_GROQ_MODEL= # Default: llama-3.1-8b-instant
MEDIUM_GROQ_MODEL= # Default: llama-3.3-70b-versatile
LARGE_GROQ_MODEL= # Default: llama-3.2-90b-vision-preview
EMBEDDING_GROQ_MODEL= # Default: llama-3.1-8b-instant

# LlamaLocal Configuration
LLAMALOCAL_PATH= # Default: "" which is the current directory in plugin-node/dist/ which gets destroyed and recreated on every build

# NanoGPT Configuration
SMALL_NANOGPT_MODEL= # Default: gpt-4o-mini
MEDIUM_NANOGPT_MODEL= # Default: gpt-4o
LARGE_NANOGPT_MODEL= # Default: gpt-4o

# Anthropic Configuration
ANTHROPIC_API_KEY= # For Claude
SMALL_ANTHROPIC_MODEL= # Default: claude-3-haiku-20240307
MEDIUM_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022
LARGE_ANTHROPIC_MODEL= # Default: claude-3-5-sonnet-20241022

# Heurist Configuration
HEURIST_API_KEY= # Get from https://heurist.ai/dev-access
SMALL_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
MEDIUM_HEURIST_MODEL= # Default: meta-llama/llama-3-70b-instruct
LARGE_HEURIST_MODEL= # Default: meta-llama/llama-3.3-70b-instruct
HEURIST_IMAGE_MODEL= # Default: FLUX.1-dev
HEURIST_EMBEDDING_MODEL= # Default: BAAI/bge-large-en-v1.5
USE_HEURIST_EMBEDDING= # Set to TRUE for HEURIST embedding, leave blank for local

# Gaianet Configuration
GAIANET_MODEL=
GAIANET_SERVER_URL=
SMALL_GAIANET_MODEL= # Default: llama3b
SMALL_GAIANET_SERVER_URL= # Default: https://llama3b.gaia.domains/v1
MEDIUM_GAIANET_MODEL= # Default: llama
MEDIUM_GAIANET_SERVER_URL= # Default: https://llama8b.gaia.domains/v1
LARGE_GAIANET_MODEL= # Default: qwen72b
LARGE_GAIANET_SERVER_URL= # Default: https://qwen72b.gaia.domains/v1
GAIANET_EMBEDDING_MODEL=
USE_GAIANET_EMBEDDING= # Set to TRUE for GAIANET/768, leave blank for local

# Volcengine Configuration
VOLENGINE_API_URL= # Volcengine API Endpoint, Default: https://open.volcengineapi.com/api/v3/
VOLENGINE_MODEL=
SMALL_VOLENGINE_MODEL= # Default: doubao-lite-128k
MEDIUM_VOLENGINE_MODEL= # Default: doubao-pro-128k
LARGE_VOLENGINE_MODEL= # Default: doubao-pro-256k
VOLENGINE_EMBEDDING_MODEL= # Default: doubao-embedding

# DeepSeek Configuration
DEEPSEEK_API_KEY= # Your DeepSeek API key
DEEPSEEK_API_URL= # Default: https://api.deepseek.com
SMALL_DEEPSEEK_MODEL= # Default: deepseek-chat
MEDIUM_DEEPSEEK_MODEL= # Default: deepseek-chat
LARGE_DEEPSEEK_MODEL= # Default: deepseek-chat

# fal.ai Configuration
FAL_API_KEY=
FAL_AI_LORA_PATH=

# LetzAI Configuration
LETZAI_API_KEY= # LetzAI API Key
LETZAI_MODELS= # list of Letzai models to add to each prompt, e.g.: "@modelname1, @modelname2"

# Galadriel Configuration
GALADRIEL_API_KEY=gal-* # Get from https://dashboard.galadriel.com/
SMALL_GALADRIEL_MODEL= # Default: gpt-4o-mini
MEDIUM_GALADRIEL_MODEL= # Default: gpt-4o
LARGE_GALADRIEL_MODEL= # Default: gpt-4o
GALADRIEL_FINE_TUNE_API_KEY= # Use an OpenAI key to use a fine-tuned model with the verified inference endpoint

# Remaining Provider Configurations
GOOGLE_GENERATIVE_AI_API_KEY= # Gemini API key
ALI_BAILIAN_API_KEY= # Ali Bailian API Key
NANOGPT_API_KEY= # NanoGPT API Key
TOGETHER_API_KEY= # Together API Key

######################################
#### Crypto Plugin Configurations ####
######################################

# CoinMarketCap / CMC
COINMARKETCAP_API_KEY=

# CoinGecko
COINGECKO_API_KEY=
COINGECKO_PRO_API_KEY=

# EVM
EVM_PRIVATE_KEY=
EVM_PROVIDER_URL=

# Avalanche
AVALANCHE_PRIVATE_KEY=
AVALANCHE_PUBLIC_KEY=

# Arthera
ARTHERA_PRIVATE_KEY=

# Solana
SOLANA_PRIVATE_KEY=
SOLANA_PUBLIC_KEY=
SOLANA_CLUSTER= # Default: devnet. Solana Cluster: 'devnet' | 'testnet' | 'mainnet-beta'
SOLANA_ADMIN_PRIVATE_KEY= # This wallet is used to verify NFTs
SOLANA_ADMIN_PUBLIC_KEY= # This wallet is used to verify NFTs
SOLANA_VERIFY_TOKEN= # Authentication token for calling the verification API

# Injective
INJECTIVE_PRIVATE_KEY=
INJECTIVE_PUBLIC_KEY=
INJECTIVE_NETWORK=

# Fallback Wallet Configuration (deprecated)
WALLET_PRIVATE_KEY=
WALLET_PUBLIC_KEY=

BIRDEYE_API_KEY=

# Solana Configuration
SOL_ADDRESS=So11111111111111111111111111111111111111112
SLIPPAGE=1
BASE_MINT=So11111111111111111111111111111111111111112
SOLANA_RPC_URL=https://api.mainnet-beta.solana.com
HELIUS_API_KEY=

# Abstract Configuration
ABSTRACT_ADDRESS=
ABSTRACT_PRIVATE_KEY=
ABSTRACT_RPC_URL=https://api.testnet.abs.xyz

# Starknet Configuration
STARKNET_ADDRESS=
STARKNET_PRIVATE_KEY=
STARKNET_RPC_URL=

# Lens Network Configuration
LENS_ADDRESS=
LENS_PRIVATE_KEY=

# Coinbase
COINBASE_COMMERCE_KEY= # From Coinbase developer portal
COINBASE_API_KEY= # From Coinbase developer portal
COINBASE_PRIVATE_KEY= # From Coinbase developer portal
COINBASE_GENERATED_WALLET_ID= # Not your address but the wallet ID from generating a wallet through the plugin
COINBASE_GENERATED_WALLET_HEX_SEED= # Not your address but the wallet hex seed from generating a wallet through the plugin and calling export
COINBASE_NOTIFICATION_URI= # For webhook plugin the uri you want to send the webhook to for dummy ones use https://webhook.site

# Coinbase AgentKit
CDP_API_KEY_NAME=
CDP_API_KEY_PRIVATE_KEY=
CDP_AGENT_KIT_NETWORK=base-sepolia # Optional: Defaults to base-sepolia

# Coinbase Charity Configuration
IS_CHARITABLE=false # Set to true to enable charity donations
CHARITY_ADDRESS_BASE=0x1234567890123456789012345678901234567890
CHARITY_ADDRESS_SOL=pWvDXKu6CpbKKvKQkZvDA66hgsTB6X2AgFxksYogHLV
CHARITY_ADDRESS_ETH=0x750EF1D7a0b4Ab1c97B7A623D7917CcEb5ea779C
CHARITY_ADDRESS_ARB=0x1234567890123456789012345678901234567890
CHARITY_ADDRESS_POL=0x1234567890123456789012345678901234567890

# thirdweb
THIRDWEB_SECRET_KEY= # Create key on thirdweb developer dashboard: https://thirdweb.com/

# Conflux Configuration
CONFLUX_CORE_PRIVATE_KEY=
CONFLUX_CORE_SPACE_RPC_URL=
CONFLUX_ESPACE_PRIVATE_KEY=
CONFLUX_ESPACE_RPC_URL=
CONFLUX_MEME_CONTRACT_ADDRESS=

# ZeroG
ZEROG_INDEXER_RPC=
ZEROG_EVM_RPC=
ZEROG_PRIVATE_KEY=
ZEROG_FLOW_ADDRESS=

# IQ6900
# Load json recorded on-chain through IQ
# Inscribe your json character file here: https://elizacodein.com/
IQ_WALLET_ADDRESS= # If you enter the wallet address used on the site, the most recently inscribed json will be loaded.
IQSOlRPC=

# Squid Router
SQUID_SDK_URL=https://apiplus.squidrouter.com # Default: https://apiplus.squidrouter.com
SQUID_INTEGRATOR_ID= # get integrator id through https://docs.squidrouter.com/
SQUID_EVM_ADDRESS=
SQUID_EVM_PRIVATE_KEY=
SQUID_API_THROTTLE_INTERVAL=1000 # Default: 1000; Used to throttle API calls to avoid rate limiting (in ms)

# TEE Configuration
# TEE_MODE options:
# - LOCAL: Uses simulator at localhost:8090 (for local development)
# - DOCKER: Uses simulator at host.docker.internal:8090 (for docker development)
# - PRODUCTION: No simulator, uses production endpoints
# Defaults to OFF if not specified
TEE_MODE=OFF # LOCAL | DOCKER | PRODUCTION
WALLET_SECRET_SALT= # ONLY define if you want to use TEE Plugin, otherwise it will throw errors

# TEE Verifiable Log Configuration
VLOG= # true/false; if you want to use TEE Verifiable Log, set this to "true"

# Web search API Configuration
TAVILY_API_KEY=

ENABLE_TEE_LOG=false # Set to true to enable TEE logging, only available when running eliza in TEE

# Flow Blockchain Configuration
FLOW_ADDRESS=
FLOW_PRIVATE_KEY= # Private key for SHA3-256 + P256 ECDSA
FLOW_NETWORK= # Default: mainnet
FLOW_ENDPOINT_URL= # Default: https://mainnet.onflow.org

# ICP
INTERNET_COMPUTER_PRIVATE_KEY=
INTERNET_COMPUTER_ADDRESS=

# Cloudflare AI Gateway
CLOUDFLARE_GW_ENABLED= # Set to true to enable Cloudflare AI Gateway
CLOUDFLARE_AI_ACCOUNT_ID= # Cloudflare AI Account ID - found in the Cloudflare Dashboard under AI Gateway
CLOUDFLARE_AI_GATEWAY_ID= # Cloudflare AI Gateway ID - found in the Cloudflare Dashboard under AI Gateway

# Aptos
APTOS_PRIVATE_KEY= # Aptos private key
APTOS_NETWORK= # Must be one of mainnet, testnet

# MultiversX
MVX_PRIVATE_KEY= # Multiversx private key
MVX_NETWORK= # must be one of mainnet, devnet, testnet

# NEAR
NEAR_WALLET_SECRET_KEY= # NEAR Wallet Secret Key
NEAR_WALLET_PUBLIC_KEY= # NEAR Wallet Public Key
NEAR_ADDRESS=
NEAR_SLIPPAGE=1
NEAR_RPC_URL=https://rpc.testnet.near.org
NEAR_NETWORK=testnet # or mainnet

# ZKsync Era Configuration
ZKSYNC_ADDRESS=
ZKSYNC_PRIVATE_KEY=

# Avail DA Configuration
AVAIL_ADDRESS=
AVAIL_SEED=
AVAIL_APP_ID=0
AVAIL_RPC_URL=wss://avail-turing.public.blastapi.io/ # (Default) Testnet: wss://avail-turing.public.blastapi.io/ | Mainnet: wss://avail-mainnet.public.blastapi.io/

# Marlin
TEE_MARLIN= # Set "yes" to enable the plugin
TEE_MARLIN_ATTESTATION_ENDPOINT= # Optional, default "http://127.0.0.1:1350"

# Ton
TON_PRIVATE_KEY= # Ton Mnemonic Seed Phrase Join With Empty String
TON_RPC_URL= # ton rpc

# Sui
SUI_PRIVATE_KEY= # Sui Mnemonic Seed Phrase (`sui keytool generate ed25519`) , Also support `suiprivatekeyxxxx` (sui keytool export --key-identity 0x63)
SUI_NETWORK= # must be one of mainnet, testnet, devnet, localnet

# Story
STORY_PRIVATE_KEY= # Story private key
STORY_API_BASE_URL= # Story API base URL
STORY_API_KEY= # Story API key
PINATA_JWT= # Pinata JWT for uploading files to IPFS

# Cosmos
COSMOS_RECOVERY_PHRASE= # 12 words recovery phrase (need to be in quotes, because of spaces)
COSMOS_AVAILABLE_CHAINS= # mantrachaintestnet2,cosmos # Array of chains

# Cronos zkEVM
CRONOSZKEVM_ADDRESS=
CRONOSZKEVM_PRIVATE_KEY=

# Fuel Ecosystem (FuelVM)
FUEL_WALLET_PRIVATE_KEY=

# Tokenizer Settings
TOKENIZER_MODEL= # Specify the tokenizer model to be used.
TOKENIZER_TYPE= # Options: tiktoken (for OpenAI models) or auto (AutoTokenizer from Hugging Face for non-OpenAI models). Default: tiktoken.

# Spheron
SPHERON_PRIVATE_KEY=
SPHERON_PROVIDER_PROXY_URL=
SPHERON_WALLET_ADDRESS=

# Stargaze NFT marketplace from Cosmos (You can use https://graphql.mainnet.stargaze-apis.com/graphql)
STARGAZE_ENDPOINT=

# GenLayer
GENLAYER_PRIVATE_KEY= # Private key of the GenLayer account to use for the agent in this format (0x0000000000000000000000000000000000000000000000000000000000000000)

####################################
#### Misc Plugin Configurations ####
####################################

# Intiface Configuration
INTIFACE_WEBSOCKET_URL=ws://localhost:12345

# API key for giphy from https://developers.giphy.com/dashboard/
GIPHY_API_KEY=

# OpenWeather
OPEN_WEATHER_API_KEY= # OpenWeather API key

# Gitcoin Passport
PASSPORT_API_KEY= # Gitcoin Passport key
PASSPORT_SCORER= # Scorer number

# EchoChambers Configuration
ECHOCHAMBERS_API_URL=http://127.0.0.1:3333
ECHOCHAMBERS_API_KEY=testingkey0011
ECHOCHAMBERS_USERNAME=eliza
ECHOCHAMBERS_ROOMS=general # comma delimited list of rooms the agent watches
ECHOCHAMBERS_POLL_INTERVAL=60
ECHOCHAMBERS_MAX_MESSAGES=10

# How often the agent checks if it should start a conversation
ECHOCHAMBERS_CONVERSATION_STARTER_INTERVAL=300 # 5 minutes - checks rooms every 5 minutes

# How long a room must be quiet before starting a new conversation
ECHOCHAMBERS_QUIET_PERIOD=900 # 15 minutes - waits for 15 minutes of silence

# Allora
ALLORA_API_KEY= # Allora API key, format: UP-f8db7d6558ab432ca0d92716
ALLORA_CHAIN_SLUG= # must be one of mainnet, testnet. If not specified, it will use testnet by default

# B2 Network
B2_PRIVATE_KEY= # Private key of the B2 Network account to use for the agent

# Opacity zkTLS
OPACITY_TEAM_ID=f309ac8ae8a9a14a7e62cd1a521b1c5f
OPACITY_CLOUDFLARE_NAME=eigen-test
OPACITY_PROVER_URL=https://opacity-ai-zktls-demo.vercel.app

# AWS S3 Configuration Settings for File Upload
AWS_ACCESS_KEY_ID=
AWS_SECRET_ACCESS_KEY=
AWS_REGION=
AWS_S3_BUCKET=
AWS_S3_UPLOAD_PATH=
AWS_S3_ENDPOINT=
AWS_S3_SSL_ENABLED=
AWS_S3_FORCE_PATH_STYLE=

# Deepgram
DEEPGRAM_API_KEY=

# Verifiable Inference Configuration
VERIFIABLE_INFERENCE_ENABLED=false # Set to false to disable verifiable inference
VERIFIABLE_INFERENCE_PROVIDER=opacity # Options: opacity

# Autonome Configuration
AUTONOME_JWT_TOKEN=
AUTONOME_RPC=https://wizard-bff-rpc.alt.technology/v1/bff/aaa/apps

#####################################
#### Akash Network Configuration ####
#####################################

AKASH_ENV=mainnet
AKASH_NET=https://raw.githubusercontent.com/ovrclk/net/master/mainnet
RPC_ENDPOINT=https://rpc.akashnet.net:443
AKASH_GAS_PRICES=0.025uakt
AKASH_GAS_ADJUSTMENT=1.5
AKASH_KEYRING_BACKEND=os
AKASH_FROM=default
AKASH_FEES=20000uakt
AKASH_DEPOSIT=500000uakt
AKASH_MNEMONIC=
AKASH_WALLET_ADDRESS=

# Akash Pricing API
AKASH_PRICING_API_URL=https://console-api.akash.network/v1/pricing

# Default values
# 1 CPU = 1000, 1GB = 1000000000
AKASH_DEFAULT_CPU=1000
AKASH_DEFAULT_MEMORY=1000000000
AKASH_DEFAULT_STORAGE=1000000000
AKASH_SDL=example.sdl.yml

# Close deployment
# Close all deployments = closeAll
# Close a single deployment = dseq and add the value in AKASH_CLOSE_DSEQ
AKASH_CLOSE_DEP=closeAll
AKASH_CLOSE_DSEQ=19729929

# Provider Info we added one to check you will have to pass this into the action
AKASH_PROVIDER_INFO=akash1ccktptfkvdc67msasmesuy5m7gpc76z75kukpz

# Deployment Status
# AKASH_DEP_STATUS = dseq or param_passed when you are building you will pass the dseq dynamically to test
# you can pass the dseq using AKASH_DEP_DSEQ; 19729929 is an example of a dseq we test while building.
AKASH_DEP_STATUS=dseq
AKASH_DEP_DSEQ=19729929

# Gas Estimation Options: close, create, or update
# dseq is required when operation is "close"; 19729929 is an example of a dseq we test while building.
AKASH_GAS_OPERATION=close
AKASH_GAS_DSEQ=19729929

# Manifest
# Values: "auto" | "manual" | "validate_only" Default: "auto"
AKASH_MANIFEST_MODE=auto
# Default: Will use the SDL directory
AKASH_MANIFEST_PATH=
# Values: "strict" | "lenient" | "none" - Default: "strict"
AKASH_MANIFEST_VALIDATION_LEVEL=strict

# Quai Network Ecosystem
QUAI_PRIVATE_KEY=
QUAI_RPC_URL=https://rpc.quai.network

# Instagram Configuration
INSTAGRAM_DRY_RUN=false
INSTAGRAM_USERNAME= # Account username
INSTAGRAM_PASSWORD= # Account password
INSTAGRAM_APP_ID= # Instagram App ID is required
INSTAGRAM_APP_SECRET= # Instagram App Secret is required
INSTAGRAM_BUSINESS_ACCOUNT_ID= # Optional Business Account ID for additional features
INSTAGRAM_POST_INTERVAL_MIN=60 # Default: 60 minutes
INSTAGRAM_POST_INTERVAL_MAX=120 # Default: 120 minutes
INSTAGRAM_ENABLE_ACTION_PROCESSING=false # Enable/disable action processing
INSTAGRAM_ACTION_INTERVAL=5 # Interval between actions in minutes
INSTAGRAM_MAX_ACTIONS=1 # Maximum number of actions to process at once

###################################
#### Pyth Plugin Configuration ####
###################################

# Network Environment (mainnet or testnet)
PYTH_NETWORK_ENV=mainnet

# Mainnet Network Configuration
PYTH_MAINNET_HERMES_URL=https://hermes.pyth.network
PYTH_MAINNET_WSS_URL=wss://hermes.pyth.network/ws
PYTH_MAINNET_PYTHNET_URL=https://pythnet.rpcpool.com
PYTH_MAINNET_CONTRACT_REGISTRY=https://pyth.network/developers/price-feed-ids
PYTH_MAINNET_PROGRAM_KEY=

# Testnet Network Configuration
PYTH_TESTNET_HERMES_URL=https://hermes.pyth.network
PYTH_TESTNET_WSS_URL=wss://hermes.pyth.network/ws
PYTH_TESTNET_PYTHNET_URL=https://pythnet.rpcpool.com
PYTH_TESTNET_CONTRACT_REGISTRY=https://pyth.network/developers/price-feed-ids#testnet
PYTH_TESTNET_PROGRAM_KEY=

# Connection Settings
PYTH_MAX_RETRIES=3
PYTH_RETRY_DELAY=1000
PYTH_TIMEOUT=5000
PYTH_GRANULAR_LOG=true
PYTH_LOG_LEVEL=debug

# Runtime Settings
RUNTIME_CHECK_MODE=false

# Pyth Price Streaming and test ID
PYTH_ENABLE_PRICE_STREAMING=true
PYTH_MAX_PRICE_STREAMS=2
PYTH_TEST_ID01=0xe62df6c8b4a85fe1a67db44dc12de5db330f7ac66b72dc658afedf0f4a415b43
PYTH_TEST_ID02=0xff61491a931112ddf1bd8147cd1b641375f79f5825126d665480874634fd0ace