-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathtoken_usage_tracker.py
62 lines (45 loc) · 2.12 KB
/
token_usage_tracker.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
"""
Token Usage Tracker
This script allows users to retrieve and display the total number of tokens generated by a specific model on a given date. It interacts with the OpenAI API to fetch usage data, including the number of tokens generated and used in context.
The script requires an environment file (.env) containing the OpenAI API key. It uses the 'requests' library to make API requests and 'datetime' to handle dates.
Usage:
1. Ensure a .env file with the necessary API key is present in the same directory as this script.
2. Run the script directly. The script is set to query data for the current date but can be modified for other dates.
Author: Hit the Code Labs
Date: 2023-12-08
"""
import requests
import datetime
import os
from dotenv import load_dotenv
def aggregate_token_totals(usage_entries):
    """Sum generated + context tokens per model snapshot.

    Parameters
    ----------
    usage_entries : list[dict]
        Items from the OpenAI usage API's "data" array. Each item must carry
        'snapshot_id', 'n_generated_tokens_total' and 'n_context_tokens_total'
        (the three keys the original loop read).

    Returns
    -------
    dict[str, int]
        Mapping of model snapshot id -> total tokens (generated + context).
    """
    totals = {}
    for entry in usage_entries:
        model = entry['snapshot_id']
        tokens = entry['n_generated_tokens_total'] + entry['n_context_tokens_total']
        # dict.get replaces the manual "if key not in d / else" accumulation.
        totals[model] = totals.get(model, 0) + tokens
    return totals


def fetch_usage(api_key, date):
    """Fetch raw usage data for *date* (a datetime.date) from the OpenAI API.

    Raises
    ------
    requests.HTTPError
        On a non-2xx response (the original parsed the body unconditionally,
        which turned auth/quota errors into confusing KeyError-style failures).
    """
    url = 'https://api.openai.com/v1/usage'
    headers = {'Authorization': f'Bearer {api_key}'}
    # The usage endpoint expects the date formatted as 'YYYY-MM-DD'.
    params = {'date': date.strftime('%Y-%m-%d')}
    # timeout prevents the script from hanging forever on a stalled connection.
    response = requests.get(url, headers=headers, params=params, timeout=30)
    response.raise_for_status()
    return response.json()


def main(date=None):
    """Print per-model token totals for *date* (defaults to today).

    Loads the API key from the environment (via a local .env file if present),
    queries the usage endpoint, and prints a dict of model -> total tokens.
    """
    # Load environment variables from a local .env file, if one exists.
    load_dotenv()
    api_key = os.environ.get("OPENAI_API_KEY")
    if not api_key:
        # Fail fast instead of sending "Bearer None" to the API.
        raise SystemExit("OPENAI_API_KEY is not set; add it to your .env file.")
    if date is None:
        date = datetime.date.today()
    usage_data = fetch_usage(api_key, date)
    # Process and display the usage data if available.
    if "data" in usage_data:
        print(aggregate_token_totals(usage_data["data"]))
        # Sample output: {'gpt-4-1106-preview': 480468}


if __name__ == "__main__":
    main()