forked from bigdata-ustc/EduCAT
-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
2024.1.12 Add new CAT Strategy (bigdata-ustc#10)
* Add BECAT * Fix bug and Change README * Change README * Update README.md * Modified code specification * Add BOBCAT and NCAT * Change readme * code formatted * Change NCAT code structure
- Loading branch information
Showing
8 changed files
with
710 additions
and
11 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,44 @@ | ||
import torch | ||
import torch.nn as nn | ||
import torch.nn.functional as F | ||
|
||
def hard_sample(logits, dim=-1):
    """Draw a hard (one-hot) sample from `logits` with straight-through gradients.

    Forward pass yields an exact one-hot vector at the argmax position;
    backward pass uses the soft softmax gradient (straight-through
    estimator: y_hard - y_soft.detach() + y_soft).

    Args:
        logits: unnormalized scores, any shape.
        dim: dimension along which to normalize and pick the argmax
            (default: last dimension).

    Returns:
        (ret, index): `ret` is the one-hot tensor carrying straight-through
        gradients; `index` holds the argmax positions (keepdim=True).
    """
    # Bug fix: softmax previously hard-coded dim=-1, ignoring the `dim`
    # argument while max/scatter_ honored it. All three now agree.
    y_soft = F.softmax(logits, dim=dim)
    index = y_soft.max(dim, keepdim=True)[1]
    y_hard = torch.zeros_like(y_soft).scatter_(dim, index, 1.0)
    # Numerically equals y_hard; gradients flow through y_soft only.
    ret = y_hard - y_soft.detach() + y_soft
    return ret, index
|
||
class Actor(nn.Module):
    """Policy network mapping a state to a straight-through one-hot action.

    Invalid actions are suppressed by adding log(mask) to the logits,
    which pushes masked entries toward the float32 minimum before sampling.
    """

    def __init__(self, state_dim, action_dim, n_latent_var=256):
        super().__init__()
        # State encoder followed by a two-layer policy head.
        self.obs_layer = nn.Linear(state_dim, n_latent_var)
        self.actor_layer = nn.Sequential(
            nn.Linear(n_latent_var, n_latent_var),
            nn.Tanh(),
            nn.Linear(n_latent_var, action_dim)
        )

    def forward(self, state, action_mask):
        # Encode the observation and score every action.
        encoded = self.obs_layer(state)
        scores = self.actor_layer(encoded)
        # log(0) = -inf for disallowed actions; clamp keeps the value finite
        # so arithmetic downstream stays NaN-free.
        mask_penalty = torch.clamp(
            torch.log(action_mask.float()),
            min=torch.finfo(torch.float32).min)
        # Sibling helper returns (one_hot_actions, indices).
        return hard_sample(scores + mask_penalty)
|
||
class StraightThrough:
    """Bundles an Actor policy with its Adam optimizer for straight-through
    gradient training."""

    def __init__(self, state_dim, action_dim, lr, config):
        # `config` must provide 'device' and the Adam 'betas' pair.
        self.lr = lr
        dev = config['device']
        self.betas = config['betas']
        self.policy = Actor(state_dim, action_dim).to(dev)
        self.optimizer = torch.optim.Adam(
            self.policy.parameters(), lr=lr, betas=self.betas)

    def update(self, loss):
        """Run one optimizer step on the mean of `loss`."""
        self.optimizer.zero_grad()
        loss.mean().backward()
        self.optimizer.step()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
import numpy as np | ||
from scipy.optimize import minimize | ||
from CAT.strategy.abstract_strategy import AbstractStrategy | ||
from CAT.model import AbstractModel | ||
from CAT.dataset import AdapTestDataset | ||
|
||
class BOBCAT(AbstractStrategy):
    """BOBCAT selection strategy: delegates question selection to the
    model's learned selection network (`bobcat_policy`)."""

    def __init__(self):
        super().__init__()

    @property
    def name(self):
        return 'BOBCAT'

    def adaptest_select(self, model: AbstractModel, adaptest_data: AdapTestDataset, S_set):
        """Select the next question for every student.

        Args:
            model: must implement `bobcat_policy(selected, candidates)`.
            adaptest_data: provides `num_students` and per-student
                `untested` question sets.
            S_set: per-student record of already-administered questions,
                indexed by student id.

        Returns:
            dict mapping student id -> selected question id.
        """
        # Bug fix: the original asserted `get_kli`/`get_pred` (copy-pasted
        # from the KLI strategy) but this method only calls `bobcat_policy`;
        # guard the method that is actually used.
        assert hasattr(model, 'bobcat_policy'), \
            'the models must implement bobcat_policy method'
        selection = {}
        for sid in range(adaptest_data.num_students):
            untested_questions = np.array(list(adaptest_data.untested[sid]))
            selection[sid] = model.bobcat_policy(S_set[sid], untested_questions)
        return selection
Oops, something went wrong.