25 changes: 25 additions & 0 deletions gemma/activations.h
@@ -65,6 +65,9 @@ struct AttentionActivations {
               ? batch_size * layer_config.heads * 3
               : batch_size * layer_config.heads,
           allocator)),
+        vit_Q(MatFactory("Q2", batch_size, layer_config.qkv_dim, allocator)),
+        vit_K(MatFactory("K2", seq_len, layer_config.qkv_dim, allocator)),
+        vit_C(MatFactory("C2", batch_size, seq_len, allocator)),
         pre_att_rms_out(MatFactory("pre_att_rms_out", batch_size,
                                    config.model_dim, allocator)),
         att(MatFactory("att", batch_size, layer_config.heads * seq_len,
@@ -96,6 +99,7 @@ struct AttentionActivations {
     q.AllocateAndAttachRowPtrs(row_ptrs);
     q_bf.AllocateAndAttachRowPtrs(row_ptrs);
     q_T.AllocateAndAttachRowPtrs(row_ptrs);
+    vit_C.AllocateAndAttachRowPtrs(row_ptrs);
     att_sums.AllocateAndAttachRowPtrs(row_ptrs);
   }

@@ -104,6 +108,10 @@ struct AttentionActivations {
     q_bf.OverrideRows(batch_size);
     // q_T rows are always qkv_dim!

+    vit_Q.OverrideRows(batch_size);
+    // vit_K stays seq_len!
+    vit_C.OverrideRows(batch_size);
+
     pre_att_rms_out.OverrideRows(batch_size);
     att.OverrideRows(batch_size);
     att_out.OverrideRows(batch_size);
@@ -116,6 +124,10 @@ struct AttentionActivations {
   MatStorageT<BF16> q_bf;
   MatStorageT<BF16> q_T;  // Transposed to maximize attention speed.

+  MatStorageT<float> vit_Q;
+  MatStorageT<float> vit_K;
+  MatStorageT<float> vit_C;
+
   MatStorageT<float> pre_att_rms_out;
   MatStorageT<float> att;      // attention vector
   MatStorageT<float> att_out;  // attention output
@@ -141,6 +153,9 @@ struct AttentionActivationsPtrs {
     q = activations.q;
     q_bf = activations.q_bf;
     q_T = activations.q_T;
+    vit_Q = activations.vit_Q;
+    vit_K = activations.vit_K;
+    vit_C = activations.vit_C;
     pre_att_rms_out = activations.pre_att_rms_out;
     att = activations.att;
     att_out = activations.att_out;
@@ -153,6 +168,11 @@ struct AttentionActivationsPtrs {
     q.OverrideRows(batch_size);
     q_bf.OverrideRows(batch_size);
     // q_T rows are always qkv_dim!
+
+    vit_Q.OverrideRows(batch_size);
+    // vit_K stays seq_len!
+    vit_C.OverrideRows(batch_size);
+
     pre_att_rms_out.OverrideRows(batch_size);
     att.OverrideRows(batch_size);
     att_out.OverrideRows(batch_size);
@@ -168,6 +188,11 @@ struct AttentionActivationsPtrs {
   MatPtrT<float> q;
   MatPtrT<BF16> q_bf;
   MatPtrT<BF16> q_T;
+
+  MatPtrT<float> vit_Q;
+  MatPtrT<float> vit_K;
+  MatPtrT<float> vit_C;
+
   MatPtrT<float> pre_att_rms_out;
   MatPtrT<float> att;
   MatPtrT<float> att_out;
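Note on the pattern above: the three ViT scratch matrices are now allocated once in AttentionActivations, sized for the maximum batch_size and seq_len, and each call merely shrinks the logical row count via OverrideRows instead of heap-allocating fresh buffers. vit_K keeps its full seq_len rows (per the "vit_K stays seq_len!" comment) because K spans the whole image token sequence, while vit_Q and vit_C track the current batch. Below is a minimal sketch of that row-override idea, using a hypothetical Mat class rather than the actual MatStorageT API:

// A minimal sketch of the row-override pattern, assuming a hypothetical
// `Mat` class in place of the real MatStorageT: storage is allocated once
// at the maximum row count, and later calls only shrink the logical view.
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <vector>

class Mat {
 public:
  Mat(size_t max_rows, size_t cols)
      : max_rows_(max_rows), rows_(max_rows), cols_(cols),
        data_(max_rows * cols) {}

  // Shrinks the logical row count without touching the allocation; the
  // backing storage keeps its max_rows capacity for the next batch.
  void OverrideRows(size_t rows) {
    assert(rows <= max_rows_);
    rows_ = rows;
  }

  size_t Rows() const { return rows_; }
  float* Row(size_t r) { return data_.data() + r * cols_; }

 private:
  size_t max_rows_, rows_, cols_;
  std::vector<float> data_;
};

int main() {
  // Allocate once at the maximum batch size, then reuse across calls.
  Mat vit_Q(/*max_rows=*/64, /*cols=*/256);
  for (size_t batch_size : {64u, 32u, 8u}) {
    vit_Q.OverrideRows(batch_size);  // no heap allocation in the loop
  }
  return 0;
}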
5 changes: 4 additions & 1 deletion gemma/run.cc
@@ -130,7 +130,10 @@ void ReplGemma(const ThreadingArgs& threading, const InferenceArgs& inference,
   auto batch_stream_token = [&](size_t query_idx, size_t pos, int token,
                                 float) {
     std::string token_text;
-    HWY_ASSERT(gemma.Tokenizer().Decode(std::vector<int>{token}, &token_text));
+    if (!gemma.Tokenizer().Decode(std::vector<int>{token}, &token_text)) {
+      if (token == -2) return true;  // Gemma 3 ViT?
+      HWY_WARN("Failed to decode token %d.", token);
+    }

     HWY_ASSERT(pos == abs_pos);
     ++abs_pos;
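This run.cc change downgrades a fatal assert on decode failure to a warning, and silently skips token -2, which the inline comment suggests is the id the Gemma 3 ViT path emits. A hedged sketch of such a tolerant stream-token callback follows; Decode is a stand-in and kVitPlaceholder = -2 is an assumption read off the diff, not the actual gemma.cpp API:

// Sketch of a stream-token callback that tolerates undecodable tokens
// instead of aborting generation. Stand-in names throughout.
#include <cstdio>
#include <initializer_list>
#include <string>

// Stand-in decoder: fails for negative ids, as the real tokenizer
// apparently does for the ViT placeholder token.
bool Decode(int token, std::string* text) {
  if (token < 0) return false;
  *text = "<tok" + std::to_string(token) + ">";
  return true;
}

constexpr int kVitPlaceholder = -2;  // assumed id, per the diff above

// Returns true to keep streaming; an undecodable token now logs a
// warning instead of asserting.
bool OnToken(int token) {
  std::string text;
  if (!Decode(token, &text)) {
    if (token == kVitPlaceholder) return true;  // expected, skip silently
    std::fprintf(stderr, "Failed to decode token %d.\n", token);
    return true;
  }
  std::fputs(text.c_str(), stdout);
  return true;
}

int main() {
  for (int token : {1, kVitPlaceholder, 7}) OnToken(token);
  return 0;
}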
10 changes: 3 additions & 7 deletions gemma/vit.cc
@@ -78,13 +78,9 @@ class VitAttention {
     const float query_scale = 1.0f / sqrtf(static_cast<float>(qkv_dim));
     PROFILER_ZONE("Gen.VitAttention.DotSoftmax");

-    // Shift Q, K, VT to MatStorageT.
-    MatStorageT<float> Q("Q2", Extents2D(num_tokens_, qkv_dim),
-                         env_.ctx.allocator, MatPadding::kPacked);
-    MatStorageT<float> K("K2", Extents2D(seq_len, qkv_dim), env_.ctx.allocator,
-                         MatPadding::kPacked);
-    MatStorageT<float> C("C2", Extents2D(num_tokens_, seq_len),
-                         env_.ctx.allocator, MatPadding::kPacked);
+    MatPtrT<float>& Q = activations_.attention.vit_Q;
+    MatPtrT<float>& K = activations_.attention.vit_K;
+    MatPtrT<float>& C = activations_.attention.vit_C;

     // Initialize att_out to zero prior to head loop.
     ZeroInit(activations_.attention.att_out);
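With the per-call MatStorageT allocations gone, the hot DotSoftmax path now only takes references into the preallocated activations. For context on what those buffers feed: given the query_scale above, the zone computes attention logits C = Q*K^T / sqrt(qkv_dim) followed by a row-wise softmax. A plain-loop sketch of that computation, assuming Q is [num_tokens x qkv_dim], K is [seq_len x qkv_dim], and C is [num_tokens x seq_len]; the loops stand in for the library's matmul kernels:

// Scaled dot-product logits plus numerically stable softmax, per head.
// Illustrative only; not the actual gemma.cpp kernels.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

void DotSoftmax(const std::vector<float>& Q, const std::vector<float>& K,
                std::vector<float>& C, size_t num_tokens, size_t seq_len,
                size_t qkv_dim) {
  const float query_scale = 1.0f / std::sqrt(static_cast<float>(qkv_dim));
  for (size_t i = 0; i < num_tokens; ++i) {
    // Logits: C[i][j] = query_scale * dot(Q[i], K[j]).
    float max_logit = -1e30f;
    for (size_t j = 0; j < seq_len; ++j) {
      float dot = 0.0f;
      for (size_t d = 0; d < qkv_dim; ++d)
        dot += Q[i * qkv_dim + d] * K[j * qkv_dim + d];
      C[i * seq_len + j] = dot * query_scale;
      max_logit = std::max(max_logit, C[i * seq_len + j]);
    }
    // Softmax over row i, subtracting the max for stability.
    float sum = 0.0f;
    for (size_t j = 0; j < seq_len; ++j) {
      C[i * seq_len + j] = std::exp(C[i * seq_len + j] - max_logit);
      sum += C[i * seq_len + j];
    }
    for (size_t j = 0; j < seq_len; ++j) C[i * seq_len + j] /= sum;
  }
}

int main() {
  const size_t num_tokens = 2, seq_len = 3, qkv_dim = 4;
  std::vector<float> Q(num_tokens * qkv_dim, 0.1f);
  std::vector<float> K(seq_len * qkv_dim, 0.2f);
  std::vector<float> C(num_tokens * seq_len);
  DotSoftmax(Q, K, C, num_tokens, seq_len, qkv_dim);
  return 0;
}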