#ifndef DS4_GPU_H
#define DS4_GPU_H
#include <stdbool.h>
#include <stdint.h>
/* =========================================================================
* GPU Tensor and Command Lifetime.
* =========================================================================
*
* Opaque device tensor used by the DS4-specific GPU executor.
*
* The public GPU API is tensor-resident: activations, KV state, and scratch
* buffers stay device-owned across the whole prefill/decode command sequence.
*/
typedef struct ds4_gpu_tensor ds4_gpu_tensor;
int ds4_gpu_init(void);
void ds4_gpu_cleanup(void);
ds4_gpu_tensor *ds4_gpu_tensor_alloc(uint64_t bytes);
ds4_gpu_tensor *ds4_gpu_tensor_alloc_managed(uint64_t bytes);
ds4_gpu_tensor *ds4_gpu_tensor_view(const ds4_gpu_tensor *base, uint64_t offset, uint64_t bytes);
void ds4_gpu_tensor_free(ds4_gpu_tensor *tensor);
uint64_t ds4_gpu_tensor_bytes(const ds4_gpu_tensor *tensor);
void *ds4_gpu_tensor_contents(ds4_gpu_tensor *tensor);
int ds4_gpu_tensor_fill_f32(ds4_gpu_tensor *tensor, float value, uint64_t count);
int ds4_gpu_tensor_write(ds4_gpu_tensor *tensor, uint64_t offset, const void *data, uint64_t bytes);
int ds4_gpu_tensor_read(const ds4_gpu_tensor *tensor, uint64_t offset, void *data, uint64_t bytes);
int ds4_gpu_tensor_copy(ds4_gpu_tensor *dst, uint64_t dst_offset,
const ds4_gpu_tensor *src, uint64_t src_offset,
uint64_t bytes);
int ds4_gpu_begin_commands(void);
int ds4_gpu_flush_commands(void);
int ds4_gpu_end_commands(void);
int ds4_gpu_synchronize(void);
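/* Illustrative lifetime sketch (not part of the API; the buffer size and the
 * zero-on-success return convention are assumptions made for this example):
 *
 *   float host[4096] = {0};
 *   if (ds4_gpu_init() != 0) return;                        // bring up the device
 *   ds4_gpu_tensor *x = ds4_gpu_tensor_alloc(sizeof host);  // device-resident buffer
 *   ds4_gpu_tensor_write(x, 0, host, sizeof host);          // upload once
 *   ds4_gpu_begin_commands();                               // open a command sequence
 *   // ... enqueue kernels that read/write x ...
 *   ds4_gpu_end_commands();                                 // submit the sequence
 *   ds4_gpu_synchronize();                                  // wait before touching results
 *   ds4_gpu_tensor_read(x, 0, host, sizeof host);           // read back
 *   ds4_gpu_tensor_free(x);
 *   ds4_gpu_cleanup();
 */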
int ds4_gpu_set_model_map(const void *model_map, uint64_t model_size);
int ds4_gpu_set_model_fd(int fd);
int ds4_gpu_set_model_map_range(const void *model_map, uint64_t model_size, uint64_t map_offset, uint64_t map_size);
int ds4_gpu_cache_model_range(const void *model_map, uint64_t model_size, uint64_t offset, uint64_t bytes, const char *label);
int ds4_gpu_cache_q8_f16_range(const void *model_map, uint64_t model_size, uint64_t offset, uint64_t bytes, uint64_t in_dim, uint64_t out_dim, const char *label);
int ds4_gpu_should_use_managed_kv_cache(uint64_t kv_cache_bytes, uint64_t context_bytes);
void ds4_gpu_set_quality(bool quality);
void ds4_gpu_print_memory_report(const char *label);
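/* Illustrative model-map sketch (the mapped base pointer, sizes, offsets, and
 * label are placeholders; real offsets come from the model's tensor table):
 *
 *   ds4_gpu_set_model_map(map_base, map_bytes);     // register the mapped model file
 *   ds4_gpu_cache_model_range(map_base, map_bytes,
 *                             weight_offset,        // hypothetical hot weight range
 *                             weight_bytes,
 *                             "attn_out.q8_0");     // diagnostic label
 *   ds4_gpu_print_memory_report("after weight caching");
 */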
/* =========================================================================
* Embeddings and Indexer Helpers.
* =========================================================================
*
 * These kernels seed HC state from token embeddings and implement the ratio-4
 * compressed-attention indexer that selects which compressed rows remain
 * visible to attention.
*/
int ds4_gpu_embed_token_hc_tensor(
ds4_gpu_tensor *out_hc,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint32_t n_vocab,
uint32_t token,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_embed_tokens_hc_tensor(
ds4_gpu_tensor *out_hc,
const ds4_gpu_tensor *tokens,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint32_t n_vocab,
uint32_t n_tokens,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_indexer_score_one_tensor(
ds4_gpu_tensor *scores,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *weights,
const ds4_gpu_tensor *index_comp,
uint32_t n_comp,
uint32_t n_head,
uint32_t head_dim,
float scale);
int ds4_gpu_indexer_scores_prefill_tensor(
ds4_gpu_tensor *scores,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *weights,
const ds4_gpu_tensor *index_comp,
uint32_t n_comp,
uint32_t n_tokens,
uint32_t n_head,
uint32_t head_dim,
uint32_t ratio,
float scale);
int ds4_gpu_indexer_scores_decode_batch_tensor(
ds4_gpu_tensor *scores,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *weights,
const ds4_gpu_tensor *index_comp,
uint32_t n_comp,
uint32_t n_tokens,
uint32_t pos0,
uint32_t n_head,
uint32_t head_dim,
uint32_t ratio,
float scale);
int ds4_gpu_indexer_topk_tensor(
ds4_gpu_tensor *selected,
const ds4_gpu_tensor *scores,
uint32_t n_comp,
uint32_t n_tokens,
uint32_t top_k);
int ds4_gpu_dsv4_topk_mask_tensor(
ds4_gpu_tensor *mask,
const ds4_gpu_tensor *topk,
uint32_t n_comp,
uint32_t n_tokens,
uint32_t top_k);
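/* Illustrative decode-step indexer sketch (tensor allocation is omitted and
 * the dimensions are placeholders; n_tokens is 1 for a single decode step):
 *
 *   ds4_gpu_indexer_score_one_tensor(scores, q, idx_weights, index_comp,
 *                                    n_comp, n_head, head_dim, scale);
 *   ds4_gpu_indexer_topk_tensor(selected, scores, n_comp, 1, top_k);
 *   ds4_gpu_dsv4_topk_mask_tensor(mask, selected, n_comp, 1, top_k);
 *   // `selected` (or `mask`) then feeds the indexed/masked attention kernels
 *   // declared further below.
 */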
/* =========================================================================
* Dense Projections, Norms, RoPE, and KV Rounding.
* =========================================================================
*
* The graph uses these primitives for Q/KV projections, HC/output projections,
* attention output projections, and DS4's tail-only RoPE.
*/
int ds4_gpu_matmul_q8_0_tensor(
ds4_gpu_tensor *out,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x,
uint64_t n_tok);
int ds4_gpu_shared_gate_up_swiglu_q8_0_tensor(
ds4_gpu_tensor *gate,
ds4_gpu_tensor *up,
ds4_gpu_tensor *mid,
const void *model_map,
uint64_t model_size,
uint64_t gate_offset,
uint64_t up_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x);
int ds4_gpu_matmul_f16_tensor(
ds4_gpu_tensor *out,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x,
uint64_t n_tok);
int ds4_gpu_matmul_f16_pair_tensor(
ds4_gpu_tensor *out_a,
ds4_gpu_tensor *out_b,
const void *model_map,
uint64_t model_size,
uint64_t weight_a_offset,
uint64_t weight_b_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x,
uint64_t n_tok);
int ds4_gpu_matmul_f32_tensor(
ds4_gpu_tensor *out,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x,
uint64_t n_tok);
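/* Illustrative projection sketch (the weight offset and dimensions are
 * placeholders; the Q8_0 weight is read directly from the mapped model):
 *
 *   // Project n_tok rows of width n_embd to n_head * head_dim per token.
 *   ds4_gpu_matmul_q8_0_tensor(q_proj, model_map, model_size,
 *                              q_weight_offset,     // byte offset of the Q8_0 weight
 *                              n_embd,              // in_dim
 *                              n_head * head_dim,   // out_dim
 *                              x, n_tok);
 */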
int ds4_gpu_repeat_hc_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *row,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_rms_norm_plain_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *x,
uint32_t n,
float eps);
int ds4_gpu_rms_norm_plain_rows_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *x,
uint32_t n,
uint32_t rows,
float eps);
int ds4_gpu_rms_norm_weight_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *x,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint32_t n,
float eps);
int ds4_gpu_rms_norm_weight_rows_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *x,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint32_t n,
uint32_t rows,
float eps);
int ds4_gpu_dsv4_qkv_rms_norm_rows_tensor(
ds4_gpu_tensor *q_out,
const ds4_gpu_tensor *q,
const void *model_map,
uint64_t model_size,
uint64_t q_weight_offset,
uint32_t q_n,
ds4_gpu_tensor *kv_out,
const ds4_gpu_tensor *kv,
uint64_t kv_weight_offset,
uint32_t kv_n,
uint32_t rows,
float eps);
int ds4_gpu_head_rms_norm_tensor(
ds4_gpu_tensor *x,
uint32_t n_tok,
uint32_t n_head,
uint32_t head_dim,
float eps);
int ds4_gpu_dsv4_fp8_kv_quantize_tensor(
ds4_gpu_tensor *x,
uint32_t n_tok,
uint32_t head_dim,
uint32_t n_rot);
int ds4_gpu_rope_tail_tensor(
ds4_gpu_tensor *x,
uint32_t n_tok,
uint32_t n_head,
uint32_t head_dim,
uint32_t n_rot,
uint32_t pos0,
uint32_t n_ctx_orig,
bool inverse,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
float beta_slow);
/* Fused KV finalizer for the release decode path: after the standalone RoPE
 * kernel, this performs DS4's FP8 round trip on the non-RoPE KV dims and
 * writes the F16-rounded raw attention cache row in one dispatch. */
int ds4_gpu_kv_fp8_store_raw_tensor(
ds4_gpu_tensor *kv,
ds4_gpu_tensor *raw_cache,
uint32_t raw_cap,
uint32_t row,
uint32_t head_dim,
uint32_t n_rot);
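/* Illustrative decode KV finalize sketch (single token; the KV head count,
 * RoPE hyperparameters, and the ring-slot arithmetic are placeholders):
 *
 *   // Rotate only the tail n_rot dims of the one KV row at position pos.
 *   ds4_gpu_rope_tail_tensor(kv, 1, 1, head_dim, n_rot, pos, n_ctx_orig,
 *                            false, freq_base, freq_scale, ext_factor,
 *                            attn_factor, beta_fast, beta_slow);
 *   // FP8 round-trip the non-RoPE part and store the F16-rounded row into the
 *   // raw attention cache ring slot.
 *   ds4_gpu_kv_fp8_store_raw_tensor(kv, raw_cache, raw_cap,
 *                                   pos % raw_cap, head_dim, n_rot);
 */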
/* Reference/raw-cache primitive kept for prefill and diagnostics. Decode uses
* ds4_gpu_kv_fp8_store_raw_tensor unless a diagnostic reference path is
* explicitly selected by the graph driver. */
int ds4_gpu_store_raw_kv_tensor(
ds4_gpu_tensor *raw_cache,
const ds4_gpu_tensor *kv,
uint32_t raw_cap,
uint32_t row,
uint32_t head_dim);
int ds4_gpu_store_raw_kv_batch_tensor(
ds4_gpu_tensor *raw_cache,
const ds4_gpu_tensor *kv,
uint32_t raw_cap,
uint32_t pos0,
uint32_t n_tokens,
uint32_t head_dim);
/* =========================================================================
* KV Compression and Attention.
* =========================================================================
*
* Compressed layers maintain rolling score/KV state and append pooled rows at
* ratio boundaries. Attention kernels consume raw SWA rows, compressed rows,
* and optional indexer masks.
*/
int ds4_gpu_compressor_update_tensor(
const ds4_gpu_tensor *kv_cur,
const ds4_gpu_tensor *sc_cur,
ds4_gpu_tensor *state_kv,
ds4_gpu_tensor *state_score,
ds4_gpu_tensor *comp_cache,
const void *model_map,
uint64_t model_size,
uint64_t ape_offset,
uint32_t ape_type,
uint64_t norm_offset,
uint32_t norm_type,
uint32_t head_dim,
uint32_t ratio,
uint32_t pos,
uint32_t comp_row,
uint32_t n_rot,
uint32_t n_ctx_orig,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
float beta_slow,
float rms_eps);
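/* Illustrative decode-step compressor sketch (the row arithmetic below is an
 * assumption made for the example: a pooled row is taken to land in the
 * compressed cache once every `ratio` tokens):
 *
 *   uint32_t comp_row = pos / ratio;   // compressed row index at a ratio boundary
 *   ds4_gpu_compressor_update_tensor(kv_cur, sc_cur, state_kv, state_score,
 *                                    comp_cache, model_map, model_size,
 *                                    ape_offset, ape_type, norm_offset, norm_type,
 *                                    head_dim, ratio, pos, comp_row,
 *                                    n_rot, n_ctx_orig,
 *                                    freq_base, freq_scale, ext_factor,
 *                                    attn_factor, beta_fast, beta_slow, rms_eps);
 */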
int ds4_gpu_compressor_store_batch_tensor(
const ds4_gpu_tensor *kv,
const ds4_gpu_tensor *sc,
ds4_gpu_tensor *state_kv,
ds4_gpu_tensor *state_score,
const void *model_map,
uint64_t model_size,
uint64_t ape_offset,
uint32_t ape_type,
uint32_t head_dim,
uint32_t ratio,
uint32_t pos0,
uint32_t n_tokens);
int ds4_gpu_compressor_prefill_tensor(
ds4_gpu_tensor *comp_cache,
ds4_gpu_tensor *state_kv,
ds4_gpu_tensor *state_score,
const ds4_gpu_tensor *kv,
const ds4_gpu_tensor *sc,
const void *model_map,
uint64_t model_size,
uint64_t ape_offset,
uint32_t ape_type,
uint64_t norm_offset,
uint32_t norm_type,
uint32_t head_dim,
uint32_t ratio,
uint32_t pos0,
uint32_t n_tokens,
uint32_t n_rot,
uint32_t n_ctx_orig,
bool quantize_fp8,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
float beta_slow,
float rms_eps);
int ds4_gpu_compressor_prefill_ratio4_replay_tensor(
ds4_gpu_tensor *comp_cache,
ds4_gpu_tensor *state_kv,
ds4_gpu_tensor *state_score,
const ds4_gpu_tensor *kv,
const ds4_gpu_tensor *sc,
const void *model_map,
uint64_t model_size,
uint64_t ape_offset,
uint32_t ape_type,
uint64_t norm_offset,
uint32_t norm_type,
uint32_t head_dim,
uint32_t pos0,
uint32_t n_tokens,
uint32_t n_rot,
uint32_t n_ctx_orig,
bool quantize_fp8,
float freq_base,
float freq_scale,
float ext_factor,
float attn_factor,
float beta_fast,
float beta_slow,
float rms_eps);
int ds4_gpu_compressor_prefill_state_ratio4_tensor(
ds4_gpu_tensor *state_kv,
ds4_gpu_tensor *state_score,
const ds4_gpu_tensor *kv_tail,
const ds4_gpu_tensor *sc_tail,
const void *model_map,
uint64_t model_size,
uint64_t ape_offset,
uint32_t ape_type,
uint32_t head_dim,
uint32_t pos0);
int ds4_gpu_attention_decode_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
uint32_t n_raw,
uint32_t raw_cap,
uint32_t raw_start,
const ds4_gpu_tensor *comp_kv,
uint32_t n_comp,
const ds4_gpu_tensor *comp_mask,
uint32_t use_mask,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_prefill_raw_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
uint32_t n_tokens,
uint32_t window,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_decode_raw_batch_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
uint32_t n_tokens,
uint32_t pos0,
uint32_t n_raw,
uint32_t raw_cap,
uint32_t raw_start,
uint32_t window,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_decode_mixed_batch_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
const ds4_gpu_tensor *comp_kv,
const ds4_gpu_tensor *comp_mask,
uint32_t use_comp_mask,
uint32_t n_tokens,
uint32_t pos0,
uint32_t n_raw,
uint32_t raw_cap,
uint32_t raw_start,
uint32_t n_comp,
uint32_t window,
uint32_t ratio,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_indexed_mixed_batch_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
const ds4_gpu_tensor *comp_kv,
const ds4_gpu_tensor *topk,
uint32_t n_tokens,
uint32_t pos0,
uint32_t n_raw,
uint32_t raw_cap,
uint32_t raw_start,
uint32_t n_comp,
uint32_t top_k,
uint32_t window,
uint32_t ratio,
uint32_t n_head,
uint32_t head_dim);
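/* Illustrative single-token indexed decode attention sketch (the raw ring
 * bookkeeping values n_raw/raw_cap/raw_start and the window size are
 * placeholders; `topk` is the output of ds4_gpu_indexer_topk_tensor above):
 *
 *   ds4_gpu_attention_indexed_mixed_batch_heads_tensor(
 *       heads, model_map, model_size, sinks_offset,
 *       q, raw_kv, comp_kv, topk,
 *       1, pos0,                       // n_tokens == 1 for one decode step
 *       n_raw, raw_cap, raw_start,
 *       n_comp, top_k, window, ratio,
 *       n_head, head_dim);
 */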
int ds4_gpu_attention_prefill_static_mixed_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
const ds4_gpu_tensor *comp_kv,
uint32_t n_tokens,
uint32_t n_comp,
uint32_t window,
uint32_t ratio,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_prefill_masked_mixed_heads_tensor(
ds4_gpu_tensor *heads,
const void *model_map,
uint64_t model_size,
uint64_t sinks_offset,
const ds4_gpu_tensor *q,
const ds4_gpu_tensor *raw_kv,
const ds4_gpu_tensor *comp_kv,
const ds4_gpu_tensor *comp_mask,
uint32_t n_tokens,
uint32_t n_comp,
uint32_t window,
uint32_t ratio,
uint32_t n_head,
uint32_t head_dim);
int ds4_gpu_attention_output_q8_batch_tensor(
ds4_gpu_tensor *out,
ds4_gpu_tensor *low,
ds4_gpu_tensor *group_tmp,
ds4_gpu_tensor *low_tmp,
const void *model_map,
uint64_t model_size,
uint64_t out_a_offset,
uint64_t out_b_offset,
uint64_t group_dim,
uint64_t rank,
uint32_t n_groups,
uint64_t out_dim,
const ds4_gpu_tensor *heads,
uint32_t n_tokens);
int ds4_gpu_attention_output_low_q8_tensor(
ds4_gpu_tensor *low,
const void *model_map,
uint64_t model_size,
uint64_t out_a_offset,
uint64_t group_dim,
uint64_t rank,
uint32_t n_groups,
const ds4_gpu_tensor *heads);
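/* Illustrative attention output projection sketch (the grouped low-rank shape,
 * i.e. heads split into n_groups rows of group_dim and projected through a
 * rank-`rank` bottleneck via two Q8_0 factors, is inferred from the parameter
 * names and should be treated as an assumption):
 *
 *   ds4_gpu_attention_output_q8_batch_tensor(
 *       attn_out, low, group_tmp, low_tmp,
 *       model_map, model_size,
 *       out_a_offset, out_b_offset,
 *       group_dim, rank, n_groups, n_embd,   // n_embd used as out_dim here
 *       heads, n_tokens);
 */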
/* =========================================================================
* Router, Shared Expert, and Routed MoE.
* =========================================================================
*
* These kernels implement the FFN body: router probabilities/top-k or hash
* routing, shared SwiGLU, and the IQ2_XXS/Q2_K/Q4_K routed experts.
*/
int ds4_gpu_swiglu_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *gate,
const ds4_gpu_tensor *up,
uint32_t n,
float clamp,
float weight);
int ds4_gpu_add_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *a,
const ds4_gpu_tensor *b,
uint32_t n);
int ds4_gpu_directional_steering_project_tensor(
ds4_gpu_tensor *x,
const ds4_gpu_tensor *directions,
uint32_t layer,
uint32_t width,
uint32_t rows,
float scale);
int ds4_gpu_router_select_tensor(
ds4_gpu_tensor *selected,
ds4_gpu_tensor *weights,
ds4_gpu_tensor *probs,
const void *model_map,
uint64_t model_size,
uint64_t bias_offset,
uint64_t hash_offset,
uint32_t hash_rows,
uint32_t token,
uint32_t n_expert_groups,
uint32_t n_group_used,
bool has_bias,
bool hash_mode,
const ds4_gpu_tensor *logits);
int ds4_gpu_router_select_batch_tensor(
ds4_gpu_tensor *selected,
ds4_gpu_tensor *weights,
ds4_gpu_tensor *probs,
const void *model_map,
uint64_t model_size,
uint64_t bias_offset,
uint64_t hash_offset,
uint32_t hash_rows,
uint32_t n_expert_groups,
uint32_t n_group_used,
bool has_bias,
bool hash_mode,
const ds4_gpu_tensor *logits,
const ds4_gpu_tensor *tokens,
uint32_t n_tokens);
int ds4_gpu_routed_moe_one_tensor(
ds4_gpu_tensor *out,
ds4_gpu_tensor *gate,
ds4_gpu_tensor *up,
ds4_gpu_tensor *mid,
ds4_gpu_tensor *experts,
const void *model_map,
uint64_t model_size,
uint64_t gate_offset,
uint64_t up_offset,
uint64_t down_offset,
uint32_t gate_type,
uint32_t down_type,
uint64_t gate_expert_bytes,
uint64_t gate_row_bytes,
uint64_t down_expert_bytes,
uint64_t down_row_bytes,
uint32_t expert_in_dim,
uint32_t expert_mid_dim,
uint32_t out_dim,
const ds4_gpu_tensor *selected,
const ds4_gpu_tensor *weights,
uint32_t n_expert,
float clamp,
const ds4_gpu_tensor *x);
int ds4_gpu_routed_moe_batch_tensor(
ds4_gpu_tensor *out,
ds4_gpu_tensor *gate,
ds4_gpu_tensor *up,
ds4_gpu_tensor *mid,
ds4_gpu_tensor *experts,
const void *model_map,
uint64_t model_size,
uint64_t gate_offset,
uint64_t up_offset,
uint64_t down_offset,
uint32_t gate_type,
uint32_t down_type,
uint64_t gate_expert_bytes,
uint64_t gate_row_bytes,
uint64_t down_expert_bytes,
uint64_t down_row_bytes,
uint32_t expert_in_dim,
uint32_t expert_mid_dim,
uint32_t out_dim,
const ds4_gpu_tensor *selected,
const ds4_gpu_tensor *weights,
uint32_t n_expert,
float clamp,
const ds4_gpu_tensor *x,
uint32_t n_tokens,
bool *mid_is_f16);
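/* Illustrative single-token FFN routing sketch (expert byte strides, offsets,
 * types, and the clamp value are placeholders; `logits` would come from a
 * prior router projection, e.g. ds4_gpu_matmul_f32_tensor):
 *
 *   bool has_bias  = true;    // router bias present in this sketch
 *   bool hash_mode = false;   // probability/top-k routing rather than hash routing
 *   ds4_gpu_router_select_tensor(selected, weights, probs,
 *                                model_map, model_size,
 *                                bias_offset, hash_offset, hash_rows,
 *                                token, n_expert_groups, n_group_used,
 *                                has_bias, hash_mode, logits);
 *   ds4_gpu_routed_moe_one_tensor(routed_out, gate, up, mid, experts,
 *                                 model_map, model_size,
 *                                 gate_offset, up_offset, down_offset,
 *                                 gate_type, down_type,
 *                                 gate_expert_bytes, gate_row_bytes,
 *                                 down_expert_bytes, down_row_bytes,
 *                                 n_embd, expert_mid_dim, n_embd,
 *                                 selected, weights, n_expert, clamp, x);
 */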
/* =========================================================================
* Hyper-Connection Kernels.
* =========================================================================
*
* HC kernels reduce four residual streams before a sublayer and expand the
* sublayer output back into four streams afterward.
*/
int ds4_gpu_hc_split_sinkhorn_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *mix,
const void *model_map,
uint64_t model_size,
uint64_t scale_offset,
uint64_t base_offset,
uint32_t n_hc,
uint32_t sinkhorn_iters,
float eps);
int ds4_gpu_hc_weighted_sum_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *weights,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_hc_weighted_sum_split_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *split,
uint32_t n_embd,
uint32_t n_hc);
/* Fused HC pre-sublayer operation for the release decode path: split the HC
 * mixer and immediately reduce the four HC streams into the active 4096-wide
 * sublayer row. */
int ds4_gpu_hc_split_weighted_sum_tensor(
ds4_gpu_tensor *out,
ds4_gpu_tensor *split,
const ds4_gpu_tensor *mix,
const ds4_gpu_tensor *residual_hc,
const void *model_map,
uint64_t model_size,
uint64_t scale_offset,
uint64_t base_offset,
uint32_t n_embd,
uint32_t n_hc,
uint32_t sinkhorn_iters,
float eps);
int ds4_gpu_hc_split_weighted_sum_norm_tensor(
ds4_gpu_tensor *out,
ds4_gpu_tensor *norm_out,
ds4_gpu_tensor *split,
const ds4_gpu_tensor *mix,
const ds4_gpu_tensor *residual_hc,
const void *model_map,
uint64_t model_size,
uint64_t scale_offset,
uint64_t base_offset,
uint64_t norm_weight_offset,
uint32_t n_embd,
uint32_t n_hc,
uint32_t sinkhorn_iters,
float eps,
float norm_eps);
int ds4_gpu_output_hc_weights_tensor(
ds4_gpu_tensor *out,
const ds4_gpu_tensor *pre,
const void *model_map,
uint64_t model_size,
uint64_t scale_offset,
uint64_t base_offset,
uint32_t n_hc,
float eps);
int ds4_gpu_hc_expand_tensor(
ds4_gpu_tensor *out_hc,
const ds4_gpu_tensor *block_out,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *post,
const ds4_gpu_tensor *comb,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_hc_expand_split_tensor(
ds4_gpu_tensor *out_hc,
const ds4_gpu_tensor *block_out,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *split,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_hc_expand_add_split_tensor(
ds4_gpu_tensor *out_hc,
const ds4_gpu_tensor *block_out,
const ds4_gpu_tensor *block_add,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *split,
uint32_t n_embd,
uint32_t n_hc);
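/* Illustrative HC sandwich sketch for one decode token (the sublayer in the
 * middle and the sinkhorn iteration count are placeholders):
 *
 *   // Reduce: derive the split weights and collapse the n_hc residual streams
 *   // into a single n_embd-wide row for the sublayer.
 *   ds4_gpu_hc_split_weighted_sum_tensor(x, split, mix, residual_hc,
 *                                        model_map, model_size,
 *                                        scale_offset, base_offset,
 *                                        n_embd, n_hc, sinkhorn_iters, eps);
 *   // ... run the attention or FFN sublayer on x, producing block_out ...
 *   // Expand: scatter block_out back across the n_hc residual streams using
 *   // the same split weights.
 *   ds4_gpu_hc_expand_split_tensor(residual_hc_next, block_out,
 *                                  residual_hc, split, n_embd, n_hc);
 */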
int ds4_gpu_shared_down_hc_expand_q8_0_tensor(
ds4_gpu_tensor *out_hc,
ds4_gpu_tensor *shared_out,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *shared_mid,
const ds4_gpu_tensor *routed_out,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *split,
uint32_t n_embd,
uint32_t n_hc);
int ds4_gpu_matmul_q8_0_hc_expand_tensor(
ds4_gpu_tensor *out_hc,
ds4_gpu_tensor *block_out,
const void *model_map,
uint64_t model_size,
uint64_t weight_offset,
uint64_t in_dim,
uint64_t out_dim,
const ds4_gpu_tensor *x,
const ds4_gpu_tensor *residual_hc,
const ds4_gpu_tensor *split,
uint32_t n_embd,
uint32_t n_hc);
#endif