1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
|
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
* Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
*/
#ifndef RXE_HDR_H
#define RXE_HDR_H
/* extracted information about a packet carried in an sk_buff struct fits in
* the skbuff cb array. Must be at most 48 bytes. stored in control block of
* sk_buff for received packets.
*/
struct rxe_pkt_info {
	struct rxe_dev *rxe;		/* device that owns packet */
	struct rxe_qp *qp;		/* qp that owns packet */
	struct rxe_send_wqe *wqe;	/* send wqe */
	u8 *hdr;			/* points to bth */
	u32 mask;			/* useful info about pkt */
	u32 psn;			/* bth psn of packet */
	u16 pkey_index;			/* partition of pkt */
	u16 paylen;			/* length from bth through icrc;
					 * see payload_size() below
					 */
	u8 port_num;			/* port pkt received on */
	u8 opcode;			/* bth opcode of packet */
	u8 offset;			/* bth offset from pkt->hdr */
};
/* These converters are valid only for received skbs, where the
 * rxe_pkt_info lives in skb->cb; they map between an sk_buff and the
 * packet info embedded in its control block.
 */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	/* pkt info must fit in the 48-byte skb control block */
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
/*
* IBA header types and methods
*
* Some of these are for reference and completeness only since
* rxe does not currently support RD transport
* most of this could be moved into IB core. ib_pack.h has
* part of this but is incomplete
*
* Header specific routines to insert/extract values to/from headers
* the routines that are named __hhh_(set_)fff() take a pointer to a
* hhh header and get(set) the fff field. The routines named
* hhh_(set_)fff take a packet info struct and find the
* header and field based on the opcode in the packet.
* Conversion to/from network byte order from cpu order is also done.
*/
#define RXE_ICRC_SIZE		(4)	/* invariant CRC trailer, bytes */
#define RXE_MAX_HDR_LENGTH	(80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8 opcode;
	u8 flags;	/* se:7 mig:6 pad:5-4 tver:3-0 */
	__be16 pkey;
	__be32 qpn;	/* fecn:31 becn:30 resv6a:29-24 qpn:23-0 */
	__be32 apsn;	/* ack:31 resv7:30-24 psn:23-0 */
};

#define BTH_TVER		(0)
#define BTH_DEF_PKEY		(0xffff)

/* fields in bth->flags */
#define BTH_SE_MASK		(0x80)
#define BTH_MIG_MASK		(0x40)
#define BTH_PAD_MASK		(0x30)
#define BTH_TVER_MASK		(0x0f)

/* fields in bth->qpn, masks given in host byte order */
#define BTH_FECN_MASK		(0x80000000)
#define BTH_BECN_MASK		(0x40000000)
#define BTH_RESV6A_MASK		(0x3f000000)
#define BTH_QPN_MASK		(0x00ffffff)

/* fields in bth->apsn, masks given in host byte order */
#define BTH_ACK_MASK		(0x80000000)
#define BTH_RESV7_MASK		(0x7f000000)
#define BTH_PSN_MASK		(0x00ffffff)
/* __bth_xxx()/__bth_set_xxx(): raw field accessors on a bth pointer.
 * Getters return values in host byte order; setters take host byte
 * order and preserve the other fields sharing the same dword.
 */
static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

/* solicited event bit */
static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

/* migration request bit */
static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

/* pad count (0-3), stored in bits 5:4 of flags */
static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

/* transport header version, low nibble of flags */
static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

/* 24-bit destination qpn, low bits of the qpn dword */
static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

/* ECN bits share the qpn dword; tested in network byte order by
 * converting the mask instead of the field
 */
static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

/* reserved bits 29:24 of the qpn dword */
static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}
/* Clear the reserved 6a bits (bits 29:24 of the qpn dword).
 *
 * The previous implementation assigned cpu_to_be32(~BTH_RESV6A_MASK)
 * to the whole dword, clobbering the QPN, FECN and BECN fields that
 * share it.  Clear only the reserved bits, matching the pattern used
 * by __bth_set_resv7() below.
 */
static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->qpn &= ~cpu_to_be32(BTH_RESV6A_MASK);
}
/* ack-request bit, high bit of the apsn dword */
static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

/* clear reserved bits 30:24 of the apsn dword */
static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

/* 24-bit packet sequence number, low bits of the apsn dword */
static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
				(~BTH_PSN_MASK & apsn));
}
/* bth_xxx()/bth_set_xxx(): pkt-level wrappers that locate the bth at
 * pkt->hdr + pkt->offset and delegate to the __bth_xxx() helpers above
 */
static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}
/* Fill in every BTH field of an outgoing packet in one pass.
 *
 * The flags byte carries pad (bits 5:4), se (bit 7) and mig (bit 6);
 * tver and the reserved bits are left zero.  The apsn dword carries
 * the 24-bit psn plus the ack-request bit.  qpn is masked to its
 * 24-bit field; fecn/becn/resv6a start out clear.
 */
static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);
	u8 flags = (pad << 4) & BTH_PAD_MASK;
	u32 apsn = psn & BTH_PSN_MASK;

	if (se)
		flags |= BTH_SE_MASK;
	if (mig)
		flags |= BTH_MIG_MASK;
	if (ack_req)
		apsn |= BTH_ACK_MASK;

	bth->opcode = opcode;
	bth->flags = flags;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	bth->apsn = cpu_to_be32(apsn);
}
/******************************************************************************
 * Reliable Datagram Extended Transport Header
 *
 * For reference only; rxe does not currently support the RD transport.
 ******************************************************************************/
struct rxe_rdeth {
	__be32 een;	/* resv:31-24 een:23-0 */
};

#define RDETH_EEN_MASK		(0x00ffffff)
static inline u8 __rdeth_een(void *arg)
{
struct rxe_rdeth *rdeth = arg;
return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}
/* store a 24-bit EEN; the reserved high byte is written as zero */
static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}
static inline u8 rdeth_een(struct rxe_pkt_info *pkt)
{
return __rdeth_een(pkt->hdr + pkt->offset
+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}
/* pkt-level wrapper; locates the RDETH via the per-opcode offset table */
static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}
/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32 qkey;
	__be32 sqp;	/* resv:31-24 sqp:23-0 */
};

#define GSI_QKEY		(0x80010000)
#define DETH_SQP_MASK		(0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

/* 24-bit source qpn */
static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

/* pkt-level wrappers: locate the DETH via the per-opcode offset table */
static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}
/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64 va;	/* remote virtual address */
	__be32 rkey;	/* remote memory key */
	__be32 len;	/* dma length in bytes */
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

/* pkt-level wrappers: locate the RETH via the per-opcode offset table */
static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}
/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64 va;		/* remote virtual address */
	__be32 rkey;		/* remote memory key */
	__be64 swap_add;	/* swap (cmp-swap) or add operand */
	__be64 comp;		/* compare operand */
} __packed;			/* wire format: the 32-bit rkey sits between
				 * 64-bit fields, so no padding is allowed
				 */

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

/* pkt-level wrappers: locate the ATMETH via the per-opcode offset table */
static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}
/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32 smsn;	/* syn:31-24 msn:23-0 */
};

#define AETH_SYN_MASK		(0xff000000)
#define AETH_MSN_MASK		(0x00ffffff)

/* syndrome byte values; the top three bits (AETH_TYPE_MASK) select
 * ack / rnr-nak / nak, the low bits carry type-specific detail
 */
enum aeth_syndrome {
	AETH_TYPE_MASK		= 0xe0,
	AETH_ACK		= 0x00,
	AETH_RNR_NAK		= 0x20,
	AETH_RSVD		= 0x40,
	AETH_NAK		= 0x60,
	AETH_ACK_UNLIMITED	= 0x1f,
	AETH_NAK_PSN_SEQ_ERROR	= 0x60,
	AETH_NAK_INVALID_REQ	= 0x61,
	AETH_NAK_REM_ACC_ERR	= 0x62,
	AETH_NAK_REM_OP_ERR	= 0x63,
	AETH_NAK_INV_RD_REQ	= 0x64,
};

/* syndrome byte, bits 31:24 of the smsn dword */
static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			 (~AETH_SYN_MASK & smsn));
}

/* 24-bit message sequence number */
static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			 (~AETH_MSN_MASK & smsn));
}

/* pkt-level wrappers: locate the AETH via the per-opcode offset table */
static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}
/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64 orig;	/* original value at the target before the atomic op */
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

/* pkt-level wrappers: locate the ATMACK via the per-opcode offset table */
static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}
/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32 imm;
};

/* immediate data is opaque to the transport and therefore passed
 * through in network byte order, unconverted
 */
static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

/* pkt-level wrappers: locate the IMMDT via the per-opcode offset table */
static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}
/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32 rkey;	/* rkey to invalidate */
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

/* pkt-level wrappers: locate the IETH via the per-opcode offset table */
static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}
/* on-wire sizes of each header type, derived from the structs above */
enum rxe_hdr_length {
	RXE_BTH_BYTES		= sizeof(struct rxe_bth),
	RXE_DETH_BYTES		= sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES		= sizeof(struct rxe_immdt),
	RXE_RETH_BYTES		= sizeof(struct rxe_reth),
	RXE_AETH_BYTES		= sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES	= sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES	= sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES		= sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES		= sizeof(struct rxe_rdeth),
};
/* total header length: bytes before the bth (pkt->offset) plus the
 * opcode-specific header length from the opcode table
 */
static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

/* start of the payload, immediately after the opcode-specific headers */
static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

/* payload length: paylen minus the post-bth headers, the pad bytes
 * and the 4-byte ICRC trailer
 */
static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
#endif /* RXE_HDR_H */
|