4 #ifndef __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 5 #define __INCLUDE_RTE_SWX_PIPELINE_INTERNAL_H__ 27 #define TRACE(...) printf(__VA_ARGS__) 35 #define ntoh64(x) rte_be_to_cpu_64(x) 36 #define hton64(x) rte_cpu_to_be_64(x) 49 TAILQ_ENTRY(struct_type) node;
58 TAILQ_HEAD(struct_type_tailq, struct_type);
64 TAILQ_ENTRY(port_in_type) node;
69 TAILQ_HEAD(port_in_type_tailq, port_in_type);
72 TAILQ_ENTRY(port_in) node;
73 struct port_in_type *type;
78 TAILQ_HEAD(port_in_tailq, port_in);
80 struct port_in_runtime {
88 struct port_out_type {
89 TAILQ_ENTRY(port_out_type) node;
94 TAILQ_HEAD(port_out_type_tailq, port_out_type);
97 TAILQ_ENTRY(port_out) node;
98 struct port_out_type *type;
103 TAILQ_HEAD(port_out_tailq, port_out);
105 struct port_out_runtime {
116 struct mirroring_session {
119 uint32_t truncation_length;
125 struct extern_type_member_func {
126 TAILQ_ENTRY(extern_type_member_func) node;
132 TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);
135 TAILQ_ENTRY(extern_type) node;
137 struct struct_type *mailbox_struct_type;
140 struct extern_type_member_func_tailq funcs;
144 TAILQ_HEAD(extern_type_tailq, extern_type);
147 TAILQ_ENTRY(extern_obj) node;
149 struct extern_type *type;
155 TAILQ_HEAD(extern_obj_tailq, extern_obj);
157 #ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 158 #define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8 161 struct extern_obj_runtime {
171 TAILQ_ENTRY(extern_func) node;
173 struct struct_type *mailbox_struct_type;
179 TAILQ_HEAD(extern_func_tailq, extern_func);
181 struct extern_func_runtime {
190 TAILQ_ENTRY(hash_func) node;
196 TAILQ_HEAD(hash_func_tailq, hash_func);
198 struct hash_func_runtime {
206 TAILQ_ENTRY(header) node;
208 struct struct_type *st;
213 TAILQ_HEAD(header_tailq, header);
215 struct header_runtime {
220 struct header_out_runtime {
246 enum instruction_type {
304 INSTR_HDR_INVALIDATE,
356 INSTR_ALU_CKADD_FIELD,
357 INSTR_ALU_CKADD_STRUCT20,
358 INSTR_ALU_CKADD_STRUCT,
364 INSTR_ALU_CKSUB_FIELD,
422 INSTR_REGPREFETCH_RH,
423 INSTR_REGPREFETCH_RM,
424 INSTR_REGPREFETCH_RI,
502 INSTR_LEARNER_REARM_NEW,
505 INSTR_LEARNER_FORGET,
555 INSTR_JMP_ACTION_HIT,
560 INSTR_JMP_ACTION_MISS,
613 struct instr_operand {
634 uint8_t header_id[8];
635 uint8_t struct_id[8];
640 struct instr_hdr_validity {
651 uint8_t mf_first_arg_offset;
652 uint8_t mf_timeout_id_offset;
653 uint8_t mf_timeout_id_n_bits;
656 struct instr_extern_obj {
661 struct instr_extern_func {
665 struct instr_hash_func {
666 uint8_t hash_func_id;
680 struct instr_dst_src {
681 struct instr_operand dst;
683 struct instr_operand src;
688 struct instr_regarray {
693 struct instr_operand idx;
698 struct instr_operand dstsrc;
708 struct instr_operand idx;
712 struct instr_operand length;
715 struct instr_operand color_in;
716 uint32_t color_in_val;
719 struct instr_operand color_out;
724 uint8_t header_id[8];
725 uint8_t struct_id[8];
736 struct instruction *ip;
739 struct instr_operand a;
745 struct instr_operand b;
751 enum instruction_type type;
754 struct instr_dst_src mirror;
755 struct instr_hdr_validity valid;
756 struct instr_dst_src mov;
757 struct instr_regarray regarray;
758 struct instr_meter meter;
759 struct instr_dma dma;
760 struct instr_dst_src alu;
761 struct instr_table table;
762 struct instr_learn learn;
763 struct instr_extern_obj ext_obj;
764 struct instr_extern_func ext_func;
765 struct instr_hash_func hash_func;
766 struct instr_jmp jmp;
770 struct instruction_data {
777 typedef void (*instr_exec_t)(
struct rte_swx_pipeline *);
783 (*action_func_t)(
struct rte_swx_pipeline *p);
786 TAILQ_ENTRY(action) node;
788 struct struct_type *st;
789 int *args_endianness;
790 struct instruction *instructions;
791 struct instruction_data *instruction_data;
792 uint32_t n_instructions;
796 TAILQ_HEAD(action_tailq, action);
802 TAILQ_ENTRY(table_type) node;
808 TAILQ_HEAD(table_type_tailq, table_type);
816 TAILQ_ENTRY(table) node;
819 struct table_type *type;
822 struct match_field *fields;
824 struct header *header;
827 struct action **actions;
828 struct action *default_action;
829 uint8_t *default_action_data;
831 int default_action_is_const;
832 uint32_t action_data_size_max;
833 int *action_is_for_table_entries;
834 int *action_is_for_default_entry;
836 struct hash_func *hf;
841 TAILQ_HEAD(table_tailq, table);
843 struct table_runtime {
849 struct table_statistics {
850 uint64_t n_pkts_hit[2];
851 uint64_t *n_pkts_action;
858 TAILQ_ENTRY(selector) node;
861 struct field *group_id_field;
862 struct field **selector_fields;
863 uint32_t n_selector_fields;
864 struct header *selector_header;
865 struct field *member_id_field;
867 uint32_t n_groups_max;
868 uint32_t n_members_per_group_max;
873 TAILQ_HEAD(selector_tailq, selector);
875 struct selector_runtime {
877 uint8_t **group_id_buffer;
878 uint8_t **selector_buffer;
879 uint8_t **member_id_buffer;
882 struct selector_statistics {
890 TAILQ_ENTRY(learner) node;
894 struct field **fields;
896 struct header *header;
899 struct action **actions;
900 struct action *default_action;
901 uint8_t *default_action_data;
903 int default_action_is_const;
904 uint32_t action_data_size_max;
905 int *action_is_for_table_entries;
906 int *action_is_for_default_entry;
908 struct hash_func *hf;
915 TAILQ_HEAD(learner_tailq, learner);
917 struct learner_runtime {
922 struct learner_statistics {
923 uint64_t n_pkts_hit[2];
924 uint64_t n_pkts_learn[2];
925 uint64_t n_pkts_rearm;
926 uint64_t n_pkts_forget;
927 uint64_t *n_pkts_action;
934 TAILQ_ENTRY(regarray) node;
941 TAILQ_HEAD(regarray_tailq, regarray);
943 struct regarray_runtime {
951 struct meter_profile {
952 TAILQ_ENTRY(meter_profile) node;
955 struct rte_meter_trtcm_profile profile;
959 TAILQ_HEAD(meter_profile_tailq, meter_profile);
962 TAILQ_ENTRY(metarray) node;
968 TAILQ_HEAD(metarray_tailq, metarray);
972 struct meter_profile *profile;
980 struct metarray_runtime {
981 struct meter *metarray;
992 uint32_t *mirroring_slots;
993 uint64_t mirroring_slots_mask;
995 uint32_t recirc_pass_id;
1001 struct header_runtime *headers;
1002 struct header_out_runtime *headers_out;
1003 uint8_t *header_storage;
1004 uint8_t *header_out_storage;
1005 uint64_t valid_headers;
1006 uint32_t n_headers_out;
1012 struct table_runtime *tables;
1013 struct selector_runtime *selectors;
1014 struct learner_runtime *learners;
1019 uint32_t learner_id;
1023 struct extern_obj_runtime *extern_objs;
1024 struct extern_func_runtime *extern_funcs;
1027 struct instruction *ip;
1028 struct instruction *ret;
1031 #define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos))) 1032 #define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos))) 1033 #define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos))) 1035 #define HEADER_VALID(thread, header_id) \ 1036 MASK64_BIT_GET((thread)->valid_headers, header_id) 1038 static inline uint64_t
1039 instr_operand_hbo(
struct thread *t,
const struct instr_operand *x)
1041 uint8_t *x_struct = t->structs[x->struct_id];
1042 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1043 uint64_t x64 = *x64_ptr;
1044 uint64_t x64_mask = UINT64_MAX >> (64 - x->n_bits);
1046 return x64 & x64_mask;
1049 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1051 static inline uint64_t
1052 instr_operand_nbo(
struct thread *t,
const struct instr_operand *x)
1054 uint8_t *x_struct = t->structs[x->struct_id];
1055 uint64_t *x64_ptr = (uint64_t *)&x_struct[x->offset];
1056 uint64_t x64 = *x64_ptr;
1058 return ntoh64(x64) >> (64 - x->n_bits);
1063 #define instr_operand_nbo instr_operand_hbo 1067 #define ALU(thread, ip, operator) \ 1069 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1070 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1071 uint64_t dst64 = *dst64_ptr; \ 1072 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1073 uint64_t dst = dst64 & dst64_mask; \ 1075 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1076 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1077 uint64_t src64 = *src64_ptr; \ 1078 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1079 uint64_t src = src64 & src64_mask; \ 1081 uint64_t result = dst operator src; \ 1083 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1086 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1088 #define ALU_MH(thread, ip, operator) \ 1090 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1091 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1092 uint64_t dst64 = *dst64_ptr; \ 1093 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1094 uint64_t dst = dst64 & dst64_mask; \ 1096 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1097 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1098 uint64_t src64 = *src64_ptr; \ 1099 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1101 uint64_t result = dst operator src; \ 1103 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1106 #define ALU_HM(thread, ip, operator) \ 1108 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1109 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1110 uint64_t dst64 = *dst64_ptr; \ 1111 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1112 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1114 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1115 
uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1116 uint64_t src64 = *src64_ptr; \ 1117 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1118 uint64_t src = src64 & src64_mask; \ 1120 uint64_t result = dst operator src; \ 1121 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1123 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1126 #define ALU_HM_FAST(thread, ip, operator) \ 1128 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1129 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1130 uint64_t dst64 = *dst64_ptr; \ 1131 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1132 uint64_t dst = dst64 & dst64_mask; \ 1134 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1135 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1136 uint64_t src64 = *src64_ptr; \ 1137 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \ 1138 uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \ 1140 uint64_t result = dst operator src; \ 1142 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1145 #define ALU_HH(thread, ip, operator) \ 1147 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1148 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1149 uint64_t dst64 = *dst64_ptr; \ 1150 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1151 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1153 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1154 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1155 uint64_t src64 = *src64_ptr; \ 1156 uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \ 1158 uint64_t result = dst operator src; \ 1159 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1161 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1164 #define ALU_HH_FAST(thread, ip, operator) \ 1166 
uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1167 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1168 uint64_t dst64 = *dst64_ptr; \ 1169 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1170 uint64_t dst = dst64 & dst64_mask; \ 1172 uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \ 1173 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \ 1174 uint64_t src64 = *src64_ptr; \ 1175 uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \ 1177 uint64_t result = dst operator src; \ 1179 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1186 #define ALU_HM_FAST ALU 1188 #define ALU_HH_FAST ALU 1192 #define ALU_I(thread, ip, operator) \ 1194 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1195 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1196 uint64_t dst64 = *dst64_ptr; \ 1197 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1198 uint64_t dst = dst64 & dst64_mask; \ 1200 uint64_t src = (ip)->alu.src_val; \ 1202 uint64_t result = dst operator src; \ 1204 *dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \ 1207 #define ALU_MI ALU_I 1209 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1211 #define ALU_HI(thread, ip, operator) \ 1213 uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \ 1214 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \ 1215 uint64_t dst64 = *dst64_ptr; \ 1216 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \ 1217 uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \ 1219 uint64_t src = (ip)->alu.src_val; \ 1221 uint64_t result = dst operator src; \ 1222 result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \ 1224 *dst64_ptr = (dst64 & ~dst64_mask) | result; \ 1229 #define ALU_HI ALU_I 1233 #define MOV(thread, ip) \ 1235 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1236 
uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1237 uint64_t dst64 = *dst64_ptr; \ 1238 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1240 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1241 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1242 uint64_t src64 = *src64_ptr; \ 1243 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1244 uint64_t src = src64 & src64_mask; \ 1246 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1249 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1251 #define MOV_MH(thread, ip) \ 1253 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1254 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1255 uint64_t dst64 = *dst64_ptr; \ 1256 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1258 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1259 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1260 uint64_t src64 = *src64_ptr; \ 1261 uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \ 1263 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1266 #define MOV_HM(thread, ip) \ 1268 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1269 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1270 uint64_t dst64 = *dst64_ptr; \ 1271 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1273 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1274 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1275 uint64_t src64 = *src64_ptr; \ 1276 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \ 1277 uint64_t src = src64 & src64_mask; \ 1279 src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \ 1280 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1283 #define MOV_HH(thread, ip) \ 1285 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 
1286 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1287 uint64_t dst64 = *dst64_ptr; \ 1288 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1290 uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \ 1291 uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \ 1292 uint64_t src64 = *src64_ptr; \ 1294 uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \ 1295 src = src >> (64 - (ip)->mov.dst.n_bits); \ 1296 *dst64_ptr = (dst64 & ~dst64_mask) | src; \ 1307 #define MOV_I(thread, ip) \ 1309 uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \ 1310 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \ 1311 uint64_t dst64 = *dst64_ptr; \ 1312 uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \ 1314 uint64_t src = (ip)->mov.src_val; \ 1316 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \ 1319 #define JMP_CMP(thread, ip, operator) \ 1321 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1322 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1323 uint64_t a64 = *a64_ptr; \ 1324 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1325 uint64_t a = a64 & a64_mask; \ 1327 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1328 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1329 uint64_t b64 = *b64_ptr; \ 1330 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1331 uint64_t b = b64 & b64_mask; \ 1333 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1336 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1338 #define JMP_CMP_MH(thread, ip, operator) \ 1340 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1341 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1342 uint64_t a64 = *a64_ptr; \ 1343 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1344 uint64_t a = a64 & a64_mask; \ 1346 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1347 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1348 uint64_t b64 = *b64_ptr; \ 1349 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1351 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1354 #define JMP_CMP_HM(thread, ip, operator) \ 1356 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1357 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1358 uint64_t a64 = *a64_ptr; \ 1359 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1361 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1362 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1363 uint64_t b64 = *b64_ptr; \ 1364 uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \ 1365 uint64_t b = b64 & b64_mask; \ 1367 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1370 #define JMP_CMP_HH(thread, ip, operator) \ 1372 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1373 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1374 uint64_t a64 = *a64_ptr; \ 1375 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1377 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1378 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1379 uint64_t b64 = *b64_ptr; \ 1380 uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \ 1382 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1385 #define JMP_CMP_HH_FAST(thread, ip, operator) \ 1387 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1388 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1389 uint64_t a64 = *a64_ptr; \ 1390 uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \ 1392 uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \ 1393 uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \ 1394 uint64_t b64 = *b64_ptr; \ 1395 uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \ 1397 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1402 #define JMP_CMP_MH JMP_CMP 1403 #define JMP_CMP_HM JMP_CMP 1404 #define JMP_CMP_HH JMP_CMP 1405 #define JMP_CMP_HH_FAST JMP_CMP 1409 #define JMP_CMP_I(thread, ip, operator) \ 1411 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1412 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1413 uint64_t a64 = *a64_ptr; \ 1414 uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \ 1415 uint64_t a = a64 & a64_mask; \ 1417 uint64_t b = (ip)->jmp.b_val; \ 1419 (thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \ 1422 #define JMP_CMP_MI JMP_CMP_I 1424 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 1426 #define JMP_CMP_HI(thread, ip, operator) \ 1428 uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \ 1429 uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \ 1430 uint64_t a64 = *a64_ptr; \ 1431 uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \ 1433 uint64_t b = (ip)->jmp.b_val; \ 1435 (thread)->ip = (a operator b) ? 
(ip)->jmp.ip : ((thread)->ip + 1); \ 1440 #define JMP_CMP_HI JMP_CMP_I 1444 #define METADATA_READ(thread, offset, n_bits) \ 1446 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1447 uint64_t m64 = *m64_ptr; \ 1448 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1452 #define METADATA_WRITE(thread, offset, n_bits, value) \ 1454 uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \ 1455 uint64_t m64 = *m64_ptr; \ 1456 uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \ 1458 uint64_t m_new = value; \ 1460 *m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \ 1463 #ifndef RTE_SWX_PIPELINE_THREADS_MAX 1464 #define RTE_SWX_PIPELINE_THREADS_MAX 16 1467 #ifndef RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 1468 #define RTE_SWX_PIPELINE_INSTRUCTION_TABLE_SIZE_MAX 256 1471 struct rte_swx_pipeline {
1474 struct struct_type_tailq struct_types;
1475 struct port_in_type_tailq port_in_types;
1476 struct port_in_tailq ports_in;
1477 struct port_out_type_tailq port_out_types;
1478 struct port_out_tailq ports_out;
1479 struct extern_type_tailq extern_types;
1480 struct extern_obj_tailq extern_objs;
1481 struct extern_func_tailq extern_funcs;
1482 struct hash_func_tailq hash_funcs;
1483 struct header_tailq headers;
1484 struct struct_type *metadata_st;
1485 uint32_t metadata_struct_id;
1486 struct action_tailq actions;
1487 struct table_type_tailq table_types;
1488 struct table_tailq tables;
1489 struct selector_tailq selectors;
1490 struct learner_tailq learners;
1491 struct regarray_tailq regarrays;
1492 struct meter_profile_tailq meter_profiles;
1493 struct metarray_tailq metarrays;
1495 struct port_in_runtime *in;
1496 struct port_out_runtime *out;
1497 struct mirroring_session *mirroring_sessions;
1498 struct instruction **action_instructions;
1499 action_func_t *action_funcs;
1501 struct table_statistics *table_stats;
1502 struct selector_statistics *selector_stats;
1503 struct learner_statistics *learner_stats;
1504 struct hash_func_runtime *hash_func_runtime;
1505 struct regarray_runtime *regarray_runtime;
1506 struct metarray_runtime *metarray_runtime;
1507 struct instruction *instructions;
1508 struct instruction_data *instruction_data;
1509 instr_exec_t *instruction_table;
1510 struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];
1514 uint32_t n_ports_in;
1515 uint32_t n_ports_out;
1516 uint32_t n_mirroring_slots;
1517 uint32_t n_mirroring_sessions;
1518 uint32_t n_extern_objs;
1519 uint32_t n_extern_funcs;
1520 uint32_t n_hash_funcs;
1523 uint32_t n_selectors;
1524 uint32_t n_learners;
1525 uint32_t n_regarrays;
1526 uint32_t n_metarrays;
1530 uint32_t n_instructions;
1539 pipeline_port_inc(
struct rte_swx_pipeline *p)
1541 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1);
1545 thread_ip_reset(
struct rte_swx_pipeline *p,
struct thread *t)
1547 t->ip = p->instructions;
1551 thread_ip_set(
struct thread *t,
struct instruction *ip)
1557 thread_ip_action_call(
struct rte_swx_pipeline *p,
1562 t->ip = p->action_instructions[action_id];
1566 thread_ip_inc(
struct rte_swx_pipeline *p);
1569 thread_ip_inc(
struct rte_swx_pipeline *p)
1571 struct thread *t = &p->threads[p->thread_id];
1577 thread_ip_inc_cond(
struct thread *t,
int cond)
1583 thread_yield(
struct rte_swx_pipeline *p)
1585 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1589 thread_yield_cond(
struct rte_swx_pipeline *p,
int cond)
1591 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1);
1598 __instr_rx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1600 struct port_in_runtime *port = &p->in[p->port_id];
1605 if (t->recirculate) {
1606 TRACE(
"[Thread %2u] rx - recirculate (pass %u)\n",
1608 t->recirc_pass_id + 1);
1611 t->ptr = &
pkt->pkt[
pkt->offset];
1612 t->mirroring_slots_mask = 0;
1614 t->recirc_pass_id++;
1617 t->valid_headers = 0;
1618 t->n_headers_out = 0;
1621 t->table_state = p->table_state;
1627 pkt_received = port->pkt_rx(port->obj,
pkt);
1628 t->ptr = &
pkt->pkt[
pkt->offset];
1631 TRACE(
"[Thread %2u] rx %s from port %u\n",
1633 pkt_received ?
"1 pkt" :
"0 pkts",
1636 t->mirroring_slots_mask = 0;
1637 t->recirc_pass_id = 0;
1640 t->valid_headers = 0;
1641 t->n_headers_out = 0;
1644 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id);
1647 t->table_state = p->table_state;
1650 pipeline_port_inc(p);
1652 return pkt_received;
1656 instr_rx_exec(
struct rte_swx_pipeline *p)
1658 struct thread *t = &p->threads[p->thread_id];
1659 struct instruction *ip = t->ip;
1663 pkt_received = __instr_rx_exec(p, t, ip);
1666 thread_ip_inc_cond(t, pkt_received);
1674 emit_handler(
struct thread *t)
1676 struct header_out_runtime *h0 = &t->headers_out[0];
1677 struct header_out_runtime *h1 = &t->headers_out[1];
1678 uint32_t offset = 0, i;
1681 if ((t->n_headers_out == 1) &&
1682 (h0->ptr + h0->n_bytes == t->ptr)) {
1683 TRACE(
"Emit handler: no header change or header decap.\n");
1685 t->pkt.offset -= h0->n_bytes;
1686 t->pkt.length += h0->n_bytes;
1692 if ((t->n_headers_out == 2) &&
1693 (h1->ptr + h1->n_bytes == t->ptr) &&
1694 (h0->ptr == h0->ptr0)) {
1697 TRACE(
"Emit handler: header encapsulation.\n");
1699 offset = h0->n_bytes + h1->n_bytes;
1700 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes);
1701 t->pkt.offset -= offset;
1702 t->pkt.length += offset;
1708 TRACE(
"Emit handler: complex case.\n");
1710 for (i = 0; i < t->n_headers_out; i++) {
1711 struct header_out_runtime *h = &t->headers_out[i];
1713 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes);
1714 offset += h->n_bytes;
1718 memcpy(t->ptr - offset, t->header_out_storage, offset);
1719 t->pkt.offset -= offset;
1720 t->pkt.length += offset;
1725 mirroring_handler(
struct rte_swx_pipeline *p,
struct thread *t,
struct rte_swx_pkt *pkt)
1727 uint64_t slots_mask = t->mirroring_slots_mask, slot_mask;
1730 for (slot_id = 0, slot_mask = 1LLU ; slots_mask; slot_id++, slot_mask <<= 1)
1731 if (slot_mask & slots_mask) {
1732 struct port_out_runtime *port;
1733 struct mirroring_session *session;
1734 uint32_t port_id, session_id;
1736 session_id = t->mirroring_slots[slot_id];
1737 session = &p->mirroring_sessions[session_id];
1739 port_id = session->port_id;
1740 port = &p->out[port_id];
1742 if (session->fast_clone)
1743 port->pkt_fast_clone_tx(port->obj, pkt);
1745 port->pkt_clone_tx(port->obj, pkt, session->truncation_length);
1747 slots_mask &= ~slot_mask;
1752 __instr_tx_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1754 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
1755 struct port_out_runtime *port = &p->out[port_id];
1759 if (t->recirculate) {
1760 TRACE(
"[Thread %2u]: tx 1 pkt - recirculate\n",
1767 mirroring_handler(p, t,
pkt);
1772 TRACE(
"[Thread %2u]: tx 1 pkt to port %u\n",
1780 mirroring_handler(p, t,
pkt);
1781 port->pkt_tx(port->obj,
pkt);
1785 __instr_tx_i_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
1787 uint64_t port_id = ip->io.io.val;
1788 struct port_out_runtime *port = &p->out[port_id];
1792 if (t->recirculate) {
1793 TRACE(
"[Thread %2u]: tx (i) 1 pkt - recirculate\n",
1800 mirroring_handler(p, t,
pkt);
1805 TRACE(
"[Thread %2u]: tx (i) 1 pkt to port %u\n",
1813 mirroring_handler(p, t,
pkt);
1814 port->pkt_tx(port->obj,
pkt);
1818 __instr_drop_exec(
struct rte_swx_pipeline *p,
1822 uint64_t port_id = p->n_ports_out - 1;
1823 struct port_out_runtime *port = &p->out[port_id];
1826 TRACE(
"[Thread %2u]: drop 1 pkt\n",
1833 mirroring_handler(p, t,
pkt);
1834 port->pkt_tx(port->obj,
pkt);
1838 __instr_mirror_exec(
struct rte_swx_pipeline *p,
1840 const struct instruction *ip)
1842 uint64_t slot_id = instr_operand_hbo(t, &ip->mirror.dst);
1843 uint64_t session_id = instr_operand_hbo(t, &ip->mirror.src);
1845 slot_id &= p->n_mirroring_slots - 1;
1846 session_id &= p->n_mirroring_sessions - 1;
1848 TRACE(
"[Thread %2u]: mirror pkt (slot = %u, session = %u)\n",
1851 (uint32_t)session_id);
1853 t->mirroring_slots[slot_id] = session_id;
1854 t->mirroring_slots_mask |= 1LLU << slot_id;
1858 __instr_recirculate_exec(
struct rte_swx_pipeline *p
__rte_unused,
1862 TRACE(
"[Thread %2u]: recirculate\n",
1869 __instr_recircid_exec(
struct rte_swx_pipeline *p
__rte_unused,
1871 const struct instruction *ip)
1873 TRACE(
"[Thread %2u]: recircid (pass %u)\n",
1878 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, t->recirc_pass_id);
1885 __instr_hdr_extract_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
1887 const struct instruction *ip,
1890 uint64_t valid_headers = t->valid_headers;
1891 uint8_t *ptr = t->ptr;
1892 uint32_t
offset = t->pkt.offset;
1893 uint32_t
length = t->pkt.length;
1896 for (i = 0; i < n_extract; i++) {
1897 uint32_t header_id = ip->io.hdr.header_id[i];
1898 uint32_t struct_id = ip->io.hdr.struct_id[i];
1899 uint32_t n_bytes = ip->io.hdr.n_bytes[i];
1901 TRACE(
"[Thread %2u]: extract header %u (%u bytes)\n",
1907 t->structs[struct_id] = ptr;
1908 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
1917 t->valid_headers = valid_headers;
1926 __instr_hdr_extract_exec(
struct rte_swx_pipeline *p,
1928 const struct instruction *ip)
1930 __instr_hdr_extract_many_exec(p, t, ip, 1);
1934 __instr_hdr_extract2_exec(
struct rte_swx_pipeline *p,
1936 const struct instruction *ip)
1938 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
1940 __instr_hdr_extract_many_exec(p, t, ip, 2);
1944 __instr_hdr_extract3_exec(
struct rte_swx_pipeline *p,
1946 const struct instruction *ip)
1948 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
1950 __instr_hdr_extract_many_exec(p, t, ip, 3);
1954 __instr_hdr_extract4_exec(
struct rte_swx_pipeline *p,
1956 const struct instruction *ip)
1958 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
1960 __instr_hdr_extract_many_exec(p, t, ip, 4);
1964 __instr_hdr_extract5_exec(
struct rte_swx_pipeline *p,
1966 const struct instruction *ip)
1968 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
1970 __instr_hdr_extract_many_exec(p, t, ip, 5);
1974 __instr_hdr_extract6_exec(
struct rte_swx_pipeline *p,
1976 const struct instruction *ip)
1978 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
1980 __instr_hdr_extract_many_exec(p, t, ip, 6);
1984 __instr_hdr_extract7_exec(
struct rte_swx_pipeline *p,
1986 const struct instruction *ip)
1988 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
1990 __instr_hdr_extract_many_exec(p, t, ip, 7);
1994 __instr_hdr_extract8_exec(
struct rte_swx_pipeline *p,
1996 const struct instruction *ip)
1998 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2000 __instr_hdr_extract_many_exec(p, t, ip, 8);
2004 __instr_hdr_extract_m_exec(
struct rte_swx_pipeline *p
__rte_unused,
2006 const struct instruction *ip)
2008 uint64_t valid_headers = t->valid_headers;
2009 uint8_t *ptr = t->ptr;
2010 uint32_t
offset = t->pkt.offset;
2011 uint32_t
length = t->pkt.length;
2013 uint32_t n_bytes_last = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits);
2014 uint32_t header_id = ip->io.hdr.header_id[0];
2015 uint32_t struct_id = ip->io.hdr.struct_id[0];
2016 uint32_t n_bytes = ip->io.hdr.n_bytes[0];
2018 struct header_runtime *h = &t->headers[header_id];
2020 TRACE(
"[Thread %2u]: extract header %u (%u + %u bytes)\n",
2026 n_bytes += n_bytes_last;
2029 t->structs[struct_id] = ptr;
2030 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2031 h->n_bytes = n_bytes;
2034 t->pkt.offset = offset + n_bytes;
2035 t->pkt.length = length - n_bytes;
2036 t->ptr = ptr + n_bytes;
/*
 * Header lookahead: map the header struct onto the current packet pointer
 * and mark it valid WITHOUT consuming any packet bytes (no offset/length
 * update, unlike extract).
 * NOTE(review): garbled/incomplete view; comments cover visible lines only.
 */
2040 __instr_hdr_lookahead_exec(
struct rte_swx_pipeline *p
__rte_unused,
2042 const struct instruction *ip)
2044 uint64_t valid_headers = t->valid_headers;
2045 uint8_t *ptr = t->ptr;
2047 uint32_t header_id = ip->io.hdr.header_id[0];
2048 uint32_t struct_id = ip->io.hdr.struct_id[0];
2050 TRACE(
"[Thread %2u]: lookahead header %u\n",
/* Header maps in place onto the packet; packet cursor is NOT advanced. */
2055 t->structs[struct_id] = ptr;
2056 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);
/*
 * Emit n_emit headers into the thread's headers_out list. Contiguous headers
 * (current header's bytes start exactly where the previous output fragment
 * ends) are coalesced into a single output fragment; otherwise a new
 * headers_out entry is started. Invalid headers are traced but the visible
 * lines do not show how they are skipped — confirm against full source.
 * NOTE(review): garbled/incomplete view; several branch bodies are missing.
 */
2063 __instr_hdr_emit_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
2065 const struct instruction *ip,
2068 uint64_t valid_headers = t->valid_headers;
2069 uint32_t n_headers_out = t->n_headers_out;
2070 struct header_out_runtime *ho = NULL;
2071 uint8_t *ho_ptr = NULL;
2072 uint32_t ho_nbytes = 0, i;
2074 for (i = 0; i < n_emit; i++) {
2075 uint32_t header_id = ip->io.hdr.header_id[i];
2076 uint32_t struct_id = ip->io.hdr.struct_id[i];
2078 struct header_runtime *hi = &t->headers[header_id];
2079 uint8_t *hi_ptr0 = hi->ptr0;
2080 uint32_t n_bytes = hi->n_bytes;
2082 uint8_t *hi_ptr = t->structs[struct_id];
2084 if (!MASK64_BIT_GET(valid_headers, header_id)) {
2085 TRACE(
"[Thread %2u]: emit header %u (invalid)\n",
2092 TRACE(
"[Thread %2u]: emit header %u (valid)\n",
/* First emitted header: start headers_out[0]. */
2098 if (!n_headers_out) {
2099 ho = &t->headers_out[0];
2105 ho_nbytes = n_bytes;
/* Subsequent headers append to the last output fragment. */
2111 ho = &t->headers_out[n_headers_out - 1];
2114 ho_nbytes = ho->n_bytes;
/* Contiguous with previous fragment: coalesce instead of adding an entry. */
2118 if (ho_ptr + ho_nbytes == hi_ptr) {
2119 ho_nbytes += n_bytes;
2121 ho->n_bytes = ho_nbytes;
2128 ho_nbytes = n_bytes;
2135 ho->n_bytes = ho_nbytes;
2136 t->n_headers_out = n_headers_out;
2140 __instr_hdr_emit_exec(
struct rte_swx_pipeline *p,
2142 const struct instruction *ip)
2144 __instr_hdr_emit_many_exec(p, t, ip, 1);
2148 __instr_hdr_emit_tx_exec(
struct rte_swx_pipeline *p,
2150 const struct instruction *ip)
2152 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2154 __instr_hdr_emit_many_exec(p, t, ip, 1);
2155 __instr_tx_exec(p, t, ip);
2159 __instr_hdr_emit2_tx_exec(
struct rte_swx_pipeline *p,
2161 const struct instruction *ip)
2163 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2165 __instr_hdr_emit_many_exec(p, t, ip, 2);
2166 __instr_tx_exec(p, t, ip);
2170 __instr_hdr_emit3_tx_exec(
struct rte_swx_pipeline *p,
2172 const struct instruction *ip)
2174 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2176 __instr_hdr_emit_many_exec(p, t, ip, 3);
2177 __instr_tx_exec(p, t, ip);
2181 __instr_hdr_emit4_tx_exec(
struct rte_swx_pipeline *p,
2183 const struct instruction *ip)
2185 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2187 __instr_hdr_emit_many_exec(p, t, ip, 4);
2188 __instr_tx_exec(p, t, ip);
2192 __instr_hdr_emit5_tx_exec(
struct rte_swx_pipeline *p,
2194 const struct instruction *ip)
2196 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2198 __instr_hdr_emit_many_exec(p, t, ip, 5);
2199 __instr_tx_exec(p, t, ip);
2203 __instr_hdr_emit6_tx_exec(
struct rte_swx_pipeline *p,
2205 const struct instruction *ip)
2207 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2209 __instr_hdr_emit_many_exec(p, t, ip, 6);
2210 __instr_tx_exec(p, t, ip);
2214 __instr_hdr_emit7_tx_exec(
struct rte_swx_pipeline *p,
2216 const struct instruction *ip)
2218 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2220 __instr_hdr_emit_many_exec(p, t, ip, 7);
2221 __instr_tx_exec(p, t, ip);
2225 __instr_hdr_emit8_tx_exec(
struct rte_swx_pipeline *p,
2227 const struct instruction *ip)
2229 TRACE(
"[Thread %2u] *** The next 9 instructions are fused. ***\n", p->thread_id);
2231 __instr_hdr_emit_many_exec(p, t, ip, 8);
2232 __instr_tx_exec(p, t, ip);
/*
 * Header validate: if the header is not already valid, point its struct at
 * the header's private buffer (ptr0) and set its valid bit. Already-valid
 * headers are left untouched (early-out on the MASK64_BIT_GET test).
 * NOTE(review): garbled/incomplete view; comments cover visible lines only.
 */
2239 __instr_hdr_validate_exec(
struct rte_swx_pipeline *p
__rte_unused,
2241 const struct instruction *ip)
2243 uint32_t header_id = ip->valid.header_id;
2244 uint32_t struct_id = ip->valid.struct_id;
2245 uint64_t valid_headers = t->valid_headers;
2246 struct header_runtime *h = &t->headers[header_id];
2248 TRACE(
"[Thread %2u] validate header %u\n", p->thread_id, header_id);
/* Nothing to do when the header is already valid. */
2254 if (MASK64_BIT_GET(valid_headers, header_id))
2258 t->structs[struct_id] = h->ptr0;
2259 t->valid_headers = MASK64_BIT_SET(valid_headers, header_id);

/*
 * Header invalidate: clear the header's valid bit only; the struct pointer
 * is left as-is.
 */
2266 __instr_hdr_invalidate_exec(
struct rte_swx_pipeline *p
__rte_unused,
2268 const struct instruction *ip)
2270 uint32_t header_id = ip->valid.header_id;
2272 TRACE(
"[Thread %2u] invalidate header %u\n", p->thread_id, header_id);
2275 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id);
/*
 * Learner table instructions: learn, rearm, rearm (new timeout), forget,
 * and entryid. Each resolves the current learner from t->learner_id and
 * updates the matching learner_statistics counter.
 * NOTE(review): garbled/incomplete view; the actual learner API calls are
 * partially missing from the visible lines — confirm against full source.
 */
2282 __instr_learn_exec(
struct rte_swx_pipeline *p,
2284 const struct instruction *ip)
2286 uint64_t action_id = ip->learn.action_id;
2287 uint32_t mf_first_arg_offset = ip->learn.mf_first_arg_offset;
/* Timeout ID is read from packet meta-data at run-time. */
2288 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2289 ip->learn.mf_timeout_id_n_bits);
2290 uint32_t learner_id = t->learner_id;
2292 p->n_selectors + learner_id];
2293 struct learner_runtime *l = &t->learners[learner_id];
2294 struct learner_statistics *stats = &p->learner_stats[learner_id];
/* Action arguments start at this meta-data offset. */
2302 &t->metadata[mf_first_arg_offset],
2305 TRACE(
"[Thread %2u] learner %u learn %s\n",
2308 status ?
"ok" :
"error");
/* Per-status (ok/error) learn packet counter. */
2310 stats->n_pkts_learn[status] += 1;

/* Rearm the current entry's timeout (same timeout ID). */
2317 __instr_rearm_exec(
struct rte_swx_pipeline *p,
2321 uint32_t learner_id = t->learner_id;
2323 p->n_selectors + learner_id];
2324 struct learner_runtime *l = &t->learners[learner_id];
2325 struct learner_statistics *stats = &p->learner_stats[learner_id];
2330 TRACE(
"[Thread %2u] learner %u rearm\n",
2334 stats->n_pkts_rearm += 1;

/* Rearm with a new timeout ID read from packet meta-data. */
2338 __instr_rearm_new_exec(
struct rte_swx_pipeline *p,
2340 const struct instruction *ip)
2342 uint32_t timeout_id = METADATA_READ(t, ip->learn.mf_timeout_id_offset,
2343 ip->learn.mf_timeout_id_n_bits);
2344 uint32_t learner_id = t->learner_id;
2346 p->n_selectors + learner_id];
2347 struct learner_runtime *l = &t->learners[learner_id];
2348 struct learner_statistics *stats = &p->learner_stats[learner_id];
2353 TRACE(
"[Thread %2u] learner %u rearm with timeout ID %u\n",
2358 stats->n_pkts_rearm += 1;

/* Delete (forget) the current learner table entry. */
2365 __instr_forget_exec(
struct rte_swx_pipeline *p,
2369 uint32_t learner_id = t->learner_id;
2371 p->n_selectors + learner_id];
2372 struct learner_runtime *l = &t->learners[learner_id];
2373 struct learner_statistics *stats = &p->learner_stats[learner_id];
2378 TRACE(
"[Thread %2u] learner %u forget\n",
2382 stats->n_pkts_forget += 1;

/* Write the current table entry ID into a meta-data field. */
2389 __instr_entryid_exec(
struct rte_swx_pipeline *p
__rte_unused,
2391 const struct instruction *ip)
2393 TRACE(
"[Thread %2u]: entryid\n",
2397 METADATA_WRITE(t, ip->mov.dst.offset, ip->mov.dst.n_bits, t->entry_id);
/*
 * Extern object / extern function / hash function instructions.
 * The extern variants return the callee's "done" status (uint32_t) so the
 * pipeline can suspend/resume the thread around a multi-step extern call.
 * NOTE(review): garbled/incomplete view; comments cover visible lines only.
 */
2403 static inline uint32_t
2404 __instr_extern_obj_exec(
struct rte_swx_pipeline *p
__rte_unused,
2406 const struct instruction *ip)
2408 uint32_t obj_id = ip->ext_obj.ext_obj_id;
2409 uint32_t func_id = ip->ext_obj.func_id;
2410 struct extern_obj_runtime *obj = &t->extern_objs[obj_id];
2414 TRACE(
"[Thread %2u] extern obj %u member func %u\n",
/* Invoke the member function on the object with its mailbox. */
2419 done = func(obj->obj, obj->mailbox);

2424 static inline uint32_t
2425 __instr_extern_func_exec(
struct rte_swx_pipeline *p
__rte_unused,
2427 const struct instruction *ip)
2429 uint32_t ext_func_id = ip->ext_func.ext_func_id;
2430 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id];
2434 TRACE(
"[Thread %2u] extern func %u\n",
2438 done = func(ext_func->mailbox);

/*
 * Hash function instruction: hash n_src_bytes starting at src_offset within
 * the source struct (seed 0) and write the result into a meta-data field.
 */
2447 __instr_hash_func_exec(
struct rte_swx_pipeline *p,
2449 const struct instruction *ip)
2451 uint32_t hash_func_id = ip->hash_func.hash_func_id;
2452 uint32_t dst_offset = ip->hash_func.dst.offset;
2453 uint32_t n_dst_bits = ip->hash_func.dst.n_bits;
2454 uint32_t src_struct_id = ip->hash_func.src.struct_id;
2455 uint32_t src_offset = ip->hash_func.src.offset;
2456 uint32_t n_src_bytes = ip->hash_func.src.n_bytes;
2458 struct hash_func_runtime *func = &p->hash_func_runtime[hash_func_id];
2459 uint8_t *src_ptr = t->structs[src_struct_id];
2462 TRACE(
"[Thread %2u] hash %u\n",
2466 result = func->func(&src_ptr[src_offset], n_src_bytes, 0);
2467 METADATA_WRITE(t, dst_offset, n_dst_bits, result);
/*
 * MOV instruction variants. The suffix names the byte-order combination of
 * destination/source operands: (mh) = meta <- header, (hm) = header <- meta,
 * (hh) = header <- header, unsuffixed = meta <- meta.
 * NOTE(review): garbled/incomplete view; the MOV macro invocations that
 * perform the copy are not visible here.
 */
2474 __instr_mov_exec(
struct rte_swx_pipeline *p
__rte_unused,
2476 const struct instruction *ip)
2478 TRACE(
"[Thread %2u] mov\n", p->thread_id);

2484 __instr_mov_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2486 const struct instruction *ip)
2488 TRACE(
"[Thread %2u] mov (mh)\n", p->thread_id);

2494 __instr_mov_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2496 const struct instruction *ip)
2498 TRACE(
"[Thread %2u] mov (hm)\n", p->thread_id);

2504 __instr_mov_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2506 const struct instruction *ip)
2508 TRACE(
"[Thread %2u] mov (hh)\n", p->thread_id);
/*
 * Bulk MOV (dma): copy n bytes (n = dst.n_bits / 8) from the source struct
 * to the destination struct using progressively narrower copies: 8-byte
 * words first, then one optional 4-byte, 2-byte and 1-byte tail copy.
 * NOTE(review): garbled/incomplete view; the conditional guards around the
 * tail copies are not visible here.
 */
2514 __instr_mov_dma_exec(
struct rte_swx_pipeline *p
__rte_unused,
2516 const struct instruction *ip)
2518 uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2519 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2520 uint32_t *dst32_ptr;
2521 uint16_t *dst16_ptr;
2524 uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2525 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2526 uint32_t *src32_ptr;
2527 uint16_t *src16_ptr;
/* Total byte count: n_bits is a multiple of 8 here. */
2530 uint32_t n = ip->mov.dst.n_bits >> 3, i;
2532 TRACE(
"[Thread %2u] mov (dma) %u bytes\n", p->thread_id, n);
/* Copy 8 bytes at a time. */
2535 for (i = 0; i < n >> 3; i++)
2536 *dst64_ptr++ = *src64_ptr++;
/* Tail: at most one 4-byte copy. */
2540 dst32_ptr = (uint32_t *)dst64_ptr;
2541 src32_ptr = (uint32_t *)src64_ptr;
2543 for (i = 0; i < n >> 2; i++)
2544 *dst32_ptr++ = *src32_ptr++;
/* Tail: at most one 2-byte copy. */
2548 dst16_ptr = (uint16_t *)dst32_ptr;
2549 src16_ptr = (uint16_t *)src32_ptr;
2551 for (i = 0; i < n >> 1; i++)
2552 *dst16_ptr++ = *src16_ptr++;
/* Tail: at most one 1-byte copy. */
2556 dst8_ptr = (uint8_t *)dst16_ptr;
2557 src8_ptr = (uint8_t *)src16_ptr;
2559 *dst8_ptr = *src8_ptr;
/*
 * MOV of a 128-bit operand: copy as two 64-bit words.
 * NOTE(review): the 64-bit accesses assume suitable alignment of the
 * struct fields — confirm against full source.
 */
2563 __instr_mov_128_exec(
struct rte_swx_pipeline *p
__rte_unused,
2565 const struct instruction *ip)
2567 uint8_t *dst_struct = t->structs[ip->mov.dst.struct_id];
2568 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->mov.dst.offset];
2570 uint8_t *src_struct = t->structs[ip->mov.src.struct_id];
2571 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->mov.src.offset];
2573 TRACE(
"[Thread %2u] mov (128)\n", p->thread_id);
2575 dst64_ptr[0] = src64_ptr[0];
2576 dst64_ptr[1] = src64_ptr[1];

/* MOV with immediate source value (ip->mov.src_val). */
2580 __instr_mov_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2582 const struct instruction *ip)
2584 TRACE(
"[Thread %2u] mov m.f %" PRIx64
"\n", p->thread_id, ip->mov.src_val);
/*
 * DMA n_dma table-action-data-to-header copies: for each entry, copy n_bytes
 * from the action data (struct 0) into the header. The destination is the
 * live header bytes when the header is already valid, otherwise its private
 * buffer (ptr0); the header is then marked valid.
 * NOTE(review): garbled/incomplete view; the ternary's second arm (h_ptr vs
 * h_ptr0 selection) is partially missing.
 */
2593 __instr_dma_ht_many_exec(
struct rte_swx_pipeline *p
__rte_unused,
2595 const struct instruction *ip,
/* Action data lives in struct 0. */
2598 uint8_t *action_data = t->structs[0];
2599 uint64_t valid_headers = t->valid_headers;
2602 for (i = 0; i < n_dma; i++) {
2603 uint32_t header_id = ip->dma.dst.header_id[i];
2604 uint32_t struct_id = ip->dma.dst.struct_id[i];
2605 uint32_t offset = ip->dma.src.offset[i];
2606 uint32_t n_bytes = ip->dma.n_bytes[i];
2608 struct header_runtime *h = &t->headers[header_id];
2609 uint8_t *h_ptr0 = h->ptr0;
2610 uint8_t *h_ptr = t->structs[struct_id];
/* Valid header: write in place; invalid: write into the private buffer. */
2612 void *dst = MASK64_BIT_GET(valid_headers, header_id) ?
2614 void *src = &action_data[offset];
2616 TRACE(
"[Thread %2u] dma h.s t.f\n", p->thread_id);
2619 memcpy(dst, src, n_bytes);
2620 t->structs[struct_id] = dst;
2621 valid_headers = MASK64_BIT_SET(valid_headers, header_id);
2624 t->valid_headers = valid_headers;
2628 __instr_dma_ht_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2630 __instr_dma_ht_many_exec(p, t, ip, 1);
2634 __instr_dma_ht2_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2636 TRACE(
"[Thread %2u] *** The next 2 instructions are fused. ***\n", p->thread_id);
2638 __instr_dma_ht_many_exec(p, t, ip, 2);
2642 __instr_dma_ht3_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2644 TRACE(
"[Thread %2u] *** The next 3 instructions are fused. ***\n", p->thread_id);
2646 __instr_dma_ht_many_exec(p, t, ip, 3);
2650 __instr_dma_ht4_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2652 TRACE(
"[Thread %2u] *** The next 4 instructions are fused. ***\n", p->thread_id);
2654 __instr_dma_ht_many_exec(p, t, ip, 4);
2658 __instr_dma_ht5_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2660 TRACE(
"[Thread %2u] *** The next 5 instructions are fused. ***\n", p->thread_id);
2662 __instr_dma_ht_many_exec(p, t, ip, 5);
2666 __instr_dma_ht6_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2668 TRACE(
"[Thread %2u] *** The next 6 instructions are fused. ***\n", p->thread_id);
2670 __instr_dma_ht_many_exec(p, t, ip, 6);
2674 __instr_dma_ht7_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2676 TRACE(
"[Thread %2u] *** The next 7 instructions are fused. ***\n", p->thread_id);
2678 __instr_dma_ht_many_exec(p, t, ip, 7);
2682 __instr_dma_ht8_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
2684 TRACE(
"[Thread %2u] *** The next 8 instructions are fused. ***\n", p->thread_id);
2686 __instr_dma_ht_many_exec(p, t, ip, 8);
/*
 * ALU instruction family: add, sub, shl, shr, and, or, xor. The suffix
 * names the operand byte-order combination: (mh)/(hm)/(hh) mix meta-data
 * and header fields, (mi)/(hi)/(i) take an immediate source, unsuffixed is
 * meta-meta. The and/or/xor (hm)/(hh) variants use the ALU_HM_FAST /
 * ALU_HH_FAST macros (bitwise ops need no byte-order conversion fix-up).
 * NOTE(review): garbled/incomplete view; most ALU macro invocations are
 * missing from the visible lines — only the TRACE calls remain.
 */
2693 __instr_alu_add_exec(
struct rte_swx_pipeline *p
__rte_unused,
2695 const struct instruction *ip)
2697 TRACE(
"[Thread %2u] add\n", p->thread_id);

2703 __instr_alu_add_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2705 const struct instruction *ip)
2707 TRACE(
"[Thread %2u] add (mh)\n", p->thread_id);

2713 __instr_alu_add_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2715 const struct instruction *ip)
2717 TRACE(
"[Thread %2u] add (hm)\n", p->thread_id);

2723 __instr_alu_add_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2725 const struct instruction *ip)
2727 TRACE(
"[Thread %2u] add (hh)\n", p->thread_id);

2733 __instr_alu_add_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2735 const struct instruction *ip)
2737 TRACE(
"[Thread %2u] add (mi)\n", p->thread_id);

2743 __instr_alu_add_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2745 const struct instruction *ip)
2747 TRACE(
"[Thread %2u] add (hi)\n", p->thread_id);

/* Subtraction variants. */
2753 __instr_alu_sub_exec(
struct rte_swx_pipeline *p
__rte_unused,
2755 const struct instruction *ip)
2757 TRACE(
"[Thread %2u] sub\n", p->thread_id);

2763 __instr_alu_sub_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2765 const struct instruction *ip)
2767 TRACE(
"[Thread %2u] sub (mh)\n", p->thread_id);

2773 __instr_alu_sub_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2775 const struct instruction *ip)
2777 TRACE(
"[Thread %2u] sub (hm)\n", p->thread_id);

2783 __instr_alu_sub_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2785 const struct instruction *ip)
2787 TRACE(
"[Thread %2u] sub (hh)\n", p->thread_id);

2793 __instr_alu_sub_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2795 const struct instruction *ip)
2797 TRACE(
"[Thread %2u] sub (mi)\n", p->thread_id);

2803 __instr_alu_sub_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2805 const struct instruction *ip)
2807 TRACE(
"[Thread %2u] sub (hi)\n", p->thread_id);

/* Shift-left variants. */
2813 __instr_alu_shl_exec(
struct rte_swx_pipeline *p
__rte_unused,
2815 const struct instruction *ip)
2817 TRACE(
"[Thread %2u] shl\n", p->thread_id);

2823 __instr_alu_shl_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2825 const struct instruction *ip)
2827 TRACE(
"[Thread %2u] shl (mh)\n", p->thread_id);

2833 __instr_alu_shl_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2835 const struct instruction *ip)
2837 TRACE(
"[Thread %2u] shl (hm)\n", p->thread_id);

2843 __instr_alu_shl_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2845 const struct instruction *ip)
2847 TRACE(
"[Thread %2u] shl (hh)\n", p->thread_id);

2853 __instr_alu_shl_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2855 const struct instruction *ip)
2857 TRACE(
"[Thread %2u] shl (mi)\n", p->thread_id);

2863 __instr_alu_shl_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2865 const struct instruction *ip)
2867 TRACE(
"[Thread %2u] shl (hi)\n", p->thread_id);

/* Shift-right variants. */
2873 __instr_alu_shr_exec(
struct rte_swx_pipeline *p
__rte_unused,
2875 const struct instruction *ip)
2877 TRACE(
"[Thread %2u] shr\n", p->thread_id);

2883 __instr_alu_shr_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2885 const struct instruction *ip)
2887 TRACE(
"[Thread %2u] shr (mh)\n", p->thread_id);

2893 __instr_alu_shr_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2895 const struct instruction *ip)
2897 TRACE(
"[Thread %2u] shr (hm)\n", p->thread_id);

2903 __instr_alu_shr_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2905 const struct instruction *ip)
2907 TRACE(
"[Thread %2u] shr (hh)\n", p->thread_id);

2913 __instr_alu_shr_mi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2915 const struct instruction *ip)
2917 TRACE(
"[Thread %2u] shr (mi)\n", p->thread_id);

2924 __instr_alu_shr_hi_exec(
struct rte_swx_pipeline *p
__rte_unused,
2926 const struct instruction *ip)
2928 TRACE(
"[Thread %2u] shr (hi)\n", p->thread_id);

/* Bitwise AND variants. */
2934 __instr_alu_and_exec(
struct rte_swx_pipeline *p
__rte_unused,
2936 const struct instruction *ip)
2938 TRACE(
"[Thread %2u] and\n", p->thread_id);

2944 __instr_alu_and_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2946 const struct instruction *ip)
2948 TRACE(
"[Thread %2u] and (mh)\n", p->thread_id);

2954 __instr_alu_and_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
2956 const struct instruction *ip)
2958 TRACE(
"[Thread %2u] and (hm)\n", p->thread_id);
2960 ALU_HM_FAST(t, ip, &);

2964 __instr_alu_and_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2966 const struct instruction *ip)
2968 TRACE(
"[Thread %2u] and (hh)\n", p->thread_id);
2970 ALU_HH_FAST(t, ip, &);

2974 __instr_alu_and_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
2976 const struct instruction *ip)
2978 TRACE(
"[Thread %2u] and (i)\n", p->thread_id);

/* Bitwise OR variants. */
2984 __instr_alu_or_exec(
struct rte_swx_pipeline *p
__rte_unused,
2986 const struct instruction *ip)
2988 TRACE(
"[Thread %2u] or\n", p->thread_id);

2994 __instr_alu_or_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
2996 const struct instruction *ip)
2998 TRACE(
"[Thread %2u] or (mh)\n", p->thread_id);

3004 __instr_alu_or_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
3006 const struct instruction *ip)
3008 TRACE(
"[Thread %2u] or (hm)\n", p->thread_id);
3010 ALU_HM_FAST(t, ip, |);

3014 __instr_alu_or_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3016 const struct instruction *ip)
3018 TRACE(
"[Thread %2u] or (hh)\n", p->thread_id);
3020 ALU_HH_FAST(t, ip, |);

3024 __instr_alu_or_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
3026 const struct instruction *ip)
3028 TRACE(
"[Thread %2u] or (i)\n", p->thread_id);

/* Bitwise XOR variants. */
3034 __instr_alu_xor_exec(
struct rte_swx_pipeline *p
__rte_unused,
3036 const struct instruction *ip)
3038 TRACE(
"[Thread %2u] xor\n", p->thread_id);

3044 __instr_alu_xor_mh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3046 const struct instruction *ip)
3048 TRACE(
"[Thread %2u] xor (mh)\n", p->thread_id);

3054 __instr_alu_xor_hm_exec(
struct rte_swx_pipeline *p
__rte_unused,
3056 const struct instruction *ip)
3058 TRACE(
"[Thread %2u] xor (hm)\n", p->thread_id);
3060 ALU_HM_FAST(t, ip, ^);

3064 __instr_alu_xor_hh_exec(
struct rte_swx_pipeline *p
__rte_unused,
3066 const struct instruction *ip)
3068 TRACE(
"[Thread %2u] xor (hh)\n", p->thread_id);
3070 ALU_HH_FAST(t, ip, ^);

3074 __instr_alu_xor_i_exec(
struct rte_swx_pipeline *p
__rte_unused,
3076 const struct instruction *ip)
3078 TRACE(
"[Thread %2u] xor (i)\n", p->thread_id);
/*
 * Incremental Internet checksum update over a single field (RFC 1071/1624
 * style): fold the masked 64-bit source into the current 16-bit checksum
 * using repeated end-around carry folds (r = (r & 0xFFFF) + (r >> 16)).
 * ckadd adds the field; cksub removes it (the 0xFFFF00000ULL bias keeps the
 * intermediate sum non-negative before folding).
 * NOTE(review): garbled/incomplete view; the dst read/ones-complement steps
 * are not visible here — confirm against full source.
 */
3084 __instr_alu_ckadd_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
3086 const struct instruction *ip)
3088 uint8_t *dst_struct, *src_struct;
3089 uint16_t *dst16_ptr, dst;
3090 uint64_t *src64_ptr, src64, src64_mask, src;
3093 TRACE(
"[Thread %2u] ckadd (field)\n", p->thread_id);
3096 dst_struct = t->structs[ip->alu.dst.struct_id];
3097 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3100 src_struct = t->structs[ip->alu.src.struct_id];
3101 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
/* Keep only the low n_bits of the source field. */
3103 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3104 src = src64 & src64_mask;
/* Fold the 64-bit source into the running 32-bit+ sum. */
3114 r += (src >> 32) + (src & 0xFFFFFFFF);
/* End-around carry folds down to 16 bits. */
3120 r = (r & 0xFFFF) + (r >> 16);
3125 r = (r & 0xFFFF) + (r >> 16);
3132 r = (r & 0xFFFF) + (r >> 16);
3138 *dst16_ptr = (uint16_t)r;

3142 __instr_alu_cksub_field_exec(
struct rte_swx_pipeline *p
__rte_unused,
3144 const struct instruction *ip)
3146 uint8_t *dst_struct, *src_struct;
3147 uint16_t *dst16_ptr, dst;
3148 uint64_t *src64_ptr, src64, src64_mask, src;
3151 TRACE(
"[Thread %2u] cksub (field)\n", p->thread_id);
3154 dst_struct = t->structs[ip->alu.dst.struct_id];
3155 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3158 src_struct = t->structs[ip->alu.src.struct_id];
3159 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset];
3161 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits);
3162 src = src64 & src64_mask;
/* Bias keeps the sum non-negative before subtracting the field. */
3180 r += 0xFFFF00000ULL;
3185 r -= (src >> 32) + (src & 0xFFFFFFFF);
3190 r = (r & 0xFFFF) + (r >> 16);
3195 r = (r & 0xFFFF) + (r >> 16);
3202 r = (r & 0xFFFF) + (r >> 16);
3208 *dst16_ptr = (uint16_t)r;
/*
 * Internet checksum over a whole header struct. The 20-byte specialization
 * (standard IPv4 header without options) sums five fixed 32-bit words; the
 * generic version loops over n_src_header_bytes / 4 words and dispatches to
 * the 20-byte fast path when applicable. Result 0 is mapped to 0xFFFF
 * (checksum of 0 is transmitted as all-ones).
 * NOTE(review): garbled/incomplete view; comments cover visible lines only.
 */
3212 __instr_alu_ckadd_struct20_exec(
struct rte_swx_pipeline *p
__rte_unused,
3214 const struct instruction *ip)
3216 uint8_t *dst_struct, *src_struct;
3217 uint16_t *dst16_ptr, dst;
3218 uint32_t *src32_ptr;
3221 TRACE(
"[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id);
3224 dst_struct = t->structs[ip->alu.dst.struct_id];
3225 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3228 src_struct = t->structs[ip->alu.src.struct_id];
3229 src32_ptr = (uint32_t *)&src_struct[0];
/* Combine the partial sums with the fifth 32-bit word. */
3239 r0 += r1 + src32_ptr[4];
/* End-around carry folds down to 16 bits. */
3244 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3249 r0 = (r0 & 0xFFFF) + (r0 >> 16);
3256 r0 = (r0 & 0xFFFF) + (r0 >> 16);
/* A zero checksum is transmitted as 0xFFFF. */
3260 r0 = r0 ? r0 : 0xFFFF;
3262 *dst16_ptr = (uint16_t)r0;

3266 __instr_alu_ckadd_struct_exec(
struct rte_swx_pipeline *p
__rte_unused,
3268 const struct instruction *ip)
/* src.n_bits is repurposed here to carry the source header ID. */
3270 uint32_t src_header_id = ip->alu.src.n_bits;
3271 uint32_t n_src_header_bytes = t->headers[src_header_id].n_bytes;
3272 uint8_t *dst_struct, *src_struct;
3273 uint16_t *dst16_ptr, dst;
3274 uint32_t *src32_ptr;
/* Fast path for the common 20-byte (IPv4, no options) case. */
3278 if (n_src_header_bytes == 20) {
3279 __instr_alu_ckadd_struct20_exec(p, t, ip);
3283 TRACE(
"[Thread %2u] ckadd (struct)\n", p->thread_id);
3286 dst_struct = t->structs[ip->alu.dst.struct_id];
3287 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset];
3290 src_struct = t->structs[ip->alu.src.struct_id];
3291 src32_ptr = (uint32_t *)&src_struct[0];
/* Sum the header as 32-bit words. */
3301 for (i = 0; i < n_src_header_bytes / 4; i++, src32_ptr++)
3307 r = (r & 0xFFFF) + (r >> 16);
3312 r = (r & 0xFFFF) + (r >> 16);
3319 r = (r & 0xFFFF) + (r >> 16);
3325 *dst16_ptr = (uint16_t)r;
/*
 * Register array helpers: resolve the register array, compute the element
 * index (from a host-byte-order field, a network-byte-order field, or an
 * immediate — always masked by r->size_mask so it cannot go out of bounds),
 * read the source operand, and write the destination operand. On big-endian
 * targets the _nbo variants alias the _hbo ones via #define (no conversion
 * needed).
 * NOTE(review): garbled/incomplete view; some loads (e.g. *src64_ptr into
 * src64) and #endif markers are not visible here.
 */
3331 static inline uint64_t *
3332 instr_regarray_regarray(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3334 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];

/* Index from a host-byte-order struct field, clamped by size_mask. */
3338 static inline uint64_t
3339 instr_regarray_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3341 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3343 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3344 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3345 uint64_t idx64 = *idx64_ptr;
3346 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits);
3347 uint64_t idx = idx64 & idx64_mask & r->size_mask;

/* Index from a network-byte-order field (little-endian hosts only). */
3352 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3354 static inline uint64_t
3355 instr_regarray_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3357 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3359 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id];
3360 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset];
3361 uint64_t idx64 = *idx64_ptr;
/* Byte-swap then right-align the n_bits-wide field. */
3362 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask;

/* Big-endian hosts: no conversion needed. */
3369 #define instr_regarray_idx_nbo instr_regarray_idx_hbo 3373 static inline uint64_t
3374 instr_regarray_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3376 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id];
3378 uint64_t idx = ip->regarray.idx_val & r->size_mask;

/* Source operand from a host-byte-order struct field. */
3383 static inline uint64_t
3384 instr_regarray_src_hbo(
struct thread *t,
const struct instruction *ip)
3386 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3387 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3388 uint64_t src64 = *src64_ptr;
3389 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3390 uint64_t src = src64 & src64_mask;

/* Source operand from a network-byte-order field (little-endian hosts). */
3395 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3397 static inline uint64_t
3398 instr_regarray_src_nbo(
struct thread *t,
const struct instruction *ip)
3400 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id];
3401 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset];
3402 uint64_t src64 = *src64_ptr;
3403 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits);

/* Write src into a host-byte-order destination field (read-modify-write). */
3410 #define instr_regarray_src_nbo instr_regarray_src_hbo 3415 instr_regarray_dst_hbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3417 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3418 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3419 uint64_t dst64 = *dst64_ptr;
3420 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3422 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);

/* Write src into a network-byte-order destination (little-endian hosts). */
3426 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3429 instr_regarray_dst_nbo_src_hbo_set(
struct thread *t,
const struct instruction *ip, uint64_t src)
3431 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id];
3432 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset];
3433 uint64_t dst64 = *dst64_ptr;
3434 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits);
3436 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits);
3437 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/*
 * Register array prefetch instructions: resolve the array and the element
 * index (network-byte-order field, host-byte-order field, or immediate).
 * NOTE(review): garbled/incomplete view; the actual prefetch of
 * regarray[idx] is not visible in these lines — confirm against full source.
 */
3442 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set 3447 __instr_regprefetch_rh_exec(
struct rte_swx_pipeline *p,
3449 const struct instruction *ip)
3451 uint64_t *regarray, idx;
3453 TRACE(
"[Thread %2u] regprefetch (r[h])\n", p->thread_id);
3455 regarray = instr_regarray_regarray(p, ip);
3456 idx = instr_regarray_idx_nbo(p, t, ip);

3461 __instr_regprefetch_rm_exec(
struct rte_swx_pipeline *p,
3463 const struct instruction *ip)
3465 uint64_t *regarray, idx;
3467 TRACE(
"[Thread %2u] regprefetch (r[m])\n", p->thread_id);
3469 regarray = instr_regarray_regarray(p, ip);
3470 idx = instr_regarray_idx_hbo(p, t, ip);

3475 __instr_regprefetch_ri_exec(
struct rte_swx_pipeline *p,
3477 const struct instruction *ip)
3479 uint64_t *regarray, idx;
3481 TRACE(
"[Thread %2u] regprefetch (r[i])\n", p->thread_id);
3483 regarray = instr_regarray_regarray(p, ip);
3484 idx = instr_regarray_idx_imm(p, ip);
3489 __instr_regrd_hrh_exec(
struct rte_swx_pipeline *p,
3491 const struct instruction *ip)
3493 uint64_t *regarray, idx;
3495 TRACE(
"[Thread %2u] regrd (h = r[h])\n", p->thread_id);
3497 regarray = instr_regarray_regarray(p, ip);
3498 idx = instr_regarray_idx_nbo(p, t, ip);
3499 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3503 __instr_regrd_hrm_exec(
struct rte_swx_pipeline *p,
3505 const struct instruction *ip)
3507 uint64_t *regarray, idx;
3509 TRACE(
"[Thread %2u] regrd (h = r[m])\n", p->thread_id);
3512 regarray = instr_regarray_regarray(p, ip);
3513 idx = instr_regarray_idx_hbo(p, t, ip);
3514 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3518 __instr_regrd_mrh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3520 uint64_t *regarray, idx;
3522 TRACE(
"[Thread %2u] regrd (m = r[h])\n", p->thread_id);
3524 regarray = instr_regarray_regarray(p, ip);
3525 idx = instr_regarray_idx_nbo(p, t, ip);
3526 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3530 __instr_regrd_mrm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3532 uint64_t *regarray, idx;
3534 TRACE(
"[Thread %2u] regrd (m = r[m])\n", p->thread_id);
3536 regarray = instr_regarray_regarray(p, ip);
3537 idx = instr_regarray_idx_hbo(p, t, ip);
3538 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3542 __instr_regrd_hri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3544 uint64_t *regarray, idx;
3546 TRACE(
"[Thread %2u] regrd (h = r[i])\n", p->thread_id);
3548 regarray = instr_regarray_regarray(p, ip);
3549 idx = instr_regarray_idx_imm(p, ip);
3550 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]);
3554 __instr_regrd_mri_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3556 uint64_t *regarray, idx;
3558 TRACE(
"[Thread %2u] regrd (m = r[i])\n", p->thread_id);
3560 regarray = instr_regarray_regarray(p, ip);
3561 idx = instr_regarray_idx_imm(p, ip);
3562 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]);
3566 __instr_regwr_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3568 uint64_t *regarray, idx, src;
3570 TRACE(
"[Thread %2u] regwr (r[h] = h)\n", p->thread_id);
3572 regarray = instr_regarray_regarray(p, ip);
3573 idx = instr_regarray_idx_nbo(p, t, ip);
3574 src = instr_regarray_src_nbo(t, ip);
3575 regarray[idx] = src;
3579 __instr_regwr_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3581 uint64_t *regarray, idx, src;
3583 TRACE(
"[Thread %2u] regwr (r[h] = m)\n", p->thread_id);
3585 regarray = instr_regarray_regarray(p, ip);
3586 idx = instr_regarray_idx_nbo(p, t, ip);
3587 src = instr_regarray_src_hbo(t, ip);
3588 regarray[idx] = src;
3592 __instr_regwr_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3594 uint64_t *regarray, idx, src;
3596 TRACE(
"[Thread %2u] regwr (r[m] = h)\n", p->thread_id);
3598 regarray = instr_regarray_regarray(p, ip);
3599 idx = instr_regarray_idx_hbo(p, t, ip);
3600 src = instr_regarray_src_nbo(t, ip);
3601 regarray[idx] = src;
3605 __instr_regwr_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3607 uint64_t *regarray, idx, src;
3609 TRACE(
"[Thread %2u] regwr (r[m] = m)\n", p->thread_id);
3611 regarray = instr_regarray_regarray(p, ip);
3612 idx = instr_regarray_idx_hbo(p, t, ip);
3613 src = instr_regarray_src_hbo(t, ip);
3614 regarray[idx] = src;
3618 __instr_regwr_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3620 uint64_t *regarray, idx, src;
3622 TRACE(
"[Thread %2u] regwr (r[h] = i)\n", p->thread_id);
3624 regarray = instr_regarray_regarray(p, ip);
3625 idx = instr_regarray_idx_nbo(p, t, ip);
3626 src = ip->regarray.dstsrc_val;
3627 regarray[idx] = src;
3631 __instr_regwr_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3633 uint64_t *regarray, idx, src;
3635 TRACE(
"[Thread %2u] regwr (r[m] = i)\n", p->thread_id);
3637 regarray = instr_regarray_regarray(p, ip);
3638 idx = instr_regarray_idx_hbo(p, t, ip);
3639 src = ip->regarray.dstsrc_val;
3640 regarray[idx] = src;
3644 __instr_regwr_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3646 uint64_t *regarray, idx, src;
3648 TRACE(
"[Thread %2u] regwr (r[i] = h)\n", p->thread_id);
3650 regarray = instr_regarray_regarray(p, ip);
3651 idx = instr_regarray_idx_imm(p, ip);
3652 src = instr_regarray_src_nbo(t, ip);
3653 regarray[idx] = src;
3657 __instr_regwr_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3659 uint64_t *regarray, idx, src;
3661 TRACE(
"[Thread %2u] regwr (r[i] = m)\n", p->thread_id);
3663 regarray = instr_regarray_regarray(p, ip);
3664 idx = instr_regarray_idx_imm(p, ip);
3665 src = instr_regarray_src_hbo(t, ip);
3666 regarray[idx] = src;
3670 __instr_regwr_rii_exec(
struct rte_swx_pipeline *p,
3672 const struct instruction *ip)
3674 uint64_t *regarray, idx, src;
3676 TRACE(
"[Thread %2u] regwr (r[i] = i)\n", p->thread_id);
3678 regarray = instr_regarray_regarray(p, ip);
3679 idx = instr_regarray_idx_imm(p, ip);
3680 src = ip->regarray.dstsrc_val;
3681 regarray[idx] = src;
/*
 * regadd instruction family: regarray[idx] += src. The three-letter suffix
 * encodes the (index, source) operand kinds: h = header field (network byte
 * order), m = meta-data/action-data field (host byte order), i = immediate.
 * NOTE(review): this whole region is a garbled extraction — every function
 * here lost its "static inline void" linkage, braces and blank lines, and
 * the original file's line numbers are fused into the text. The code tokens
 * below are preserved byte-for-byte; restore structure from upstream DPDK.
 */
/* regadd r[h] += h: nbo index, nbo source. */
3685 __instr_regadd_rhh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3687 uint64_t *regarray, idx, src;
3689 TRACE(
"[Thread %2u] regadd (r[h] += h)\n", p->thread_id);
3691 regarray = instr_regarray_regarray(p, ip);
3692 idx = instr_regarray_idx_nbo(p, t, ip);
3693 src = instr_regarray_src_nbo(t, ip);
3694 regarray[idx] += src;
/* regadd r[h] += m: nbo index, hbo source. */
3698 __instr_regadd_rhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3700 uint64_t *regarray, idx, src;
3702 TRACE(
"[Thread %2u] regadd (r[h] += m)\n", p->thread_id);
3704 regarray = instr_regarray_regarray(p, ip);
3705 idx = instr_regarray_idx_nbo(p, t, ip);
3706 src = instr_regarray_src_hbo(t, ip);
3707 regarray[idx] += src;
/* regadd r[m] += h: hbo index, nbo source. */
3711 __instr_regadd_rmh_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3713 uint64_t *regarray, idx, src;
3715 TRACE(
"[Thread %2u] regadd (r[m] += h)\n", p->thread_id);
3717 regarray = instr_regarray_regarray(p, ip);
3718 idx = instr_regarray_idx_hbo(p, t, ip);
3719 src = instr_regarray_src_nbo(t, ip);
3720 regarray[idx] += src;
/* regadd r[m] += m: hbo index, hbo source. */
3724 __instr_regadd_rmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3726 uint64_t *regarray, idx, src;
3728 TRACE(
"[Thread %2u] regadd (r[m] += m)\n", p->thread_id);
3730 regarray = instr_regarray_regarray(p, ip);
3731 idx = instr_regarray_idx_hbo(p, t, ip);
3732 src = instr_regarray_src_hbo(t, ip);
3733 regarray[idx] += src;
/* regadd r[h] += i: nbo index, immediate source. */
3737 __instr_regadd_rhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3739 uint64_t *regarray, idx, src;
3741 TRACE(
"[Thread %2u] regadd (r[h] += i)\n", p->thread_id);
3743 regarray = instr_regarray_regarray(p, ip);
3744 idx = instr_regarray_idx_nbo(p, t, ip);
3745 src = ip->regarray.dstsrc_val;
3746 regarray[idx] += src;
/* regadd r[m] += i: hbo index, immediate source. */
3750 __instr_regadd_rmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3752 uint64_t *regarray, idx, src;
3754 TRACE(
"[Thread %2u] regadd (r[m] += i)\n", p->thread_id);
3756 regarray = instr_regarray_regarray(p, ip);
3757 idx = instr_regarray_idx_hbo(p, t, ip);
3758 src = ip->regarray.dstsrc_val;
3759 regarray[idx] += src;
/* regadd r[i] += h: immediate index, nbo source. */
3763 __instr_regadd_rih_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3765 uint64_t *regarray, idx, src;
3767 TRACE(
"[Thread %2u] regadd (r[i] += h)\n", p->thread_id);
3769 regarray = instr_regarray_regarray(p, ip);
3770 idx = instr_regarray_idx_imm(p, ip);
3771 src = instr_regarray_src_nbo(t, ip);
3772 regarray[idx] += src;
/* regadd r[i] += m: immediate index, hbo source. */
3776 __instr_regadd_rim_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3778 uint64_t *regarray, idx, src;
3780 TRACE(
"[Thread %2u] regadd (r[i] += m)\n", p->thread_id);
3782 regarray = instr_regarray_regarray(p, ip);
3783 idx = instr_regarray_idx_imm(p, ip);
3784 src = instr_regarray_src_hbo(t, ip);
3785 regarray[idx] += src;
3789 __instr_regadd_rii_exec(
struct rte_swx_pipeline *p,
3791 const struct instruction *ip)
3793 uint64_t *regarray, idx, src;
3795 TRACE(
"[Thread %2u] regadd (r[i] += i)\n", p->thread_id);
3797 regarray = instr_regarray_regarray(p, ip);
3798 idx = instr_regarray_idx_imm(p, ip);
3799 src = ip->regarray.dstsrc_val;
3800 regarray[idx] += src;
/*
 * Resolve the meter array element for this instruction when the index
 * operand lives in a host-byte-order struct field. The raw 64-bit read is
 * masked to the operand's bit width, then clamped to the array size
 * (r->size_mask), so out-of-range indices wrap instead of overflowing.
 * NOTE(review): garbled extraction — braces/blank lines lost, original line
 * numbers fused into the text; tokens preserved byte-for-byte.
 */
3806 static inline struct meter *
3807 instr_meter_idx_hbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3809 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3811 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
/* 64-bit read at the field offset; assumes in-bounds, aligned access — TODO confirm upstream guarantees. */
3812 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3813 uint64_t idx64 = *idx64_ptr;
3814 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits);
3815 uint64_t idx = idx64 & idx64_mask & r->size_mask;
3817 return &r->metarray[idx];
/*
 * Little-endian-only variant: the index operand is a network-byte-order
 * header field, so byte-swap (ntoh64) and right-shift to extract the
 * top n_bits before clamping to the array size. On big-endian builds the
 * hbo variant is aliased instead (see the #define fallback below).
 * NOTE(review): garbled extraction — the "#if" and the function header are
 * fused onto one line and braces/#else are missing.
 */
3820 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3822 static inline struct meter *
3823 instr_meter_idx_nbo(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3825 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3827 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id];
3828 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset];
3829 uint64_t idx64 = *idx64_ptr;
/* Swap to host order, keep the top n_bits of the field, clamp to array size. */
3830 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask;
3832 return &r->metarray[idx];
/*
 * Big-endian fallback: nbo and hbo index extraction are identical, so alias
 * the hbo helper. Then: resolve the meter array element from an immediate
 * index baked into the instruction, clamped to the array size.
 * NOTE(review): garbled extraction — the "#else"/"#endif" around the #define
 * and the function's linkage/braces were lost; the #define and the function
 * header are fused onto one line.
 */
3837 #define instr_meter_idx_nbo instr_meter_idx_hbo 3841 static inline struct meter *
3842 instr_meter_idx_imm(
struct rte_swx_pipeline *p,
const struct instruction *ip)
3844 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id];
3846 uint64_t idx = ip->meter.idx_val & r->size_mask;
3848 return &r->metarray[idx];
/*
 * Read the packet length operand for a meter instruction from a
 * host-byte-order struct field, masked to the operand's bit width.
 * NOTE(review): garbled extraction — braces/blank lines lost.
 */
3851 static inline uint32_t
3852 instr_meter_length_hbo(
struct thread *t,
const struct instruction *ip)
3854 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3855 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3856 uint64_t src64 = *src64_ptr;
3857 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits);
3858 uint64_t src = src64 & src64_mask;
/* Length operands are at most 32 bits wide, so the narrowing cast is safe — TODO confirm. */
3860 return (uint32_t)src;
/*
 * Little-endian-only variant: the length operand is a network-byte-order
 * header field — byte-swap then keep the top n_bits. Big-endian builds alias
 * the hbo variant instead.
 * NOTE(review): garbled extraction — "#if" fused with the function header,
 * braces and the matching "#else" missing.
 */
3863 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 3865 static inline uint32_t
3866 instr_meter_length_nbo(
struct thread *t,
const struct instruction *ip)
3868 uint8_t *src_struct = t->structs[ip->meter.length.struct_id];
3869 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset];
3870 uint64_t src64 = *src64_ptr;
3871 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits);
3873 return (uint32_t)src;
3878 #define instr_meter_length_nbo instr_meter_length_hbo 3883 instr_meter_color_in_hbo(
struct thread *t,
const struct instruction *ip)
3885 uint8_t *src_struct = t->structs[ip->meter.color_in.struct_id];
3886 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset];
3887 uint64_t src64 = *src64_ptr;
3888 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits);
3889 uint64_t src = src64 & src64_mask;
/*
 * Write the meter output color back into a host-byte-order struct field:
 * read-modify-write of the containing 64-bit word, touching only the
 * operand's n_bits (dst64_mask).
 * NOTE(review): garbled extraction — the "static inline void" linkage, the
 * "enum rte_color color_out" parameter line and the braces were lost.
 */
3895 instr_meter_color_out_hbo_set(
struct thread *t,
3896 const struct instruction *ip,
3899 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id];
3900 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset];
3901 uint64_t dst64 = *dst64_ptr;
3902 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits);
/* color_out parameter (declaration lost in extraction) widened to 64 bits. */
3904 uint64_t src = (uint64_t)color_out;
/* Preserve bits outside the field, replace bits inside it. */
3906 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask);
/*
 * metprefetch (h): resolve the meter element via a network-byte-order header
 * index and prefetch it ahead of the meter instruction.
 * NOTE(review): garbled extraction — the "struct thread *t" parameter line,
 * the "struct meter *m;" declaration and the trailing rte_prefetch0(m) call
 * appear to have been dropped; restore from upstream DPDK.
 */
3910 __instr_metprefetch_h_exec(
struct rte_swx_pipeline *p,
3912 const struct instruction *ip)
3916 TRACE(
"[Thread %2u] metprefetch (h)\n", p->thread_id);
3918 m = instr_meter_idx_nbo(p, t, ip);
/*
 * metprefetch (m): resolve the meter element via a host-byte-order meta-data
 * index and prefetch it.
 * NOTE(review): garbled extraction — thread parameter, meter declaration and
 * the rte_prefetch0(m) call appear dropped; restore from upstream DPDK.
 */
3923 __instr_metprefetch_m_exec(
struct rte_swx_pipeline *p,
3925 const struct instruction *ip)
3929 TRACE(
"[Thread %2u] metprefetch (m)\n", p->thread_id);
3931 m = instr_meter_idx_hbo(p, t, ip);
/*
 * metprefetch (i): resolve the meter element via an immediate index and
 * prefetch it.
 * NOTE(review): garbled extraction — meter declaration and the
 * rte_prefetch0(m) call appear dropped; restore from upstream DPDK.
 */
3936 __instr_metprefetch_i_exec(
struct rte_swx_pipeline *p,
3938 const struct instruction *ip)
3942 TRACE(
"[Thread %2u] metprefetch (i)\n", p->thread_id);
3944 m = instr_meter_idx_imm(p, ip);
/*
 * meter instruction family, header-indexed variants (hhm/hhi/hmm/hmi).
 * Suffix letters = (index, length, color_in) operand kinds: h = header field
 * (nbo), m = meta-data field (hbo), i = immediate. Each variant resolves the
 * meter, runs the trTCM color-aware check, masks the result with the meter's
 * color mask, updates per-color packet/byte stats and writes color_out back.
 * NOTE(review): garbled extraction — every function lost its linkage, braces,
 * local declarations (struct meter *m; length; color_in/color_out), the
 * "time = rte_get_tsc_cycles()" line and the body of the
 * rte_meter_trtcm_color_aware_check() call (only the "&m->profile->profile,"
 * argument line survives). Tokens below preserved byte-for-byte; restore the
 * missing lines from upstream DPDK.
 */
/* meter hhm: nbo index, nbo length, hbo input color. */
3949 __instr_meter_hhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3952 uint64_t time, n_pkts, n_bytes;
3956 TRACE(
"[Thread %2u] meter (hhm)\n", p->thread_id);
3958 m = instr_meter_idx_nbo(p, t, ip);
3961 length = instr_meter_length_nbo(t, ip);
3962 color_in = instr_meter_color_in_hbo(t, ip);
3965 &m->profile->profile,
3970 color_out &= m->color_mask;
3972 n_pkts = m->n_pkts[color_out];
3973 n_bytes = m->n_bytes[color_out];
3975 instr_meter_color_out_hbo_set(t, ip, color_out);
3977 m->n_pkts[color_out] = n_pkts + 1;
3978 m->n_bytes[color_out] = n_bytes + length;
/* meter hhi: nbo index, nbo length, immediate input color. */
3982 __instr_meter_hhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
3985 uint64_t time, n_pkts, n_bytes;
3989 TRACE(
"[Thread %2u] meter (hhi)\n", p->thread_id);
3991 m = instr_meter_idx_nbo(p, t, ip);
3994 length = instr_meter_length_nbo(t, ip);
3995 color_in = (
enum rte_color)ip->meter.color_in_val;
3998 &m->profile->profile,
4003 color_out &= m->color_mask;
4005 n_pkts = m->n_pkts[color_out];
4006 n_bytes = m->n_bytes[color_out];
4008 instr_meter_color_out_hbo_set(t, ip, color_out);
4010 m->n_pkts[color_out] = n_pkts + 1;
4011 m->n_bytes[color_out] = n_bytes + length;
/* meter hmm: nbo index, hbo length, hbo input color. */
4015 __instr_meter_hmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4018 uint64_t time, n_pkts, n_bytes;
4022 TRACE(
"[Thread %2u] meter (hmm)\n", p->thread_id);
4024 m = instr_meter_idx_nbo(p, t, ip);
4027 length = instr_meter_length_hbo(t, ip);
4028 color_in = instr_meter_color_in_hbo(t, ip);
4031 &m->profile->profile,
4036 color_out &= m->color_mask;
4038 n_pkts = m->n_pkts[color_out];
4039 n_bytes = m->n_bytes[color_out];
4041 instr_meter_color_out_hbo_set(t, ip, color_out);
4043 m->n_pkts[color_out] = n_pkts + 1;
4044 m->n_bytes[color_out] = n_bytes + length;
/* meter hmi: nbo index, hbo length, immediate input color. */
4048 __instr_meter_hmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4051 uint64_t time, n_pkts, n_bytes;
4055 TRACE(
"[Thread %2u] meter (hmi)\n", p->thread_id);
4057 m = instr_meter_idx_nbo(p, t, ip);
4060 length = instr_meter_length_hbo(t, ip);
4061 color_in = (
enum rte_color)ip->meter.color_in_val;
4064 &m->profile->profile,
4069 color_out &= m->color_mask;
4071 n_pkts = m->n_pkts[color_out];
4072 n_bytes = m->n_bytes[color_out];
4074 instr_meter_color_out_hbo_set(t, ip, color_out);
4076 m->n_pkts[color_out] = n_pkts + 1;
4077 m->n_bytes[color_out] = n_bytes + length;
/*
 * meter instruction family, meta-data-indexed variants (mhm/mhi/mmm/mmi):
 * hbo meter index; length/color_in per the suffix (h = nbo header field,
 * m = hbo field, i = immediate). Same metering + stats-update flow as the
 * header-indexed variants.
 * NOTE(review): garbled extraction — linkage, braces, local declarations,
 * the timestamp read and the rte_meter_trtcm_color_aware_check() call body
 * were dropped; tokens preserved byte-for-byte.
 */
/* meter mhm: hbo index, nbo length, hbo input color. */
4081 __instr_meter_mhm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4084 uint64_t time, n_pkts, n_bytes;
4088 TRACE(
"[Thread %2u] meter (mhm)\n", p->thread_id);
4090 m = instr_meter_idx_hbo(p, t, ip);
4093 length = instr_meter_length_nbo(t, ip);
4094 color_in = instr_meter_color_in_hbo(t, ip);
4097 &m->profile->profile,
4102 color_out &= m->color_mask;
4104 n_pkts = m->n_pkts[color_out];
4105 n_bytes = m->n_bytes[color_out];
4107 instr_meter_color_out_hbo_set(t, ip, color_out);
4109 m->n_pkts[color_out] = n_pkts + 1;
4110 m->n_bytes[color_out] = n_bytes + length;
/* meter mhi: hbo index, nbo length, immediate input color. */
4114 __instr_meter_mhi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4117 uint64_t time, n_pkts, n_bytes;
4121 TRACE(
"[Thread %2u] meter (mhi)\n", p->thread_id);
4123 m = instr_meter_idx_hbo(p, t, ip);
4126 length = instr_meter_length_nbo(t, ip);
4127 color_in = (
enum rte_color)ip->meter.color_in_val;
4130 &m->profile->profile,
4135 color_out &= m->color_mask;
4137 n_pkts = m->n_pkts[color_out];
4138 n_bytes = m->n_bytes[color_out];
4140 instr_meter_color_out_hbo_set(t, ip, color_out);
4142 m->n_pkts[color_out] = n_pkts + 1;
4143 m->n_bytes[color_out] = n_bytes + length;
/* meter mmm: hbo index, hbo length, hbo input color. */
4147 __instr_meter_mmm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4150 uint64_t time, n_pkts, n_bytes;
4154 TRACE(
"[Thread %2u] meter (mmm)\n", p->thread_id);
4156 m = instr_meter_idx_hbo(p, t, ip);
4159 length = instr_meter_length_hbo(t, ip);
4160 color_in = instr_meter_color_in_hbo(t, ip);
4163 &m->profile->profile,
4168 color_out &= m->color_mask;
4170 n_pkts = m->n_pkts[color_out];
4171 n_bytes = m->n_bytes[color_out];
4173 instr_meter_color_out_hbo_set(t, ip, color_out);
4175 m->n_pkts[color_out] = n_pkts + 1;
4176 m->n_bytes[color_out] = n_bytes + length;
/* meter mmi: hbo index, hbo length, immediate input color. */
4180 __instr_meter_mmi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4183 uint64_t time, n_pkts, n_bytes;
4187 TRACE(
"[Thread %2u] meter (mmi)\n", p->thread_id);
4189 m = instr_meter_idx_hbo(p, t, ip);
4192 length = instr_meter_length_hbo(t, ip);
4193 color_in = (
enum rte_color)ip->meter.color_in_val;
4196 &m->profile->profile,
4201 color_out &= m->color_mask;
4203 n_pkts = m->n_pkts[color_out];
4204 n_bytes = m->n_bytes[color_out];
4206 instr_meter_color_out_hbo_set(t, ip, color_out);
4208 m->n_pkts[color_out] = n_pkts + 1;
4209 m->n_bytes[color_out] = n_bytes + length;
/*
 * meter instruction family, immediate-indexed variants (ihm/ihi/imm/imi):
 * immediate meter index; length/color_in per the suffix (h = nbo header
 * field, m = hbo field, i = immediate). Same metering + stats-update flow
 * as the other variants.
 * NOTE(review): garbled extraction — linkage, braces, local declarations,
 * the timestamp read and the rte_meter_trtcm_color_aware_check() call body
 * were dropped; tokens preserved byte-for-byte.
 */
/* meter ihm: immediate index, nbo length, hbo input color. */
4213 __instr_meter_ihm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4216 uint64_t time, n_pkts, n_bytes;
4220 TRACE(
"[Thread %2u] meter (ihm)\n", p->thread_id);
4222 m = instr_meter_idx_imm(p, ip);
4225 length = instr_meter_length_nbo(t, ip);
4226 color_in = instr_meter_color_in_hbo(t, ip);
4229 &m->profile->profile,
4234 color_out &= m->color_mask;
4236 n_pkts = m->n_pkts[color_out];
4237 n_bytes = m->n_bytes[color_out];
4239 instr_meter_color_out_hbo_set(t, ip, color_out);
4241 m->n_pkts[color_out] = n_pkts + 1;
4242 m->n_bytes[color_out] = n_bytes + length;
/* meter ihi: immediate index, nbo length, immediate input color. */
4246 __instr_meter_ihi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4249 uint64_t time, n_pkts, n_bytes;
4253 TRACE(
"[Thread %2u] meter (ihi)\n", p->thread_id);
4255 m = instr_meter_idx_imm(p, ip);
4258 length = instr_meter_length_nbo(t, ip);
4259 color_in = (
enum rte_color)ip->meter.color_in_val;
4262 &m->profile->profile,
4267 color_out &= m->color_mask;
4269 n_pkts = m->n_pkts[color_out];
4270 n_bytes = m->n_bytes[color_out];
4272 instr_meter_color_out_hbo_set(t, ip, color_out);
4274 m->n_pkts[color_out] = n_pkts + 1;
4275 m->n_bytes[color_out] = n_bytes + length;
/* meter imm: immediate index, hbo length, hbo input color. */
4279 __instr_meter_imm_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4282 uint64_t time, n_pkts, n_bytes;
4286 TRACE(
"[Thread %2u] meter (imm)\n", p->thread_id);
4288 m = instr_meter_idx_imm(p, ip);
4291 length = instr_meter_length_hbo(t, ip);
4292 color_in = instr_meter_color_in_hbo(t, ip);
4295 &m->profile->profile,
4300 color_out &= m->color_mask;
4302 n_pkts = m->n_pkts[color_out];
4303 n_bytes = m->n_bytes[color_out];
4305 instr_meter_color_out_hbo_set(t, ip, color_out);
4307 m->n_pkts[color_out] = n_pkts + 1;
4308 m->n_bytes[color_out] = n_bytes + length;
/* meter imi: immediate index, hbo length, immediate input color. */
4312 __instr_meter_imi_exec(
struct rte_swx_pipeline *p,
struct thread *t,
const struct instruction *ip)
4315 uint64_t time, n_pkts, n_bytes;
4319 TRACE(
"[Thread %2u] meter (imi)\n", p->thread_id);
4321 m = instr_meter_idx_imm(p, ip);
4324 length = instr_meter_length_hbo(t, ip);
4325 color_in = (
enum rte_color)ip->meter.color_in_val;
4328 &m->profile->profile,
4333 color_out &= m->color_mask;
4335 n_pkts = m->n_pkts[color_out];
4336 n_bytes = m->n_bytes[color_out];
4338 instr_meter_color_out_hbo_set(t, ip, color_out);
4340 m->n_pkts[color_out] = n_pkts + 1;
4341 m->n_bytes[color_out] = n_bytes + length;
/*
 * NOTE(review): the following lines are extraction residue — a doxygen-style
 * index of related API signatures, not compilable C. Wrapped in a comment to
 * keep them from breaking the translation unit; preserved verbatim:
 *
 * uint32_t(* rte_swx_hash_func_t)(const void *key, uint32_t length, uint32_t seed)
 * int(* rte_swx_table_lookup_t)(void *table, void *mailbox, uint8_t **key, uint64_t *action_id, uint8_t **action_data, size_t *entry_id, int *hit)
 * int(* rte_swx_extern_func_t)(void *mailbox)
 * __rte_experimental void rte_swx_table_learner_delete(void *table, void *mailbox)
 * __rte_experimental void rte_swx_table_learner_rearm_new(void *table, void *mailbox, uint64_t time, uint32_t key_timeout_id)
 * __rte_experimental uint32_t rte_swx_table_learner_add(void *table, void *mailbox, uint64_t time, uint64_t action_id, uint8_t *action_data, uint32_t key_timeout_id)
 * void(* rte_swx_port_out_flush_t)(void *port)
 * static enum rte_color rte_meter_trtcm_color_aware_check(struct rte_meter_trtcm *m, struct rte_meter_trtcm_profile *p, uint64_t time, uint32_t pkt_len, enum rte_color pkt_color)
 * static uint64_t rte_get_tsc_cycles(void)
 * void(* rte_swx_port_out_pkt_clone_tx_t)(void *port, struct rte_swx_pkt *pkt, uint32_t truncation_length)
 * void(* rte_swx_port_out_pkt_fast_clone_tx_t)(void *port, struct rte_swx_pkt *pkt)
 * void(* rte_swx_extern_type_destructor_t)(void *object)
 * void *(* rte_swx_extern_type_constructor_t)(const char *args)
 * void(* rte_swx_port_out_pkt_tx_t)(void *port, struct rte_swx_pkt *pkt)
 * __rte_experimental void rte_swx_table_learner_rearm(void *table, void *mailbox, uint64_t time)
 * #define RTE_SWX_TABLE_LEARNER_N_KEY_TIMEOUTS_MAX
 * #define RTE_SWX_NAME_SIZE
 * int(* rte_swx_extern_type_member_func_t)(void *object, void *mailbox)
 * static void rte_prefetch0(const volatile void *p)
 * int(* rte_swx_port_in_pkt_rx_t)(void *port, struct rte_swx_pkt *pkt)
 */