2 * Routines for NVM Express over Fabrics(RDMA) dissection
6 * Wireshark - Network traffic analyzer
7 * By Gerald Combs <gerald@wireshark.org>
8 * Copyright 1998 Gerald Combs
10 * SPDX-License-Identifier: GPL-2.0-or-later
14 NVM Express is a high-speed interface for accessing solid-state drives.
15 NVM Express specifications are maintained by the NVM Express industry
16 association at http://www.nvmexpress.org.
18 This file adds support to dissect NVM Express over fabrics packets
19 for RDMA. This adds very basic support for dissecting commands
22 Current dissection supports dissection of
24 (b) NVMe Fabric command and cqe
25 As part of it, it also calculates cmd completion latencies.
27 This protocol is similar to iSCSI and SCSI dissection where iSCSI is
28 the transport protocol for carrying SCSI commands and responses. Similarly,
29 NVMe Fabrics - RDMA transport protocol carries NVMe commands.
35 +-----------+---------+
37 +----+-----------+----+
44 The NVM Express over Fabrics specification is located at
45 http://www.nvmexpress.org/wp-content/uploads/NVMe_over_Fabrics_1_0_Gold_20160605.pdf
47 The NVM Express specification is located at
48 http://www.nvmexpress.org/wp-content/uploads/NVM-Express-1_2a.pdf
50 NVM Express RDMA TCP port assigned by IANA that maps to RDMA IP service
51 TCP port can be found at
52 http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=NVM+Express
60 #include <epan/packet.h>
61 #include <epan/prefs.h>
62 #include <epan/conversation.h>
63 #include <epan/addr_resolv.h>
65 #include "packet-infiniband.h"
66 #include "packet-nvme.h"
68 #define SID_ULP_MASK 0x00000000FF000000
69 #define SID_PROTO_MASK 0x0000000000FF0000
70 #define SID_PORT_MASK 0x000000000000FFFF
73 #define SID_PROTO_TCP 0x06
74 #define NVME_RDMA_TCP_PORT_RANGE "4420" /* IANA registered */
76 #define SID_MASK (SID_ULP_MASK | SID_PROTO_MASK)
77 #define SID_ULP_TCP ((SID_ULP << 3 * 8) | (SID_PROTO_TCP << 2 * 8))
79 #define NVME_FABRICS_RDMA "NVMe Fabrics RDMA"
81 #define NVME_FABRIC_CMD_SIZE NVME_CMD_SIZE
82 #define NVME_FABRIC_CQE_SIZE NVME_CQE_SIZE
84 #define NVME_FABRIC_OPC 0x7F
86 #define NVME_FCTYPE_CONNECT 0x1
87 #define NVME_FCTYPE_AUTH_RECV 0x6
88 #define NVME_FCTYPE_PROP_GET 0x4
89 #define NVME_FCTYPE_PROP_SET 0x0
91 static const value_string fctype_tbl[] = {
92 { NVME_FCTYPE_CONNECT, "Connect"},
93 { NVME_FCTYPE_PROP_GET, "Property Get"},
94 { NVME_FCTYPE_PROP_SET, "Property Set"},
95 { NVME_FCTYPE_AUTH_RECV, "Authentication Recv"},
99 static const value_string prop_offset_tbl[] = {
100 { 0x0, "Controller Capabilities"},
104 { 0x14, "Controller Configuration"},
106 { 0x1c, "Controller Status"},
107 { 0x20, "NVM Subsystem Reset"},
117 static const value_string attr_size_tbl[] = {
123 struct nvme_rdma_q_ctx {
124 struct nvme_q_ctx n_q_ctx;
127 struct nvme_rdma_cmd_ctx {
128 struct nvme_cmd_ctx n_cmd_ctx;
129 guint8 fctype; /* fabric cmd type */
132 void proto_reg_handoff_nvme_rdma(void);
133 void proto_register_nvme_rdma(void);
135 static int proto_nvme_rdma = -1;
136 static dissector_handle_t ib_handler;
137 static int proto_ib = -1;
139 /* NVMe Fabrics RDMA CM Private data */
140 static int hf_nvme_rdma_cm_req_recfmt = -1;
141 static int hf_nvme_rdma_cm_req_qid = -1;
142 static int hf_nvme_rdma_cm_req_hrqsize = -1;
143 static int hf_nvme_rdma_cm_req_hsqsize = -1;
144 static int hf_nvme_rdma_cm_req_reserved = -1;
146 static int hf_nvme_rdma_cm_rsp_recfmt = -1;
147 static int hf_nvme_rdma_cm_rsp_crqsize = -1;
148 static int hf_nvme_rdma_cm_rsp_reserved = -1;
150 static int hf_nvme_rdma_cm_rej_recfmt = -1;
151 static int hf_nvme_rdma_cm_rej_status = -1;
152 static int hf_nvme_rdma_cm_rej_reserved = -1;
154 /* NVMe Fabric Cmd */
155 static int hf_nvme_rdma_cmd = -1;
156 static int hf_nvme_rdma_from_host_unknown_data = -1;
158 static int hf_nvme_rdma_cmd_opc = -1;
159 static int hf_nvme_rdma_cmd_rsvd = -1;
160 static int hf_nvme_rdma_cmd_cid = -1;
161 static int hf_nvme_rdma_cmd_fctype = -1;
162 static int hf_nvme_rdma_cmd_connect_rsvd1 = -1;
163 static int hf_nvme_rdma_cmd_connect_sgl1 = -1;
164 static int hf_nvme_rdma_cmd_connect_recfmt = -1;
165 static int hf_nvme_rdma_cmd_connect_qid = -1;
166 static int hf_nvme_rdma_cmd_connect_sqsize = -1;
167 static int hf_nvme_rdma_cmd_connect_cattr = -1;
168 static int hf_nvme_rdma_cmd_connect_rsvd2 = -1;
169 static int hf_nvme_rdma_cmd_connect_kato = -1;
170 static int hf_nvme_rdma_cmd_connect_rsvd3 = -1;
171 static int hf_nvme_rdma_cmd_data = -1;
172 static int hf_nvme_rdma_cmd_connect_data_hostid = -1;
173 static int hf_nvme_rdma_cmd_connect_data_cntlid = -1;
174 static int hf_nvme_rdma_cmd_connect_data_rsvd = -1;
175 static int hf_nvme_rdma_cmd_connect_data_subnqn = -1;
176 static int hf_nvme_rdma_cmd_connect_data_hostnqn = -1;
177 static int hf_nvme_rdma_cmd_connect_data_rsvd1 = -1;
179 static int hf_nvme_rdma_cmd_prop_attr_rsvd = -1;
180 static int hf_nvme_rdma_cmd_prop_attr_rsvd1 = -1;
181 static int hf_nvme_rdma_cmd_prop_attr_size = -1;
182 static int hf_nvme_rdma_cmd_prop_attr_rsvd2 = -1;
183 static int hf_nvme_rdma_cmd_prop_attr_offset = -1;
184 static int hf_nvme_rdma_cmd_prop_attr_get_rsvd3 = -1;
185 static int hf_nvme_rdma_cmd_prop_attr_set_4B_value = -1;
186 static int hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd = -1;
187 static int hf_nvme_rdma_cmd_prop_attr_set_8B_value = -1;
188 static int hf_nvme_rdma_cmd_prop_attr_set_rsvd3 = -1;
190 static int hf_nvme_rdma_cmd_generic_rsvd1 = -1;
191 static int hf_nvme_rdma_cmd_generic_field = -1;
193 /* NVMe Fabric CQE */
194 static int hf_nvme_rdma_cqe = -1;
195 static int hf_nvme_rdma_cqe_sts = -1;
196 static int hf_nvme_rdma_cqe_sqhd = -1;
197 static int hf_nvme_rdma_cqe_rsvd = -1;
198 static int hf_nvme_rdma_cqe_cid = -1;
199 static int hf_nvme_rdma_cqe_status = -1;
200 static int hf_nvme_rdma_cqe_status_rsvd = -1;
202 static int hf_nvme_rdma_cqe_connect_cntlid = -1;
203 static int hf_nvme_rdma_cqe_connect_authreq = -1;
204 static int hf_nvme_rdma_cqe_connect_rsvd = -1;
205 static int hf_nvme_rdma_cqe_prop_set_rsvd = -1;
207 static int hf_nvme_rdma_to_host_unknown_data = -1;
209 /* tracking Cmd and its respective CQE */
210 static int hf_nvme_rdma_cmd_pkt = -1;
211 static int hf_nvme_rdma_cqe_pkt = -1;
212 static int hf_nvme_rdma_cmd_latency = -1;
213 static int hf_nvme_rdma_cmd_qid = -1;
215 /* Initialize the subtree pointers */
216 static gint ett_cm = -1;
217 static gint ett_data = -1;
219 static range_t *gPORT_RANGE;
221 static conversation_infiniband_data *get_conversion_data(conversation_t *conv)
223 conversation_infiniband_data *conv_data;
225 conv_data = (conversation_infiniband_data *)conversation_get_proto_data(conv, proto_ib);
229 if ((conv_data->service_id & SID_MASK) != SID_ULP_TCP)
230 return NULL; /* the service id doesn't match that of TCP ULP - nothing for us to do here */
232 if (!(value_is_in_range(gPORT_RANGE, (guint32)(conv_data->service_id & SID_PORT_MASK))))
233 return NULL; /* the port doesn't match that of NVM Express Fabrics - nothing for us to do here */
237 static conversation_t*
238 find_ib_conversation(packet_info *pinfo, conversation_infiniband_data **uni_conv_data)
240 conversation_t *conv;
241 conversation_infiniband_data *conv_data;
243 conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst,
244 ENDPOINT_IBQP, pinfo->destport, pinfo->destport,
245 NO_ADDR_B|NO_PORT_B);
247 return NULL; /* nothing to do with no conversation context */
249 conv_data = get_conversion_data(conv);
250 *uni_conv_data = conv_data;
254 /* now that we found unidirectional conversation, find bidirectional
255 * conversation, so that we can relate to nvme q.
257 return find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
258 ENDPOINT_IBQP, pinfo->srcport, pinfo->destport, 0);
261 static guint16 find_nvme_qid(packet_info *pinfo)
263 conversation_t *conv;
264 conversation_infiniband_data *conv_data;
267 conv = find_conversation(pinfo->num, &pinfo->dst, &pinfo->dst,
268 ENDPOINT_IBQP, pinfo->destport, pinfo->destport,
269 NO_ADDR_B|NO_PORT_B);
271 return 0; /* nothing to do with no conversation context */
273 conv_data = get_conversion_data(conv);
277 if (conv_data->client_to_server == FALSE) {
278 memcpy(&qid, &conv_data->mad_private_data[178], 2);
281 conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->src,
282 ENDPOINT_IBQP, conv_data->src_qp, conv_data->src_qp,
283 NO_ADDR_B|NO_PORT_B);
286 conv_data = get_conversion_data(conv);
289 memcpy(&qid, &conv_data->mad_private_data[178], 2);
293 static struct nvme_rdma_q_ctx*
294 find_add_q_ctx(packet_info *pinfo, conversation_t *conv)
296 struct nvme_rdma_q_ctx *q_ctx;
299 q_ctx = (struct nvme_rdma_q_ctx*)conversation_get_proto_data(conv, proto_nvme_rdma);
301 qid = find_nvme_qid(pinfo);
302 q_ctx = wmem_new(wmem_file_scope(), struct nvme_rdma_q_ctx);
303 q_ctx->n_q_ctx.pending_cmds = wmem_tree_new(wmem_file_scope());
304 q_ctx->n_q_ctx.done_cmds = wmem_tree_new(wmem_file_scope());
305 q_ctx->n_q_ctx.data_requests = wmem_tree_new(wmem_file_scope());
306 q_ctx->n_q_ctx.data_responses = wmem_tree_new(wmem_file_scope());
307 q_ctx->n_q_ctx.qid = qid;
308 conversation_add_proto_data(conv, proto_nvme_rdma, q_ctx);
313 static conversation_infiniband_data*
314 find_ib_cm_conversation(packet_info *pinfo)
316 conversation_t *conv;
318 conv = find_conversation(pinfo->num, &pinfo->src, &pinfo->dst,
319 ENDPOINT_IBQP, pinfo->srcport, pinfo->destport, 0);
323 return get_conversion_data(conv);
326 static void dissect_rdma_cm_req_packet(tvbuff_t *tvb, proto_tree *tree)
329 proto_item *ti, *qid_item;
330 /* private data is at offset of 36 bytes */
334 /* create display subtree for private data */
335 ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, offset, 32, ENC_NA);
336 cm_tree = proto_item_add_subtree(ti, ett_cm);
338 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_recfmt, tvb,
339 offset + 0, 2, ENC_LITTLE_ENDIAN);
341 qid_item = proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_qid, tvb,
342 offset + 2, 2, ENC_LITTLE_ENDIAN);
343 qid = tvb_get_guint16(tvb, offset + 2, ENC_LITTLE_ENDIAN);
344 proto_item_append_text(qid_item, " %s", qid ? "IOQ" : "AQ");
346 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_hrqsize, tvb,
347 offset + 4, 2, ENC_LITTLE_ENDIAN);
348 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_hsqsize, tvb,
349 offset + 6, 2, ENC_LITTLE_ENDIAN);
350 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_req_reserved, tvb,
351 offset + 8, 24, ENC_NA);
354 static void dissect_rdma_cm_rsp_packet(tvbuff_t *tvb, proto_tree *tree)
359 /* create display subtree for the private datat that start at offset 0 */
360 ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA);
361 cm_tree = proto_item_add_subtree(ti, ett_cm);
363 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_recfmt, tvb,
364 0, 2, ENC_LITTLE_ENDIAN);
365 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_crqsize, tvb,
366 2, 2, ENC_LITTLE_ENDIAN);
367 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rsp_reserved, tvb,
371 static void dissect_rdma_cm_rej_packet(tvbuff_t *tvb, proto_tree *tree)
376 /* create display subtree for the private datat that start at offset 0 */
377 ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, 32, ENC_NA);
378 cm_tree = proto_item_add_subtree(ti, ett_cm);
380 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_recfmt, tvb,
381 0, 2, ENC_LITTLE_ENDIAN);
382 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_status, tvb,
383 2, 2, ENC_LITTLE_ENDIAN);
384 proto_tree_add_item(cm_tree, hf_nvme_rdma_cm_rej_reserved, tvb,
388 static int dissect_rdma_cm_packet(tvbuff_t *tvb, proto_tree *tree,
389 guint16 cm_attribute_id)
391 switch (cm_attribute_id) {
393 dissect_rdma_cm_req_packet(tvb, tree);
396 dissect_rdma_cm_rsp_packet(tvb, tree);
399 dissect_rdma_cm_rej_packet(tvb, tree);
408 dissect_nvme_ib_cm(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
411 conversation_infiniband_data *conv_data = NULL;
412 struct infinibandinfo *info = (struct infinibandinfo *)data;
414 conv_data = find_ib_cm_conversation(pinfo);
418 col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA);
419 return dissect_rdma_cm_packet(tvb, tree, info->cm_attribute_id);
422 static void dissect_nvme_fabric_connect_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
424 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd1, cmd_tvb,
426 dissect_nvme_cmd_sgl(cmd_tvb, cmd_tree, hf_nvme_rdma_cmd_connect_sgl1, NULL);
427 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_recfmt, cmd_tvb,
428 40, 2, ENC_LITTLE_ENDIAN);
429 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_qid, cmd_tvb,
430 42, 2, ENC_LITTLE_ENDIAN);
431 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_sqsize, cmd_tvb,
432 44, 2, ENC_LITTLE_ENDIAN);
433 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_cattr, cmd_tvb,
434 46, 1, ENC_LITTLE_ENDIAN);
435 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd2, cmd_tvb,
437 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_kato, cmd_tvb,
438 48, 4, ENC_LITTLE_ENDIAN);
439 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_connect_rsvd3, cmd_tvb,
443 static guint8 dissect_nvme_fabric_prop_cmd_common(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
445 proto_item *attr_item, *offset_item;
449 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd, cmd_tvb,
451 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd1, cmd_tvb,
452 40, 1, ENC_LITTLE_ENDIAN);
453 attr_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_size, cmd_tvb,
454 40, 1, ENC_LITTLE_ENDIAN);
455 attr = tvb_get_guint8(cmd_tvb, 40) & 0x7;
456 proto_item_append_text(attr_item, " %s",
457 val_to_str(attr, attr_size_tbl, "Reserved"));
459 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_rsvd2, cmd_tvb,
462 offset_item = proto_tree_add_item_ret_uint(cmd_tree, hf_nvme_rdma_cmd_prop_attr_offset,
463 cmd_tvb, 44, 4, ENC_LITTLE_ENDIAN, &offset);
464 proto_item_append_text(offset_item, " %s",
465 val_to_str(offset, prop_offset_tbl, "Unknown Property"));
469 static void dissect_nvme_fabric_prop_get_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
471 dissect_nvme_fabric_prop_cmd_common(cmd_tree, cmd_tvb);
472 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_get_rsvd3, cmd_tvb,
476 static void dissect_nvme_fabric_prop_set_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
480 attr = dissect_nvme_fabric_prop_cmd_common(cmd_tree, cmd_tvb);
482 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_4B_value, cmd_tvb,
483 48, 4, ENC_LITTLE_ENDIAN);
484 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd, cmd_tvb,
485 52, 4, ENC_LITTLE_ENDIAN);
487 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_8B_value, cmd_tvb,
488 48, 8, ENC_LITTLE_ENDIAN);
490 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_prop_attr_set_rsvd3, cmd_tvb,
494 static void dissect_nvme_fabric_generic_cmd(proto_tree *cmd_tree, tvbuff_t *cmd_tvb)
496 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_generic_rsvd1, cmd_tvb,
498 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_generic_field, cmd_tvb,
502 static struct nvme_rdma_cmd_ctx*
503 bind_cmd_to_qctx(packet_info *pinfo, struct nvme_q_ctx *q_ctx,
506 struct nvme_rdma_cmd_ctx *ctx;
508 if (!PINFO_FD_VISITED(pinfo)) {
509 ctx = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx);
511 nvme_add_cmd_to_pending_list(pinfo, q_ctx,
512 &ctx->n_cmd_ctx, (void*)ctx, cmd_id);
514 /* Already visited this frame */
515 ctx = (struct nvme_rdma_cmd_ctx*)
516 nvme_lookup_cmd_in_done_list(pinfo, q_ctx, cmd_id);
517 /* if we have already visited frame but haven't found completion yet,
518 * we won't find cmd in done q, so allocate a dummy ctx for doing
519 * rest of the processing.
522 ctx = wmem_new0(wmem_file_scope(), struct nvme_rdma_cmd_ctx);
528 dissect_nvme_fabric_cmd(tvbuff_t *nvme_tvb, proto_tree *nvme_tree,
529 struct nvme_rdma_cmd_ctx *cmd_ctx)
531 proto_tree *cmd_tree;
532 proto_item *ti, *opc_item, *fctype_item;
535 fctype = tvb_get_guint8(nvme_tvb, 4);
536 cmd_ctx->fctype = fctype;
538 ti = proto_tree_add_item(nvme_tree, hf_nvme_rdma_cmd, nvme_tvb, 0,
539 NVME_FABRIC_CMD_SIZE, ENC_NA);
540 cmd_tree = proto_item_add_subtree(ti, ett_data);
542 opc_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_opc, nvme_tvb,
543 0, 1, ENC_LITTLE_ENDIAN);
544 proto_item_append_text(opc_item, "%s", " Fabric Cmd");
546 nvme_publish_cmd_to_cqe_link(cmd_tree, nvme_tvb, hf_nvme_rdma_cqe_pkt,
547 &cmd_ctx->n_cmd_ctx);
549 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_rsvd, nvme_tvb,
551 proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_cid, nvme_tvb,
552 2, 2, ENC_LITTLE_ENDIAN);
554 fctype_item = proto_tree_add_item(cmd_tree, hf_nvme_rdma_cmd_fctype,
556 4, 1, ENC_LITTLE_ENDIAN);
557 proto_item_append_text(fctype_item, " %s",
558 val_to_str(fctype, fctype_tbl, "Unknown FcType"));
561 case NVME_FCTYPE_CONNECT:
562 dissect_nvme_fabric_connect_cmd(cmd_tree, nvme_tvb);
564 case NVME_FCTYPE_PROP_GET:
565 dissect_nvme_fabric_prop_get_cmd(cmd_tree, nvme_tvb);
567 case NVME_FCTYPE_PROP_SET:
568 dissect_nvme_fabric_prop_set_cmd(cmd_tree, nvme_tvb);
570 case NVME_FCTYPE_AUTH_RECV:
572 dissect_nvme_fabric_generic_cmd(cmd_tree, nvme_tvb);
578 dissect_nvme_fabric_connect_cmd_data(tvbuff_t *data_tvb, proto_tree *data_tree,
581 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_hostid, data_tvb,
583 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_cntlid, data_tvb,
584 offset + 16, 2, ENC_LITTLE_ENDIAN);
585 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_rsvd, data_tvb,
586 offset + 18, 238, ENC_NA);
587 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_subnqn, data_tvb,
588 offset + 256, 256, ENC_ASCII | ENC_NA);
589 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_hostnqn, data_tvb,
590 offset + 512, 256, ENC_ASCII | ENC_NA);
591 proto_tree_add_item(data_tree, hf_nvme_rdma_cmd_connect_data_rsvd1, data_tvb,
592 offset + 768, 256, ENC_NA);
596 dissect_nvme_fabric_data(tvbuff_t *nvme_tvb, proto_tree *nvme_tree,
597 guint len, guint8 fctype)
599 proto_tree *data_tree;
602 ti = proto_tree_add_item(nvme_tree, hf_nvme_rdma_cmd_data, nvme_tvb, 0,
604 data_tree = proto_item_add_subtree(ti, ett_data);
607 case NVME_FCTYPE_CONNECT:
608 dissect_nvme_fabric_connect_cmd_data(nvme_tvb, data_tree,
609 NVME_FABRIC_CMD_SIZE);
612 proto_tree_add_item(data_tree, hf_nvme_rdma_from_host_unknown_data,
613 nvme_tvb, 0, len, ENC_NA);
619 dissect_nvme_rdma_cmd(tvbuff_t *nvme_tvb, packet_info *pinfo, proto_tree *root_tree,
620 proto_tree *nvme_tree, struct nvme_rdma_q_ctx *q_ctx,
623 struct nvme_rdma_cmd_ctx *cmd_ctx;
627 opcode = tvb_get_guint8(nvme_tvb, 0);
628 cmd_id = tvb_get_guint16(nvme_tvb, 2, ENC_LITTLE_ENDIAN);
629 cmd_ctx = bind_cmd_to_qctx(pinfo, &q_ctx->n_q_ctx, cmd_id);
630 if (opcode == NVME_FABRIC_OPC) {
631 cmd_ctx->n_cmd_ctx.fabric = TRUE;
632 dissect_nvme_fabric_cmd(nvme_tvb, nvme_tree, cmd_ctx);
633 len -= NVME_FABRIC_CMD_SIZE;
635 dissect_nvme_fabric_data(nvme_tvb, nvme_tree, len, cmd_ctx->fctype);
637 cmd_ctx->n_cmd_ctx.fabric = FALSE;
638 dissect_nvme_cmd(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx,
639 &cmd_ctx->n_cmd_ctx);
640 if (cmd_ctx->n_cmd_ctx.remote_key) {
641 nvme_add_data_request(pinfo, &q_ctx->n_q_ctx,
642 &cmd_ctx->n_cmd_ctx, (void*)cmd_ctx);
648 dissect_nvme_from_host(tvbuff_t *nvme_tvb, packet_info *pinfo,
649 proto_tree *root_tree, proto_tree *nvme_tree,
650 struct infinibandinfo *info,
651 struct nvme_rdma_q_ctx *q_ctx,
655 switch (info->opCode) {
657 if (len >= NVME_FABRIC_CMD_SIZE)
658 dissect_nvme_rdma_cmd(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx, len);
660 proto_tree_add_item(nvme_tree, hf_nvme_rdma_from_host_unknown_data, nvme_tvb,
664 proto_tree_add_item(nvme_tree, hf_nvme_rdma_from_host_unknown_data, nvme_tvb,
671 dissect_nvme_rdma_cqe_status_8B(proto_tree *cqe_tree, tvbuff_t *cqe_tvb,
672 struct nvme_rdma_cmd_ctx *cmd_ctx)
674 switch (cmd_ctx->fctype) {
675 case NVME_FCTYPE_CONNECT:
676 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_cntlid, cqe_tvb,
677 0, 2, ENC_LITTLE_ENDIAN);
678 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_authreq, cqe_tvb,
679 2, 2, ENC_LITTLE_ENDIAN);
680 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_connect_rsvd, cqe_tvb,
683 case NVME_FCTYPE_PROP_GET:
684 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_sts, cqe_tvb,
685 0, 8, ENC_LITTLE_ENDIAN);
687 case NVME_FCTYPE_PROP_SET:
688 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_prop_set_rsvd, cqe_tvb,
691 case NVME_FCTYPE_AUTH_RECV:
693 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_sts, cqe_tvb,
694 0, 8, ENC_LITTLE_ENDIAN);
700 dissect_nvme_fabric_cqe(tvbuff_t *nvme_tvb,
701 proto_tree *nvme_tree,
702 struct nvme_rdma_cmd_ctx *cmd_ctx)
704 proto_tree *cqe_tree;
707 ti = proto_tree_add_item(nvme_tree, hf_nvme_rdma_cqe, nvme_tvb,
708 0, NVME_FABRIC_CQE_SIZE, ENC_NA);
709 proto_item_append_text(ti, " (For Cmd: %s)", val_to_str(cmd_ctx->fctype,
710 fctype_tbl, "Unknown Cmd"));
712 cqe_tree = proto_item_add_subtree(ti, ett_data);
714 nvme_publish_cqe_to_cmd_link(cqe_tree, nvme_tvb, hf_nvme_rdma_cmd_pkt,
715 &cmd_ctx->n_cmd_ctx);
716 nvme_publish_cmd_latency(cqe_tree, &cmd_ctx->n_cmd_ctx, hf_nvme_rdma_cmd_latency);
718 dissect_nvme_rdma_cqe_status_8B(cqe_tree, nvme_tvb, cmd_ctx);
720 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_sqhd, nvme_tvb,
722 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_rsvd, nvme_tvb,
723 10, 2, ENC_LITTLE_ENDIAN);
724 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_cid, nvme_tvb,
725 12, 2, ENC_LITTLE_ENDIAN);
726 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_status, nvme_tvb,
727 14, 2, ENC_LITTLE_ENDIAN);
728 proto_tree_add_item(cqe_tree, hf_nvme_rdma_cqe_status_rsvd, nvme_tvb,
729 14, 2, ENC_LITTLE_ENDIAN);
733 dissect_nvme_rdma_cqe(tvbuff_t *nvme_tvb, packet_info *pinfo,
734 proto_tree *root_tree, proto_tree *nvme_tree,
735 struct nvme_rdma_q_ctx *q_ctx)
737 struct nvme_rdma_cmd_ctx *cmd_ctx;
740 cmd_id = tvb_get_guint16(nvme_tvb, 12, ENC_LITTLE_ENDIAN);
742 if (!PINFO_FD_VISITED(pinfo)) {
744 cmd_ctx = (struct nvme_rdma_cmd_ctx*)
745 nvme_lookup_cmd_in_pending_list(&q_ctx->n_q_ctx, cmd_id);
749 /* we have already seen this cqe, or an identical one */
750 if (cmd_ctx->n_cmd_ctx.cqe_pkt_num)
753 cmd_ctx->n_cmd_ctx.cqe_pkt_num = pinfo->num;
754 nvme_add_cmd_cqe_to_done_list(&q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx, cmd_id);
756 /* Already visited this frame */
757 cmd_ctx = (struct nvme_rdma_cmd_ctx*)
758 nvme_lookup_cmd_in_done_list(pinfo, &q_ctx->n_q_ctx, cmd_id);
763 nvme_update_cmd_end_info(pinfo, &cmd_ctx->n_cmd_ctx);
765 if (cmd_ctx->n_cmd_ctx.fabric)
766 dissect_nvme_fabric_cqe(nvme_tvb, nvme_tree, cmd_ctx);
768 dissect_nvme_cqe(nvme_tvb, pinfo, root_tree, &cmd_ctx->n_cmd_ctx);
772 proto_tree_add_item(nvme_tree, hf_nvme_rdma_to_host_unknown_data, nvme_tvb,
773 0, NVME_FABRIC_CQE_SIZE, ENC_NA);
777 dissect_nvme_to_host(tvbuff_t *nvme_tvb, packet_info *pinfo,
778 proto_tree *root_tree, proto_tree *nvme_tree,
779 struct infinibandinfo *info,
780 struct nvme_rdma_q_ctx *q_ctx, guint len)
782 struct nvme_rdma_cmd_ctx *cmd_ctx;
784 switch (info->opCode) {
786 case RC_SEND_ONLY_INVAL:
787 if (len == NVME_FABRIC_CQE_SIZE)
788 dissect_nvme_rdma_cqe(nvme_tvb, pinfo, root_tree, nvme_tree, q_ctx);
790 proto_tree_add_item(nvme_tree, hf_nvme_rdma_to_host_unknown_data, nvme_tvb,
793 case RC_RDMA_WRITE_ONLY:
794 case RC_RDMA_WRITE_FIRST:
795 if (!PINFO_FD_VISITED(pinfo)) {
796 cmd_ctx = (struct nvme_rdma_cmd_ctx*)
797 nvme_lookup_data_request(&q_ctx->n_q_ctx,
798 info->reth_remote_key);
800 cmd_ctx->n_cmd_ctx.data_resp_pkt_num = pinfo->num;
801 nvme_add_data_response(&q_ctx->n_q_ctx, &cmd_ctx->n_cmd_ctx,
802 info->reth_remote_key);
805 cmd_ctx = (struct nvme_rdma_cmd_ctx*)
806 nvme_lookup_data_response(pinfo, &q_ctx->n_q_ctx,
807 info->reth_remote_key);
810 dissect_nvme_data_response(nvme_tvb, pinfo, root_tree, &q_ctx->n_q_ctx,
811 &cmd_ctx->n_cmd_ctx, len);
814 proto_tree_add_item(nvme_tree, hf_nvme_rdma_to_host_unknown_data, nvme_tvb,
821 dissect_nvme_ib(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree, void *data)
823 struct infinibandinfo *info = (struct infinibandinfo *)data;
824 conversation_infiniband_data *conv_data = NULL;
825 conversation_t *conv;
826 proto_tree *nvme_tree;
828 struct nvme_rdma_q_ctx *q_ctx;
829 guint len = tvb_reported_length(tvb);
831 conv = find_ib_conversation(pinfo, &conv_data);
835 q_ctx = find_add_q_ctx(pinfo, conv);
839 col_set_str(pinfo->cinfo, COL_PROTOCOL, NVME_FABRICS_RDMA);
841 ti = proto_tree_add_item(tree, proto_nvme_rdma, tvb, 0, len, ENC_NA);
842 nvme_tree = proto_item_add_subtree(ti, ett_data);
844 nvme_publish_qid(nvme_tree, hf_nvme_rdma_cmd_qid, q_ctx->n_q_ctx.qid);
846 if (conv_data->client_to_server)
847 dissect_nvme_from_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len);
849 dissect_nvme_to_host(tvb, pinfo, tree, nvme_tree, info, q_ctx, len);
855 proto_register_nvme_rdma(void)
857 module_t *nvme_rdma_module;
858 static hf_register_info hf[] = {
859 /* IB RDMA CM fields */
860 { &hf_nvme_rdma_cm_req_recfmt,
861 { "Recfmt", "nvme-rdma.cm.req.recfmt",
862 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
864 { &hf_nvme_rdma_cm_req_qid,
865 { "Qid", "nvme-rdma.cm.req.qid",
866 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
868 { &hf_nvme_rdma_cm_req_hrqsize,
869 { "HrqSize", "nvme-rdma.cm.req.hrqsize",
870 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
872 { &hf_nvme_rdma_cm_req_hsqsize,
873 { "HsqSize", "nvme-rdma.cm.req.hsqsize",
874 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
876 { &hf_nvme_rdma_cm_req_reserved,
877 { "Reserved", "nvme-rdma.cm.req.reserved",
878 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
880 { &hf_nvme_rdma_cm_rsp_recfmt,
881 { "Recfmt", "nvme-rdma.cm.rsp.recfmt",
882 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
884 { &hf_nvme_rdma_cm_rsp_crqsize,
885 { "CrqSize", "nvme-rdma.cm.rsp.crqsize",
886 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
888 { &hf_nvme_rdma_cm_rsp_reserved,
889 { "Reserved", "nvme-rdma.cm.rsp.reserved",
890 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
892 { &hf_nvme_rdma_cm_rej_recfmt,
893 { "Recfmt", "nvme-rdma.cm.rej.recfmt",
894 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
896 { &hf_nvme_rdma_cm_rej_status,
897 { "Status", "nvme-rdma.cm.rej.status",
898 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
900 { &hf_nvme_rdma_cm_rej_reserved,
901 { "Reserved", "nvme-rdma.cm.rej.reserved",
902 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
904 /* IB RDMA NVMe Command fields */
906 { "Cmd", "nvme-rdma.cmd",
907 FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
909 { &hf_nvme_rdma_from_host_unknown_data,
910 { "Dissection unsupported", "nvme-rdma.unknown_data",
911 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
913 { &hf_nvme_rdma_cmd_opc,
914 { "Opcode", "nvme-rdma.cmd.opc",
915 FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
917 { &hf_nvme_rdma_cmd_rsvd,
918 { "Reserved", "nvme-rdma.cmd.rsvd",
919 FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
921 { &hf_nvme_rdma_cmd_cid,
922 { "Command ID", "nvme-rdma.cmd.cid",
923 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
925 { &hf_nvme_rdma_cmd_fctype,
926 { "Fabric Cmd Type", "nvme-rdma.cmd.fctype",
927 FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
929 { &hf_nvme_rdma_cmd_connect_rsvd1,
930 { "Reserved", "nvme-rdma.cmd.connect.rsvd1",
931 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
933 { &hf_nvme_rdma_cmd_connect_sgl1,
934 { "SGL1", "nvme-rdma.cmd.connect.sgl1",
935 FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
937 { &hf_nvme_rdma_cmd_connect_recfmt,
938 { "Record Format", "nvme-rdma.cmd.connect.recfmt",
939 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
941 { &hf_nvme_rdma_cmd_connect_qid,
942 { "Queue ID", "nvme-rdma.cmd.connect.qid",
943 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
945 { &hf_nvme_rdma_cmd_connect_sqsize,
946 { "SQ Size", "nvme-rdma.cmd.connect.sqsize",
947 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
949 { &hf_nvme_rdma_cmd_connect_cattr,
950 { "Connect Attributes", "nvme-rdma.cmd.connect.cattr",
951 FT_UINT8, BASE_HEX, NULL, 0x0, NULL, HFILL}
953 { &hf_nvme_rdma_cmd_connect_rsvd2,
954 { "Reserved", "nvme-rdma.cmd.connect.rsvd2",
955 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
957 { &hf_nvme_rdma_cmd_connect_kato,
958 { "Keep Alive Timeout", "nvme-rdma.cmd.connect.kato",
959 FT_UINT32, BASE_DEC|BASE_UNIT_STRING, &units_milliseconds, 0x0, NULL, HFILL}
961 { &hf_nvme_rdma_cmd_connect_rsvd3,
962 { "Reserved", "nvme-rdma.cmd.connect.rsvd3",
963 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
965 { &hf_nvme_rdma_cmd_data,
966 { "Data", "nvme-rdma.cmd.data",
967 FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
969 { &hf_nvme_rdma_cmd_connect_data_hostid,
970 { "Host Identifier", "nvme-rdma.cmd.connect.data.hostid",
971 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
973 { &hf_nvme_rdma_cmd_connect_data_cntlid,
974 { "Controller ID", "nvme-rdma.cmd.connect.data.cntrlid",
975 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
977 { &hf_nvme_rdma_cmd_connect_data_rsvd,
978 { "Reserved", "nvme-rdma.cmd.connect.data.rsvd",
979 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
981 { &hf_nvme_rdma_cmd_connect_data_subnqn,
982 { "Subsystem NQN", "nvme-rdma.cmd.connect.data.subnqn",
983 FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}
985 { &hf_nvme_rdma_cmd_connect_data_hostnqn,
986 { "Host NQN", "nvme-rdma.cmd.connect.data.hostnqn",
987 FT_STRING, BASE_NONE, NULL, 0x0, NULL, HFILL}
989 { &hf_nvme_rdma_cmd_connect_data_rsvd1,
990 { "Reserved", "nvme-rdma.cmd.connect.data.rsvd1",
991 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
993 { &hf_nvme_rdma_cmd_prop_attr_rsvd,
994 { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd",
995 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
997 { &hf_nvme_rdma_cmd_prop_attr_rsvd1,
998 { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd1",
999 FT_UINT8, BASE_HEX, NULL, 0xf8, NULL, HFILL}
1001 { &hf_nvme_rdma_cmd_prop_attr_size,
1002 { "Property Size", "nvme-rdma.cmd.prop_attr.size",
1003 FT_UINT8, BASE_HEX, NULL, 0x7, NULL, HFILL}
1005 { &hf_nvme_rdma_cmd_prop_attr_rsvd2,
1006 { "Reserved", "nvme-rdma.cmd.prop_attr.rsvd2",
1007 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1009 { &hf_nvme_rdma_cmd_prop_attr_offset,
1010 { "Offset", "nvme-rdma.cmd.prop_attr.offset",
1011 FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
1013 { &hf_nvme_rdma_cmd_prop_attr_get_rsvd3,
1014 { "Reserved", "nvme-rdma.cmd.prop_attr.get.rsvd3",
1015 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1017 { &hf_nvme_rdma_cmd_prop_attr_set_4B_value,
1018 { "Value", "nvme-rdma.cmd.prop_attr.set.value.4B",
1019 FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
1021 { &hf_nvme_rdma_cmd_prop_attr_set_4B_value_rsvd,
1022 { "Reserved", "nvme-rdma.cmd.prop_attr.set.value.rsvd",
1023 FT_UINT32, BASE_HEX, NULL, 0x0, NULL, HFILL}
1025 { &hf_nvme_rdma_cmd_prop_attr_set_8B_value,
1026 { "Value", "nvme-rdma.cmd.prop_attr.set.value.8B",
1027 FT_UINT64, BASE_HEX, NULL, 0x0, NULL, HFILL}
1029 { &hf_nvme_rdma_cmd_prop_attr_set_rsvd3,
1030 { "Reserved", "nvme-rdma.cmd.prop_attr.set.rsvd3",
1031 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1033 { &hf_nvme_rdma_cmd_generic_rsvd1,
1034 { "Reserved", "nvme-rdma.cmd.generic.rsvd1",
1035 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1037 { &hf_nvme_rdma_cmd_generic_field,
1038 { "Fabric Cmd specific field", "nvme-rdma.cmd.generic.field",
1039 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1041 /* IB RDMA NVMe Response fields */
1042 { &hf_nvme_rdma_cqe,
1043 { "Cqe", "nvme-rdma.cqe",
1044 FT_NONE, BASE_NONE, NULL, 0x0, NULL, HFILL}
1046 { &hf_nvme_rdma_cqe_sts,
1047 { "Cmd specific Status", "nvme-rdma.cqe.sts",
1048 FT_UINT64, BASE_HEX, NULL, 0x0, NULL, HFILL}
1050 { &hf_nvme_rdma_cqe_sqhd,
1051 { "SQ Head Pointer", "nvme-rdma.cqe.sqhd",
1052 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
1054 { &hf_nvme_rdma_cqe_rsvd,
1055 { "Reserved", "nvme-rdma.cqe.rsvd",
1056 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
1058 { &hf_nvme_rdma_cqe_cid,
1059 { "Command ID", "nvme-rdma.cqe.cid",
1060 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
1062 { &hf_nvme_rdma_cqe_status,
1063 { "Status", "nvme-rdma.cqe.status",
1064 FT_UINT16, BASE_HEX, NULL, 0xfffe, NULL, HFILL}
1066 { &hf_nvme_rdma_cqe_status_rsvd,
1067 { "Reserved", "nvme-rdma.cqe.status.rsvd",
1068 FT_UINT16, BASE_HEX, NULL, 0x1, NULL, HFILL}
1070 { &hf_nvme_rdma_cqe_connect_cntlid,
1071 { "Controller ID", "nvme-rdma.cqe.connect.cntrlid",
1072 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
1074 { &hf_nvme_rdma_cqe_connect_authreq,
1075 { "Authentication Required", "nvme-rdma.cqe.connect.authreq",
1076 FT_UINT16, BASE_HEX, NULL, 0x0, NULL, HFILL}
1078 { &hf_nvme_rdma_cqe_connect_rsvd,
1079 { "Reserved", "nvme-rdma.cqe.connect.rsvd",
1080 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1082 { &hf_nvme_rdma_cqe_prop_set_rsvd,
1083 { "Reserved", "nvme-rdma.cqe.prop_set.rsvd",
1084 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1086 { &hf_nvme_rdma_to_host_unknown_data,
1087 { "Dissection unsupported", "nvme-rdma.unknown_data",
1088 FT_BYTES, BASE_NONE, NULL, 0x0, NULL, HFILL}
1090 { &hf_nvme_rdma_cmd_pkt,
1091 { "Fabric Cmd in", "nvme-rdma.cmd_pkt",
1092 FT_FRAMENUM, BASE_NONE, NULL, 0,
1093 "The Cmd for this transaction is in this frame", HFILL }
1095 { &hf_nvme_rdma_cqe_pkt,
1096 { "Fabric Cqe in", "nvme-rdma.cqe_pkt",
1097 FT_FRAMENUM, BASE_NONE, NULL, 0,
1098 "The Cqe for this transaction is in this frame", HFILL }
1100 { &hf_nvme_rdma_cmd_latency,
1101 { "Cmd Latency", "nvme-rdma.cmd_latency",
1102 FT_DOUBLE, BASE_NONE, NULL, 0x0,
1103 "The time between the command and completion, in usec", HFILL }
1105 { &hf_nvme_rdma_cmd_qid,
1106 { "Cmd Qid", "nvme-rdma.cmd.qid",
1107 FT_UINT16, BASE_HEX, NULL, 0x0,
1108 "Qid on which command is issued", HFILL }
1111 static gint *ett[] = {
1116 proto_nvme_rdma = proto_register_protocol("NVM Express Fabrics RDMA",
1117 NVME_FABRICS_RDMA, "nvme-rdma");
1119 proto_register_field_array(proto_nvme_rdma, hf, array_length(hf));
1120 proto_register_subtree_array(ett, array_length(ett));
1122 /* Register preferences */
1123 //nvme_rdma_module = prefs_register_protocol(proto_nvme_rdma, proto_reg_handoff_nvme_rdma);
1124 nvme_rdma_module = prefs_register_protocol(proto_nvme_rdma, NULL);
1126 range_convert_str(wmem_epan_scope(), &gPORT_RANGE, NVME_RDMA_TCP_PORT_RANGE, MAX_TCP_PORT);
1127 prefs_register_range_preference(nvme_rdma_module,
1129 "Subsystem Ports Range",
1130 "Range of NVMe Subsystem ports"
1131 "(default " NVME_RDMA_TCP_PORT_RANGE ")",
1132 &gPORT_RANGE, MAX_TCP_PORT);
1136 proto_reg_handoff_nvme_rdma(void)
1138 heur_dissector_add("infiniband.mad.cm.private", dissect_nvme_ib_cm,
1139 "NVMe Fabrics RDMA CM packets",
1140 "nvme_rdma_cm_private", proto_nvme_rdma, HEURISTIC_ENABLE);
1141 heur_dissector_add("infiniband.payload", dissect_nvme_ib,
1142 "NVMe Fabrics RDMA packets",
1143 "nvme_rdma", proto_nvme_rdma, HEURISTIC_ENABLE);
1144 ib_handler = find_dissector_add_dependency("infiniband", proto_nvme_rdma);
1145 proto_ib = dissector_handle_get_protocol_index(ib_handler);
1149 * Editor modelines - http://www.wireshark.org/tools/modelines.html
1154 * indent-tabs-mode: nil
1157 * vi: set shiftwidth=4 tabstop=8 expandtab:
1158 * :indentSize=4:tabSize=8:noTabs=true: