/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *	Atul Gupta (atul.gupta@chelsio.com)
 */
#define pr_fmt(fmt) "chcr:" fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <net/esp.h>
#include <net/xfrm.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"
/*
 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
 * into a WR.
 */
#define MAX_IMM_TX_PKT_LEN 256
#define GCM_ESP_IV_SIZE     8
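
/*
 * Sizing note (illustrative, not from hardware documentation): the
 * WR/CPL/key-context overhead computed by is_eth_imm() below eats a
 * sizeable chunk of MAX_IMM_TX_PKT_LEN, so only small frames can be
 * inlined as immediate data; larger frames are DMA'd via an SGL.
 */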
static int chcr_xfrm_add_state(struct xfrm_state *x);
static void chcr_xfrm_del_state(struct xfrm_state *x);
static void chcr_xfrm_free_state(struct xfrm_state *x);
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
static void chcr_advance_esn_state(struct xfrm_state *x);
static const struct xfrmdev_ops chcr_xfrmdev_ops = {
	.xdo_dev_state_add      = chcr_xfrm_add_state,
	.xdo_dev_state_delete   = chcr_xfrm_del_state,
	.xdo_dev_state_free     = chcr_xfrm_free_state,
	.xdo_dev_offload_ok     = chcr_ipsec_offload_ok,
	.xdo_dev_state_advance_esn = chcr_advance_esn_state,
};
/* Add offload xfrms to Chelsio Interface */
void chcr_add_xfrmops(const struct cxgb4_lld_info *lld)
{
	struct net_device *netdev = NULL;
	int i;

	for (i = 0; i < lld->nports; i++) {
		netdev = lld->ports[i];
		if (!netdev)
			continue;
		netdev->xfrmdev_ops = &chcr_xfrmdev_ops;
		netdev->hw_enc_features |= NETIF_F_HW_ESP;
		netdev->features |= NETIF_F_HW_ESP;
		netdev_change_features(netdev);
	}
}
static inline int chcr_ipsec_setauthsize(struct xfrm_state *x,
					 struct ipsec_sa_entry *sa_entry)
{
	int hmac_ctrl;
	int authsize = x->aead->alg_icv_len / 8;

	sa_entry->authsize = authsize;

	switch (authsize) {
	case ICV_8:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
		break;
	case ICV_12:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
		break;
	case ICV_16:
		hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
		break;
	default:
		return -EINVAL;
	}
	return hmac_ctrl;
}
static inline int chcr_ipsec_setkey(struct xfrm_state *x,
				    struct ipsec_sa_entry *sa_entry)
{
	int keylen = (x->aead->alg_key_len + 7) / 8;
	unsigned char *key = x->aead->alg_key;
	int ck_size, key_ctx_size = 0;
	unsigned char ghash_h[AEAD_H_SIZE];
	struct crypto_aes_ctx aes;
	int ret = 0;

	if (keylen > 3) {
		keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
		memcpy(sa_entry->salt, key + keylen, 4);
	}

	if (keylen == AES_KEYSIZE_128) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
	} else if (keylen == AES_KEYSIZE_192) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
	} else if (keylen == AES_KEYSIZE_256) {
		ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
	} else {
		pr_err("GCM: Invalid key length %d\n", keylen);
		ret = -EINVAL;
		goto out;
	}

	memcpy(sa_entry->key, key, keylen);
	sa_entry->enckey_len = keylen;
	key_ctx_size = sizeof(struct _key_ctx) +
		       ((DIV_ROUND_UP(keylen, 16)) << 4) +
		       AEAD_H_SIZE;

	sa_entry->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
						 CHCR_KEYCTX_MAC_KEY_SIZE_128,
						 0, 0,
						 key_ctx_size >> 4);

	/* Calculate the H = CIPH(K, 0 repeated 16 times).
	 * It will go in key context
	 */
	ret = aes_expandkey(&aes, key, keylen);
	if (ret) {
		sa_entry->enckey_len = 0;
		goto out;
	}
	memset(ghash_h, 0, AEAD_H_SIZE);
	aes_encrypt(&aes, ghash_h, ghash_h);
	memzero_explicit(&aes, sizeof(aes));

	memcpy(sa_entry->key + (DIV_ROUND_UP(sa_entry->enckey_len, 16) *
	       16), ghash_h, AEAD_H_SIZE);
	sa_entry->kctx_len = ((DIV_ROUND_UP(sa_entry->enckey_len, 16)) << 4) +
			     AEAD_H_SIZE;
out:
	return ret;
}
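
/*
 * Background: H = AES-K(0^128) computed above is the standard GCM hash
 * subkey (NIST SP 800-38D). The hardware expects it appended to the
 * cipher key in the key context, after the key is padded to a 16-byte
 * boundary, which is what the final two statements arrange.
 */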
/*
 * chcr_xfrm_add_state
 * returns 0 on success, negative error if failed to send message to FPGA
 * positive error if FPGA returned a bad response
 */
static int chcr_xfrm_add_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;
	int res = 0;

	if (x->props.aalgo != SADB_AALG_NONE) {
		pr_debug("CHCR: Cannot offload authenticated xfrm states\n");
		return -EINVAL;
	}
	if (x->props.calgo != SADB_X_CALG_NONE) {
		pr_debug("CHCR: Cannot offload compressed xfrm states\n");
		return -EINVAL;
	}
	if (x->props.family != AF_INET &&
	    x->props.family != AF_INET6) {
		pr_debug("CHCR: Only IPv4/6 xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->props.mode != XFRM_MODE_TRANSPORT &&
	    x->props.mode != XFRM_MODE_TUNNEL) {
		pr_debug("CHCR: Only transport and tunnel xfrm offload\n");
		return -EINVAL;
	}
	if (x->id.proto != IPPROTO_ESP) {
		pr_debug("CHCR: Only ESP xfrm state offloaded\n");
		return -EINVAL;
	}
	if (x->encap) {
		pr_debug("CHCR: Encapsulated xfrm state not offloaded\n");
		return -EINVAL;
	}
	if (!x->aead) {
		pr_debug("CHCR: Cannot offload xfrm states without aead\n");
		return -EINVAL;
	}
	if (x->aead->alg_icv_len != 128 &&
	    x->aead->alg_icv_len != 96) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD ICV length other than 96b & 128b\n");
		return -EINVAL;
	}
	if ((x->aead->alg_key_len != 128 + 32) &&
	    (x->aead->alg_key_len != 256 + 32)) {
		pr_debug("CHCR: Cannot offload xfrm states with AEAD key length other than 128/256 bit\n");
		return -EINVAL;
	}
	if (x->tfcpad) {
		pr_debug("CHCR: Cannot offload xfrm states with tfc padding\n");
		return -EINVAL;
	}
	if (!x->geniv) {
		pr_debug("CHCR: Cannot offload xfrm states without geniv\n");
		return -EINVAL;
	}
	if (strcmp(x->geniv, "seqiv")) {
		pr_debug("CHCR: Cannot offload xfrm states with geniv other than seqiv\n");
		return -EINVAL;
	}

	sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
	if (!sa_entry) {
		res = -ENOMEM;
		goto out;
	}

	sa_entry->hmac_ctrl = chcr_ipsec_setauthsize(x, sa_entry);
	if (x->props.flags & XFRM_STATE_ESN)
		sa_entry->esn = 1;
	chcr_ipsec_setkey(x, sa_entry);
	x->xso.offload_handle = (unsigned long)sa_entry;
	try_module_get(THIS_MODULE);
out:
	return res;
}
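
/*
 * For reference, an SA that passes the checks above could be installed
 * from userspace roughly like this (illustrative ip-xfrm invocation;
 * addresses, SPI and key are placeholders):
 *
 *   ip xfrm state add src 10.0.0.1 dst 10.0.0.2 proto esp spi 0x100 \
 *	mode transport aead "rfc4106(gcm(aes))" 0x<20-byte key+salt> 128 \
 *	offload dev ethX dir out
 */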
static void chcr_xfrm_del_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
static void chcr_xfrm_free_state(struct xfrm_state *x)
{
	struct ipsec_sa_entry *sa_entry;

	if (!x->xso.offload_handle)
		return;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;
	kfree(sa_entry);
	module_put(THIS_MODULE);
}
static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	if (x->props.family == AF_INET) {
		/* Offload with IP options is not supported yet */
		if (ip_hdr(skb)->ihl > 5)
			return false;
	} else {
		/* Offload with IPv6 extension headers is not supported yet */
		if (ipv6_ext_hdr(ipv6_hdr(skb)->nexthdr))
			return false;
	}
	/* Inline single pdu */
	if (skb_shinfo(skb)->gso_size)
		return false;
	return true;
}
static void chcr_advance_esn_state(struct xfrm_state *x)
{
	/* do nothing */
	if (!x->xso.offload_handle)
		return;
}
static inline int is_eth_imm(const struct sk_buff *skb,
			     struct ipsec_sa_entry *sa_entry)
{
	unsigned int kctx_len;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = sizeof(struct fw_ulptx_wr) +
		 sizeof(struct chcr_ipsec_req) + kctx_len;

	hdrlen += sizeof(struct cpl_tx_pkt);
	if (sa_entry->esn)
		hdrlen += (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16)
			   << 4);
	if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
		return hdrlen;
	return 0;
}
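
/*
 * Note on the convention above: is_eth_imm() doubles as a predicate
 * and a size query. A non-zero return is the header overhead to add
 * to skb->len when the frame can be inlined as immediate data; zero
 * means the frame must go out via a scatter-gather list.
 */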
static inline unsigned int calc_tx_sec_flits(const struct sk_buff *skb,
					     struct ipsec_sa_entry *sa_entry,
					     bool *immediate)
{
	unsigned int kctx_len;
	unsigned int flits;
	int aadivlen;
	int hdrlen;

	kctx_len = sa_entry->kctx_len;
	hdrlen = is_eth_imm(skb, sa_entry);
	aadivlen = sa_entry->esn ? DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
						16) : 0;
	aadivlen <<= 4;

	/* If the skb is small enough, we can pump it out as a work request
	 * with only immediate data. In that case we just have to have the
	 * TX Packet header plus the skb data in the Work Request.
	 */
	if (hdrlen) {
		*immediate = true;
		return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
	}

	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);

	/* Otherwise, we're going to have to construct a Scatter gather list
	 * of the skb body and fragments. We also include the flits necessary
	 * for the TX Packet Work Request and CPL. We always have a firmware
	 * Write Header (incorporated as part of the cpl_tx_pkt_lso and
	 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL
	 * message or, if we're doing a Large Send Offload, an LSO CPL message
	 * with an embedded TX Packet Write CPL message.
	 */
	flits += (sizeof(struct fw_ulptx_wr) +
		  sizeof(struct chcr_ipsec_req) +
		  kctx_len +
		  sizeof(struct cpl_tx_pkt_core) +
		  aadivlen) / sizeof(__be64);
	return flits;
}
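
/*
 * Unit refresher for the arithmetic above: one flit is 8 bytes
 * (sizeof(__be64)) and one Tx descriptor holds 8 flits (64 bytes).
 * For example, a fully linear skb that misses the immediate-data
 * cutoff needs sgl_len(1) flits for its single-entry SGL plus the
 * fixed WR + CPL + key-context overhead divided by 8; flits_to_desc()
 * below then rounds the total up to whole descriptors.
 */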
inline void *copy_esn_pktxt(struct sk_buff *skb,
			    struct net_device *dev,
			    void *pos,
			    struct ipsec_sa_entry *sa_entry)
{
	struct chcr_ipsec_aadiv *aadiv;
	struct ulptx_idata *sc_imm;
	struct ip_esp_hdr *esphdr;
	struct xfrm_offload *xo;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	__be64 seqno;
	u32 qidx;
	u32 seqlo;
	u8 *iv;
	int eoq;
	int len;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	if (!eoq)
		pos = q->q.desc;

	len = DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv), 16) << 4;
	memset(pos, 0, len);
	aadiv = (struct chcr_ipsec_aadiv *)pos;
	esphdr = (struct ip_esp_hdr *)skb_transport_header(skb);
	iv = skb_transport_header(skb) + sizeof(struct ip_esp_hdr);
	xo = xfrm_offload(skb);

	aadiv->spi = esphdr->spi;
	seqlo = ntohl(esphdr->seq_no);
	seqno = cpu_to_be64(seqlo + ((u64)xo->seq.hi << 32));
	memcpy(aadiv->seq_no, &seqno, 8);
	memcpy(aadiv->iv, iv, 8);

	if (is_eth_imm(skb, sa_entry) && !skb_is_nonlinear(skb)) {
		sc_imm = (struct ulptx_idata *)(pos +
			 (DIV_ROUND_UP(sizeof(struct chcr_ipsec_aadiv),
				       sizeof(__be64)) << 3));
		sc_imm->cmd_more = FILL_CMD_MORE(0);
		sc_imm->len = cpu_to_be32(skb->len);
	}
	pos += len;
	return pos;
}
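
/*
 * Layout note for the block above: chcr_ipsec_aadiv carries the AAD
 * and IV the hardware needs for ESN processing: the 4-byte SPI, the
 * 64-bit extended sequence number (low 32 bits from the ESP header,
 * high 32 bits from the offload state), and the 8-byte explicit IV.
 */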
inline void *copy_cpltx_pktxt(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      struct ipsec_sa_entry *sa_entry)
{
	struct cpl_tx_pkt_core *cpl;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	u32 ctrl0, qidx;
	u64 cntrl = 0;
	int left;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	/* end of queue, reset pos to start of queue */
	left = (void *)q->q.stat - pos;
	if (!left)
		pos = q->q.desc;

	cpl = (struct cpl_tx_pkt_core *)pos;

	cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
	ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) |
		TXPKT_PF_V(adap->pf);
	if (skb_vlan_tag_present(skb)) {
		q->vlan_ins++;
		cntrl |= TXPKT_VLAN_VLD_F | TXPKT_VLAN_V(skb_vlan_tag_get(skb));
	}

	cpl->ctrl0 = htonl(ctrl0);
	cpl->pack = htons(0);
	cpl->len = htons(skb->len);
	cpl->ctrl1 = cpu_to_be64(cntrl);

	pos += sizeof(struct cpl_tx_pkt_core);
	/* Copy ESN info for HW */
	if (sa_entry->esn)
		pos = copy_esn_pktxt(skb, dev, pos, sa_entry);
	return pos;
}
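
/*
 * Note on the checksum flags above: IP and L4 checksum insertion are
 * disabled for this CPL. The packet is about to be encrypted and
 * authenticated by the crypto engine, so any checksum the NIC patched
 * in afterwards would corrupt the ESP payload; the stack is expected
 * to finalize checksums before handing the skb to this path.
 */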
inline void *copy_key_cpltx_pktxt(struct sk_buff *skb,
				  struct net_device *dev,
				  void *pos,
				  struct ipsec_sa_entry *sa_entry)
{
	struct _key_ctx *key_ctx;
	int left, eoq, key_len;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	unsigned int qidx;

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];
	key_len = sa_entry->kctx_len;

	/* end of queue, reset pos to start of queue */
	eoq = (void *)q->q.stat - pos;
	left = eoq;
	if (!eoq) {
		pos = q->q.desc;
		left = 64 * q->q.size;
	}

	/* Copy the Key context header */
	key_ctx = (struct _key_ctx *)pos;
	key_ctx->ctx_hdr = sa_entry->key_ctx_hdr;
	memcpy(key_ctx->salt, sa_entry->salt, MAX_SALT);
	pos += sizeof(struct _key_ctx);
	left -= sizeof(struct _key_ctx);

	if (likely(key_len <= left)) {
		memcpy(key_ctx->key, sa_entry->key, key_len);
		pos += key_len;
	} else {
		memcpy(pos, sa_entry->key, left);
		memcpy(q->q.desc, sa_entry->key + left,
		       key_len - left);
		pos = (u8 *)q->q.desc + (key_len - left);
	}
	/* Copy CPL TX PKT XT */
	pos = copy_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
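
/*
 * The split memcpy above handles the descriptor ring wrapping in the
 * middle of the key context: whatever does not fit before q->q.stat
 * (the end-of-ring marker) continues at q->q.desc, the start of the
 * ring, and pos is rebased accordingly.
 */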
inline void *chcr_crypto_wreq(struct sk_buff *skb,
			      struct net_device *dev,
			      void *pos,
			      int credits,
			      struct ipsec_sa_entry *sa_entry)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int ivsize = GCM_ESP_IV_SIZE;
	struct chcr_ipsec_wr *wr;
	bool immediate = false;
	u16 immdatalen = 0;
	unsigned int flits;
	u32 ivinoffset;
	u32 aadstart;
	u32 aadstop;
	u32 ciphstart;
	u16 sc_more = 0;
	u32 ivdrop = 0;
	u32 esnlen = 0;
	u32 wr_mid;
	u16 ndesc;
	int qidx = skb_get_queue_mapping(skb);
	struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset];
	unsigned int kctx_len = sa_entry->kctx_len;
	int qid = q->q.cntxt_id;

	atomic_inc(&adap->chcr_stats.ipsec_cnt);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = DIV_ROUND_UP(flits, 2);
	if (sa_entry->esn)
		ivdrop = 1;

	if (immediate)
		immdatalen = skb->len;

	if (sa_entry->esn) {
		esnlen = sizeof(struct chcr_ipsec_aadiv);
		if (!skb_is_nonlinear(skb))
			sc_more = 1;
	}

	/* WR Header */
	wr = (struct chcr_ipsec_wr *)pos;
	wr->wreq.op_to_compl = htonl(FW_WR_OP_V(FW_ULPTX_WR));
	wr_mid = FW_CRYPTO_LOOKASIDE_WR_LEN16_V(ndesc);

	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
		netif_tx_stop_queue(q->txq);
		q->q.stops++;
		wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
	}
	wr_mid |= FW_ULPTX_WR_DATA_F;
	wr->wreq.flowid_len16 = htonl(wr_mid);

	/* ULPTX */
	wr->req.ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(pi->port_id, qid);
	wr->req.ulptx.len = htonl(ndesc - 1);

	/* Sub-command */
	wr->req.sc_imm.cmd_more = FILL_CMD_MORE(!immdatalen || sc_more);
	wr->req.sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
					 sizeof(wr->req.key_ctx) +
					 kctx_len +
					 sizeof(struct cpl_tx_pkt_core) +
					 esnlen +
					 (esnlen ? 0 : immdatalen));

	/* CPL_SEC_PDU */
	ivinoffset = sa_entry->esn ? (ESN_IV_INSERT_OFFSET + 1) :
				     (skb_transport_offset(skb) +
				      sizeof(struct ip_esp_hdr) + 1);
	wr->req.sec_cpl.op_ivinsrtofst = htonl(
				CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) |
				CPL_TX_SEC_PDU_CPLLEN_V(2) |
				CPL_TX_SEC_PDU_PLACEHOLDER_V(1) |
				CPL_TX_SEC_PDU_IVINSRTOFST_V(
							     ivinoffset));

	wr->req.sec_cpl.pldlen = htonl(skb->len + esnlen);
	aadstart = sa_entry->esn ? 1 : (skb_transport_offset(skb) + 1);
	aadstop = sa_entry->esn ? ESN_IV_INSERT_OFFSET :
				  (skb_transport_offset(skb) +
				   sizeof(struct ip_esp_hdr));
	ciphstart = skb_transport_offset(skb) + sizeof(struct ip_esp_hdr) +
		    GCM_ESP_IV_SIZE + 1;
	ciphstart += sa_entry->esn ? esnlen : 0;

	wr->req.sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
							aadstart,
							aadstop,
							ciphstart, 0);

	wr->req.sec_cpl.cipherstop_lo_authinsert =
		FILL_SEC_CPL_AUTHINSERT(0, ciphstart,
					sa_entry->authsize,
					sa_entry->authsize);
	wr->req.sec_cpl.seqno_numivs =
		FILL_SEC_CPL_SCMD0_SEQNO(CHCR_ENCRYPT_OP, 1,
					 CHCR_SCMD_CIPHER_MODE_AES_GCM,
					 CHCR_SCMD_AUTH_MODE_GHASH,
					 sa_entry->hmac_ctrl,
					 ivsize >> 1);
	wr->req.sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
								 0, ivdrop, 0);

	pos += sizeof(struct fw_ulptx_wr) +
	       sizeof(struct ulp_txpkt) +
	       sizeof(struct ulptx_idata) +
	       sizeof(struct cpl_tx_sec_pdu);

	pos = copy_key_cpltx_pktxt(skb, dev, pos, sa_entry);

	return pos;
}
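
/*
 * Resulting work request layout, in order: fw_ulptx_wr header,
 * ulp_txpkt, ulptx_idata sub-command, cpl_tx_sec_pdu, key context
 * (header + key + GHASH subkey), cpl_tx_pkt_core, then for ESN the
 * aadiv block, and finally the packet itself (inline or via SGL).
 */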
/**
 *	flits_to_desc - returns the num of Tx descriptors for the given flits
 *	@n: the number of flits
 *
 *	Returns the number of Tx descriptors needed for the supplied number
 *	of flits.
 */
static inline unsigned int flits_to_desc(unsigned int n)
{
	WARN_ON(n > SGE_MAX_WR_LEN / 8);
	return DIV_ROUND_UP(n, 8);
}
static inline unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;
}
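
/*
 * Keeping one descriptor permanently unused (the "- 1" above) is the
 * usual ring-buffer convention: the producer index can never catch up
 * with the consumer index, so a full ring is never confused with an
 * empty one.
 */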
static void eth_txq_stop(struct sge_eth_txq *q)
{
	netif_tx_stop_queue(q->txq);
	q->q.stops++;
}
static inline void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}
/*
 *	chcr_ipsec_xmit called from ULD Tx handler
 */
int chcr_ipsec_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	unsigned int last_desc, ndesc, flits = 0;
	struct ipsec_sa_entry *sa_entry;
	u64 *pos, *end, *before, *sgl;
	struct tx_sw_desc *sgl_sdesc;
	int qidx, left, credits;
	bool immediate = false;
	struct sge_eth_txq *q;
	struct adapter *adap;
	struct port_info *pi;
	struct sec_path *sp;

	if (!x->xso.offload_handle)
		return NETDEV_TX_BUSY;

	sa_entry = (struct ipsec_sa_entry *)x->xso.offload_handle;

	sp = skb_sec_path(skb);
	if (sp->len != 1) {
out_free:	dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	pi = netdev_priv(dev);
	adap = pi->adapter;
	qidx = skb->queue_mapping;
	q = &adap->sge.ethtxq[qidx + pi->first_qset];

	cxgb4_reclaim_completed_tx(adap, &q->q, true);

	flits = calc_tx_sec_flits(skb, sa_entry, &immediate);
	ndesc = flits_to_desc(flits);
	credits = txq_avail(&q->q) - ndesc;

	if (unlikely(credits < 0)) {
		eth_txq_stop(q);
		dev_err(adap->pdev_dev,
			"%s: Tx ring %u full while queue awake! cred:%d %d %d flits:%d\n",
			dev->name, qidx, credits, ndesc, txq_avail(&q->q),
			flits);
		return NETDEV_TX_BUSY;
	}

	last_desc = q->q.pidx + ndesc - 1;
	if (last_desc >= q->q.size)
		last_desc -= q->q.size;
	sgl_sdesc = &q->q.sdesc[last_desc];

	if (!immediate &&
	    unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) {
		memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr));
		q->mapping_err++;
		goto out_free;
	}

	pos = (u64 *)&q->q.desc[q->q.pidx];
	before = (u64 *)pos;
	end = (u64 *)pos + flits;

	/* Setup IPSec CPL */
	pos = (void *)chcr_crypto_wreq(skb, dev, (void *)pos,
				       credits, sa_entry);
	if (before > (u64 *)pos) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
	}
	if (pos == (u64 *)q->q.stat) {
		left = (u8 *)end - (u8 *)q->q.stat;
		end = (void *)q->q.desc + left;
		pos = (void *)q->q.desc;
	}

	sgl = (void *)pos;
	if (immediate) {
		cxgb4_inline_tx_skb(skb, &q->q, sgl);
		dev_consume_skb_any(skb);
	} else {
		cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
				0, sgl_sdesc->addr);
		skb_orphan(skb);
		sgl_sdesc->skb = skb;
	}
	txq_advance(&q->q, ndesc);

	cxgb4_ring_tx_db(adap, &q->q, ndesc);
	return NETDEV_TX_OK;
}