schedule_work(&bp->sp_task);
}
-static void bnxt_cancel_sp_work(struct bnxt *bp)
-{
- if (BNXT_PF(bp)) {
- flush_workqueue(bnxt_pf_wq);
- } else {
- cancel_work_sync(&bp->sp_task);
- cancel_delayed_work_sync(&bp->fw_reset_task);
- }
-}
-
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM;
u16 dst = BNXT_HWRM_CHNL_CHIMP;
- if (BNXT_NO_FW_ACCESS(bp))
+ if (BNXT_NO_FW_ACCESS(bp) &&
+ le16_to_cpu(req->req_type) != HWRM_FUNC_RESET)
return -EBUSY;
if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
{
int rc = 0;
- rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state))
+ rc = -EIO;
+ if (!rc)
+ rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
if (rc) {
netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
dev_close(bp->dev);
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_cancel_sp_work(bp);
- bp->sp_event = 0;
-
- bnxt_dl_fw_reporters_destroy(bp, true);
if (BNXT_PF(bp))
devlink_port_type_clear(&bp->dl_port);
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
+ clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
+ /* Flush any pending tasks */
+ cancel_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->fw_reset_task);
+ bp->sp_event = 0;
+
+ bnxt_dl_fw_reporters_destroy(bp, true);
bnxt_dl_unregister(bp);
bnxt_shutdown_tc(bp);
return PCI_ERS_RESULT_DISCONNECT;
}
+ if (state == pci_channel_io_frozen)
+ set_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state);
+
if (netif_running(netdev))
bnxt_close(netdev);
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
- int err = 0;
+ int err = 0, off;
pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
"Cannot re-enable PCI device after reset.\n");
} else {
pci_set_master(pdev);
+ /* Upon fatal error, the device's internal logic that latches the
+ * BAR values is reset and is restored only by rewriting the BARs.
+ *
+ * As pci_restore_state() does not re-write a BAR when its current
+ * value matches the saved value, the driver writes the BARs to 0
+ * to force the restore after a fatal error.
+ */
+ if (test_and_clear_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN,
+ &bp->state)) {
+ for (off = PCI_BASE_ADDRESS_0;
+ off <= PCI_BASE_ADDRESS_5; off += 4)
+ pci_write_config_dword(bp->pdev, off, 0);
+ }
pci_restore_state(pdev);
pci_save_state(pdev);
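
For context, a minimal sketch (illustrative, not the actual kernel source) of the compare-before-write behaviour inside pci_restore_state() that the zero-writes above are working around:

/* pci_restore_state() skips any config dword whose current value
 * already equals the saved one. A BAR that still reads back its old
 * value is therefore never rewritten, and the device's BAR latch is
 * never refreshed; zeroing the BARs first forces the mismatch.
 */
static void sketch_restore_config_dword(struct pci_dev *pdev, int off,
					u32 saved)
{
	u32 val;

	pci_read_config_dword(pdev, off, &val);
	if (val == saved)
		return;		/* unchanged: write skipped */
	pci_write_config_dword(pdev, off, saved);
}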
#define BNXT_STATE_ABORT_ERR 5
#define BNXT_STATE_FW_FATAL_COND 6
#define BNXT_STATE_DRV_REGISTERED 7
+#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
int err;
/* do a set-tcb for smac-sel and CWR bit. */
- err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
- if (err)
- goto smac_err;
-
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
+ if (err)
+ goto smac_err;
+
+ err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (!err)
return 0;
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
+ FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
+ fwr->smac_sel = f->smt->idx;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
TX_QUEUE_V(f->fs.nat_mode) |
T5_OPT_2_VALID_F |
RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
- CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
- (f->fs.dirsteer << 1)) |
PACE_V((f->fs.maskhash) |
- ((f->fs.dirsteerhash) << 1)) |
- CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
+ ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
}
return;
}
+ switch (f->fs.action) {
+ case FILTER_PASS:
+ if (f->fs.dirsteer)
+ set_tcb_tflag(adap, f, tid,
+ TF_DIRECT_STEER_S, 1, 1);
+ break;
+ case FILTER_DROP:
+ set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
+ break;
+ case FILTER_SWITCH:
+ set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
+ break;
+ }
+
break;
default:
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
- int err = 0;
-
- if (f->fs.newsmac)
- err = configure_filter_smac(adap, f);
-
- if (!err) {
- f->pending = 0; /* async setup completed */
- f->valid = 1;
- if (ctx) {
- ctx->result = 0;
- ctx->tid = idx;
- }
- } else {
- clear_filter(adap, f);
- if (ctx)
- ctx->result = err;
+ f->pending = 0; /* async setup completed */
+ f->valid = 1;
+ if (ctx) {
+ ctx->result = 0;
+ ctx->tid = idx;
}
} else {
/* Something went wrong. Issue a warning about the
#define TCB_T_FLAGS_M 0xffffffffffffffffULL
#define TCB_T_FLAGS_V(x) ((__u64)(x) << TCB_T_FLAGS_S)
+#define TF_DROP_S 22
+#define TF_DIRECT_STEER_S 23
+#define TF_LPBK_S 59
+
#define TF_CCTRL_ECE_S 60
#define TF_CCTRL_CWR_S 61
#define TF_CCTRL_RFR_S 62
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected PASS_OPEN_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
static int chtls_close_listsrv_rpl(struct chtls_dev *cdev, struct sk_buff *skb)
if (rpl->status != CPL_ERR_NONE) {
pr_info("Unexpected CLOSE_LISTSRV_RPL status %u for STID %u\n",
rpl->status, stid);
- return CPL_RET_BUF_DONE;
+ } else {
+ cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
+ sock_put(listen_ctx->lsk);
+ kfree(listen_ctx);
+ module_put(THIS_MODULE);
}
-
- cxgb4_free_stid(cdev->tids, stid, listen_ctx->lsk->sk_family);
- sock_put(listen_ctx->lsk);
- kfree(listen_ctx);
- module_put(THIS_MODULE);
-
- return 0;
+ return CPL_RET_BUF_DONE;
}
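
With both RPL handlers above returning CPL_RET_BUF_DONE on every path, the caller frees the skb uniformly; only the success path (status == CPL_ERR_NONE) releases the STID and the listen context.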
static void chtls_purge_wr_queue(struct sock *sk)
struct chtls_sock *csk = sk->sk_user_data;
local_bh_disable();
- bh_lock_sock(sk);
release_tcp_port(sk); /* release the port immediately */
spin_lock(&reap_list_lock);
if (!csk->passive_reap_next)
schedule_work(&reap_task);
spin_unlock(&reap_list_lock);
- bh_unlock_sock(sk);
local_bh_enable();
}
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
+ struct sk_buff *next_skb;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
chtls_free_skb(sk, skb);
buffers_freed++;
hws->copied_seq = 0;
- if (copied >= target &&
- !skb_peek(&sk->sk_receive_queue))
+ next_skb = skb_peek(&sk->sk_receive_queue);
+ if (copied >= target && !next_skb)
+ break;
+ if (next_skb && (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR))
break;
}
} while (len > 0);
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_TBIM;
}
- if ((ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII))
+ if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
upsmr |= UCC_GETH_UPSMR_SGMM;
out_be32(&uf_regs->upsmr, upsmr);
return ret;
}
- return ret;
+ return 0;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
hclgevf_uninit_msi(hdev);
}
- hclgevf_pci_uninit(hdev);
hclgevf_cmd_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
hclgevf_uninit_mac_list(hdev);
}
ret = -EOPNOTSUPP;
}
- if (!ether_addr_equal(ether_header->h_source, netdev->dev_addr)) {
- netdev_dbg(netdev, "source packet MAC address does not match veth device's, dropping packet.\n");
- netdev->stats.tx_dropped++;
- ret = -EOPNOTSUPP;
- }
-
return ret;
}
int rc;
rc = 0;
- ether_addr_copy(adapter->mac_addr, addr->sa_data);
- if (adapter->state != VNIC_PROBED)
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (adapter->state != VNIC_PROBED) {
+ ether_addr_copy(adapter->mac_addr, addr->sa_data);
rc = __ibmvnic_set_mac(netdev, addr->sa_data);
+ }
return rc;
}
err = mlxsw_emad_transmit(trans->core, trans);
if (err == 0)
return;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
} else {
err = -EIO;
}
if (!reload)
devlink_resources_unregister(devlink, NULL);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
+ if (!reload)
+ devlink_free(devlink);
return;
u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
const struct mlxsw_sp_port_type_speed_ops *ops;
char ptys_pl[MLXSW_REG_PTYS_LEN];
+ u32 eth_proto_cap_masked;
int err;
ops = mlxsw_sp->port_type_speed_ops;
- /* Set advertised speeds to supported speeds. */
+ /* Set advertised speeds to speeds supported by both the driver
+ * and the device.
+ */
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
0, false);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
&eth_proto_admin, &eth_proto_oper);
+ eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
- eth_proto_cap, mlxsw_sp_port->link.autoneg);
+ eth_proto_cap_masked,
+ mlxsw_sp_port->link.autoneg);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
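
Masking eth_proto_cap against the driver's own link-mode tables (via the new ptys_proto_cap_masked_get callbacks defined below) ensures that autoneg advertises only speeds supported by both the device and the driver, as the reworded comment states.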
u32 *p_eth_proto_cap,
u32 *p_eth_proto_admin,
u32 *p_eth_proto_oper);
+ u32 (*ptys_proto_cap_masked_get)(u32 eth_proto_cap);
};
static inline struct net_device *
p_eth_proto_oper);
}
+static u32 mlxsw_sp1_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP1_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp1_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp1_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp1_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp1_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp1_from_ptys_link,
.to_ptys_speed = mlxsw_sp1_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp1_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp1_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp1_ptys_proto_cap_masked_get,
};
static const enum ethtool_link_mode_bit_indices
p_eth_proto_admin, p_eth_proto_oper);
}
+static u32 mlxsw_sp2_ptys_proto_cap_masked_get(u32 eth_proto_cap)
+{
+ u32 ptys_proto_cap_masked = 0;
+ int i;
+
+ for (i = 0; i < MLXSW_SP2_PORT_LINK_MODE_LEN; i++) {
+ if (mlxsw_sp2_port_link_mode[i].mask & eth_proto_cap)
+ ptys_proto_cap_masked |=
+ mlxsw_sp2_port_link_mode[i].mask;
+ }
+
+ return ptys_proto_cap_masked;
+}
+
const struct mlxsw_sp_port_type_speed_ops mlxsw_sp2_port_type_speed_ops = {
.from_ptys_supported_port = mlxsw_sp2_from_ptys_supported_port,
.from_ptys_link = mlxsw_sp2_from_ptys_link,
.to_ptys_speed = mlxsw_sp2_to_ptys_speed,
.reg_ptys_eth_pack = mlxsw_sp2_reg_ptys_eth_pack,
.reg_ptys_eth_unpack = mlxsw_sp2_reg_ptys_eth_unpack,
+ .ptys_proto_cap_masked_get = mlxsw_sp2_ptys_proto_cap_masked_get,
};
union ionic_dev_cmd cmd = {
.vf_setattr.opcode = IONIC_CMD_VF_SETATTR,
.vf_setattr.attr = attr,
- .vf_setattr.vf_index = vf,
+ .vf_setattr.vf_index = cpu_to_le16(vf),
};
int err;
{
union ionic_dev_cmd cmd = {
.q_identify.opcode = IONIC_CMD_Q_IDENTIFY,
- .q_identify.lif_type = lif_type,
+ .q_identify.lif_type = cpu_to_le16(lif_type),
.q_identify.type = qtype,
.q_identify.ver = qver,
};
int res_index;
};
+#ifndef __CHECKER__
/* Registers */
static_assert(sizeof(struct ionic_intr) == 32);
static_assert(sizeof(struct ionic_vf_setattr_comp) == 16);
static_assert(sizeof(struct ionic_vf_getattr_cmd) == 64);
static_assert(sizeof(struct ionic_vf_getattr_comp) == 16);
+#endif /* !__CHECKER__ */
struct ionic_devinfo {
u8 asic_type;
{
union ionic_dev_cmd cmd = {
.fw_download.opcode = IONIC_CMD_FW_DOWNLOAD,
- .fw_download.offset = offset,
- .fw_download.addr = addr,
- .fw_download.length = length
+ .fw_download.offset = cpu_to_le32(offset),
+ .fw_download.addr = cpu_to_le64(addr),
+ .fw_download.length = cpu_to_le32(length),
};
ionic_dev_cmd_go(idev, &cmd);
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
- ionic_rx_flush(&lif->rxqcqs[i]->cq);
ionic_rx_empty(&lif->rxqcqs[i]->q);
}
}
ret = -EINVAL;
} else {
ivf->vf = vf;
- ivf->vlan = ionic->vfs[vf].vlanid;
+ ivf->vlan = le16_to_cpu(ionic->vfs[vf].vlanid);
ivf->qos = 0;
ivf->spoofchk = ionic->vfs[vf].spoofchk;
ivf->linkstate = ionic->vfs[vf].linkstate;
- ivf->max_tx_rate = ionic->vfs[vf].maxrate;
+ ivf->max_tx_rate = le32_to_cpu(ionic->vfs[vf].maxrate);
ivf->trusted = ionic->vfs[vf].trusted;
ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
}
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
if (!ret)
- ionic->vfs[vf].vlanid = vlan;
+ ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
}
up_write(&ionic->vf_op_lock);
ret = ionic_set_vf_config(ionic, vf,
IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
if (!ret)
- lif->ionic->vfs[vf].maxrate = tx_max;
+ lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
}
up_write(&ionic->vf_op_lock);
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
+ union ionic_q_identity __iomem *q_ident;
struct ionic *ionic = lif->ionic;
- union ionic_q_identity *q_ident;
struct ionic_dev *idev;
int qtype;
int err;
idev = &lif->ionic->idev;
- q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
+ q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;
for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
ionic_qtype_versions[qtype]);
err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
if (!err) {
- qti->version = q_ident->version;
- qti->supported = q_ident->supported;
- qti->features = le64_to_cpu(q_ident->features);
- qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
- qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
- qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
- qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
- qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
+ qti->version = readb(&q_ident->version);
+ qti->supported = readb(&q_ident->supported);
+ qti->features = readq(&q_ident->features);
+ qti->desc_sz = readw(&q_ident->desc_sz);
+ qti->comp_sz = readw(&q_ident->comp_sz);
+ qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
+ qti->max_sg_elems = readw(&q_ident->max_sg_elems);
+ qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
}
mutex_unlock(&ionic->dev_cmd_lock);
static void ionic_dev_cmd_clean(struct ionic *ionic)
{
- union ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
+ union __iomem ionic_dev_cmd_regs *regs = ionic->idev.dev_cmd_regs;
iowrite32(0, &regs->doorbell);
memset_io(&regs->cmd, 0, sizeof(regs->cmd));
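
The ionic hunks above all enforce the same pair of rules: fields in BAR memory annotated __iomem must be accessed via readb()/readw()/readq() or memset_io()/memcpy_fromio() rather than dereferenced directly, and multi-byte command fields are little-endian, hence the cpu_to_le16()/cpu_to_le32()/cpu_to_le64() conversions on the CPU side. A minimal sketch with illustrative names:

struct sketch_regs {
	__le16 index;		/* little-endian in device memory */
	u8 version;
};

static u16 sketch_read_index(struct sketch_regs __iomem *regs)
{
	/* readw() performs the MMIO access and returns CPU-endian data;
	 * a plain dereference of regs->index would trip sparse and is
	 * not guaranteed safe on all architectures.
	 */
	return readw(&regs->index);
}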
*/
max_wait = jiffies + (max_seconds * HZ);
try_again:
- opcode = idev->dev_cmd_regs->cmd.cmd.opcode;
+ opcode = readb(&idev->dev_cmd_regs->cmd.cmd.opcode);
start_time = jiffies;
do {
done = ionic_dev_cmd_done(idev);
(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
#define IONIC_READ_STAT_LE64(base_ptr, desc_ptr) \
- __le64_to_cpu(*((u64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
+ __le64_to_cpu(*((__le64 *)(((u8 *)(base_ptr)) + (desc_ptr)->offset)))
struct ionic_stat_desc {
char name[ETH_GSTRING_LEN];
if (likely(netdev->features & NETIF_F_RXCSUM)) {
if (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = (__wsum)le16_to_cpu(comp->csum);
+ skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
stats->csum_complete++;
}
} else {
return true;
}
-void ionic_rx_flush(struct ionic_cq *cq)
-{
- struct ionic_dev *idev = &cq->lif->ionic->idev;
- u32 work_done;
-
- work_done = ionic_cq_service(cq, cq->num_descs,
- ionic_rx_service, NULL, NULL);
-
- if (work_done)
- ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
- work_done, IONIC_INTR_CRED_RESET_COALESCE);
-}
-
static int ionic_rx_page_alloc(struct ionic_queue *q,
struct ionic_page_info *page_info)
{
void ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_desc_info *desc_info;
- struct ionic_rxq_desc *desc;
- unsigned int i;
- u16 idx;
-
- idx = q->tail_idx;
- while (idx != q->head_idx) {
- desc_info = &q->info[idx];
- desc = desc_info->desc;
- desc->addr = 0;
- desc->len = 0;
+ struct ionic_page_info *page_info;
+ unsigned int i, j;
- for (i = 0; i < desc_info->npages; i++)
- ionic_rx_page_free(q, &desc_info->pages[i]);
+ for (i = 0; i < q->num_descs; i++) {
+ desc_info = &q->info[i];
+ for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
+ page_info = &desc_info->pages[j];
+ if (page_info->page)
+ ionic_rx_page_free(q, page_info);
+ }
+ desc_info->npages = 0;
+ desc_info->cb = NULL;
desc_info->cb_arg = NULL;
- idx = (idx + 1) & (q->num_descs - 1);
}
}
skb_frag_t *frag;
bool start, done;
bool outer_csum;
+ dma_addr_t addr;
bool has_vlan;
u16 desc_len;
u8 desc_nsge;
if (frag_left > 0) {
len = min(frag_left, left);
frag_left -= len;
- elem->addr =
- cpu_to_le64(ionic_tx_map_frag(q, frag,
- offset, len));
- if (dma_mapping_error(dev, elem->addr))
+ addr = ionic_tx_map_frag(q, frag, offset, len);
+ if (dma_mapping_error(dev, addr))
goto err_out_abort;
+ elem->addr = cpu_to_le64(addr);
elem->len = cpu_to_le16(len);
elem++;
desc_nsge++;
#ifndef _IONIC_TXRX_H_
#define _IONIC_TXRX_H_
-void ionic_rx_flush(struct ionic_cq *cq);
void ionic_tx_flush(struct ionic_cq *cq);
void ionic_rx_fill(struct ionic_queue *q);
}
rtl_irq_disable(tp);
- napi_schedule_irqoff(&tp->napi);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
rtl_request_firmware(tp);
retval = request_irq(pci_irq_vector(pdev, 0), rtl8169_interrupt,
- IRQF_NO_THREAD | IRQF_SHARED, dev->name, tp);
+ IRQF_SHARED, dev->name, tp);
if (retval < 0)
goto err_release_fw_2;
config.flags = 0;
config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON :
HWTSTAMP_TX_OFF;
- if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_V2_L2_EVENT)
+ switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) {
+ case RAVB_RXTSTAMP_TYPE_V2_L2_EVENT:
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
- else if (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE_ALL)
+ break;
+ case RAVB_RXTSTAMP_TYPE_ALL:
config.rx_filter = HWTSTAMP_FILTER_ALL;
- else
+ break;
+ default:
config.rx_filter = HWTSTAMP_FILTER_NONE;
+ }
return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
gtp = netdev_priv(dev);
- err = gtp_encap_enable(gtp, data);
- if (err < 0)
- return err;
-
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
hashsize = 1024;
} else {
err = gtp_hashtable_new(gtp, hashsize);
if (err < 0)
- goto out_encap;
+ return err;
+
+ err = gtp_encap_enable(gtp, data);
+ if (err < 0)
+ goto out_hashtable;
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
- goto out_hashtable;
+ goto out_encap;
}
gn = net_generic(dev_net(dev), gtp_net_id);
return 0;
+out_encap:
+ gtp_encap_disable(gtp);
out_hashtable:
kfree(gtp->addr_hash);
kfree(gtp->tid_hash);
-out_encap:
- gtp_encap_disable(gtp);
return err;
}
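
Note that the reworked gtp error path unwinds in exact reverse order of setup: the hash tables are now allocated before encap is enabled, so a register_netdevice() failure disables encap first (out_encap) and then frees the hash tables (out_hashtable), keeping every goto target valid for the failure points above it.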
/* assert(which < trans->tre_count); */
- /* Set the page information for the buffer. We also need to fill in
- * the DMA address and length for the buffer (something dma_map_sg()
- * normally does).
+ /* Commands are quite different from data transfer requests.
+ * Their payloads come from a pool whose memory is allocated
+ * using dma_alloc_coherent(). We therefore do *not* map them
+ * for DMA (unlike what we do for pages and skbs).
+ *
+ * When a transaction completes, the SGL is normally unmapped.
+ * A command transaction has direction DMA_NONE, which tells
+ * gsi_trans_complete() to skip the unmapping step.
+ *
+ * The only things we use directly in a command scatter/gather
+ * entry are the DMA address and length. We still need the SG
+ * table flags to be maintained though, so assign a NULL page
+ * pointer for that purpose.
*/
sg = &trans->sgl[which];
-
- sg_set_buf(sg, buf, size);
+ sg_assign_page(sg, NULL);
sg_dma_address(sg) = addr;
- sg_dma_len(sg) = sg->length;
+ sg_dma_len(sg) = size;
info = &trans->info[which];
info->opcode = opcode;
}
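
A condensed sketch of the hand-filled scatterlist pattern the new comment describes, for buffers that are already DMA-able (e.g. allocated with dma_alloc_coherent()) and therefore must not be passed through dma_map_sg(); the helper name is illustrative:

static void sketch_sg_set_premapped(struct scatterlist *sg,
				    dma_addr_t addr, unsigned int len)
{
	sg_assign_page(sg, NULL);	/* keep SG table flags consistent */
	sg_dma_address(sg) = addr;	/* already a device-visible address */
	sg_dma_len(sg) = len;
}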
static struct ism_systemeid SYSTEM_EID = {
- .seid_string = "IBM-SYSZ-IBMSEID00000000",
+ .seid_string = "IBM-SYSZ-ISMSEID00000000",
.serial_number = "0000",
.type = "0000",
};
if (err)
goto nla_put_failure;
- if (region->port)
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
- region->port->index))
+ if (region->port) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
goto nla_put_failure;
+ }
err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME, region->ops->name);
if (err)
if (err)
goto out_cancel_msg;
- if (region->port)
- if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
- region->port->index))
+ if (region->port) {
+ err = nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
goto out_cancel_msg;
+ }
err = nla_put_string(msg, DEVLINK_ATTR_REGION_NAME,
region->ops->name);
index = nla_get_u32(info->attrs[DEVLINK_ATTR_PORT_INDEX]);
port = devlink_port_get_by_index(devlink, index);
- if (!port)
- return -ENODEV;
+ if (!port) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
}
region_name = nla_data(attrs[DEVLINK_ATTR_REGION_NAME]);
if (err)
goto nla_put_failure;
- if (region->port)
- if (nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
- region->port->index))
+ if (region->port) {
+ err = nla_put_u32(skb, DEVLINK_ATTR_PORT_INDEX,
+ region->port->index);
+ if (err)
goto nla_put_failure;
+ }
err = nla_put_string(skb, DEVLINK_ATTR_REGION_NAME, region_name);
if (err)
return true;
if (tcp_rmem_pressure(sk))
return true;
+ if (tcp_receive_window(tp) <= inet_csk(sk)->icsk_ack.rcv_mss)
+ return true;
}
if (sk->sk_prot->stream_memory_read)
return sk->sk_prot->stream_memory_read(sk);
int avail = tp->rcv_nxt - tp->copied_seq;
if (avail < sk->sk_rcvlowat && !tcp_rmem_pressure(sk) &&
- !sock_flag(sk, SOCK_DONE))
+ !sock_flag(sk, SOCK_DONE) &&
+ tcp_receive_window(tp) > inet_csk(sk)->icsk_ack.rcv_mss)
return;
sk->sk_data_ready(sk);
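
Both tcp hunks above share one rationale: once the advertised receive window has shrunk to at most one MSS, the peer cannot make progress, so the socket must be reported readable (and the reader woken) even while fewer than sk_rcvlowat bytes are queued; otherwise reader and sender can stall waiting on each other.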
skb_ext_reset(skb);
skb_orphan(skb);
+ /* try to fetch required memory from subflow */
+ if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
+ if (ssk->sk_forward_alloc < skb->truesize)
+ goto drop;
+ __sk_mem_reclaim(ssk, skb->truesize);
+ if (!sk_rmem_schedule(sk, skb, skb->truesize))
+ goto drop;
+ }
+
/* the skb map_seq accounts for the skb offset:
* mptcp_subflow_get_mapped_dsn() is based on the current tp->copied_seq
* value
* will retransmit as needed, if needed.
*/
MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA);
+drop:
mptcp_drop(sk, skb);
return false;
}
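
The new memory-scheduling block charges the incoming skb to the MPTCP socket's receive budget and, if that fails, reclaims skb->truesize bytes from the subflow's forward allocation before retrying once; the skb is dropped only when the subflow has too little forward allocation or the retry also fails.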
module_init(mpls_init_module);
module_exit(mpls_cleanup_module);
+MODULE_SOFTDEP("post: mpls_gso");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MPLS manipulation actions");
block_cb->indr.binder_type,
&block->flow_block, tcf_block_shared(block),
&extack);
+ rtnl_lock();
down_write(&block->cb_lock);
list_del(&block_cb->driver_list);
list_move(&block_cb->list, &bo.cb_list);
- up_write(&block->cb_lock);
- rtnl_lock();
tcf_block_unbind(block, &bo);
+ up_write(&block->cb_lock);
rtnl_unlock();
}
/* default uniform distribution */
if (dist == NULL)
- return ((rnd % (2 * sigma)) + mu) - sigma;
+ return ((rnd % (2 * (u32)sigma)) + mu) - sigma;
t = dist->table[rnd % dist->size];
x = (sigma % NETEM_DIST_SCALE) * t;
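
A worked example of the uniform branch above, with illustrative numbers:

/* mu = 100, sigma = 10: rnd % (2 * (u32)sigma) is uniform over
 * [0, 20), so the result is uniform over [mu - sigma, mu + sigma)
 * = [90, 110). The (u32) cast keeps 2 * sigma well-defined even
 * after sigma has been capped to INT_MAX by the clamps below.
 */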
q->slot_config.max_packets = INT_MAX;
if (q->slot_config.max_bytes == 0)
q->slot_config.max_bytes = INT_MAX;
+
+ /* capping dist_jitter to the range acceptable by tabledist() */
+ q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter));
+
q->slot.packets_left = q->slot_config.max_packets;
q->slot.bytes_left = q->slot_config.max_bytes;
if (q->slot_config.min_delay | q->slot_config.max_delay |
if (tb[TCA_NETEM_SLOT])
get_slot(q, tb[TCA_NETEM_SLOT]);
+ /* capping jitter to the range acceptable by tabledist() */
+ q->jitter = min_t(s64, abs(q->jitter), INT_MAX);
+
return ret;
get_table_failure:
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
- struct smc_init_info *ini, u8 version)
+ int local_first, u8 version)
{
/* RDMA setup failed, switch back to TCP */
- if (ini->first_contact_local)
+ if (local_first)
smc_lgr_cleanup_early(&new_smc->conn);
else
smc_conn_free(&new_smc->conn);
out_unlock:
mutex_unlock(&smc_server_lgr_pending);
out_decl:
- smc_listen_decline(new_smc, rc, ini, version);
+ smc_listen_decline(new_smc, rc, ini ? ini->first_contact_local : 0,
+ version);
out_free:
kfree(ini);
kfree(buf);
rc = smc_ism_register_dmb(lgr, bufsize, buf_desc);
if (rc) {
kfree(buf_desc);
- return (rc == -ENOMEM) ? ERR_PTR(-EAGAIN) :
- ERR_PTR(-EIO);
+ if (rc == -ENOMEM)
+ return ERR_PTR(-EAGAIN);
+ if (rc == -ENOSPC)
+ return ERR_PTR(-ENOSPC);
+ return ERR_PTR(-EIO);
}
buf_desc->pages = virt_to_page(buf_desc->cpu_addr);
/* CDC header stored in buf. So, pretend it was smaller */
if (fragid == FIRST_FRAGMENT) {
if (unlikely(head))
goto err;
- if (skb_cloned(frag))
- frag = skb_copy(frag, GFP_ATOMIC);
+ *buf = NULL;
+ frag = skb_unshare(frag, GFP_ATOMIC);
if (unlikely(!frag))
goto err;
head = *headbuf = frag;
- *buf = NULL;
TIPC_SKB_CB(head)->tail = NULL;
if (skb_is_nonlinear(head)) {
skb_walk_frags(head, tail) {
vsk->buffer_min_size = psk->buffer_min_size;
vsk->buffer_max_size = psk->buffer_max_size;
} else {
- vsk->trusted = capable(CAP_NET_ADMIN);
+ vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
vsk->owner = get_current_cred();
vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;