/*
 * Copyright (c) 2016, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "vxlan.h"
39 static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
41 void mlx5e_vxlan_init(struct mlx5e_priv *priv)
43 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
45 spin_lock_init(&vxlan_db->lock);
46 INIT_RADIX_TREE(&vxlan_db->tree, GFP_ATOMIC);
48 if (mlx5e_vxlan_allowed(priv->mdev))
49 /* Hardware adds 4789 by default.
50 * Lockless since we are the only hash table consumers, wq and TX are disabled.
52 mlx5e_vxlan_add_port(priv, 4789);
55 static inline u8 mlx5e_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
57 return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
60 static int mlx5e_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
62 u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
63 u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
65 MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
66 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
67 MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
68 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
71 static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
73 u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
74 u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
76 MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
77 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
78 MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
79 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
82 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
84 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
85 struct mlx5e_vxlan *vxlan;
87 spin_lock_bh(&vxlan_db->lock);
88 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
89 spin_unlock_bh(&vxlan_db->lock);
94 static void mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
96 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
97 struct mlx5e_vxlan *vxlan;
100 vxlan = mlx5e_vxlan_lookup_port(priv, port);
102 atomic_inc(&vxlan->refcount);
106 if (vxlan_db->num_ports >= mlx5e_vxlan_max_udp_ports(priv->mdev)) {
107 netdev_info(priv->netdev,
108 "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
109 port, mlx5e_vxlan_max_udp_ports(priv->mdev));
113 if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
116 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
118 goto err_delete_port;
120 vxlan->udp_port = port;
121 atomic_set(&vxlan->refcount, 1);
123 spin_lock_bh(&vxlan_db->lock);
124 err = radix_tree_insert(&vxlan_db->tree, vxlan->udp_port, vxlan);
125 spin_unlock_bh(&vxlan_db->lock);
129 vxlan_db->num_ports++;
135 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
138 static void mlx5e_vxlan_add_work(struct work_struct *work)
140 struct mlx5e_vxlan_work *vxlan_work =
141 container_of(work, struct mlx5e_vxlan_work, work);
142 struct mlx5e_priv *priv = vxlan_work->priv;
143 u16 port = vxlan_work->port;
145 mutex_lock(&priv->state_lock);
146 mlx5e_vxlan_add_port(priv, port);
147 mutex_unlock(&priv->state_lock);
152 static void mlx5e_vxlan_del_work(struct work_struct *work)
154 struct mlx5e_vxlan_work *vxlan_work =
155 container_of(work, struct mlx5e_vxlan_work, work);
156 struct mlx5e_priv *priv = vxlan_work->priv;
157 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
158 u16 port = vxlan_work->port;
159 struct mlx5e_vxlan *vxlan;
162 mutex_lock(&priv->state_lock);
163 spin_lock_bh(&vxlan_db->lock);
164 vxlan = radix_tree_lookup(&vxlan_db->tree, port);
168 if (atomic_dec_and_test(&vxlan->refcount)) {
169 radix_tree_delete(&vxlan_db->tree, port);
174 spin_unlock_bh(&vxlan_db->lock);
177 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
179 vxlan_db->num_ports--;
181 mutex_unlock(&priv->state_lock);
185 void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
188 struct mlx5e_vxlan_work *vxlan_work;
190 vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
195 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
197 INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
199 vxlan_work->priv = priv;
200 vxlan_work->port = port;
201 vxlan_work->sa_family = sa_family;
202 queue_work(priv->wq, &vxlan_work->work);
205 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
207 struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
208 struct mlx5e_vxlan *vxlan;
209 unsigned int port = 0;
211 /* Lockless since we are the only radix-tree consumers, wq is disabled */
212 while (radix_tree_gang_lookup(&vxlan_db->tree, (void **)&vxlan, port, 1)) {
213 port = vxlan->udp_port;
214 radix_tree_delete(&vxlan_db->tree, port);
215 mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);