/*
 * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
#include "vxlan.h"
40 struct mlx5_core_dev *mdev;
41 spinlock_t lock; /* protect vxlan table */
43 /* max_num_ports is usuallly 4, 16 buckets is more than enough */
44 DECLARE_HASHTABLE(htable, 4);
47 struct mlx5_vxlan_port {
48 struct hlist_node hlist;
53 static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
55 return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
58 static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
60 u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {0};
61 u32 out[MLX5_ST_SZ_DW(add_vxlan_udp_dport_out)] = {0};
63 MLX5_SET(add_vxlan_udp_dport_in, in, opcode,
64 MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT);
65 MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port);
66 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
69 static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
71 u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {0};
72 u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)] = {0};
74 MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
75 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
76 MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port);
77 return mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
80 static struct mlx5_vxlan_port*
81 mlx5_vxlan_lookup_port_locked(struct mlx5_vxlan *vxlan, u16 port)
83 struct mlx5_vxlan_port *vxlanp;
85 hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) {
86 if (vxlanp->udp_port == port)
93 struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
95 struct mlx5_vxlan_port *vxlanp;
97 if (!mlx5_vxlan_allowed(vxlan))
100 spin_lock_bh(&vxlan->lock);
101 vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
102 spin_unlock_bh(&vxlan->lock);
107 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
109 struct mlx5_vxlan_port *vxlanp;
112 vxlanp = mlx5_vxlan_lookup_port(vxlan, port);
114 atomic_inc(&vxlanp->refcount);
118 if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
119 mlx5_core_info(vxlan->mdev,
120 "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
121 port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
126 ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
130 vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
133 goto err_delete_port;
136 vxlanp->udp_port = port;
137 atomic_set(&vxlanp->refcount, 1);
139 spin_lock_bh(&vxlan->lock);
140 hash_add(vxlan->htable, &vxlanp->hlist, port);
141 spin_unlock_bh(&vxlan->lock);
147 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
151 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
153 struct mlx5_vxlan_port *vxlanp;
157 spin_lock_bh(&vxlan->lock);
158 vxlanp = mlx5_vxlan_lookup_port_locked(vxlan, port);
164 if (atomic_dec_and_test(&vxlanp->refcount)) {
165 hash_del(&vxlanp->hlist);
170 spin_unlock_bh(&vxlan->lock);
173 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
180 struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev)
182 struct mlx5_vxlan *vxlan;
184 if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev))
185 return ERR_PTR(-ENOTSUPP);
187 vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
189 return ERR_PTR(-ENOMEM);
192 spin_lock_init(&vxlan->lock);
193 hash_init(vxlan->htable);
195 /* Hardware adds 4789 by default */
196 mlx5_vxlan_add_port(vxlan, 4789);
201 void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan)
203 struct mlx5_vxlan_port *vxlanp;
204 struct hlist_node *tmp;
207 if (!mlx5_vxlan_allowed(vxlan))
210 /* Lockless since we are the only hash table consumers*/
211 hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) {
212 hash_del(&vxlanp->hlist);
213 mlx5_vxlan_core_del_port_cmd(vxlan->mdev, vxlanp->udp_port);