1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include <linux/dpll.h>
5 #include <linux/mlx5/driver.h>
7 /* This structure represents a reference to DPLL, one is created
11 struct dpll_device *dpll;
12 struct dpll_pin *dpll_pin;
13 struct mlx5_core_dev *mdev;
14 struct workqueue_struct *wq;
15 struct delayed_work work;
18 enum dpll_lock_status lock_status;
19 enum dpll_pin_state pin_state;
21 struct notifier_block mdev_nb;
22 struct net_device *tracking_netdev;
25 static int mlx5_dpll_clock_id_get(struct mlx5_core_dev *mdev, u64 *clock_id)
27 u32 out[MLX5_ST_SZ_DW(msecq_reg)] = {};
28 u32 in[MLX5_ST_SZ_DW(msecq_reg)] = {};
31 err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
32 MLX5_REG_MSECQ, 0, 0);
35 *clock_id = MLX5_GET64(msecq_reg, out, local_clock_identity);
39 struct mlx5_dpll_synce_status {
40 enum mlx5_msees_admin_status admin_status;
41 enum mlx5_msees_oper_status oper_status;
43 bool oper_freq_measure;
48 mlx5_dpll_synce_status_get(struct mlx5_core_dev *mdev,
49 struct mlx5_dpll_synce_status *synce_status)
51 u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
52 u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
55 err = mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
56 MLX5_REG_MSEES, 0, 0);
59 synce_status->admin_status = MLX5_GET(msees_reg, out, admin_status);
60 synce_status->oper_status = MLX5_GET(msees_reg, out, oper_status);
61 synce_status->ho_acq = MLX5_GET(msees_reg, out, ho_acq);
62 synce_status->oper_freq_measure = MLX5_GET(msees_reg, out, oper_freq_measure);
63 synce_status->frequency_diff = MLX5_GET(msees_reg, out, frequency_diff);
68 mlx5_dpll_synce_status_set(struct mlx5_core_dev *mdev,
69 enum mlx5_msees_admin_status admin_status)
71 u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
72 u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
74 MLX5_SET(msees_reg, in, field_select,
75 MLX5_MSEES_FIELD_SELECT_ENABLE |
76 MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE |
77 MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS);
78 MLX5_SET(msees_reg, in, admin_status, admin_status);
79 MLX5_SET(msees_reg, in, admin_freq_measure, true);
80 return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
81 MLX5_REG_MSEES, 0, 1);
84 static enum dpll_lock_status
85 mlx5_dpll_lock_status_get(struct mlx5_dpll_synce_status *synce_status)
87 switch (synce_status->oper_status) {
88 case MLX5_MSEES_OPER_STATUS_SELF_TRACK:
90 case MLX5_MSEES_OPER_STATUS_OTHER_TRACK:
91 return synce_status->ho_acq ? DPLL_LOCK_STATUS_LOCKED_HO_ACQ :
92 DPLL_LOCK_STATUS_LOCKED;
93 case MLX5_MSEES_OPER_STATUS_HOLDOVER:
95 case MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER:
96 return DPLL_LOCK_STATUS_HOLDOVER;
98 return DPLL_LOCK_STATUS_UNLOCKED;
102 static enum dpll_pin_state
103 mlx5_dpll_pin_state_get(struct mlx5_dpll_synce_status *synce_status)
105 return (synce_status->admin_status == MLX5_MSEES_ADMIN_STATUS_TRACK &&
106 (synce_status->oper_status == MLX5_MSEES_OPER_STATUS_SELF_TRACK ||
107 synce_status->oper_status == MLX5_MSEES_OPER_STATUS_OTHER_TRACK)) ?
108 DPLL_PIN_STATE_CONNECTED : DPLL_PIN_STATE_DISCONNECTED;
112 mlx5_dpll_pin_ffo_get(struct mlx5_dpll_synce_status *synce_status,
115 if (!synce_status->oper_freq_measure)
117 *ffo = synce_status->frequency_diff;
121 static int mlx5_dpll_device_lock_status_get(const struct dpll_device *dpll,
123 enum dpll_lock_status *status,
124 struct netlink_ext_ack *extack)
126 struct mlx5_dpll_synce_status synce_status;
127 struct mlx5_dpll *mdpll = priv;
130 err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
133 *status = mlx5_dpll_lock_status_get(&synce_status);
137 static int mlx5_dpll_device_mode_get(const struct dpll_device *dpll,
138 void *priv, enum dpll_mode *mode,
139 struct netlink_ext_ack *extack)
141 *mode = DPLL_MODE_MANUAL;
145 static const struct dpll_device_ops mlx5_dpll_device_ops = {
146 .lock_status_get = mlx5_dpll_device_lock_status_get,
147 .mode_get = mlx5_dpll_device_mode_get,
150 static int mlx5_dpll_pin_direction_get(const struct dpll_pin *pin,
152 const struct dpll_device *dpll,
154 enum dpll_pin_direction *direction,
155 struct netlink_ext_ack *extack)
157 *direction = DPLL_PIN_DIRECTION_INPUT;
161 static int mlx5_dpll_state_on_dpll_get(const struct dpll_pin *pin,
163 const struct dpll_device *dpll,
165 enum dpll_pin_state *state,
166 struct netlink_ext_ack *extack)
168 struct mlx5_dpll_synce_status synce_status;
169 struct mlx5_dpll *mdpll = pin_priv;
172 err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
175 *state = mlx5_dpll_pin_state_get(&synce_status);
179 static int mlx5_dpll_state_on_dpll_set(const struct dpll_pin *pin,
181 const struct dpll_device *dpll,
183 enum dpll_pin_state state,
184 struct netlink_ext_ack *extack)
186 struct mlx5_dpll *mdpll = pin_priv;
188 return mlx5_dpll_synce_status_set(mdpll->mdev,
189 state == DPLL_PIN_STATE_CONNECTED ?
190 MLX5_MSEES_ADMIN_STATUS_TRACK :
191 MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
194 static int mlx5_dpll_ffo_get(const struct dpll_pin *pin, void *pin_priv,
195 const struct dpll_device *dpll, void *dpll_priv,
196 s64 *ffo, struct netlink_ext_ack *extack)
198 struct mlx5_dpll_synce_status synce_status;
199 struct mlx5_dpll *mdpll = pin_priv;
202 err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
205 return mlx5_dpll_pin_ffo_get(&synce_status, ffo);
208 static const struct dpll_pin_ops mlx5_dpll_pins_ops = {
209 .direction_get = mlx5_dpll_pin_direction_get,
210 .state_on_dpll_get = mlx5_dpll_state_on_dpll_get,
211 .state_on_dpll_set = mlx5_dpll_state_on_dpll_set,
212 .ffo_get = mlx5_dpll_ffo_get,
215 static const struct dpll_pin_properties mlx5_dpll_pin_properties = {
216 .type = DPLL_PIN_TYPE_SYNCE_ETH_PORT,
217 .capabilities = DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE,
220 #define MLX5_DPLL_PERIODIC_WORK_INTERVAL 500 /* ms */
222 static void mlx5_dpll_periodic_work_queue(struct mlx5_dpll *mdpll)
224 queue_delayed_work(mdpll->wq, &mdpll->work,
225 msecs_to_jiffies(MLX5_DPLL_PERIODIC_WORK_INTERVAL));
228 static void mlx5_dpll_periodic_work(struct work_struct *work)
230 struct mlx5_dpll *mdpll = container_of(work, struct mlx5_dpll,
232 struct mlx5_dpll_synce_status synce_status;
233 enum dpll_lock_status lock_status;
234 enum dpll_pin_state pin_state;
237 err = mlx5_dpll_synce_status_get(mdpll->mdev, &synce_status);
240 lock_status = mlx5_dpll_lock_status_get(&synce_status);
241 pin_state = mlx5_dpll_pin_state_get(&synce_status);
243 if (!mdpll->last.valid)
246 if (mdpll->last.lock_status != lock_status)
247 dpll_device_change_ntf(mdpll->dpll);
248 if (mdpll->last.pin_state != pin_state)
249 dpll_pin_change_ntf(mdpll->dpll_pin);
252 mdpll->last.lock_status = lock_status;
253 mdpll->last.pin_state = pin_state;
254 mdpll->last.valid = true;
256 mlx5_dpll_periodic_work_queue(mdpll);
259 static void mlx5_dpll_netdev_dpll_pin_set(struct mlx5_dpll *mdpll,
260 struct net_device *netdev)
262 if (mdpll->tracking_netdev)
264 dpll_netdev_pin_set(netdev, mdpll->dpll_pin);
265 mdpll->tracking_netdev = netdev;
268 static void mlx5_dpll_netdev_dpll_pin_clear(struct mlx5_dpll *mdpll)
270 if (!mdpll->tracking_netdev)
272 dpll_netdev_pin_clear(mdpll->tracking_netdev);
273 mdpll->tracking_netdev = NULL;
276 static int mlx5_dpll_mdev_notifier_event(struct notifier_block *nb,
277 unsigned long event, void *data)
279 struct mlx5_dpll *mdpll = container_of(nb, struct mlx5_dpll, mdev_nb);
280 struct net_device *netdev = data;
283 case MLX5_DRIVER_EVENT_UPLINK_NETDEV:
285 mlx5_dpll_netdev_dpll_pin_set(mdpll, netdev);
287 mlx5_dpll_netdev_dpll_pin_clear(mdpll);
296 static void mlx5_dpll_mdev_netdev_track(struct mlx5_dpll *mdpll,
297 struct mlx5_core_dev *mdev)
299 mdpll->mdev_nb.notifier_call = mlx5_dpll_mdev_notifier_event;
300 mlx5_blocking_notifier_register(mdev, &mdpll->mdev_nb);
301 mlx5_core_uplink_netdev_event_replay(mdev);
304 static void mlx5_dpll_mdev_netdev_untrack(struct mlx5_dpll *mdpll,
305 struct mlx5_core_dev *mdev)
307 mlx5_blocking_notifier_unregister(mdev, &mdpll->mdev_nb);
308 mlx5_dpll_netdev_dpll_pin_clear(mdpll);
311 static int mlx5_dpll_probe(struct auxiliary_device *adev,
312 const struct auxiliary_device_id *id)
314 struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
315 struct mlx5_core_dev *mdev = edev->mdev;
316 struct mlx5_dpll *mdpll;
320 err = mlx5_dpll_synce_status_set(mdev,
321 MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
325 err = mlx5_dpll_clock_id_get(mdev, &clock_id);
329 mdpll = kzalloc(sizeof(*mdpll), GFP_KERNEL);
333 auxiliary_set_drvdata(adev, mdpll);
335 /* Multiple mdev instances might share one DPLL device. */
336 mdpll->dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
337 if (IS_ERR(mdpll->dpll)) {
338 err = PTR_ERR(mdpll->dpll);
342 err = dpll_device_register(mdpll->dpll, DPLL_TYPE_EEC,
343 &mlx5_dpll_device_ops, mdpll);
345 goto err_put_dpll_device;
347 /* Multiple mdev instances might share one DPLL pin. */
348 mdpll->dpll_pin = dpll_pin_get(clock_id, mlx5_get_dev_index(mdev),
349 THIS_MODULE, &mlx5_dpll_pin_properties);
350 if (IS_ERR(mdpll->dpll_pin)) {
351 err = PTR_ERR(mdpll->dpll_pin);
352 goto err_unregister_dpll_device;
355 err = dpll_pin_register(mdpll->dpll, mdpll->dpll_pin,
356 &mlx5_dpll_pins_ops, mdpll);
358 goto err_put_dpll_pin;
360 mdpll->wq = create_singlethread_workqueue("mlx5_dpll");
363 goto err_unregister_dpll_pin;
366 mlx5_dpll_mdev_netdev_track(mdpll, mdev);
368 INIT_DELAYED_WORK(&mdpll->work, &mlx5_dpll_periodic_work);
369 mlx5_dpll_periodic_work_queue(mdpll);
373 err_unregister_dpll_pin:
374 dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
375 &mlx5_dpll_pins_ops, mdpll);
377 dpll_pin_put(mdpll->dpll_pin);
378 err_unregister_dpll_device:
379 dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
381 dpll_device_put(mdpll->dpll);
387 static void mlx5_dpll_remove(struct auxiliary_device *adev)
389 struct mlx5_dpll *mdpll = auxiliary_get_drvdata(adev);
390 struct mlx5_core_dev *mdev = mdpll->mdev;
392 cancel_delayed_work_sync(&mdpll->work);
393 mlx5_dpll_mdev_netdev_untrack(mdpll, mdev);
394 destroy_workqueue(mdpll->wq);
395 dpll_pin_unregister(mdpll->dpll, mdpll->dpll_pin,
396 &mlx5_dpll_pins_ops, mdpll);
397 dpll_pin_put(mdpll->dpll_pin);
398 dpll_device_unregister(mdpll->dpll, &mlx5_dpll_device_ops, mdpll);
399 dpll_device_put(mdpll->dpll);
402 mlx5_dpll_synce_status_set(mdev,
403 MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING);
406 static int mlx5_dpll_suspend(struct auxiliary_device *adev, pm_message_t state)
/* No device state needs restoring; resume is a no-op. */
static int mlx5_dpll_resume(struct auxiliary_device *adev)
{
	return 0;
}
416 static const struct auxiliary_device_id mlx5_dpll_id_table[] = {
417 { .name = MLX5_ADEV_NAME ".dpll", },
421 MODULE_DEVICE_TABLE(auxiliary, mlx5_dpll_id_table);
423 static struct auxiliary_driver mlx5_dpll_driver = {
425 .probe = mlx5_dpll_probe,
426 .remove = mlx5_dpll_remove,
427 .suspend = mlx5_dpll_suspend,
428 .resume = mlx5_dpll_resume,
429 .id_table = mlx5_dpll_id_table,
432 static int __init mlx5_dpll_init(void)
434 return auxiliary_driver_register(&mlx5_dpll_driver);
437 static void __exit mlx5_dpll_exit(void)
439 auxiliary_driver_unregister(&mlx5_dpll_driver);
442 module_init(mlx5_dpll_init);
443 module_exit(mlx5_dpll_exit);
445 MODULE_AUTHOR("Jiri Pirko <jiri@nvidia.com>");
446 MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) DPLL driver");
447 MODULE_LICENSE("Dual BSD/GPL");