1 From be07319b9897738a4ab1501880b7dd9be26eba66 Mon Sep 17 00:00:00 2001
2 From: Yangbo Lu <yangbo.lu@nxp.com>
3 Date: Mon, 25 Sep 2017 11:54:28 +0800
4 Subject: [PATCH] phy: support layerscape
5
6 This is an integrated patch for layerscape mdio-phy support.
7
8 Signed-off-by: Bogdan Purcareata <bogdan.purcareata@nxp.com>
9 Signed-off-by: Zhang Ying-22455 <ying.zhang22455@nxp.com>
10 Signed-off-by: costi <constantin.tudor@freescale.com>
11 Signed-off-by: Madalin Bucur <madalin.bucur@freescale.com>
12 Signed-off-by: Shaohui Xie <Shaohui.Xie@nxp.com>
13 Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
14 Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com>
15 ---
16 drivers/net/phy/Kconfig | 11 +
17 drivers/net/phy/Makefile | 2 +
18 drivers/net/phy/aquantia.c | 28 +
19 drivers/net/phy/cortina.c | 118 ++++
20 drivers/net/phy/fsl_backplane.c | 1358 +++++++++++++++++++++++++++++++++++++++
21 drivers/net/phy/phy.c | 23 +-
22 drivers/net/phy/phy_device.c | 6 +-
23 drivers/net/phy/swphy.c | 1 +
24 include/linux/phy.h | 4 +
25 9 files changed, 1544 insertions(+), 7 deletions(-)
26 create mode 100644 drivers/net/phy/cortina.c
27 create mode 100644 drivers/net/phy/fsl_backplane.c
28
29 diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
30 index 30a3a2f5..3521c1ac 100644
31 --- a/drivers/net/phy/Kconfig
32 +++ b/drivers/net/phy/Kconfig
33 @@ -89,6 +89,12 @@ config MDIO_BUS_MUX_MMIOREG
34 config MDIO_CAVIUM
35 tristate
36
37 +config MDIO_FSL_BACKPLANE
38 + tristate "Support for backplane on Freescale XFI interface"
39 + depends on OF_MDIO
40 + help
41 + This module provides a driver for the Freescale XFI backplane.
42 +
43 config MDIO_GPIO
44 tristate "GPIO lib-based bitbanged MDIO buses"
45 depends on MDIO_BITBANG && GPIOLIB
46 @@ -298,6 +304,11 @@ config CICADA_PHY
47 ---help---
48 Currently supports the cis8204
49
50 +config CORTINA_PHY
51 + tristate "Cortina EDC CDR 10G Ethernet PHY"
52 + ---help---
53 + Currently supports the CS4340 phy.
54 +
55 config DAVICOM_PHY
56 tristate "Davicom PHYs"
57 ---help---
58 diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
59 index 93a68fcd..ef3ec265 100644
60 --- a/drivers/net/phy/Makefile
61 +++ b/drivers/net/phy/Makefile
62 @@ -30,6 +30,7 @@ obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o
63 obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o
64 obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
65 obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o
66 +obj-$(CONFIG_MDIO_FSL_BACKPLANE) += fsl_backplane.o
67 obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
68 obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o
69 obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o
70 @@ -48,6 +49,7 @@ obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o
71 obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o
72 obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
73 obj-$(CONFIG_CICADA_PHY) += cicada.o
74 +obj-$(CONFIG_CORTINA_PHY) += cortina.o
75 obj-$(CONFIG_DAVICOM_PHY) += davicom.o
76 obj-$(CONFIG_DP83640_PHY) += dp83640.o
77 obj-$(CONFIG_DP83848_PHY) += dp83848.o
78 diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
79 index 09b0b0aa..e8ae50e1 100644
80 --- a/drivers/net/phy/aquantia.c
81 +++ b/drivers/net/phy/aquantia.c
82 @@ -21,6 +21,8 @@
83 #define PHY_ID_AQ1202 0x03a1b445
84 #define PHY_ID_AQ2104 0x03a1b460
85 #define PHY_ID_AQR105 0x03a1b4a2
86 +#define PHY_ID_AQR106 0x03a1b4d0
87 +#define PHY_ID_AQR107 0x03a1b4e0
88 #define PHY_ID_AQR405 0x03a1b4b0
89
90 #define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \
91 @@ -153,6 +155,30 @@ static struct phy_driver aquantia_driver[] = {
92 .ack_interrupt = aquantia_ack_interrupt,
93 .read_status = aquantia_read_status,
94 },
95 +{
96 + .phy_id = PHY_ID_AQR106,
97 + .phy_id_mask = 0xfffffff0,
98 + .name = "Aquantia AQR106",
99 + .features = PHY_AQUANTIA_FEATURES,
100 + .flags = PHY_HAS_INTERRUPT,
101 + .aneg_done = aquantia_aneg_done,
102 + .config_aneg = aquantia_config_aneg,
103 + .config_intr = aquantia_config_intr,
104 + .ack_interrupt = aquantia_ack_interrupt,
105 + .read_status = aquantia_read_status,
106 +},
107 +{
108 + .phy_id = PHY_ID_AQR107,
109 + .phy_id_mask = 0xfffffff0,
110 + .name = "Aquantia AQR107",
111 + .features = PHY_AQUANTIA_FEATURES,
112 + .flags = PHY_HAS_INTERRUPT,
113 + .aneg_done = aquantia_aneg_done,
114 + .config_aneg = aquantia_config_aneg,
115 + .config_intr = aquantia_config_intr,
116 + .ack_interrupt = aquantia_ack_interrupt,
117 + .read_status = aquantia_read_status,
118 +},
119 {
120 .phy_id = PHY_ID_AQR405,
121 .phy_id_mask = 0xfffffff0,
122 @@ -173,6 +199,8 @@ static struct mdio_device_id __maybe_unused aquantia_tbl[] = {
123 { PHY_ID_AQ1202, 0xfffffff0 },
124 { PHY_ID_AQ2104, 0xfffffff0 },
125 { PHY_ID_AQR105, 0xfffffff0 },
126 + { PHY_ID_AQR106, 0xfffffff0 },
127 + { PHY_ID_AQR107, 0xfffffff0 },
128 { PHY_ID_AQR405, 0xfffffff0 },
129 { }
130 };
131 diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
132 new file mode 100644
133 index 00000000..72f4228a
134 --- /dev/null
135 +++ b/drivers/net/phy/cortina.c
136 @@ -0,0 +1,118 @@
137 +/*
138 + * Copyright 2017 NXP
139 + *
140 + * This program is free software; you can redistribute it and/or modify
141 + * it under the terms of the GNU General Public License as published by
142 + * the Free Software Foundation; either version 2 of the License, or
143 + * (at your option) any later version.
144 + *
145 + * This program is distributed in the hope that it will be useful,
146 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
147 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
148 + * GNU General Public License for more details.
149 + *
150 + * CORTINA is a registered trademark of Cortina Systems, Inc.
151 + *
152 + */
153 +#include <linux/module.h>
154 +#include <linux/phy.h>
155 +
156 +#define PHY_ID_CS4340 0x13e51002
157 +
158 +#define VILLA_GLOBAL_CHIP_ID_LSB 0x0
159 +#define VILLA_GLOBAL_CHIP_ID_MSB 0x1
160 +
161 +#define VILLA_GLOBAL_GPIO_1_INTS 0x017
162 +
163 +static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
164 +{
165 + return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
166 + MII_ADDR_C45 | regnum);
167 +}
168 +
169 +static int cortina_config_aneg(struct phy_device *phydev)
170 +{
171 + phydev->supported = SUPPORTED_10000baseT_Full;
172 + phydev->advertising = SUPPORTED_10000baseT_Full;
173 +
174 + return 0;
175 +}
176 +
177 +static int cortina_read_status(struct phy_device *phydev)
178 +{
179 + int gpio_int_status, ret = 0;
180 +
181 + gpio_int_status = cortina_read_reg(phydev, VILLA_GLOBAL_GPIO_1_INTS);
182 + if (gpio_int_status < 0) {
183 + ret = gpio_int_status;
184 + goto err;
185 + }
186 +
187 + if (gpio_int_status & 0x8) {
188 + /* up when edc_convergedS set */
189 + phydev->speed = SPEED_10000;
190 + phydev->duplex = DUPLEX_FULL;
191 + phydev->link = 1;
192 + } else {
193 + phydev->link = 0;
194 + }
195 +
196 +err:
197 + return ret;
198 +}
199 +
200 +static int cortina_soft_reset(struct phy_device *phydev)
201 +{
202 + return 0;
203 +}
204 +
205 +static int cortina_probe(struct phy_device *phydev)
206 +{
207 + u32 phy_id = 0;
208 + int id_lsb = 0, id_msb = 0;
209 +
210 + /* Read device id from phy registers. */
211 + id_lsb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_LSB);
212 + if (id_lsb < 0)
213 + return -ENXIO;
214 +
215 + phy_id = id_lsb << 16;
216 +
217 + id_msb = cortina_read_reg(phydev, VILLA_GLOBAL_CHIP_ID_MSB);
218 + if (id_msb < 0)
219 + return -ENXIO;
220 +
221 + phy_id |= id_msb;
222 +
223 + /* Make sure the device tree binding matched the driver with the
224 + * right device.
225 + */
226 + if (phy_id != phydev->drv->phy_id) {
227 + phydev_err(phydev, "Error matching phy with %s driver\n",
228 + phydev->drv->name);
229 + return -ENODEV;
230 + }
231 +
232 + return 0;
233 +}
234 +
235 +static struct phy_driver cortina_driver[] = {
236 +{
237 + .phy_id = PHY_ID_CS4340,
238 + .phy_id_mask = 0xffffffff,
239 + .name = "Cortina CS4340",
240 + .config_aneg = cortina_config_aneg,
241 + .read_status = cortina_read_status,
242 + .soft_reset = cortina_soft_reset,
243 + .probe = cortina_probe,
244 +},
245 +};
246 +
247 +module_phy_driver(cortina_driver);
248 +
249 +static struct mdio_device_id __maybe_unused cortina_tbl[] = {
250 + { PHY_ID_CS4340, 0xffffffff},
251 + {},
252 +};
253 +
254 +MODULE_DEVICE_TABLE(mdio, cortina_tbl);
255 diff --git a/drivers/net/phy/fsl_backplane.c b/drivers/net/phy/fsl_backplane.c
256 new file mode 100644
257 index 00000000..76865261
258 --- /dev/null
259 +++ b/drivers/net/phy/fsl_backplane.c
260 @@ -0,0 +1,1358 @@
261 +/* Freescale backplane driver.
262 + * Author: Shaohui Xie <Shaohui.Xie@freescale.com>
263 + *
264 + * Copyright 2015 Freescale Semiconductor, Inc.
265 + *
266 + * Licensed under the GPL-2 or later.
267 + */
268 +
269 +#include <linux/kernel.h>
270 +#include <linux/module.h>
271 +#include <linux/mii.h>
272 +#include <linux/mdio.h>
273 +#include <linux/ethtool.h>
274 +#include <linux/phy.h>
275 +#include <linux/io.h>
276 +#include <linux/of.h>
277 +#include <linux/of_net.h>
278 +#include <linux/of_address.h>
279 +#include <linux/of_platform.h>
280 +#include <linux/timer.h>
281 +#include <linux/delay.h>
282 +#include <linux/workqueue.h>
283 +
284 +/* XFI PCS Device Identifier */
285 +#define FSL_PCS_PHY_ID 0x0083e400
286 +
287 +/* Freescale KR PMD registers */
288 +#define FSL_KR_PMD_CTRL 0x96
289 +#define FSL_KR_PMD_STATUS 0x97
290 +#define FSL_KR_LP_CU 0x98
291 +#define FSL_KR_LP_STATUS 0x99
292 +#define FSL_KR_LD_CU 0x9a
293 +#define FSL_KR_LD_STATUS 0x9b
294 +
295 +/* Freescale KR PMD defines */
296 +#define PMD_RESET 0x1
297 +#define PMD_STATUS_SUP_STAT 0x4
298 +#define PMD_STATUS_FRAME_LOCK 0x2
299 +#define TRAIN_EN 0x3
300 +#define TRAIN_DISABLE 0x1
301 +#define RX_STAT 0x1
302 +
303 +#define FSL_KR_RX_LINK_STAT_MASK 0x1000
304 +#define FSL_XFI_PCS_10GR_SR1 0x20
305 +
306 +/* Freescale KX PCS mode register */
307 +#define FSL_PCS_IF_MODE 0x8014
308 +
309 +/* Freescale KX PCS mode register init value */
310 +#define IF_MODE_INIT 0x8
311 +
312 +/* Freescale KX/KR AN registers */
313 +#define FSL_AN_AD1 0x11
314 +#define FSL_AN_BP_STAT 0x30
315 +
316 +/* Freescale KX/KR AN registers defines */
317 +#define AN_CTRL_INIT 0x1200
318 +#define KX_AN_AD1_INIT 0x25
319 +#define KR_AN_AD1_INIT 0x85
320 +#define AN_LNK_UP_MASK 0x4
321 +#define KR_AN_MASK 0x8
322 +#define TRAIN_FAIL 0x8
323 +
324 +/* C(-1) */
325 +#define BIN_M1 0
326 +/* C(1) */
327 +#define BIN_LONG 1
328 +#define BIN_M1_SEL 6
329 +#define BIN_Long_SEL 7
330 +#define CDR_SEL_MASK 0x00070000
331 +#define BIN_SNAPSHOT_NUM 5
332 +#define BIN_M1_THRESHOLD 3
333 +#define BIN_LONG_THRESHOLD 2
334 +
335 +#define PRE_COE_SHIFT 22
336 +#define POST_COE_SHIFT 16
337 +#define ZERO_COE_SHIFT 8
338 +
339 +#define PRE_COE_MAX 0x0
340 +#define PRE_COE_MIN 0x8
341 +#define POST_COE_MAX 0x0
342 +#define POST_COE_MIN 0x10
343 +#define ZERO_COE_MAX 0x30
344 +#define ZERO_COE_MIN 0x0
345 +
346 +#define TECR0_INIT 0x24200000
347 +#define RATIO_PREQ 0x3
348 +#define RATIO_PST1Q 0xd
349 +#define RATIO_EQ 0x20
350 +
351 +#define GCR0_RESET_MASK 0x600000
352 +#define GCR1_SNP_START_MASK 0x00000040
353 +#define GCR1_CTL_SNP_START_MASK 0x00002000
354 +#define GCR1_REIDL_TH_MASK 0x00700000
355 +#define GCR1_REIDL_EX_SEL_MASK 0x000c0000
356 +#define GCR1_REIDL_ET_MAS_MASK 0x00004000
357 +#define TECR0_AMP_RED_MASK 0x0000003f
358 +
359 +#define RECR1_CTL_SNP_DONE_MASK 0x00000002
360 +#define RECR1_SNP_DONE_MASK 0x00000004
361 +#define TCSR1_SNP_DATA_MASK 0x0000ffc0
362 +#define TCSR1_SNP_DATA_SHIFT 6
363 +#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100
364 +
365 +#define RECR1_GAINK2_MASK 0x0f000000
366 +#define RECR1_GAINK2_SHIFT 24
367 +#define RECR1_GAINK3_MASK 0x000f0000
368 +#define RECR1_GAINK3_SHIFT 16
369 +#define RECR1_OFFSET_MASK 0x00003f80
370 +#define RECR1_OFFSET_SHIFT 7
371 +#define RECR1_BLW_MASK 0x00000f80
372 +#define RECR1_BLW_SHIFT 7
373 +#define EYE_CTRL_SHIFT 12
374 +#define BASE_WAND_SHIFT 10
375 +
376 +#define XGKR_TIMEOUT 1050
377 +
378 +#define INCREMENT 1
379 +#define DECREMENT 2
380 +#define TIMEOUT_LONG 3
381 +#define TIMEOUT_M1 3
382 +
383 +#define RX_READY_MASK 0x8000
384 +#define PRESET_MASK 0x2000
385 +#define INIT_MASK 0x1000
386 +#define COP1_MASK 0x30
387 +#define COP1_SHIFT 4
388 +#define COZ_MASK 0xc
389 +#define COZ_SHIFT 2
390 +#define COM1_MASK 0x3
391 +#define COM1_SHIFT 0
392 +#define REQUEST_MASK 0x3f
393 +#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \
394 + COP1_MASK | COZ_MASK | COM1_MASK)
395 +
396 +#define NEW_ALGORITHM_TRAIN_TX
397 +#ifdef NEW_ALGORITHM_TRAIN_TX
398 +#define FORCE_INC_COP1_NUMBER 0
399 +#define FORCE_INC_COM1_NUMBER 1
400 +#endif
401 +
402 +#define VAL_INVALID 0xff
403 +
404 +static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5,
405 + 0x7, 0x9, 0xb, 0xc, VAL_INVALID};
406 +static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, 0x7,
407 + 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID};
408 +
409 +enum backplane_mode {
410 + PHY_BACKPLANE_1000BASE_KX,
411 + PHY_BACKPLANE_10GBASE_KR,
412 + PHY_BACKPLANE_INVAL
413 +};
414 +
415 +enum coe_filed {
416 + COE_COP1,
417 + COE_COZ,
418 + COE_COM
419 +};
420 +
421 +enum coe_update {
422 + COE_NOTUPDATED,
423 + COE_UPDATED,
424 + COE_MIN,
425 + COE_MAX,
426 + COE_INV
427 +};
428 +
429 +enum train_state {
430 + DETECTING_LP,
431 + TRAINED,
432 +};
433 +
434 +struct per_lane_ctrl_status {
435 + __be32 gcr0; /* 0x.000 - General Control Register 0 */
436 + __be32 gcr1; /* 0x.004 - General Control Register 1 */
437 + __be32 gcr2; /* 0x.008 - General Control Register 2 */
438 + __be32 resv1; /* 0x.00C - Reserved */
439 + __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */
440 + __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */
441 + __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */
442 + __be32 resv2; /* 0x.01C - Reserved */
443 + __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */
444 + __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */
445 + __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */
446 + __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */
447 + __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */
448 + __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */
449 + __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */
450 + __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */
451 +};
452 +
453 +struct tx_condition {
454 + bool bin_m1_late_early;
455 + bool bin_long_late_early;
456 + bool bin_m1_stop;
457 + bool bin_long_stop;
458 + bool tx_complete;
459 + bool sent_init;
460 + int m1_min_max_cnt;
461 + int long_min_max_cnt;
462 +#ifdef NEW_ALGORITHM_TRAIN_TX
463 + int pre_inc;
464 + int post_inc;
465 +#endif
466 +};
467 +
468 +struct fsl_xgkr_inst {
469 + void *reg_base;
470 + struct phy_device *phydev;
471 + struct tx_condition tx_c;
472 + struct delayed_work xgkr_wk;
473 + enum train_state state;
474 + u32 ld_update;
475 + u32 ld_status;
476 + u32 ratio_preq;
477 + u32 ratio_pst1q;
478 + u32 adpt_eq;
479 +};
480 +
481 +static void tx_condition_init(struct tx_condition *tx_c)
482 +{
483 + tx_c->bin_m1_late_early = true;
484 + tx_c->bin_long_late_early = false;
485 + tx_c->bin_m1_stop = false;
486 + tx_c->bin_long_stop = false;
487 + tx_c->tx_complete = false;
488 + tx_c->sent_init = false;
489 + tx_c->m1_min_max_cnt = 0;
490 + tx_c->long_min_max_cnt = 0;
491 +#ifdef NEW_ALGORITHM_TRAIN_TX
492 + tx_c->pre_inc = FORCE_INC_COM1_NUMBER;
493 + tx_c->post_inc = FORCE_INC_COP1_NUMBER;
494 +#endif
495 +}
496 +
497 +void tune_tecr0(struct fsl_xgkr_inst *inst)
498 +{
499 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
500 + u32 val;
501 +
502 + val = TECR0_INIT |
503 + inst->adpt_eq << ZERO_COE_SHIFT |
504 + inst->ratio_preq << PRE_COE_SHIFT |
505 + inst->ratio_pst1q << POST_COE_SHIFT;
506 +
507 + /* reset the lane */
508 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
509 + &reg_base->gcr0);
510 + udelay(1);
511 + iowrite32(val, &reg_base->tecr0);
512 + udelay(1);
513 + /* unreset the lane */
514 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
515 + &reg_base->gcr0);
516 + udelay(1);
517 +}
518 +
519 +static void start_lt(struct phy_device *phydev)
520 +{
521 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_EN);
522 +}
523 +
524 +static void stop_lt(struct phy_device *phydev)
525 +{
526 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
527 +}
528 +
529 +static void reset_gcr0(struct fsl_xgkr_inst *inst)
530 +{
531 + struct per_lane_ctrl_status *reg_base = inst->reg_base;
532 +
533 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
534 + &reg_base->gcr0);
535 + udelay(1);
536 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
537 + &reg_base->gcr0);
538 + udelay(1);
539 +}
540 +
541 +void lane_set_1gkx(void *reg)
542 +{
543 + struct per_lane_ctrl_status *reg_base = reg;
544 + u32 val;
545 +
546 + /* reset the lane */
547 + iowrite32(ioread32(&reg_base->gcr0) & ~GCR0_RESET_MASK,
548 + &reg_base->gcr0);
549 + udelay(1);
550 +
551 + /* set gcr1 for 1GKX */
552 + val = ioread32(&reg_base->gcr1);
553 + val &= ~(GCR1_REIDL_TH_MASK | GCR1_REIDL_EX_SEL_MASK |
554 + GCR1_REIDL_ET_MAS_MASK);
555 + iowrite32(val, &reg_base->gcr1);
556 + udelay(1);
557 +
558 + /* set tecr0 for 1GKX */
559 + val = ioread32(&reg_base->tecr0);
560 + val &= ~TECR0_AMP_RED_MASK;
561 + iowrite32(val, &reg_base->tecr0);
562 + udelay(1);
563 +
564 + /* unreset the lane */
565 + iowrite32(ioread32(&reg_base->gcr0) | GCR0_RESET_MASK,
566 + &reg_base->gcr0);
567 + udelay(1);
568 +}
569 +
570 +static void reset_lt(struct phy_device *phydev)
571 +{
572 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1, PMD_RESET);
573 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_CTRL, TRAIN_DISABLE);
574 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_CU, 0);
575 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LD_STATUS, 0);
576 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS, 0);
577 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU, 0);
578 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS, 0);
579 +}
580 +
581 +static void start_xgkr_state_machine(struct delayed_work *work)
582 +{
583 + queue_delayed_work(system_power_efficient_wq, work,
584 + msecs_to_jiffies(XGKR_TIMEOUT));
585 +}
586 +
587 +static void start_xgkr_an(struct phy_device *phydev)
588 +{
589 + struct fsl_xgkr_inst *inst;
590 +
591 + reset_lt(phydev);
592 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KR_AN_AD1_INIT);
593 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
594 +
595 + inst = phydev->priv;
596 +
597 + /* start the state machine */
598 + start_xgkr_state_machine(&inst->xgkr_wk);
599 +}
600 +
601 +static void start_1gkx_an(struct phy_device *phydev)
602 +{
603 + phy_write_mmd(phydev, MDIO_MMD_PCS, FSL_PCS_IF_MODE, IF_MODE_INIT);
604 + phy_write_mmd(phydev, MDIO_MMD_AN, FSL_AN_AD1, KX_AN_AD1_INIT);
605 + phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
606 + phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_CTRL1, AN_CTRL_INIT);
607 +}
608 +
609 +static void ld_coe_status(struct fsl_xgkr_inst *inst)
610 +{
611 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
612 + FSL_KR_LD_STATUS, inst->ld_status);
613 +}
614 +
615 +static void ld_coe_update(struct fsl_xgkr_inst *inst)
616 +{
617 + dev_dbg(&inst->phydev->mdio.dev, "sending request: %x\n", inst->ld_update);
618 + phy_write_mmd(inst->phydev, MDIO_MMD_PMAPMD,
619 + FSL_KR_LD_CU, inst->ld_update);
620 +}
621 +
622 +static void init_inst(struct fsl_xgkr_inst *inst, int reset)
623 +{
624 + if (reset) {
625 + inst->ratio_preq = RATIO_PREQ;
626 + inst->ratio_pst1q = RATIO_PST1Q;
627 + inst->adpt_eq = RATIO_EQ;
628 + tune_tecr0(inst);
629 + }
630 +
631 + tx_condition_init(&inst->tx_c);
632 + inst->state = DETECTING_LP;
633 + inst->ld_status &= RX_READY_MASK;
634 + ld_coe_status(inst);
635 + inst->ld_update = 0;
636 + inst->ld_status &= ~RX_READY_MASK;
637 + ld_coe_status(inst);
638 +}
639 +
640 +#ifdef NEW_ALGORITHM_TRAIN_TX
641 +static int get_median_gaink2(u32 *reg)
642 +{
643 + int gaink2_snap_shot[BIN_SNAPSHOT_NUM];
644 + u32 rx_eq_snp;
645 + struct per_lane_ctrl_status *reg_base;
646 + int timeout;
647 + int i, j, tmp, pos;
648 +
649 + reg_base = (struct per_lane_ctrl_status *)reg;
650 +
651 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
652 + /* wait until RECR1_CTL_SNP_DONE_MASK has cleared */
653 + timeout = 100;
654 + while (ioread32(&reg_base->recr1) &
655 + RECR1_CTL_SNP_DONE_MASK) {
656 + udelay(1);
657 + timeout--;
658 + if (timeout == 0)
659 + break;
660 + }
661 +
662 + /* start snap shot */
663 + iowrite32((ioread32(&reg_base->gcr1) |
664 + GCR1_CTL_SNP_START_MASK),
665 + &reg_base->gcr1);
666 +
667 + /* wait for SNP done */
668 + timeout = 100;
669 + while (!(ioread32(&reg_base->recr1) &
670 + RECR1_CTL_SNP_DONE_MASK)) {
671 + udelay(1);
672 + timeout--;
673 + if (timeout == 0)
674 + break;
675 + }
676 +
677 + /* read and save the snap shot */
678 + rx_eq_snp = ioread32(&reg_base->recr1);
679 + gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >>
680 + RECR1_GAINK2_SHIFT;
681 +
682 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
683 + iowrite32((ioread32(&reg_base->gcr1) &
684 + ~GCR1_CTL_SNP_START_MASK),
685 + &reg_base->gcr1);
686 + }
687 +
688 + /* get the median of the 5 snapshots */
689 + for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) {
690 + tmp = gaink2_snap_shot[i];
691 + pos = i;
692 + for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) {
693 + if (gaink2_snap_shot[j] < tmp) {
694 + tmp = gaink2_snap_shot[j];
695 + pos = j;
696 + }
697 + }
698 +
699 + gaink2_snap_shot[pos] = gaink2_snap_shot[i];
700 + gaink2_snap_shot[i] = tmp;
701 + }
702 +
703 + return gaink2_snap_shot[2];
704 +}
705 +#endif
706 +
707 +static bool is_bin_early(int bin_sel, void *reg)
708 +{
709 + bool early = false;
710 + int bin_snap_shot[BIN_SNAPSHOT_NUM];
711 + int i, negative_count = 0;
712 + struct per_lane_ctrl_status *reg_base = reg;
713 + int timeout;
714 +
715 + for (i = 0; i < BIN_SNAPSHOT_NUM; i++) {
716 + /* wait until RECR1_SNP_DONE_MASK has cleared */
717 + timeout = 100;
718 + while ((ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
719 + udelay(1);
720 + timeout--;
721 + if (timeout == 0)
722 + break;
723 + }
724 +
725 + /* set TCSR1[CDR_SEL] to BinM1/BinLong */
726 + if (bin_sel == BIN_M1) {
727 + iowrite32((ioread32(&reg_base->tcsr1) &
728 + ~CDR_SEL_MASK) | BIN_M1_SEL,
729 + &reg_base->tcsr1);
730 + } else {
731 + iowrite32((ioread32(&reg_base->tcsr1) &
732 + ~CDR_SEL_MASK) | BIN_Long_SEL,
733 + &reg_base->tcsr1);
734 + }
735 +
736 + /* start snap shot */
737 + iowrite32(ioread32(&reg_base->gcr1) | GCR1_SNP_START_MASK,
738 + &reg_base->gcr1);
739 +
740 + /* wait for SNP done */
741 + timeout = 100;
742 + while (!(ioread32(&reg_base->recr1) & RECR1_SNP_DONE_MASK)) {
743 + udelay(1);
744 + timeout--;
745 + if (timeout == 0)
746 + break;
747 + }
748 +
749 + /* read and save the snap shot */
750 + bin_snap_shot[i] = (ioread32(&reg_base->tcsr1) &
751 + TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT;
752 + if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK)
753 + negative_count++;
754 +
755 + /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */
756 + iowrite32(ioread32(&reg_base->gcr1) & ~GCR1_SNP_START_MASK,
757 + &reg_base->gcr1);
758 + }
759 +
760 + if (((bin_sel == BIN_M1) && (negative_count > BIN_M1_THRESHOLD)) ||
761 + ((bin_sel == BIN_LONG && (negative_count > BIN_LONG_THRESHOLD)))) {
762 + early = true;
763 + }
764 +
765 + return early;
766 +}
767 +
768 +static void train_tx(struct fsl_xgkr_inst *inst)
769 +{
770 + struct phy_device *phydev = inst->phydev;
771 + struct tx_condition *tx_c = &inst->tx_c;
772 + bool bin_m1_early, bin_long_early;
773 + u32 lp_status, old_ld_update;
774 + u32 status_cop1, status_coz, status_com1;
775 + u32 req_cop1, req_coz, req_com1, req_preset, req_init;
776 + u32 temp;
777 +#ifdef NEW_ALGORITHM_TRAIN_TX
778 + u32 median_gaink2;
779 +#endif
780 +
781 +recheck:
782 + if (tx_c->bin_long_stop && tx_c->bin_m1_stop) {
783 + tx_c->tx_complete = true;
784 + inst->ld_status |= RX_READY_MASK;
785 + ld_coe_status(inst);
786 + /* tell LP we are ready */
787 + phy_write_mmd(phydev, MDIO_MMD_PMAPMD,
788 + FSL_KR_PMD_STATUS, RX_STAT);
789 + return;
790 + }
791 +
792 + /* We start by checking the current LP status. If we got any responses,
793 + * we can clear up the appropriate update request so that the
794 + * subsequent code may easily issue new update requests if needed.
795 + */
796 + lp_status = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
797 + REQUEST_MASK;
798 + status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT;
799 + status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT;
800 + status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT;
801 +
802 + old_ld_update = inst->ld_update;
803 + req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT;
804 + req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT;
805 + req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT;
806 + req_preset = old_ld_update & PRESET_MASK;
807 + req_init = old_ld_update & INIT_MASK;
808 +
809 + /* IEEE802.3-2008, 72.6.10.2.3.1
810 + * We may clear PRESET when all coefficients show UPDATED or MAX.
811 + */
812 + if (req_preset) {
813 + if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) &&
814 + (status_coz == COE_UPDATED || status_coz == COE_MAX) &&
815 + (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) {
816 + inst->ld_update &= ~PRESET_MASK;
817 + }
818 + }
819 +
820 + /* IEEE802.3-2008, 72.6.10.2.3.2
821 + * We may clear INITIALIZE when no coefficients show NOT UPDATED.
822 + */
823 + if (req_init) {
824 + if (status_cop1 != COE_NOTUPDATED &&
825 + status_coz != COE_NOTUPDATED &&
826 + status_com1 != COE_NOTUPDATED) {
827 + inst->ld_update &= ~INIT_MASK;
828 + }
829 + }
830 +
831 + /* IEEE802.3-2008, 72.6.10.2.3.2
832 + * we send initialize to the other side to ensure default settings
833 + * for the LP. Naturally, we should do this only once.
834 + */
835 + if (!tx_c->sent_init) {
836 + if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) {
837 + inst->ld_update = INIT_MASK;
838 + tx_c->sent_init = true;
839 + }
840 + }
841 +
842 + /* IEEE802.3-2008, 72.6.10.2.3.3
843 + * We set coefficient requests to HOLD when we get the information
844 + * about any updates. On clearing our prior response, we also update
845 + * our internal status.
846 + */
847 + if (status_cop1 != COE_NOTUPDATED) {
848 + if (req_cop1) {
849 + inst->ld_update &= ~COP1_MASK;
850 +#ifdef NEW_ALGORITHM_TRAIN_TX
851 + if (tx_c->post_inc) {
852 + if (req_cop1 == INCREMENT &&
853 + status_cop1 == COE_MAX) {
854 + tx_c->post_inc = 0;
855 + tx_c->bin_long_stop = true;
856 + tx_c->bin_m1_stop = true;
857 + } else {
858 + tx_c->post_inc -= 1;
859 + }
860 +
861 + ld_coe_update(inst);
862 + goto recheck;
863 + }
864 +#endif
865 + if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) ||
866 + (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) {
867 + dev_dbg(&inst->phydev->mdio.dev, "COP1 hit limit %s",
868 + (status_cop1 == COE_MIN) ?
869 + "DEC MIN" : "INC MAX");
870 + tx_c->long_min_max_cnt++;
871 + if (tx_c->long_min_max_cnt >= TIMEOUT_LONG) {
872 + tx_c->bin_long_stop = true;
873 + ld_coe_update(inst);
874 + goto recheck;
875 + }
876 + }
877 + }
878 + }
879 +
880 + if (status_coz != COE_NOTUPDATED) {
881 + if (req_coz)
882 + inst->ld_update &= ~COZ_MASK;
883 + }
884 +
885 + if (status_com1 != COE_NOTUPDATED) {
886 + if (req_com1) {
887 + inst->ld_update &= ~COM1_MASK;
888 +#ifdef NEW_ALGORITHM_TRAIN_TX
889 + if (tx_c->pre_inc) {
890 + if (req_com1 == INCREMENT &&
891 + status_com1 == COE_MAX)
892 + tx_c->pre_inc = 0;
893 + else
894 + tx_c->pre_inc -= 1;
895 +
896 + ld_coe_update(inst);
897 + goto recheck;
898 + }
899 +#endif
900 + /* Stop if we have reached the limit for a parameter. */
901 + if ((req_com1 == DECREMENT && status_com1 == COE_MIN) ||
902 + (req_com1 == INCREMENT && status_com1 == COE_MAX)) {
903 + dev_dbg(&inst->phydev->mdio.dev, "COM1 hit limit %s",
904 + (status_com1 == COE_MIN) ?
905 + "DEC MIN" : "INC MAX");
906 + tx_c->m1_min_max_cnt++;
907 + if (tx_c->m1_min_max_cnt >= TIMEOUT_M1) {
908 + tx_c->bin_m1_stop = true;
909 + ld_coe_update(inst);
910 + goto recheck;
911 + }
912 + }
913 + }
914 + }
915 +
916 + if (old_ld_update != inst->ld_update) {
917 + ld_coe_update(inst);
918 + /* Redo these status checks and updates until we have no more
919 + * changes, to speed up the overall process.
920 + */
921 + goto recheck;
922 + }
923 +
924 + /* Do nothing if we have a pending request. */
925 + if ((req_coz || req_com1 || req_cop1))
926 + return;
927 + else if (lp_status)
928 + /* No pending request, but the LP status has not reverted to
929 + * NOT UPDATED.
930 + */
931 + return;
932 +
933 +#ifdef NEW_ALGORITHM_TRAIN_TX
934 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
935 + if (tx_c->pre_inc) {
936 + inst->ld_update = INCREMENT << COM1_SHIFT;
937 + ld_coe_update(inst);
938 + return;
939 + }
940 +
941 + if (status_cop1 != COE_MAX) {
942 + median_gaink2 = get_median_gaink2(inst->reg_base);
943 + if (median_gaink2 == 0xf) {
944 + tx_c->post_inc = 1;
945 + } else {
946 + /* Gaink2 median lower than "F" */
947 + tx_c->bin_m1_stop = true;
948 + tx_c->bin_long_stop = true;
949 + goto recheck;
950 + }
951 + } else {
952 + /* C1 MAX */
953 + tx_c->bin_m1_stop = true;
954 + tx_c->bin_long_stop = true;
955 + goto recheck;
956 + }
957 +
958 + if (tx_c->post_inc) {
959 + inst->ld_update = INCREMENT << COP1_SHIFT;
960 + ld_coe_update(inst);
961 + return;
962 + }
963 + }
964 +#endif
965 +
966 + /* snapshot and select bin */
967 + bin_m1_early = is_bin_early(BIN_M1, inst->reg_base);
968 + bin_long_early = is_bin_early(BIN_LONG, inst->reg_base);
969 +
970 + if (!tx_c->bin_m1_stop && !tx_c->bin_m1_late_early && bin_m1_early) {
971 + tx_c->bin_m1_stop = true;
972 + goto recheck;
973 + }
974 +
975 + if (!tx_c->bin_long_stop &&
976 + tx_c->bin_long_late_early && !bin_long_early) {
977 + tx_c->bin_long_stop = true;
978 + goto recheck;
979 + }
980 +
981 + /* IEEE802.3-2008, 72.6.10.2.3.3
982 + * We only request coefficient updates when no PRESET/INITIALIZE is
983 + * pending. We also only request coefficient updates when the
984 + * corresponding status is NOT UPDATED and nothing is pending.
985 + */
986 + if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) {
987 + if (!tx_c->bin_long_stop) {
988 + /* BinM1 correction means changing COM1 */
989 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
990 + /* Avoid BinM1Late by requesting an
991 + * immediate decrement.
992 + */
993 + if (!bin_m1_early) {
994 + /* request decrement c(-1) */
995 + temp = DECREMENT << COM1_SHIFT;
996 + inst->ld_update = temp;
997 + ld_coe_update(inst);
998 + tx_c->bin_m1_late_early = bin_m1_early;
999 + return;
1000 + }
1001 + }
1002 +
1003 + /* BinLong correction means changing COP1 */
1004 + if (!status_cop1 && !(inst->ld_update & COP1_MASK)) {
1005 + /* Locate BinLong transition point (if any)
1006 + * while avoiding BinM1Late.
1007 + */
1008 + if (bin_long_early) {
1009 + /* request increment c(1) */
1010 + temp = INCREMENT << COP1_SHIFT;
1011 + inst->ld_update = temp;
1012 + } else {
1013 + /* request decrement c(1) */
1014 + temp = DECREMENT << COP1_SHIFT;
1015 + inst->ld_update = temp;
1016 + }
1017 +
1018 + ld_coe_update(inst);
1019 + tx_c->bin_long_late_early = bin_long_early;
1020 + }
1021 + /* We try to finish BinLong before we do BinM1 */
1022 + return;
1023 + }
1024 +
1025 + if (!tx_c->bin_m1_stop) {
1026 + /* BinM1 correction means changing COM1 */
1027 + if (!status_com1 && !(inst->ld_update & COM1_MASK)) {
1028 + /* Locate BinM1 transition point (if any) */
1029 + if (bin_m1_early) {
1030 + /* request increment c(-1) */
1031 + temp = INCREMENT << COM1_SHIFT;
1032 + inst->ld_update = temp;
1033 + } else {
1034 + /* request decrement c(-1) */
1035 + temp = DECREMENT << COM1_SHIFT;
1036 + inst->ld_update = temp;
1037 + }
1038 +
1039 + ld_coe_update(inst);
1040 + tx_c->bin_m1_late_early = bin_m1_early;
1041 + }
1042 + }
1043 + }
1044 +}
1045 +
1046 +static int is_link_up(struct phy_device *phydev)
1047 +{
1048 + int val;
1049 +
1050 + phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1051 + val = phy_read_mmd(phydev, MDIO_MMD_PCS, FSL_XFI_PCS_10GR_SR1);
1052 +
1053 + return (val & FSL_KR_RX_LINK_STAT_MASK) ? 1 : 0;
1054 +}
1055 +
1056 +static int is_link_training_fail(struct phy_device *phydev)
1057 +{
1058 + int val;
1059 + int timeout = 100;
1060 +
1061 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_PMD_STATUS);
1062 + if (!(val & TRAIN_FAIL) && (val & RX_STAT)) {
1063 + /* check LNK_STAT for sure */
1064 + while (timeout--) {
1065 + if (is_link_up(phydev))
1066 + return 0;
1067 +
1068 + usleep_range(100, 500);
1069 + }
1070 + }
1071 +
1072 + return 1;
1073 +}
1074 +
1075 +static int check_rx(struct phy_device *phydev)
1076 +{
1077 + return phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_STATUS) &
1078 + RX_READY_MASK;
1079 +}
1080 +
1081 +/* Coefficient values have hardware restrictions */
1082 +static int is_ld_valid(struct fsl_xgkr_inst *inst)
1083 +{
1084 + u32 ratio_pst1q = inst->ratio_pst1q;
1085 + u32 adpt_eq = inst->adpt_eq;
1086 + u32 ratio_preq = inst->ratio_preq;
1087 +
1088 + if ((ratio_pst1q + adpt_eq + ratio_preq) > 48)
1089 + return 0;
1090 +
1091 + if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >=
1092 + ((adpt_eq - ratio_pst1q - ratio_preq) * 17))
1093 + return 0;
1094 +
1095 + if (ratio_preq > ratio_pst1q)
1096 + return 0;
1097 +
1098 + if (ratio_preq > 8)
1099 + return 0;
1100 +
1101 + if (adpt_eq < 26)
1102 + return 0;
1103 +
1104 + if (ratio_pst1q > 16)
1105 + return 0;
1106 +
1107 + return 1;
1108 +}
1109 +
1110 +static int is_value_allowed(const u32 *val_table, u32 val)
1111 +{
1112 + int i;
1113 +
1114 + for (i = 0;; i++) {
1115 + if (*(val_table + i) == VAL_INVALID)
1116 + return 0;
1117 + if (*(val_table + i) == val)
1118 + return 1;
1119 + }
1120 +}
1121 +
1122 +static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request)
1123 +{
1124 + u32 ld_limit[3], ld_coe[3], step[3];
1125 +
1126 + ld_coe[0] = inst->ratio_pst1q;
1127 + ld_coe[1] = inst->adpt_eq;
1128 + ld_coe[2] = inst->ratio_preq;
1129 +
1130 + /* Information specific to the Freescale SerDes for 10GBase-KR:
1131 + * Incrementing C(+1) means *decrementing* RATIO_PST1Q
1132 + * Incrementing C(0) means incrementing ADPT_EQ
1133 + * Incrementing C(-1) means *decrementing* RATIO_PREQ
1134 + */
1135 + step[0] = -1;
1136 + step[1] = 1;
1137 + step[2] = -1;
1138 +
1139 + switch (request) {
1140 + case INCREMENT:
1141 + ld_limit[0] = POST_COE_MAX;
1142 + ld_limit[1] = ZERO_COE_MAX;
1143 + ld_limit[2] = PRE_COE_MAX;
1144 + if (ld_coe[field] != ld_limit[field])
1145 + ld_coe[field] += step[field];
1146 + else
1147 + /* MAX */
1148 + return 2;
1149 + break;
1150 + case DECREMENT:
1151 + ld_limit[0] = POST_COE_MIN;
1152 + ld_limit[1] = ZERO_COE_MIN;
1153 + ld_limit[2] = PRE_COE_MIN;
1154 + if (ld_coe[field] != ld_limit[field])
1155 + ld_coe[field] -= step[field];
1156 + else
1157 + /* MIN */
1158 + return 1;
1159 + break;
1160 + default:
1161 + break;
1162 + }
1163 +
1164 + if (is_ld_valid(inst)) {
1165 + /* accept new ld */
1166 + inst->ratio_pst1q = ld_coe[0];
1167 + inst->adpt_eq = ld_coe[1];
1168 + inst->ratio_preq = ld_coe[2];
1169 + /* only some values for preq and pst1q can be used.
1170 + * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc.
1171 + * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10.
1172 + */
1173 + if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) {
1174 + dev_dbg(&inst->phydev->mdio.dev,
1175 + "preq skipped value: %d\n", ld_coe[2]);
1176 + return 0;
1177 + }
1178 +
1179 + if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) {
1180 + dev_dbg(&inst->phydev->mdio.dev,
1181 + "pst1q skipped value: %d\n", ld_coe[0]);
1182 + return 0;
1183 + }
1184 +
1185 + tune_tecr0(inst);
1186 + } else {
1187 + if (request == DECREMENT)
1188 + /* MIN */
1189 + return 1;
1190 + if (request == INCREMENT)
1191 + /* MAX */
1192 + return 2;
1193 + }
1194 +
1195 + return 0;
1196 +}
1197 +
1198 +static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld)
1199 +{
1200 + u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX};
1201 + u32 mask, val;
1202 +
1203 + switch (field) {
1204 + case COE_COP1:
1205 + mask = COP1_MASK;
1206 + val = ld_coe[new_ld] << COP1_SHIFT;
1207 + break;
1208 + case COE_COZ:
1209 + mask = COZ_MASK;
1210 + val = ld_coe[new_ld] << COZ_SHIFT;
1211 + break;
1212 + case COE_COM:
1213 + mask = COM1_MASK;
1214 + val = ld_coe[new_ld] << COM1_SHIFT;
1215 + break;
1216 + default:
1217 + return;
1218 + }
1219 +
1220 + inst->ld_status &= ~mask;
1221 + inst->ld_status |= val;
1222 +}
1223 +
1224 +static void check_request(struct fsl_xgkr_inst *inst, int request)
1225 +{
1226 + int cop1_req, coz_req, com_req;
1227 + int old_status, new_ld_sta;
1228 +
1229 + cop1_req = (request & COP1_MASK) >> COP1_SHIFT;
1230 + coz_req = (request & COZ_MASK) >> COZ_SHIFT;
1231 + com_req = (request & COM1_MASK) >> COM1_SHIFT;
1232 +
1233 + /* IEEE802.3-2008, 72.6.10.2.5
1234 + * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED
1235 + */
1236 + old_status = inst->ld_status;
1237 +
1238 + if (cop1_req && !(inst->ld_status & COP1_MASK)) {
1239 + new_ld_sta = inc_dec(inst, COE_COP1, cop1_req);
1240 + min_max_updated(inst, COE_COP1, new_ld_sta);
1241 + }
1242 +
1243 + if (coz_req && !(inst->ld_status & COZ_MASK)) {
1244 + new_ld_sta = inc_dec(inst, COE_COZ, coz_req);
1245 + min_max_updated(inst, COE_COZ, new_ld_sta);
1246 + }
1247 +
1248 + if (com_req && !(inst->ld_status & COM1_MASK)) {
1249 + new_ld_sta = inc_dec(inst, COE_COM, com_req);
1250 + min_max_updated(inst, COE_COM, new_ld_sta);
1251 + }
1252 +
1253 + if (old_status != inst->ld_status)
1254 + ld_coe_status(inst);
1255 +}
1256 +
1257 +static void preset(struct fsl_xgkr_inst *inst)
1258 +{
1259 + /* These are all MAX values from the IEEE802.3 perspective. */
1260 + inst->ratio_pst1q = POST_COE_MAX;
1261 + inst->adpt_eq = ZERO_COE_MAX;
1262 + inst->ratio_preq = PRE_COE_MAX;
1263 +
1264 + tune_tecr0(inst);
1265 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1266 + inst->ld_status |= COE_MAX << COP1_SHIFT |
1267 + COE_MAX << COZ_SHIFT |
1268 + COE_MAX << COM1_SHIFT;
1269 + ld_coe_status(inst);
1270 +}
1271 +
1272 +static void initialize(struct fsl_xgkr_inst *inst)
1273 +{
1274 + inst->ratio_preq = RATIO_PREQ;
1275 + inst->ratio_pst1q = RATIO_PST1Q;
1276 + inst->adpt_eq = RATIO_EQ;
1277 +
1278 + tune_tecr0(inst);
1279 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1280 + inst->ld_status |= COE_UPDATED << COP1_SHIFT |
1281 + COE_UPDATED << COZ_SHIFT |
1282 + COE_UPDATED << COM1_SHIFT;
1283 + ld_coe_status(inst);
1284 +}
1285 +
1286 +static void train_rx(struct fsl_xgkr_inst *inst)
1287 +{
1288 + struct phy_device *phydev = inst->phydev;
1289 + int request, old_ld_status;
1290 +
1291 + /* get request from LP */
1292 + request = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, FSL_KR_LP_CU) &
1293 + (LD_ALL_MASK);
1294 + old_ld_status = inst->ld_status;
1295 +
1296 + /* IEEE802.3-2008, 72.6.10.2.5
1297 + * Ensure we always go to NOT UPDATED for status reporting in
1298 + * response to HOLD requests.
1299 + * IEEE802.3-2008, 72.6.10.2.3.1/2
1300 + * ... but only if PRESET/INITIALIZE are not active to ensure
1301 + * we keep status until they are released.
1302 + */
1303 + if (!(request & (PRESET_MASK | INIT_MASK))) {
1304 + if (!(request & COP1_MASK))
1305 + inst->ld_status &= ~COP1_MASK;
1306 +
1307 + if (!(request & COZ_MASK))
1308 + inst->ld_status &= ~COZ_MASK;
1309 +
1310 + if (!(request & COM1_MASK))
1311 + inst->ld_status &= ~COM1_MASK;
1312 +
1313 + if (old_ld_status != inst->ld_status)
1314 + ld_coe_status(inst);
1315 + }
1316 +
1317 + /* As soon as the LP shows ready, no need to do any more updates. */
1318 + if (check_rx(phydev)) {
1319 + /* LP receiver is ready */
1320 + if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) {
1321 + inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK);
1322 + ld_coe_status(inst);
1323 + }
1324 + } else {
1325 + /* IEEE802.3-2008, 72.6.10.2.3.1/2
1326 + * only act on PRESET/INITIALIZE if all status is NOT UPDATED.
1327 + */
1328 + if (request & (PRESET_MASK | INIT_MASK)) {
1329 + if (!(inst->ld_status &
1330 + (COP1_MASK | COZ_MASK | COM1_MASK))) {
1331 + if (request & PRESET_MASK)
1332 + preset(inst);
1333 +
1334 + if (request & INIT_MASK)
1335 + initialize(inst);
1336 + }
1337 + }
1338 +
1339 + /* LP coefficients are not in HOLD */
1340 + if (request & REQUEST_MASK)
1341 + check_request(inst, request & REQUEST_MASK);
1342 + }
1343 +}
1344 +
1345 +static void xgkr_start_train(struct phy_device *phydev)
1346 +{
1347 + struct fsl_xgkr_inst *inst = phydev->priv;
1348 + struct tx_condition *tx_c = &inst->tx_c;
1349 + int val = 0, i;
1350 + int lt_state;
1351 + unsigned long dead_line;
1352 + int rx_ok, tx_ok;
1353 +
1354 + init_inst(inst, 0);
1355 + start_lt(phydev);
1356 +
1357 + for (i = 0; i < 2;) {
1358 + dead_line = jiffies + msecs_to_jiffies(500);
1359 + while (time_before(jiffies, dead_line)) {
1360 + val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1361 + FSL_KR_PMD_STATUS);
1362 + if (val & TRAIN_FAIL) {
1363 + /* LT already failed; reset the lane to avoid
1364 + * it hanging, then start LT again.
1365 + */
1366 + reset_gcr0(inst);
1367 + start_lt(phydev);
1368 + } else if ((val & PMD_STATUS_SUP_STAT) &&
1369 + (val & PMD_STATUS_FRAME_LOCK))
1370 + break;
1371 + usleep_range(100, 500);
1372 + }
1373 +
1374 + if (!((val & PMD_STATUS_FRAME_LOCK) &&
1375 + (val & PMD_STATUS_SUP_STAT))) {
1376 + i++;
1377 + continue;
1378 + }
1379 +
1380 + /* init process */
1381 + rx_ok = false;
1382 + tx_ok = false;
1383 + /* LT should finish within 500ms, either failed or OK. */
1384 + dead_line = jiffies + msecs_to_jiffies(500);
1385 +
1386 + while (time_before(jiffies, dead_line)) {
1387 + /* check if the LT is already failed */
1388 + lt_state = phy_read_mmd(phydev, MDIO_MMD_PMAPMD,
1389 + FSL_KR_PMD_STATUS);
1390 + if (lt_state & TRAIN_FAIL) {
1391 + reset_gcr0(inst);
1392 + break;
1393 + }
1394 +
1395 + rx_ok = check_rx(phydev);
1396 + tx_ok = tx_c->tx_complete;
1397 +
1398 + if (rx_ok && tx_ok)
1399 + break;
1400 +
1401 + if (!rx_ok)
1402 + train_rx(inst);
1403 +
1404 + if (!tx_ok)
1405 + train_tx(inst);
1406 +
1407 + usleep_range(100, 500);
1408 + }
1409 +
1410 + i++;
1411 + /* check LT result */
1412 + if (is_link_training_fail(phydev)) {
1413 + init_inst(inst, 0);
1414 + continue;
1415 + } else {
1416 + stop_lt(phydev);
1417 + inst->state = TRAINED;
1418 + break;
1419 + }
1420 + }
1421 +}
1422 +
1423 +static void xgkr_state_machine(struct work_struct *work)
1424 +{
1425 + struct delayed_work *dwork = to_delayed_work(work);
1426 + struct fsl_xgkr_inst *inst = container_of(dwork,
1427 + struct fsl_xgkr_inst,
1428 + xgkr_wk);
1429 + struct phy_device *phydev = inst->phydev;
1430 + int an_state;
1431 + bool needs_train = false;
1432 +
1433 + mutex_lock(&phydev->lock);
1434 +
1435 + switch (inst->state) {
1436 + case DETECTING_LP:
1437 + phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1438 + an_state = phy_read_mmd(phydev, MDIO_MMD_AN, FSL_AN_BP_STAT);
1439 + if ((an_state & KR_AN_MASK))
1440 + needs_train = true;
1441 + break;
1442 + case TRAINED:
1443 + if (!is_link_up(phydev)) {
1444 + dev_info(&phydev->mdio.dev,
1445 + "Detect hotplug, restart training\n");
1446 + init_inst(inst, 1);
1447 + start_xgkr_an(phydev);
1448 + inst->state = DETECTING_LP;
1449 + }
1450 + break;
1451 + }
1452 +
1453 + if (needs_train)
1454 + xgkr_start_train(phydev);
1455 +
1456 + mutex_unlock(&phydev->lock);
1457 + queue_delayed_work(system_power_efficient_wq, &inst->xgkr_wk,
1458 + msecs_to_jiffies(XGKR_TIMEOUT));
1459 +}
1460 +
1461 +static int fsl_backplane_probe(struct phy_device *phydev)
1462 +{
1463 + struct fsl_xgkr_inst *xgkr_inst;
1464 + struct device_node *phy_node, *lane_node;
1465 + struct resource res_lane;
1466 + const char *bm;
1467 + int ret;
1468 + int bp_mode;
1469 + u32 lane[2];
1470 +
1471 + phy_node = phydev->mdio.dev.of_node;
1472 + bp_mode = of_property_read_string(phy_node, "backplane-mode", &bm);
1473 + if (bp_mode < 0)
1474 + return 0;
1475 +
1476 + if (!strcasecmp(bm, "1000base-kx")) {
1477 + bp_mode = PHY_BACKPLANE_1000BASE_KX;
1478 + } else if (!strcasecmp(bm, "10gbase-kr")) {
1479 + bp_mode = PHY_BACKPLANE_10GBASE_KR;
1480 + } else {
1481 + dev_err(&phydev->mdio.dev, "Unknown backplane-mode\n");
1482 + return -EINVAL;
1483 + }
1484 +
1485 + lane_node = of_parse_phandle(phy_node, "fsl,lane-handle", 0);
1486 + if (!lane_node) {
1487 + dev_err(&phydev->mdio.dev, "parse fsl,lane-handle failed\n");
1488 + return -EINVAL;
1489 + }
1490 +
1491 + ret = of_address_to_resource(lane_node, 0, &res_lane);
1492 + if (ret) {
1493 + dev_err(&phydev->mdio.dev, "could not obtain memory map\n");
1494 + return ret;
1495 + }
1496 +
1497 + of_node_put(lane_node);
1498 + ret = of_property_read_u32_array(phy_node, "fsl,lane-reg",
1499 + (u32 *)&lane, 2);
1500 + if (ret) {
1501 + dev_err(&phydev->mdio.dev, "could not get fsl,lane-reg\n");
1502 + return -EINVAL;
1503 + }
1504 +
1505 + phydev->priv = devm_ioremap_nocache(&phydev->mdio.dev,
1506 + res_lane.start + lane[0],
1507 + lane[1]);
1508 + if (!phydev->priv) {
1509 + dev_err(&phydev->mdio.dev, "ioremap_nocache failed\n");
1510 + return -ENOMEM;
1511 + }
1512 +
1513 + if (bp_mode == PHY_BACKPLANE_1000BASE_KX) {
1514 + phydev->speed = SPEED_1000;
1515 + /* configure the lane for 1000BASE-KX */
1516 + lane_set_1gkx(phydev->priv);
1517 + return 0;
1518 + }
1519 +
1520 + xgkr_inst = devm_kzalloc(&phydev->mdio.dev,
1521 + sizeof(*xgkr_inst), GFP_KERNEL);
1522 + if (!xgkr_inst)
1523 + return -ENOMEM;
1524 +
1525 + xgkr_inst->reg_base = phydev->priv;
1526 + xgkr_inst->phydev = phydev;
1527 + phydev->priv = xgkr_inst;
1528 +
1529 + if (bp_mode == PHY_BACKPLANE_10GBASE_KR) {
1530 + phydev->speed = SPEED_10000;
1531 + INIT_DELAYED_WORK(&xgkr_inst->xgkr_wk, xgkr_state_machine);
1532 + }
1533 +
1534 + return 0;
1535 +}
1536 +
1537 +static int fsl_backplane_aneg_done(struct phy_device *phydev)
1538 +{
1539 + return 1;
1540 +}
1541 +
1542 +static int fsl_backplane_config_aneg(struct phy_device *phydev)
1543 +{
1544 + if (phydev->speed == SPEED_10000) {
1545 + phydev->supported |= SUPPORTED_10000baseKR_Full;
1546 + start_xgkr_an(phydev);
1547 + } else if (phydev->speed == SPEED_1000) {
1548 + phydev->supported |= SUPPORTED_1000baseKX_Full;
1549 + start_1gkx_an(phydev);
1550 + }
1551 +
1552 + phydev->advertising = phydev->supported;
1553 + phydev->duplex = 1;
1554 +
1555 + return 0;
1556 +}
1557 +
1558 +static int fsl_backplane_suspend(struct phy_device *phydev)
1559 +{
1560 + if (phydev->speed == SPEED_10000) {
1561 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1562 +
1563 + cancel_delayed_work_sync(&xgkr_inst->xgkr_wk);
1564 + }
1565 + return 0;
1566 +}
1567 +
1568 +static int fsl_backplane_resume(struct phy_device *phydev)
1569 +{
1570 + if (phydev->speed == SPEED_10000) {
1571 + struct fsl_xgkr_inst *xgkr_inst = phydev->priv;
1572 +
1573 + init_inst(xgkr_inst, 1);
1574 + queue_delayed_work(system_power_efficient_wq,
1575 + &xgkr_inst->xgkr_wk,
1576 + msecs_to_jiffies(XGKR_TIMEOUT));
1577 + }
1578 + return 0;
1579 +}
1580 +
1581 +static int fsl_backplane_read_status(struct phy_device *phydev)
1582 +{
1583 + if (is_link_up(phydev))
1584 + phydev->link = 1;
1585 + else
1586 + phydev->link = 0;
1587 +
1588 + return 0;
1589 +}
1590 +
1591 +static struct phy_driver fsl_backplane_driver[] = {
1592 + {
1593 + .phy_id = FSL_PCS_PHY_ID,
1594 + .name = "Freescale Backplane",
1595 + .phy_id_mask = 0xffffffff,
1596 + .features = SUPPORTED_Backplane | SUPPORTED_Autoneg |
1597 + SUPPORTED_MII,
1598 + .probe = fsl_backplane_probe,
1599 + .aneg_done = fsl_backplane_aneg_done,
1600 + .config_aneg = fsl_backplane_config_aneg,
1601 + .read_status = fsl_backplane_read_status,
1602 + .suspend = fsl_backplane_suspend,
1603 + .resume = fsl_backplane_resume,
1604 + },
1605 +};
1606 +
1607 +module_phy_driver(fsl_backplane_driver);
1608 +
1609 +static struct mdio_device_id __maybe_unused freescale_tbl[] = {
1610 + { FSL_PCS_PHY_ID, 0xffffffff },
1611 + { }
1612 +};
1613 +
1614 +MODULE_DEVICE_TABLE(mdio, freescale_tbl);
1615 +
1616 +MODULE_DESCRIPTION("Freescale Backplane driver");
1617 +MODULE_AUTHOR("Shaohui Xie <Shaohui.Xie@freescale.com>");
1618 +MODULE_LICENSE("GPL v2");
1619 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1620 index f3e64a89..42cdd5b7 100644
1621 --- a/drivers/net/phy/phy.c
1622 +++ b/drivers/net/phy/phy.c
1623 @@ -585,7 +585,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
1624 return 0;
1625
1626 case SIOCSHWTSTAMP:
1627 - if (phydev->drv->hwtstamp)
1628 + if (phydev->drv && phydev->drv->hwtstamp)
1629 return phydev->drv->hwtstamp(phydev, ifr);
1630 /* fall through */
1631
1632 @@ -610,6 +610,9 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
1633 bool trigger = 0;
1634 int err;
1635
1636 + if (!phydev->drv)
1637 + return -EIO;
1638 +
1639 mutex_lock(&phydev->lock);
1640
1641 if (AUTONEG_DISABLE == phydev->autoneg)
1642 @@ -1009,7 +1012,7 @@ void phy_state_machine(struct work_struct *work)
1643
1644 old_state = phydev->state;
1645
1646 - if (phydev->drv->link_change_notify)
1647 + if (phydev->drv && phydev->drv->link_change_notify)
1648 phydev->drv->link_change_notify(phydev);
1649
1650 switch (phydev->state) {
1651 @@ -1311,6 +1314,9 @@ EXPORT_SYMBOL(phy_write_mmd_indirect);
1652 */
1653 int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
1654 {
1655 + if (!phydev->drv)
1656 + return -EIO;
1657 +
1658 /* According to 802.3az,the EEE is supported only in full duplex-mode.
1659 * Also EEE feature is active when core is operating with MII, GMII
1660 * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
1661 @@ -1388,6 +1394,9 @@ EXPORT_SYMBOL(phy_init_eee);
1662 */
1663 int phy_get_eee_err(struct phy_device *phydev)
1664 {
1665 + if (!phydev->drv)
1666 + return -EIO;
1667 +
1668 return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR, MDIO_MMD_PCS);
1669 }
1670 EXPORT_SYMBOL(phy_get_eee_err);
1671 @@ -1404,6 +1413,9 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
1672 {
1673 int val;
1674
1675 + if (!phydev->drv)
1676 + return -EIO;
1677 +
1678 /* Get Supported EEE */
1679 val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, MDIO_MMD_PCS);
1680 if (val < 0)
1681 @@ -1437,6 +1449,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
1682 {
1683 int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
1684
1685 + if (!phydev->drv)
1686 + return -EIO;
1687 +
1688 /* Mask prohibited EEE modes */
1689 val &= ~phydev->eee_broken_modes;
1690
1691 @@ -1448,7 +1463,7 @@ EXPORT_SYMBOL(phy_ethtool_set_eee);
1692
1693 int phy_ethtool_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1694 {
1695 - if (phydev->drv->set_wol)
1696 + if (phydev->drv && phydev->drv->set_wol)
1697 return phydev->drv->set_wol(phydev, wol);
1698
1699 return -EOPNOTSUPP;
1700 @@ -1457,7 +1472,7 @@ EXPORT_SYMBOL(phy_ethtool_set_wol);
1701
1702 void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
1703 {
1704 - if (phydev->drv->get_wol)
1705 + if (phydev->drv && phydev->drv->get_wol)
1706 phydev->drv->get_wol(phydev, wol);
1707 }
1708 EXPORT_SYMBOL(phy_ethtool_get_wol);
1709 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1710 index 5fdc491e..039f9664 100644
1711 --- a/drivers/net/phy/phy_device.c
1712 +++ b/drivers/net/phy/phy_device.c
1713 @@ -1046,7 +1046,7 @@ int phy_suspend(struct phy_device *phydev)
1714 if (wol.wolopts)
1715 return -EBUSY;
1716
1717 - if (phydrv->suspend)
1718 + if (phydev->drv && phydrv->suspend)
1719 ret = phydrv->suspend(phydev);
1720
1721 if (ret)
1722 @@ -1063,7 +1063,7 @@ int phy_resume(struct phy_device *phydev)
1723 struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver);
1724 int ret = 0;
1725
1726 - if (phydrv->resume)
1727 + if (phydev->drv && phydrv->resume)
1728 ret = phydrv->resume(phydev);
1729
1730 if (ret)
1731 @@ -1726,7 +1726,7 @@ static int phy_remove(struct device *dev)
1732 phydev->state = PHY_DOWN;
1733 mutex_unlock(&phydev->lock);
1734
1735 - if (phydev->drv->remove)
1736 + if (phydev->drv && phydev->drv->remove)
1737 phydev->drv->remove(phydev);
1738 phydev->drv = NULL;
1739
1740 diff --git a/drivers/net/phy/swphy.c b/drivers/net/phy/swphy.c
1741 index 34f58f23..52ddddbe 100644
1742 --- a/drivers/net/phy/swphy.c
1743 +++ b/drivers/net/phy/swphy.c
1744 @@ -77,6 +77,7 @@ static const struct swmii_regs duplex[] = {
1745 static int swphy_decode_speed(int speed)
1746 {
1747 switch (speed) {
1748 + case 10000:
1749 case 1000:
1750 return SWMII_SPEED_1000;
1751 case 100:
1752 diff --git a/include/linux/phy.h b/include/linux/phy.h
1753 index 850c8b51..5f253f1a 100644
1754 --- a/include/linux/phy.h
1755 +++ b/include/linux/phy.h
1756 @@ -81,6 +81,7 @@ typedef enum {
1757 PHY_INTERFACE_MODE_MOCA,
1758 PHY_INTERFACE_MODE_QSGMII,
1759 PHY_INTERFACE_MODE_TRGMII,
1760 + PHY_INTERFACE_MODE_SGMII_2500,
1761 PHY_INTERFACE_MODE_MAX,
1762 } phy_interface_t;
1763
1764 @@ -784,6 +785,9 @@ int phy_stop_interrupts(struct phy_device *phydev);
1765
1766 static inline int phy_read_status(struct phy_device *phydev)
1767 {
1768 + if (!phydev->drv)
1769 + return -EIO;
1770 +
1771 return phydev->drv->read_status(phydev);
1772 }
1773
1774 --
1775 2.14.1
1776