/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"
#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, thunder_bgx_probe),
	DEVMETHOD(device_attach, thunder_bgx_attach),
	DEVMETHOD(device_detach, thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

static devclass_t thunder_bgx_devclass;

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, thunder_bgx_devclass, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);
static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

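	/*
	 * Derive a system-wide BGX index: bit 24 of the CSR base address
	 * (BGX_NODE_ID_SHIFT/MASK) distinguishes the BGX instances within
	 * a node, and nic_get_node_id() selects the CN88XX node.
	 */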
	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/*
 * Register read/write APIs.
 * Each LMAC's CSR block is placed at a 1 MB stride within the BGX BAR,
 * hence the (lmac << 20) added to every register offset.
 */
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

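/*
 * Poll a register until the bits in 'mask' clear (zero == TRUE) or become
 * set (zero == FALSE), giving up after roughly 1 ms (10 x 100 us).
 */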
static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return a bitmap of the BGX instances present in HW */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return the number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Returns the current link status of LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED;		/* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME;	/* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1; /* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable the LMAC */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

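/*
 * Callout handler for LMACs whose link is managed through a PHY driver:
 * query the PHY for media status, record any change and push it down to
 * the MAC, then reschedule the check two seconds later.
 */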
static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	    lmac->last_link != link ||
	    lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

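	/*
	 * CAM entry as written below: bits 47:0 carry the MAC address,
	 * bit 48 marks the entry valid and bits 50:49 select the LMAC it
	 * belongs to.
	 */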
	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* Take PCS out of power-down, then restart and enable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

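/*
 * Initialize an LMAC running one of the SPU-based modes (XAUI, RXAUI,
 * XFI, XLAUI, 10G-KR or 40G-KR): reset the SPU, set up link training
 * where used, disable FEC and autonegotiation, then re-enable the MAC.
 */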
static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* Set interleaved running disparity for RXAUI */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* Clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* Training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* Take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* Max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

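/*
 * Verify that a SPU-based link is actually usable: wait for block/lane
 * lock, clear and re-check the latched receive-fault and link bits, and
 * only then take the receiver out of its disabled state.
 */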
static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* Wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE)) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

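/*
 * Callout handler for PHY-less (SPU) LMACs: sample the latched SPU
 * receive-link bit every two seconds and rerun the full link check
 * whenever the state changes.
 */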
static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

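/*
 * Bring up a single LMAC: run the mode-specific (SGMII or SPU) init,
 * enable padding/FCS on transmit, turn the MAC on and start the
 * appropriate link-monitoring callout.
 */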
static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (-1);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
			return (-1);
	}

	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable LMAC */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	/* Disable the MAC: clear CMR_EN (bit 15) */
	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~(1 << 15);
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

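/*
 * Translate the QLM mode detected at attach time into the LMAC count,
 * LMAC type and lane-to-SerDes mapping used to program the hardware.
 */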
static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * Check if the low level firmware has programmed an LMAC count
	 * based on the board type. If so, use that value; otherwise keep
	 * the static defaults set above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

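/*
 * One-time hardware setup for the whole BGX block: program the LMAC
 * count, per-LMAC type/lane mapping and backpressure masks, and clear
 * all DMAC filtering and NCSI steering state.
 */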
static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read the LMAC0 type to figure out the QLM mode.
	 * This is configured by the low level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
	}
}

static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}