/*
 * Copyright (C) 2015 Cavium Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 */
#include "opt_platform.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bitset.h>
#include <sys/bitstring.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rman.h>
#include <sys/pciio.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/cpuset.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>

#include <machine/bus.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include "thunder_bgx.h"
#include "thunder_bgx_var.h"
#include "nic_reg.h"
#include "nic.h"

#include "lmac_if.h"

#define	THUNDER_BGX_DEVSTR	"ThunderX BGX Ethernet I/O Interface"

MALLOC_DEFINE(M_BGX, "thunder_bgx", "ThunderX BGX dynamic memory");

#define	BGX_NODE_ID_MASK	0x1
#define	BGX_NODE_ID_SHIFT	24
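/*
 * Note: the per-node BGX index is recovered from bit 24 of the device's
 * BAR base address (see thunder_bgx_attach() below).
 */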

#define	DRV_NAME	"thunder-BGX"
#define	DRV_VERSION	"1.0"

static int bgx_init_phy(struct bgx *);

static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
static int lmac_count __unused; /* Total number of LMACs in the system */

static int bgx_xaui_check_link(struct lmac *lmac);
static void bgx_get_qlm_mode(struct bgx *);
static void bgx_init_hw(struct bgx *);
static int bgx_lmac_enable(struct bgx *, uint8_t);
static void bgx_lmac_disable(struct bgx *, uint8_t);

static int thunder_bgx_probe(device_t);
static int thunder_bgx_attach(device_t);
static int thunder_bgx_detach(device_t);

static device_method_t thunder_bgx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		thunder_bgx_probe),
	DEVMETHOD(device_attach,	thunder_bgx_attach),
	DEVMETHOD(device_detach,	thunder_bgx_detach),

	DEVMETHOD_END,
};

static driver_t thunder_bgx_driver = {
	"bgx",
	thunder_bgx_methods,
	sizeof(struct lmac),
};

DRIVER_MODULE(thunder_bgx, pci, thunder_bgx_driver, 0, 0);
MODULE_VERSION(thunder_bgx, 1);
MODULE_DEPEND(thunder_bgx, pci, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, ether, 1, 1, 1);
MODULE_DEPEND(thunder_bgx, thunder_mdio, 1, 1, 1);

static int
thunder_bgx_probe(device_t dev)
{
	uint16_t vendor_id;
	uint16_t device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);

	if (vendor_id == PCI_VENDOR_ID_CAVIUM &&
	    device_id == PCI_DEVICE_ID_THUNDER_BGX) {
		device_set_desc(dev, THUNDER_BGX_DEVSTR);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

static int
thunder_bgx_attach(device_t dev)
{
	struct bgx *bgx;
	uint8_t lmacid;
	int err;
	int rid;
	struct lmac *lmac;

	bgx = malloc(sizeof(*bgx), M_BGX, (M_WAITOK | M_ZERO));
	bgx->dev = dev;

	lmac = device_get_softc(dev);
	lmac->bgx = bgx;
	/* Enable bus mastering */
	pci_enable_busmaster(dev);
	/* Allocate resources - configuration registers */
	rid = PCIR_BAR(PCI_CFG_REG_BAR_NUM);
	bgx->reg_base = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (bgx->reg_base == NULL) {
		device_printf(dev, "Could not allocate CSR memory space\n");
		err = ENXIO;
		goto err_disable_device;
	}

	bgx->bgx_id = (rman_get_start(bgx->reg_base) >> BGX_NODE_ID_SHIFT) &
	    BGX_NODE_ID_MASK;
	bgx->bgx_id += nic_get_node_id(bgx->reg_base) * MAX_BGX_PER_CN88XX;

	bgx_vnic[bgx->bgx_id] = bgx;
	bgx_get_qlm_mode(bgx);

	err = bgx_init_phy(bgx);
	if (err != 0)
		goto err_free_res;

	bgx_init_hw(bgx);

	/* Enable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++) {
		err = bgx_lmac_enable(bgx, lmacid);
		if (err != 0) {
			device_printf(dev, "BGX%d failed to enable lmac%d\n",
			    bgx->bgx_id, lmacid);
			goto err_free_res;
		}
	}

	return (0);

err_free_res:
	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
err_disable_device:
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (err);
}

static int
thunder_bgx_detach(device_t dev)
{
	struct lmac *lmac;
	struct bgx *bgx;
	uint8_t lmacid;

	lmac = device_get_softc(dev);
	bgx = lmac->bgx;
	/* Disable all LMACs */
	for (lmacid = 0; lmacid < bgx->lmac_count; lmacid++)
		bgx_lmac_disable(bgx, lmacid);

	bgx_vnic[bgx->bgx_id] = NULL;
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(bgx->reg_base), bgx->reg_base);
	free(bgx, M_BGX);
	pci_disable_busmaster(dev);

	return (0);
}

/* Register read/write APIs */
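/*
 * Each LMAC has its own copy of the per-LMAC CSRs within the BGX BAR;
 * the LMAC index is placed at bit 20 of the register offset, so
 * "lmac << 20" below selects the window of the requested LMAC.
 */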
static uint64_t
bgx_reg_read(struct bgx *bgx, uint8_t lmac, uint64_t offset)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	return (bus_read_8(bgx->reg_base, addr));
}

static void
bgx_reg_write(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val);
}

static void
bgx_reg_modify(struct bgx *bgx, uint8_t lmac, uint64_t offset, uint64_t val)
{
	bus_space_handle_t addr;

	addr = ((uint32_t)lmac << 20) + offset;

	bus_write_8(bgx->reg_base, addr, val | bus_read_8(bgx->reg_base, addr));
}

static int
bgx_poll_reg(struct bgx *bgx, uint8_t lmac, uint64_t reg, uint64_t mask,
    boolean_t zero)
{
	int timeout = 10;
	uint64_t reg_val;

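	/*
	 * Poll for at most 10 * 100us: succeed once the masked bits read
	 * as zero (zero == TRUE) or as non-zero (zero == FALSE).
	 */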
	while (timeout) {
		reg_val = bgx_reg_read(bgx, lmac, reg);
		if (zero && !(reg_val & mask))
			return (0);
		if (!zero && (reg_val & mask))
			return (0);

		DELAY(100);
		timeout--;
	}
	return (ETIMEDOUT);
}

/* Return a bitmap of the BGX blocks present in HW on the given node */
u_int
bgx_get_map(int node)
{
	int i;
	u_int map = 0;

	for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
		if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
			map |= (1 << i);
	}

	return (map);
}

/* Return the number of LMACs configured for this BGX */
int
bgx_get_lmac_count(int node, int bgx_idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx != NULL)
		return (bgx->lmac_count);

	return (0);
}

/* Return the current link status of the LMAC */
void
bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
{
	struct bgx_link_status *link = (struct bgx_link_status *)status;
	struct bgx *bgx;
	struct lmac *lmac;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmacid];
	link->link_up = lmac->link_up;
	link->duplex = lmac->last_duplex;
	link->speed = lmac->last_speed;
}

const uint8_t *
bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx != NULL)
		return (bgx->lmac[lmacid].mac);

	return (NULL);
}

void
bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const uint8_t *mac)
{
	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];

	if (bgx == NULL)
		return;

	memcpy(bgx->lmac[lmacid].mac, mac, ETHER_ADDR_LEN);
}

static void
bgx_sgmii_change_link_state(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	uint64_t cmr_cfg;
	uint64_t port_cfg = 0;
	uint64_t misc_ctl = 0;

	cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
	cmr_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
	misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);

	if (lmac->link_up) {
		misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
		port_cfg &= ~GMI_PORT_CFG_DUPLEX;
		port_cfg |= (lmac->last_duplex << 2);
	} else {
		misc_ctl |= PCS_MISC_CTL_GMX_ENO;
	}

	switch (lmac->last_speed) {
	case 10:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg |= GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 1 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 50;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 100:
		port_cfg &= ~GMI_PORT_CFG_SPEED;	/* speed 0 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg &= ~GMI_PORT_CFG_SLOT_TIME;	/* slottime 0 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 5;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
		break;
	case 1000:
		port_cfg |= GMI_PORT_CFG_SPEED;		/* speed 1 */
		port_cfg &= ~GMI_PORT_CFG_SPEED_MSB;	/* speed_msb 0 */
		port_cfg |= GMI_PORT_CFG_SLOT_TIME;	/* slottime 1 */
		misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
		misc_ctl |= 1;				/* samp_pt */
		bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
		if (lmac->last_duplex)
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 0);
		else
			bgx_reg_write(bgx, lmac->lmacid,
			    BGX_GMP_GMI_TXX_BURST, 8192);
		break;
	default:
		break;
	}
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
	bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);

	port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);

	/* Re-enable the LMAC */
	cmr_cfg |= CMR_EN;
	bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
}

static void
bgx_lmac_handler(void *arg)
{
	struct lmac *lmac;
	int link, duplex, speed;
	int link_changed = 0;
	int err;

	lmac = (struct lmac *)arg;

	err = LMAC_MEDIA_STATUS(lmac->phy_if_dev, lmac->lmacid,
	    &link, &duplex, &speed);
	if (err != 0)
		goto out;

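	/*
	 * link_changed: -1 if the link went down, 1 if it came up or its
	 * speed/duplex changed, 0 if nothing changed since the last poll.
	 */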
	if (!link && lmac->last_link)
		link_changed = -1;

	if (link &&
	    (lmac->last_duplex != duplex ||
	     lmac->last_link != link ||
	     lmac->last_speed != speed)) {
		link_changed = 1;
	}

	lmac->last_link = link;
	lmac->last_speed = speed;
	lmac->last_duplex = duplex;

	if (!link_changed)
		goto out;

	if (link_changed > 0)
		lmac->link_up = true;
	else
		lmac->link_up = false;

	if (lmac->is_sgmii)
		bgx_sgmii_change_link_state(lmac);
	else
		bgx_xaui_check_link(lmac);

out:
	callout_reset(&lmac->check_link, hz * 2, bgx_lmac_handler, lmac);
}

uint64_t
bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

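	/*
	 * RX statistics beyond index 8 do not seem to be tracked per LMAC,
	 * hence the clamp to LMAC 0 below.
	 */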
	if (idx > 8)
		lmac = 0;
	return (bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8)));
}

uint64_t
bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
	struct bgx *bgx;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return (0);

	return (bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8)));
}

static void
bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
{
	uint64_t offset;

	while (bgx->lmac[lmac].dmac > 0) {
		offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(uint64_t)) +
		    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
		bgx->lmac[lmac].dmac--;
	}
}

void
bgx_add_dmac_addr(uint64_t dmac, int node, int bgx_idx, int lmac)
{
	uint64_t offset;
	struct bgx *bgx;

#ifdef BGX_IN_PROMISCUOUS_MODE
	return;
#endif

	bgx_idx += node * MAX_BGX_PER_CN88XX;
	bgx = bgx_vnic[bgx_idx];

	if (bgx == NULL) {
		printf("BGX%d not yet initialized, ignoring DMAC addition\n",
		    bgx_idx);
		return;
	}

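	/*
	 * Build the CAM entry: bits 47:0 hold the DMAC, bit 48 enables the
	 * entry and the destination LMAC id is placed at bit 49.
	 */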
	dmac = dmac | (1UL << 48) | ((uint64_t)lmac << 49); /* Enable DMAC */
	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC) {
		device_printf(bgx->dev,
		    "Max DMAC filters for LMAC%d reached, ignoring\n",
		    lmac);
		return;
	}

	if (bgx->lmac[lmac].dmac == MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE)
		bgx->lmac[lmac].dmac = 1;

	offset = (bgx->lmac[lmac].dmac * sizeof(uint64_t)) +
	    (lmac * MAX_DMAC_PER_LMAC * sizeof(uint64_t));
	bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, dmac);
	bgx->lmac[lmac].dmac++;

	bgx_reg_write(bgx, lmac, BGX_CMRX_RX_DMAC_CTL,
	    (CAM_ACCEPT << 3) | (MCAST_MODE_CAM_FILTER << 1) |
	    (BCAST_ACCEPT << 0));
}

/* Configure BGX LMAC in internal loopback mode */
void
bgx_lmac_internal_loopback(int node, int bgx_idx,
    int lmac_idx, boolean_t enable)
{
	struct bgx *bgx;
	struct lmac *lmac;
	uint64_t cfg;

	bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
	if (bgx == NULL)
		return;

	lmac = &bgx->lmac[lmac_idx];
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL);
		if (enable)
			cfg |= PCS_MRX_CTL_LOOPBACK1;
		else
			cfg &= ~PCS_MRX_CTL_LOOPBACK1;
		bgx_reg_write(bgx, lmac_idx, BGX_GMP_PCS_MRX_CTL, cfg);
	} else {
		cfg = bgx_reg_read(bgx, lmac_idx, BGX_SPUX_CONTROL1);
		if (enable)
			cfg |= SPU_CTL_LOOPBACK;
		else
			cfg &= ~SPU_CTL_LOOPBACK;
		bgx_reg_write(bgx, lmac_idx, BGX_SPUX_CONTROL1, cfg);
	}
}

static int
bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
{
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);

	/* Disable frame alignment if using preamble */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
	if (cfg & 1)
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	/* PCS reset */
	bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
	    PCS_MRX_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX PCS reset not completed\n");
		return (ENXIO);
	}

	/* power down, reset autoneg, autoneg enable */
	cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
	cfg &= ~PCS_MRX_CTL_PWR_DN;
	cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
	bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);

	if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
	    PCS_MRX_STATUS_AN_CPT, FALSE) != 0) {
		device_printf(bgx->dev, "BGX AN_CPT not completed\n");
		return (ENXIO);
	}

	return (0);
}

static int
bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
{
	uint64_t cfg;

	/* Reset SPU */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	/* Disable LMAC */
	cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
	/* RXAUI additionally requires interleaved running disparity */
	if (bgx->lmac_type != BGX_MODE_RXAUI) {
		bgx_reg_modify(bgx, lmacid,
		    BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	} else {
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
		    SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
	}

	/* clear all interrupts */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);

	if (bgx->use_training) {
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
		bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
		/* training enable */
		bgx_reg_modify(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL,
		    SPU_PMD_CRTL_TRAIN_EN);
	}

	/* Append FCS to each packet */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);

	/* Disable forward error correction */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
	cfg &= ~SPU_FEC_CTL_FEC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);

	/* Disable autoneg */
	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
	cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
	if (bgx->lmac_type == BGX_MODE_10G_KR)
		cfg |= (1 << 23);
	else if (bgx->lmac_type == BGX_MODE_40G_KR)
		cfg |= (1 << 24);
	else
		cfg &= ~((1 << 23) | (1 << 24));
	cfg = cfg & (~((1UL << 25) | (1UL << 22) | (1UL << 12)));
	bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);

	cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
	cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
	bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
	cfg &= ~SPU_CTL_LOW_POWER;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);

	cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
	cfg &= ~SMU_TX_CTL_UNI_EN;
	cfg |= SMU_TX_CTL_DIC_EN;
	bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);

	/* take lmac_count into account */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
	/* max packet size */
	bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);

	return (0);
}

static int
bgx_xaui_check_link(struct lmac *lmac)
{
	struct bgx *bgx = lmac->bgx;
	int lmacid = lmac->lmacid;
	int lmac_type = bgx->lmac_type;
	uint64_t cfg;

	bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
	if (bgx->use_training) {
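		/*
		 * Bit 13 of BGX_SPUX_INT appears to latch training
		 * completion; if it is not set yet, clear the training
		 * interrupts and restart training via bit 0 of
		 * BGX_SPUX_BR_PMD_CRTL.
		 */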
		cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
		if ((cfg & (1UL << 13)) == 0) {
			cfg = (1UL << 13) | (1UL << 14);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
			cfg |= (1UL << 0);
			bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
			return (ENXIO);
		}
	}

	/* wait for PCS to come out of reset */
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1,
	    SPU_CTL_RESET, TRUE) != 0) {
		device_printf(bgx->dev, "BGX SPU reset not completed\n");
		return (ENXIO);
	}

	if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
	    (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
		    SPU_BR_STATUS_BLK_LOCK, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BR_STATUS_BLK_LOCK not completed\n");
			return (ENXIO);
		}
	} else {
		if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
		    SPU_BX_STATUS_RX_ALIGN, FALSE) != 0) {
			device_printf(bgx->dev,
			    "SPU_BX_STATUS_RX_ALIGN not completed\n");
			return (ENXIO);
		}
	}

	/* Clear rcvflt bit (latching high) and read it back */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
	if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
		device_printf(bgx->dev, "Receive fault, retry training\n");
		if (bgx->use_training) {
			cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
			if ((cfg & (1UL << 13)) == 0) {
				cfg = (1UL << 13) | (1UL << 14);
				bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
				cfg = bgx_reg_read(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL);
				cfg |= (1UL << 0);
				bgx_reg_write(bgx, lmacid,
				    BGX_SPUX_BR_PMD_CRTL, cfg);
				return (ENXIO);
			}
		}
		return (ENXIO);
	}

	/* Wait for MAC RX to be ready */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
	    SMU_RX_CTL_STATUS, TRUE) != 0) {
		device_printf(bgx->dev, "SMU RX link not okay\n");
		return (ENXIO);
	}

	/* Wait for BGX RX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_RX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU RX not idle\n");
		return (ENXIO);
	}

	/* Wait for BGX TX to be idle */
	if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL,
	    SMU_CTL_TX_IDLE, FALSE) != 0) {
		device_printf(bgx->dev, "SMU TX not idle\n");
		return (ENXIO);
	}

	if ((bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) &
	    SPU_STATUS2_RCVFLT) != 0) {
		device_printf(bgx->dev, "Receive fault\n");
		return (ENXIO);
	}

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE) != 0) {
		device_printf(bgx->dev, "SPU receive link down\n");
		return (ENXIO);
	}

	cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
	cfg &= ~SPU_MISC_CTL_RX_DIS;
	bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
	return (0);
}

static void
bgx_poll_for_link(void *arg)
{
	struct lmac *lmac;
	uint64_t link;

	lmac = (struct lmac *)arg;

	/* Receive link is latching low. Force it high and verify it */
	bgx_reg_modify(lmac->bgx, lmac->lmacid,
	    BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
	bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
	    SPU_STATUS1_RCV_LNK, FALSE);

	link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
	if (link & SPU_STATUS1_RCV_LNK) {
		lmac->link_up = 1;
		if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
			lmac->last_speed = 40000;
		else
			lmac->last_speed = 10000;
		lmac->last_duplex = 1;
	} else {
		lmac->link_up = 0;
	}

	if (lmac->last_link != lmac->link_up) {
		lmac->last_link = lmac->link_up;
		if (lmac->link_up)
			bgx_xaui_check_link(lmac);
	}

	callout_reset(&lmac->check_link, hz * 2, bgx_poll_for_link, lmac);
}

static int
bgx_lmac_enable(struct bgx *bgx, uint8_t lmacid)
{
	uint64_t __unused dmac_bcast = (1UL << 48) - 1;
	struct lmac *lmac;
	uint64_t cfg;

	lmac = &bgx->lmac[lmacid];
	lmac->bgx = bgx;

	if (bgx->lmac_type == BGX_MODE_SGMII) {
		lmac->is_sgmii = 1;
		if (bgx_lmac_sgmii_init(bgx, lmacid) != 0)
			return (ENXIO);
	} else {
		lmac->is_sgmii = 0;
		if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type) != 0)
			return (ENXIO);
	}

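	/*
	 * Pad short frames and append FCS on transmit; enforce the 60-byte
	 * minimum frame size (the SMU minimum appears to include the
	 * 4-byte FCS, while the GMI register takes size minus one).
	 */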
	if (lmac->is_sgmii) {
		cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
	} else {
		cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
		cfg |= ((1UL << 2) | (1UL << 1)); /* FCS and PAD */
		bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
		bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
	}

	/* Enable lmac */
	bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
	    CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);

	/* Restore default cfg, in case low-level firmware changed it */
	bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);

	/* Add broadcast MAC into all LMAC's DMAC filters */
	bgx_add_dmac_addr(dmac_bcast, 0, bgx->bgx_id, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XAUI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return (ENXIO);
		}

		if (LMAC_PHY_CONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not connect to PHY\n", lmacid);
			return (ENXIO);
		}
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_lmac_handler(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	} else {
		mtx_init(&lmac->check_link_mtx, "BGX link poll", NULL, MTX_DEF);
		callout_init_mtx(&lmac->check_link, &lmac->check_link_mtx, 0);
		mtx_lock(&lmac->check_link_mtx);
		bgx_poll_for_link(lmac);
		mtx_unlock(&lmac->check_link_mtx);
	}

	return (0);
}

static void
bgx_lmac_disable(struct bgx *bgx, uint8_t lmacid)
{
	struct lmac *lmac;
	uint64_t cmrx_cfg;

	lmac = &bgx->lmac[lmacid];

	/* Stop callout */
	callout_drain(&lmac->check_link);
	mtx_destroy(&lmac->check_link_mtx);

	cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
	cmrx_cfg &= ~CMR_EN;
	bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
	bgx_flush_dmac_addrs(bgx, lmacid);

	if ((bgx->lmac_type != BGX_MODE_XFI) &&
	    (bgx->lmac_type != BGX_MODE_XLAUI) &&
	    (bgx->lmac_type != BGX_MODE_40G_KR) &&
	    (bgx->lmac_type != BGX_MODE_10G_KR)) {
		if (lmac->phy_if_dev == NULL) {
			device_printf(bgx->dev,
			    "LMAC%d missing interface to PHY\n", lmacid);
			return;
		}
		if (LMAC_PHY_DISCONNECT(lmac->phy_if_dev, lmac->phyaddr,
		    lmacid) != 0) {
			device_printf(bgx->dev,
			    "LMAC%d could not disconnect PHY\n", lmacid);
			return;
		}
		lmac->phy_if_dev = NULL;
	}
}

static void
bgx_set_num_ports(struct bgx *bgx)
{
	uint64_t lmac_count;

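	/*
	 * lane_to_sds encodes the lane-to-SerDes mapping, two bits per
	 * lane: 0xE4 (lanes 3,2,1,0) is the identity map used by the
	 * multi-lane modes, while the single-lane modes start at 0 and
	 * are offset per LMAC in bgx_init_hw().
	 */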
	switch (bgx->qlm_mode) {
	case QLM_MODE_SGMII:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_SGMII;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_RXAUI_2X2:
		bgx->lmac_count = 2;
		bgx->lmac_type = BGX_MODE_RXAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_XFI_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_XFI;
		bgx->lane_to_sds = 0;
		break;
	case QLM_MODE_XLAUI_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_XLAUI;
		bgx->lane_to_sds = 0xE4;
		break;
	case QLM_MODE_10G_KR_4X1:
		bgx->lmac_count = 4;
		bgx->lmac_type = BGX_MODE_10G_KR;
		bgx->lane_to_sds = 0;
		bgx->use_training = 1;
		break;
	case QLM_MODE_40G_KR4_1X4:
		bgx->lmac_count = 1;
		bgx->lmac_type = BGX_MODE_40G_KR;
		bgx->lane_to_sds = 0xE4;
		bgx->use_training = 1;
		break;
	default:
		bgx->lmac_count = 0;
		break;
	}

	/*
	 * If the low-level firmware has programmed an LMAC count based on
	 * the board type, use that value instead of the static defaults
	 * chosen above.
	 */
	lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
	if (lmac_count != 4)
		bgx->lmac_count = lmac_count;
}

static void
bgx_init_hw(struct bgx *bgx)
{
	int i;

	bgx_set_num_ports(bgx);

	bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
	if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
		device_printf(bgx->dev, "BGX%d BIST failed\n", bgx->bgx_id);

	/* Set lmac type and lane2serdes mapping */
	for (i = 0; i < bgx->lmac_count; i++) {
		if (bgx->lmac_type == BGX_MODE_RXAUI) {
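			/*
			 * RXAUI pairs two SerDes lanes per LMAC: 0x04
			 * maps LMAC0 to lanes 0-1 and 0x0e maps LMAC1
			 * to lanes 2-3.
			 */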
			if (i)
				bgx->lane_to_sds = 0x0e;
			else
				bgx->lane_to_sds = 0x04;
			bgx_reg_write(bgx, i, BGX_CMRX_CFG,
			    (bgx->lmac_type << 8) | bgx->lane_to_sds);
			continue;
		}
		bgx_reg_write(bgx, i, BGX_CMRX_CFG,
		    (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
		bgx->lmac[i].lmacid_bd = lmac_count;
		lmac_count++;
	}

	bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
	bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);

	/* Set the backpressure AND mask */
	for (i = 0; i < bgx->lmac_count; i++) {
		bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
		    ((1UL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
		    (i * MAX_BGX_CHANS_PER_LMAC));
	}

	/* Disable all MAC filtering */
	for (i = 0; i < RX_DMAC_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);

	/* Disable MAC steering (NCSI traffic) */
	for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
		bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
}

static void
bgx_get_qlm_mode(struct bgx *bgx)
{
	device_t dev = bgx->dev;
	int lmac_type;
	int train_en;

	/*
	 * Read LMAC0 type to figure out the QLM mode.
	 * This is configured by low-level firmware.
	 */
	lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
	lmac_type = (lmac_type >> 8) & 0x07;

	train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
	    SPU_PMD_CRTL_TRAIN_EN;

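	/*
	 * XFI/10G_KR and XLAUI/40G_KR4 share the same LMAC type encoding;
	 * the link-training enable bit tells the KR (backplane) modes apart.
	 */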
	switch (lmac_type) {
	case BGX_MODE_SGMII:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: SGMII\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XAUI:
		bgx->qlm_mode = QLM_MODE_XAUI_1X4;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: XAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_RXAUI:
		bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM mode: RXAUI\n",
			    bgx->bgx_id);
		}
		break;
	case BGX_MODE_XFI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XFI_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XFI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 10G_KR\n",
				    bgx->bgx_id);
			}
		}
		break;
	case BGX_MODE_XLAUI:
		if (!train_en) {
			bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: XLAUI\n",
				    bgx->bgx_id);
			}
		} else {
			bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
			if (bootverbose) {
				device_printf(dev, "BGX%d QLM mode: 40G_KR4\n",
				    bgx->bgx_id);
			}
		}
		break;
	default:
		bgx->qlm_mode = QLM_MODE_SGMII;
		if (bootverbose) {
			device_printf(dev, "BGX%d QLM default mode: SGMII\n",
			    bgx->bgx_id);
		}
	}
}

static int
bgx_init_phy(struct bgx *bgx)
{
	int err;

	/* By default we fail */
	err = ENXIO;
#ifdef FDT
	err = bgx_fdt_init_phy(bgx);
#endif
#ifdef ACPI
	if (err != 0) {
		/* ARM64TODO: Add ACPI function here */
	}
#endif
	return (err);
}