1 /**************************************************************************
2
3 Copyright (c) 2007-2009 Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
11
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 ***************************************************************************/
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD: releng/8.0/sys/dev/cxgb/common/cxgb_xgmac.c 189643 2009-03-10 19:22:45Z gnn $");
32
33 #include <cxgb_include.h>
34
35 #undef msleep
36 #define msleep t3_os_sleep
37
38
/*
 * Return the index (0 or 1) of a MAC instance, derived from its register
 * block offset relative to the spacing between the two XGMAC register files.
 */
static inline int macidx(const struct cmac *mac)
{
	return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}
43
/*
 * Reset the XAUI SERDES lanes of a MAC.  Everything is first forced into
 * reset/power-down in a single write, then the bits are released in stages
 * (power-up, PLL reset release, lane reset release; lanes 0/1 before 2/3),
 * with a short settling delay after each stage.
 */
static void xaui_serdes_reset(struct cmac *mac)
{
	/* Bit groups released one per loop iteration, in this exact order. */
	static const unsigned int clear[] = {
		F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
		F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
	};

	int i;
	adapter_t *adap = mac->adapter;
	u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;

	/* Assert all resets/power-downs on top of the VPD XAUI config. */
	t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
		     F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
		     F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
		     F_RESETPLL23 | F_RESETPLL01);
	(void)t3_read_reg(adap, ctrl);		/* flush the write */
	udelay(15);

	for (i = 0; i < ARRAY_SIZE(clear); i++) {
		t3_set_reg_field(adap, ctrl, clear[i], 0);
		udelay(15);
	}
}
67
/**
 *	t3b_pcs_reset - reset the PCS on T3B+ adapters
 *	@mac: the XGMAC handle
 *
 *	Reset the XGMAC PCS block on T3B+ adapters.  The reset field is
 *	cleared, held for 20us, then set again (the trailing underscore in
 *	F_PCS_RESET_ suggests the field is active-low, i.e. clearing it
 *	asserts the reset -- TODO confirm against the T3 register spec).
 */
void t3b_pcs_reset(struct cmac *mac)
{
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
			 F_PCS_RESET_, 0);
	udelay(20);
	t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
			 F_PCS_RESET_);
}
82
/**
 *	t3_mac_reset - reset a MAC
 *	@mac: the MAC to reset
 *
 *	Reset the given MAC and reprogram its base configuration.
 *	Returns 0 on success, -1 if the XAUI SERDES CMU fails to lock on
 *	rev 0 parts.
 */
int t3_mac_reset(struct cmac *mac)
{
	/* Initial register values programmed right after the MAC reset:
	 * Tx/Rx disabled, standard Rx filtering, hash and exact-match
	 * filters cleared, statistics counters cleared. */
	static struct addr_val_pair mac_reset_avp[] = {
		{ A_XGM_TX_CTRL, 0 },
		{ A_XGM_RX_CTRL, 0 },
		{ A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
		                F_RMFCS | F_ENJUMBO | F_ENHASHMCAST },
		{ A_XGM_RX_HASH_LOW, 0 },
		{ A_XGM_RX_HASH_HIGH, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_1, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_2, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_3, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_4, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_5, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_6, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_7, 0 },
		{ A_XGM_RX_EXACT_MATCH_LOW_8, 0 },
		{ A_XGM_STAT_CTRL, F_CLRSTATS }
	};
	u32 val;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	/* Put the whole MAC into reset. */
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */

	t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
	/* Store-and-forward Rx only for non-XAUI (RGMII) configurations. */
	t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
			 F_RXSTRFRWRD | F_DISERRFRAMES,
			 uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0, F_UNDERUNFIX);

	if (uses_xaui(adap)) {
		if (adap->params.rev == 0) {
			/* Rev 0: enable the SERDES and wait for the CMU to
			 * lock before releasing the SERDES reset. */
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_RXENABLE | F_TXENABLE);
			if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
					    F_CMULOCK, 1, 5, 2)) {
				CH_ERR(adap,
				       "MAC %d XAUI SERDES CMU lock failed\n",
				       macidx(mac));
				return -1;
			}
			t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
					 F_SERDESRESET_);
		} else
			xaui_serdes_reset(mac);
	}


	if (mac->multiport) {
		/* Multiport (external VSC7323) setup: preamble is carried
		 * through, so tighten the max packet size and enable Tx/Rx
		 * immediately. */
		t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			     MAX_FRAME_SIZE - 4);
		t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft, 0,
				 F_DISPREAMBLE);
		t3_set_reg_field(adap, A_XGM_RX_CFG + oft, 0, F_COPYPREAMBLE |
				 F_ENNON802_3PREAMBLE);
		t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + oft,
				 V_TXFIFOTHRESH(M_TXFIFOTHRESH),
				 V_TXFIFOTHRESH(64));
		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	}

	t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + oft,
			 V_RXMAXFRAMERSIZE(M_RXMAXFRAMERSIZE),
			 V_RXMAXFRAMERSIZE(MAX_FRAME_SIZE) | F_RXENFRAMER);

	/* Take the MAC out of reset, keeping the appropriate sub-blocks
	 * (PCS for XAUI, RGMII otherwise; XG2G unless multiport) released
	 * as well. */
	val = F_MAC_RESET_ | F_XGMAC_STOP_EN;
	if (!mac->multiport)
		val |= F_XG2G_RESET_;
	if (uses_xaui(adap))
		val |= F_PCS_RESET_;
	else
		val |= F_RGMII_RESET_;
	t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
	(void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft);    /* flush */
	if ((val & F_PCS_RESET_) && adap->params.rev) {
		/* T3B+ parts need an explicit PCS reset afterwards. */
		msleep(1);
		t3b_pcs_reset(mac);
	}

	memset(&mac->stats, 0, sizeof(mac->stats));
	return 0;
}
174
175 static int t3b2_mac_reset(struct cmac *mac)
176 {
177 u32 val;
178 adapter_t *adap = mac->adapter;
179 unsigned int oft = mac->offset;
180 int idx = macidx(mac);
181 unsigned int store;
182
183 /* Stop egress traffic to xgm*/
184 if (!macidx(mac))
185 t3_set_reg_field(adap, A_MPS_CFG, F_PORT0ACTIVE, 0);
186 else
187 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE, 0);
188
189 /* This will reduce the number of TXTOGGLES */
190 /* Clear: to stop the NIC traffic */
191 t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 0);
192 /* Ensure TX drains */
193 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN, 0);
194
195 /* PCS in reset */
196 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
197 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
198
199 /* Store A_TP_TX_DROP_CFG_CH0 */
200 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
201 store = t3_read_reg(adap, A_TP_TX_DROP_CFG_CH0 + idx);
202
203 msleep(10);
204
205 /* Change DROP_CFG to 0xc0000011 */
206 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
207 t3_write_reg(adap, A_TP_PIO_DATA, 0xc0000011);
208
209 /* Check for xgm Rx fifo empty */
210 /* Increased loop count to 1000 from 5 cover 1G and 100Mbps case */
211 if (t3_wait_op_done(adap, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT + oft,
212 0x80000000, 1, 1000, 2)) {
213 CH_ERR(adap, "MAC %d Rx fifo drain failed\n",
214 macidx(mac));
215 return -1;
216 }
217
218 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, 0); /*MAC in reset*/
219 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
220
221 val = F_MAC_RESET_;
222 if (is_10G(adap))
223 val |= F_PCS_RESET_;
224 else if (uses_xaui(adap))
225 val |= F_PCS_RESET_ | F_XG2G_RESET_;
226 else
227 val |= F_RGMII_RESET_ | F_XG2G_RESET_;
228 t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
229 (void) t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
230 if ((val & F_PCS_RESET_) && adap->params.rev) {
231 msleep(1);
232 t3b_pcs_reset(mac);
233 }
234 t3_write_reg(adap, A_XGM_RX_CFG + oft,
235 F_DISPAUSEFRAMES | F_EN1536BFRAMES |
236 F_RMFCS | F_ENJUMBO | F_ENHASHMCAST );
237
238 /* Restore the DROP_CFG */
239 t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
240 t3_write_reg(adap, A_TP_PIO_DATA, store);
241
242 /* Resume egress traffic to xgm */
243 if (!macidx(mac))
244 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT0ACTIVE);
245 else
246 t3_set_reg_field(adap, A_MPS_CFG, 0, F_PORT1ACTIVE);
247
248 /* Set: re-enable NIC traffic */
249 t3_set_reg_field(adap, A_MPS_CFG, F_ENFORCEPKT, 1);
250
251 return 0;
252 }
253
254 /*
255 * Set the exact match register 'idx' to recognize the given Ethernet address.
256 */
257 static void set_addr_filter(struct cmac *mac, int idx, const u8 *addr)
258 {
259 u32 addr_lo, addr_hi;
260 unsigned int oft = mac->offset + idx * 8;
261
262 addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
263 addr_hi = (addr[5] << 8) | addr[4];
264
265 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
266 t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
267 }
268
/**
 *	t3_mac_set_address - set one of the station's unicast MAC addresses
 *	@mac: the MAC handle
 *	@idx: index of the exact address match filter to use
 *	@addr: the Ethernet address
 *
 *	Set one of the station's unicast MAC addresses.  Returns -EINVAL
 *	if the (possibly remapped) index falls outside the filters reserved
 *	for unicast addresses.
 */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
	if (mac->multiport)
		/* Filters are striped across ports on multiport adapters. */
		idx = mac->ext_port + idx * mac->adapter->params.nports;
	if (idx >= mac->nucast)
		return -EINVAL;
	set_addr_filter(mac, idx, addr);
	if (mac->multiport && idx < mac->adapter->params.nports)
		/* Mirror per-port primary addresses into the VSC7323. */
		t3_vsc7323_set_addr(mac->adapter, addr, idx);
	return 0;
}
288
289 /**
290 * t3_mac_set_num_ucast - set the number of unicast addresses needed
291 * @mac: the MAC handle
292 * @n: number of unicast addresses needed
293 *
294 * Specify the number of exact address filters that should be reserved for
295 * unicast addresses. Caller should reload the unicast and multicast
296 * addresses after calling this.
297 *
298 * Generally, this is 1 with the first one used for the station address,
299 * and the rest are available for multicast addresses.
300 */
301 int t3_mac_set_num_ucast(struct cmac *mac, unsigned char n)
302 {
303 if (n > EXACT_ADDR_FILTERS)
304 return -EINVAL;
305 mac->nucast = n;
306 return 0;
307 }
308
/*
 * Temporarily disable all Rx exact-match filters without losing the
 * programmed addresses: each filter's LOW register is rewritten with its
 * current value.  Apparently writing the LOW half disarms the filter
 * until its HIGH half is rewritten (which t3_mac_enable_exact_filters()
 * does) -- NOTE(review): this disarm-on-LOW-write behavior is inferred
 * from the enable/disable pairing; confirm against the T3 datasheet.
 */
void t3_mac_disable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_LOW_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
319
/*
 * Re-enable the Rx exact-match filters disabled by
 * t3_mac_disable_exact_filters(): rewriting each filter's HIGH register
 * with its current value re-arms the filter with the address that was
 * already programmed.
 */
void t3_mac_enable_exact_filters(struct cmac *mac)
{
	unsigned int i, reg = mac->offset + A_XGM_RX_EXACT_MATCH_HIGH_1;

	for (i = 0; i < EXACT_ADDR_FILTERS; i++, reg += 8) {
		u32 v = t3_read_reg(mac->adapter, reg);
		t3_write_reg(mac->adapter, reg, v);
	}
	t3_read_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1); /* flush */
}
330
331 /* Calculate the RX hash filter index of an Ethernet address */
332 static int hash_hw_addr(const u8 *addr)
333 {
334 int hash = 0, octet, bit, i = 0, c;
335
336 for (octet = 0; octet < 6; ++octet)
337 for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
338 hash ^= (c & 1) << i;
339 if (++i == 6)
340 i = 0;
341 }
342 return hash;
343 }
344
/**
 *	t3_mac_set_rx_mode - set the Rx mode and address filters
 *	@mac: the MAC to configure
 *	@rm: structure containing the Rx mode and MAC addresses needed
 *
 *	Configures the MAC Rx mode (promiscuity, etc) and exact and hash
 *	address filters.  Multicast addresses are placed in the exact-match
 *	filters left over after the unicast reservation; once those run out
 *	they fall back to the 64-bit hash filter.
 */
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
	u32 hash_lo, hash_hi;
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;

	/* Track per-port promiscuity; COPYALLFRAMES stays on while any
	 * port in the map is promiscuous. */
	if (promisc_rx_mode(rm))
		mac->promisc_map |= 1 << mac->ext_port;
	else
		mac->promisc_map &= ~(1 << mac->ext_port);
	t3_set_reg_field(adap, A_XGM_RX_CFG + oft, F_COPYALLFRAMES,
			 mac->promisc_map ? F_COPYALLFRAMES : 0);

	if (allmulti_rx_mode(rm) || mac->multiport)
		hash_lo = hash_hi = 0xffffffff;	/* accept all multicast */
	else {
		u8 *addr;
		int exact_addr_idx = mac->nucast;

		hash_lo = hash_hi = 0;
		while ((addr = t3_get_next_mcaddr(rm)))
			if (exact_addr_idx < EXACT_ADDR_FILTERS)
				set_addr_filter(mac, exact_addr_idx++, addr);
			else {
				int hash = hash_hw_addr(addr);

				if (hash < 32)
					hash_lo |= (1 << hash);
				else
					hash_hi |= (1 << (hash - 32));
			}
	}

	t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
	t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
	return 0;
}
390
391 static int rx_fifo_hwm(int mtu)
392 {
393 int hwm;
394
395 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100);
396 return min(hwm, MAC_RXFIFO_SIZE - 8192);
397 }
398
/**
 *	t3_mac_set_mtu - set the MAC MTU
 *	@mac: the MAC to configure
 *	@mtu: the MTU
 *
 *	Sets the MAC MTU and adjusts the FIFO PAUSE watermarks accordingly.
 *	Returns -EINVAL if the resulting frame size exceeds the hardware
 *	maximum and -EIO if the Rx FIFO fails to drain on T3B2+.
 */
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
	int hwm, lwm, divisor;
	int ipg;
	unsigned int thres, v, reg;
	adapter_t *adap = mac->adapter;

	/*
	 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't.  The HW max
	 * packet size register includes header, but not FCS.
	 */
	mtu += 14;
	if (mac->multiport)
		mtu += 8;                              /* for preamble */
	if (mtu > MAX_FRAME_SIZE - 4)
		return -EINVAL;
	if (mac->multiport)
		return t3_vsc7323_set_mtu(adap, mtu - 4, mac->ext_port);

	/*
	 * On T3B2+ with the receiver enabled, the Rx path must be quiesced
	 * before the max packet size can change: disarm the exact-match
	 * filters and disable hash-multicast/copy-all/broadcast reception,
	 * wait for the Rx FIFO to empty, reprogram, then restore.
	 */
	if (adap->params.rev >= T3_REV_B2 &&
	    (t3_read_reg(adap, A_XGM_RX_CTRL + mac->offset) & F_RXEN)) {
		t3_mac_disable_exact_filters(mac);
		v = t3_read_reg(adap, A_XGM_RX_CFG + mac->offset);
		t3_set_reg_field(adap, A_XGM_RX_CFG + mac->offset,
				 F_ENHASHMCAST | F_COPYALLFRAMES, F_DISBCAST);

		/* B2 exposes the FIFO-empty bit in a different register. */
		reg = adap->params.rev == T3_REV_B2 ?
			A_XGM_RX_MAX_PKT_SIZE_ERR_CNT : A_XGM_RXFIFO_CFG;

		/* drain RX FIFO */
		if (t3_wait_op_done(adap, reg + mac->offset,
				    F_RXFIFO_EMPTY, 1, 20, 5)) {
			t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
			t3_mac_enable_exact_filters(mac);
			return -EIO;
		}
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
		t3_write_reg(adap, A_XGM_RX_CFG + mac->offset, v);
		t3_mac_enable_exact_filters(mac);
	} else
		t3_set_reg_field(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset,
				 V_RXMAXPKTSIZE(M_RXMAXPKTSIZE),
				 V_RXMAXPKTSIZE(mtu));
	/*
	 * Adjust the PAUSE frame watermarks.  We always set the LWM, and
	 * the HWM only if flow-control is enabled.
	 */
	hwm = rx_fifo_hwm(mtu);
	lwm = min(3 * (int) mtu, MAC_RXFIFO_SIZE /4);
	v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
	v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
	v |= V_RXFIFOPAUSELWM(lwm / 8);
	if (G_RXFIFOPAUSEHWM(v))
		v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
		    V_RXFIFOPAUSEHWM(hwm / 8);

	t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);

	/* Adjust the TX FIFO threshold based on the MTU */
	thres = (adap->params.vpd.cclk * 1000) / 15625;
	thres = (thres * mtu) / 1000;
	if (is_10G(adap))
		thres /= 10;
	thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
	thres = max(thres, 8U);                /* need at least 8 */
	ipg = (adap->params.rev == T3_REV_C) ? 0 : 1;
	t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
			 V_TXFIFOTHRESH(M_TXFIFOTHRESH) | V_TXIPG(M_TXIPG),
			 V_TXFIFOTHRESH(thres) | V_TXIPG(ipg));

	/* Assuming a minimum drain rate of 2.5Gbps...
	 */
	if (adap->params.rev > 0) {
		divisor = (adap->params.rev == T3_REV_C) ? 64 : 8;
		t3_write_reg(adap, A_XGM_PAUSE_TIMER + mac->offset,
			     (hwm - lwm) * 4 / divisor);
	}
	t3_write_reg(adap, A_XGM_TX_PAUSE_QUANTA + mac->offset,
		     MAC_RXFIFO_SIZE * 4 * 8 / 512);
	return 0;
}
489
490 /**
491 * t3_mac_set_speed_duplex_fc - set MAC speed, duplex and flow control
492 * @mac: the MAC to configure
493 * @speed: the desired speed (10/100/1000/10000)
494 * @duplex: the desired duplex
495 * @fc: desired Tx/Rx PAUSE configuration
496 *
497 * Set the MAC speed, duplex (actually only full-duplex is supported), and
498 * flow control. If a parameter value is negative the corresponding
499 * MAC setting is left at its current value.
500 */
501 int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
502 {
503 u32 val;
504 adapter_t *adap = mac->adapter;
505 unsigned int oft = mac->offset;
506
507 if (duplex >= 0 && duplex != DUPLEX_FULL)
508 return -EINVAL;
509 if (mac->multiport) {
510 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
511 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
512 val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
513 A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
514 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
515
516 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
517 F_TXPAUSEEN);
518 return t3_vsc7323_set_speed_fc(adap, speed, fc, mac->ext_port);
519 }
520 if (speed >= 0) {
521 if (speed == SPEED_10)
522 val = V_PORTSPEED(0);
523 else if (speed == SPEED_100)
524 val = V_PORTSPEED(1);
525 else if (speed == SPEED_1000)
526 val = V_PORTSPEED(2);
527 else if (speed == SPEED_10000)
528 val = V_PORTSPEED(3);
529 else
530 return -EINVAL;
531
532 t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
533 V_PORTSPEED(M_PORTSPEED), val);
534 }
535
536 val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
537 val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
538 if (fc & PAUSE_TX)
539 val |= V_RXFIFOPAUSEHWM(rx_fifo_hwm(t3_read_reg(adap,
540 A_XGM_RX_MAX_PKT_SIZE + oft)) / 8);
541 t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
542
543 t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
544 (fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
545 return 0;
546 }
547
/**
 *	t3_mac_enable - enable the MAC in the given directions
 *	@mac: the MAC to configure
 *	@which: bitmap indicating which directions to enable
 *
 *	Enables the MAC for operation in the given directions.
 *	%MAC_DIRECTION_TX enables the Tx direction, and %MAC_DIRECTION_RX
 *	enables the Rx one.  Enabling Tx also snapshots the Tx/Rx progress
 *	counters used by t3b2_mac_watchdog_task() to detect a stuck Tx path.
 */
int t3_mac_enable(struct cmac *mac, int which)
{
	int idx = macidx(mac);
	adapter_t *adap = mac->adapter;
	unsigned int oft = mac->offset;
	struct mac_stats *s = &mac->stats;

	if (mac->multiport)
		return t3_vsc7323_enable(adap, mac->ext_port, which);

	if (which & MAC_DIRECTION_TX) {
		/* Program the TP Tx drop configuration and drop mode for
		 * this channel through the indirect TP_PIO interface
		 * (rev C uses different values). */
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
		t3_write_reg(adap, A_TP_PIO_DATA,
			     adap->params.rev == T3_REV_C ?
			     0xc4ffff01 : 0xc0ede401);
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
		t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx,
				 adap->params.rev == T3_REV_C ?
				 0 : 1 << idx);

		t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);

		/* Snapshot the counters the watchdog compares against. */
		t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CNT_CH0 + idx);
		mac->tx_mcnt = s->tx_frames;
		mac->tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
							A_TP_PIO_DATA)));
		mac->tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_mcnt = s->rx_frames;
		mac->rx_pause = s->rx_pause;
		mac->rx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_RX_SPI4_SOP_EOP_CNT +
						oft)));
		mac->rx_ocnt = s->rx_fifo_ovfl;
		mac->txen = F_TXEN;
		mac->toggle_cnt = 0;
	}
	if (which & MAC_DIRECTION_RX)
		t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
	return 0;
}
599
/**
 *	t3_mac_disable - disable the MAC in the given directions
 *	@mac: the MAC to configure
 *	@which: bitmap indicating which directions to disable
 *
 *	Disables the MAC in the given directions.
 *	%MAC_DIRECTION_TX disables the Tx direction, and %MAC_DIRECTION_RX
 *	disables the Rx one.
 */
int t3_mac_disable(struct cmac *mac, int which)
{
	adapter_t *adap = mac->adapter;

	if (mac->multiport)
		return t3_vsc7323_disable(adap, mac->ext_port, which);

	if (which & MAC_DIRECTION_TX) {
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		mac->txen = 0;	/* tell the watchdog Tx is intentionally off */
	}
	if (which & MAC_DIRECTION_RX) {
		int val = F_MAC_RESET_;

		/* Put the PCS into reset and give in-flight frames time to
		 * drain before stopping the receiver. */
		t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
				 F_PCS_RESET_, 0);
		msleep(100);
		t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
		/* Release the sub-blocks appropriate for this board. */
		if (is_10G(adap))
			val |= F_PCS_RESET_;
		else if (uses_xaui(adap))
			val |= F_PCS_RESET_ | F_XG2G_RESET_;
		else
			val |= F_RGMII_RESET_ | F_XG2G_RESET_;
		t3_write_reg(mac->adapter, A_XGM_RESET_CTRL + mac->offset, val);
	}
	return 0;
}
637
/*
 * Periodic watchdog for T3B2 MACs.  Detects a stuck Tx path by comparing
 * the frame/drop counters against the snapshot taken on the previous
 * invocation.  Returns:
 *   0 - Tx is making progress (or PAUSE state changed); nothing done
 *   1 - Tx appears stuck; the Tx MAC was toggled off/on
 *   2 - toggling did not help after several tries; a full T3B2 MAC reset
 *       was performed
 */
int t3b2_mac_watchdog_task(struct cmac *mac)
{
	int status;
	unsigned int tx_tcnt, tx_xcnt;
	adapter_t *adap = mac->adapter;
	struct mac_stats *s = &mac->stats;
	u64 tx_mcnt = s->tx_frames;

	if (mac->multiport)
		tx_mcnt = t3_read_reg(adap, A_XGM_STAT_TX_FRAME_LOW);

	status = 0;
	tx_xcnt = 1; /* By default tx_xcnt is making progress */
	tx_tcnt = mac->tx_tcnt; /* If tx_mcnt is progressing ignore tx_tcnt */
	if (tx_mcnt == mac->tx_mcnt && mac->rx_pause == s->rx_pause) {
		/* No frames completed and PAUSE state unchanged: check
		 * whether the XGMAC itself moved any data. */
		tx_xcnt = (G_TXSPI4SOPCNT(t3_read_reg(adap,
						A_XGM_TX_SPI4_SOP_EOP_CNT +
						mac->offset)));
		if (tx_xcnt == 0) {
			/* Read the TP drop counter via indirect PIO. */
			t3_write_reg(adap, A_TP_PIO_ADDR,
				     A_TP_TX_DROP_CNT_CH0 + macidx(mac));
			tx_tcnt = (G_TXDROPCNTCH0RCVD(t3_read_reg(adap,
						      A_TP_PIO_DATA)));
		} else
			goto out;

	} else {
		mac->toggle_cnt = 0;
		goto out;
	}

	if ((tx_tcnt != mac->tx_tcnt) && (mac->tx_xcnt == 0)) {
		/* TP is dropping but the MAC isn't transmitting: stuck. */
		if (mac->toggle_cnt > 4) {
			status = 2;	/* repeated toggles failed: escalate */
			goto out;
		} else {
			status = 1;	/* try toggling Tx first */
			goto out;
		}
	} else {
		mac->toggle_cnt = 0;
		goto out;
	}

out:
	/* Save this run's counters for the next comparison. */
	mac->tx_tcnt = tx_tcnt;
	mac->tx_xcnt = tx_xcnt;
	mac->tx_mcnt = s->tx_frames;
	mac->rx_pause = s->rx_pause;
	if (status == 1) {
		/* Toggle the transmitter off and back on. */
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, mac->txen);
		t3_read_reg(adap, A_XGM_TX_CTRL + mac->offset);  /* flush */
		mac->toggle_cnt++;
	} else if (status == 2) {
		t3b2_mac_reset(mac);
		mac->toggle_cnt = 0;
	}
	return status;
}
699
/**
 *	t3_mac_update_stats - accumulate MAC statistics
 *	@mac: the MAC handle
 *
 *	This function is called periodically to accumulate the current
 *	values of the RMON counters into the port statistics.  Since the
 *	packet counters are only 32 bits they can overflow in ~286 secs at
 *	10G, so the function should be called more frequently than that.
 *	The byte counters are 45-bit wide, they would overflow in ~7.8
 *	hours.  Returns a pointer to the accumulated statistics.
 */
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
/* Read one RMON counter register of this MAC. */
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
/* Accumulate a 32-bit clear-on-read counter into the named stat. */
#define RMON_UPDATE(mac, name, reg) \
	(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
/* Accumulate a 64-bit counter held in a LOW/HIGH register pair. */
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
	(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
			     ((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)

	u32 v, lo;

	if (mac->multiport)
		return t3_vsc7323_update_stats(mac);

	RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
	RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
	RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
	RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
	RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
	RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
	RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
	RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
	RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);

	RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);

	/* Oversize frames are also counted in the max-packet-size error
	 * counter; on B2 bit 31 is the Rx-FIFO-empty flag, so mask it. */
	v = RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
	if (mac->adapter->params.rev == T3_REV_B2)
		v &= 0x7fffffff;
	mac->stats.rx_too_long += v;

	RMON_UPDATE(mac, rx_frames_64,        RX_64B_FRAMES);
	RMON_UPDATE(mac, rx_frames_65_127,    RX_65_127B_FRAMES);
	RMON_UPDATE(mac, rx_frames_128_255,   RX_128_255B_FRAMES);
	RMON_UPDATE(mac, rx_frames_256_511,   RX_256_511B_FRAMES);
	RMON_UPDATE(mac, rx_frames_512_1023,  RX_512_1023B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, rx_frames_1519_max,  RX_1519_MAXB_FRAMES);

	RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
	RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
	RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
	RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
	RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc). */
	RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);

	RMON_UPDATE(mac, tx_frames_64,        TX_64B_FRAMES);
	RMON_UPDATE(mac, tx_frames_65_127,    TX_65_127B_FRAMES);
	RMON_UPDATE(mac, tx_frames_128_255,   TX_128_255B_FRAMES);
	RMON_UPDATE(mac, tx_frames_256_511,   TX_256_511B_FRAMES);
	RMON_UPDATE(mac, tx_frames_512_1023,  TX_512_1023B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
	RMON_UPDATE(mac, tx_frames_1519_max,  TX_1519_MAXB_FRAMES);

	/* The next stat isn't clear-on-read: read the free-running TP MIB
	 * counter (index 50 for MAC 0, 51 for MAC 1) and add the delta
	 * since the last read. */
	t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
	v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
	lo = (u32)mac->stats.rx_cong_drops;
	mac->stats.rx_cong_drops += (u64)(v - lo);

	return &mac->stats;
}