FreeBSD/Linux Kernel Cross Reference
sys/dev/patm/if_patm.c
1 /*-
2 * Copyright (c) 2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * Author: Hartmut Brandt <harti@freebsd.org>
28 *
29 * Driver for IDT77252 based cards like ProSum's.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/8.3/sys/dev/patm/if_patm.c 148887 2005-08-09 10:20:02Z rwatson $");
34
35 #include "opt_inet.h"
36 #include "opt_natm.h"
37
38 #include <sys/types.h>
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/malloc.h>
42 #include <sys/kernel.h>
43 #include <sys/bus.h>
44 #include <sys/errno.h>
45 #include <sys/conf.h>
46 #include <sys/module.h>
47 #include <sys/lock.h>
48 #include <sys/mutex.h>
49 #include <sys/sysctl.h>
50 #include <sys/queue.h>
51 #include <sys/condvar.h>
52 #include <sys/endian.h>
53 #include <vm/uma.h>
54
55 #include <sys/sockio.h>
56 #include <sys/mbuf.h>
57 #include <sys/socket.h>
58
59 #include <net/if.h>
60 #include <net/if_media.h>
61 #include <net/if_atm.h>
62 #include <net/route.h>
63 #include <netinet/in.h>
64 #include <netinet/if_atm.h>
65
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70 #include <sys/mbpool.h>
71
72 #include <dev/utopia/utopia.h>
73 #include <dev/patm/idt77252reg.h>
74 #include <dev/patm/if_patmvar.h>
75
76 static void patm_tst_init(struct patm_softc *sc);
77 static void patm_scd_init(struct patm_softc *sc);
78
/*
 * Start the card. This assumes the mutex to be held.
 *
 * Clears the on-board SRAM, programs the chip registers (configuration,
 * receive/transmit status queues, TST, RxFIFO, raw cell handle, ABR
 * state, rate tables and free buffer queues), creates the SCD for the
 * UBR0 channel, enables interrupts and both data paths and finally
 * reloads all still-open VCCs.  On failure to allocate the UBR0 SCD
 * the card is reset and left non-running.
 */
void
patm_initialize(struct patm_softc *sc)
{
	uint32_t cfg;		/* value accumulated for IDT_NOR_CFG */
	u_int i;

	patm_debug(sc, ATTACH, "configuring...");

	/*
	 * clear SRAM -- presumably sc->mmap->sram is the size in 1K word
	 * units (TODO confirm against if_patmvar.h); 4 words per write
	 */
	for (i = 0; i < sc->mmap->sram * 1024; i += 4)
		patm_sram_write4(sc, i, 0, 0, 0, 0);
	patm_scd_init(sc);

	/* configuration register. Setting NOIDLE makes the timing wrong! */
	cfg = IDT_CFG_TXFIFO9 | IDT_CFG_RXQ512 | PATM_CFG_VPI |
	    /* IDT_CFG_NOIDLE | */ sc->mmap->rxtab;
	if (!(sc->flags & PATM_UNASS))
		cfg |= IDT_CFG_IDLECLP;
	patm_nor_write(sc, IDT_NOR_CFG, cfg);

	/* clean all the status queues and the Raw handle */
	memset(sc->tsq, 0, sc->sq_size);

	/* initialize RSQ: base and tail at the start, head cleared */
	patm_debug(sc, ATTACH, "RSQ %llx", (unsigned long long)sc->rsq_phy);
	patm_nor_write(sc, IDT_NOR_RSQB, sc->rsq_phy);
	patm_nor_write(sc, IDT_NOR_RSQT, sc->rsq_phy);
	patm_nor_write(sc, IDT_NOR_RSQH, 0);
	sc->rsq_last = PATM_RSQ_SIZE - 1;

	/* initialize TSTB (word address, hence the << 2 byte conversion) */
	patm_nor_write(sc, IDT_NOR_TSTB, sc->mmap->tst1base << 2);
	patm_tst_init(sc);

	/* initialize TSQ: mark every entry empty before enabling Tx */
	for (i = 0; i < IDT_TSQ_SIZE; i++)
		sc->tsq[i].stamp = htole32(IDT_TSQE_EMPTY);
	patm_nor_write(sc, IDT_NOR_TSQB, sc->tsq_phy);
	patm_nor_write(sc, IDT_NOR_TSQH, 0);
	patm_nor_write(sc, IDT_NOR_TSQT, 0);
	sc->tsq_next = sc->tsq;

	/* GP -- big-endian mode deliberately disabled via "&& 0" */
#if BYTE_ORDER == BIG_ENDIAN && 0
	patm_nor_write(sc, IDT_NOR_GP, IDT_GP_BIGE);
#else
	patm_nor_write(sc, IDT_NOR_GP, 0);
#endif

	/* VPM */
	patm_nor_write(sc, IDT_NOR_VPM, 0);

	/* RxFIFO */
	patm_nor_write(sc, IDT_NOR_RXFD,
	    IDT_RXFD(sc->mmap->rxfifo_addr, sc->mmap->rxfifo_code));
	patm_nor_write(sc, IDT_NOR_RXFT, 0);
	patm_nor_write(sc, IDT_NOR_RXFH, 0);

	/* RAWHND */
	patm_debug(sc, ATTACH, "RWH %llx",
	    (unsigned long long)sc->rawhnd_phy);
	patm_nor_write(sc, IDT_NOR_RAWHND, sc->rawhnd_phy);

	/* ABRSTD: program the descriptor and zero the ABR state table */
	patm_nor_write(sc, IDT_NOR_ABRSTD,
	    IDT_ABRSTD(sc->mmap->abrstd_addr, sc->mmap->abrstd_code));
	for (i = 0; i < sc->mmap->abrstd_size; i++)
		patm_sram_write(sc, sc->mmap->abrstd_addr + i, 0);
	patm_nor_write(sc, IDT_NOR_ABRRQ, 0);
	patm_nor_write(sc, IDT_NOR_VBRRQ, 0);

	/* rate tables -- one set for 25.6 MBit PHYs, one for 155 MBit */
	if (sc->flags & PATM_25M) {
		for (i = 0; i < patm_rtables_size; i++)
			patm_sram_write(sc, sc->mmap->rtables + i,
			    patm_rtables25[i]);
	} else {
		for (i = 0; i < patm_rtables_size; i++)
			patm_sram_write(sc, sc->mmap->rtables + i,
			    patm_rtables155[i]);
	}
	patm_nor_write(sc, IDT_NOR_RTBL, sc->mmap->rtables << 2);

	/* Maximum deficit */
	patm_nor_write(sc, IDT_NOR_MXDFCT, 32 | IDT_MDFCT_LCI | IDT_MDFCT_LNI);

	/* Free buffer queues: reset all read/write pointers ... */
	patm_nor_write(sc, IDT_NOR_FBQP0, 0);
	patm_nor_write(sc, IDT_NOR_FBQP1, 0);
	patm_nor_write(sc, IDT_NOR_FBQP2, 0);
	patm_nor_write(sc, IDT_NOR_FBQP3, 0);

	patm_nor_write(sc, IDT_NOR_FBQWP0, 0);
	patm_nor_write(sc, IDT_NOR_FBQWP1, 0);
	patm_nor_write(sc, IDT_NOR_FBQWP2, 0);
	patm_nor_write(sc, IDT_NOR_FBQWP3, 0);

	/*
	 * ... then program buffer sizes and interrupt thresholds for the
	 * small, large and AAL0 ("v") buffer queues; queue 3 is unused.
	 */
	patm_nor_write(sc, IDT_NOR_FBQS0,
	    (SMBUF_THRESHOLD << 28) |
	    (SMBUF_NI_THRESH << 24) |
	    (SMBUF_CI_THRESH << 20) |
	    SMBUF_CELLS);
	patm_nor_write(sc, IDT_NOR_FBQS1,
	    (LMBUF_THRESHOLD << 28) |
	    (LMBUF_NI_THRESH << 24) |
	    (LMBUF_CI_THRESH << 20) |
	    LMBUF_CELLS);
	patm_nor_write(sc, IDT_NOR_FBQS2,
	    (VMBUF_THRESHOLD << 28) | VMBUF_CELLS);
	patm_nor_write(sc, IDT_NOR_FBQS3, 0);

	/* make SCD0 for UBR0 */
	if ((sc->scd0 = patm_scd_alloc(sc)) == NULL) {
		patm_printf(sc, "cannot create UBR0 SCD\n");
		patm_reset(sc);
		return;
	}
	sc->scd0->q.ifq_maxlen = PATM_DLFT_MAXQ;

	patm_scd_setup(sc, sc->scd0);
	patm_tct_setup(sc, sc->scd0, NULL);

	patm_debug(sc, ATTACH, "go...");

	/* carrier is now reported via interrupts -- stop polling it */
	sc->utopia.flags &= ~UTP_FL_POLL_CARRIER;
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;

	/* enable interrupts, Tx and Rx paths */
	cfg |= IDT_CFG_RXPTH | IDT_CFG_RXIIMM | IDT_CFG_RAWIE | IDT_CFG_RQFIE |
	    IDT_CFG_TIMOIE | IDT_CFG_FBIE | IDT_CFG_TXENB | IDT_CFG_TXINT |
	    IDT_CFG_TXUIE | IDT_CFG_TXSFI | IDT_CFG_PHYIE;
	patm_nor_write(sc, IDT_NOR_CFG, cfg);

	/* reload all VCCs that survived a previous patm_stop() */
	for (i = 0; i < sc->mmap->max_conn; i++)
		if (sc->vccs[i] != NULL)
			patm_load_vc(sc, sc->vccs[i], 1);

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}
222
223 /*
224 * External callable start function
225 */
226 void
227 patm_init(void *p)
228 {
229 struct patm_softc *sc = p;
230
231 mtx_lock(&sc->mtx);
232 patm_stop(sc);
233 patm_initialize(sc);
234 mtx_unlock(&sc->mtx);
235 }
236
/*
 * Stop the interface
 *
 * Resets the chip, stops the TST timeout machinery, frees all receive
 * buffers that are on the card, flushes partial receive chains, frees
 * or resets per-VCC state and finally tears down all active SCDs
 * together with their queued and in-flight transmit mbufs.
 */
void
patm_stop(struct patm_softc *sc)
{
	u_int i;
	struct mbuf *m;
	struct patm_txmap *map;
	struct patm_scd *scd;

	/* mark down first so concurrent paths see it before the reset */
	sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
	sc->utopia.flags |= UTP_FL_POLL_CARRIER;

	patm_reset(sc);

	/* stop the TST rotation; remember whether it was mid-transition */
	mtx_lock(&sc->tst_lock);
	i = sc->tst_state;
	sc->tst_state = 0;
	callout_stop(&sc->tst_callout);
	mtx_unlock(&sc->tst_lock);

	if (i != 0) {
		/* this means we are just entering or leaving the timeout.
		 * wait a little bit. Doing this correctly would be more
		 * involved */
		DELAY(1000);
	}

	/*
	 * Give any waiters on closing a VCC a chance. They will stop
	 * to wait if they see that IFF_DRV_RUNNING disappeared.
	 */
	cv_broadcast(&sc->vcc_cv);

	/* free large buffers */
	patm_debug(sc, ATTACH, "freeing large buffers...");
	for (i = 0; i < sc->lbuf_max; i++)
		if (sc->lbufs[i].m != NULL)
			patm_lbuf_free(sc, &sc->lbufs[i]);

	/* free small buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing small buffers...");
	mbp_card_free(sc->sbuf_pool);

	/* free aal0 buffers that are on the card */
	patm_debug(sc, ATTACH, "freeing aal0 buffers...");
	mbp_card_free(sc->vbuf_pool);

	/* freeing partial receive chains and reset vcc state */
	for (i = 0; i < sc->mmap->max_conn; i++) {
		if (sc->vccs[i] != NULL) {
			if (sc->vccs[i]->chain != NULL) {
				m_freem(sc->vccs[i]->chain);
				sc->vccs[i]->chain = NULL;
				sc->vccs[i]->last = NULL;
			}

			/*
			 * A VCC with a close in progress can be freed now
			 * (its closer was woken above); everything else is
			 * kept, but marked as no longer open on the card.
			 */
			if (sc->vccs[i]->vflags & (PATM_VCC_RX_CLOSING |
			    PATM_VCC_TX_CLOSING)) {
				uma_zfree(sc->vcc_zone, sc->vccs[i]);
				sc->vccs[i] = NULL;
			} else {
				/* keep */
				sc->vccs[i]->vflags &= ~PATM_VCC_OPEN;
				sc->vccs[i]->cps = 0;
				sc->vccs[i]->scd = NULL;
			}
		}
	}

	/* stop all active SCDs */
	while ((scd = LIST_FIRST(&sc->scd_list)) != NULL) {
		/* free queue packets */
		for (;;) {
			_IF_DEQUEUE(&scd->q, m);
			if (m == NULL)
				break;
			m_freem(m);
		}

		/* free transmitting packets and recycle their DMA maps */
		for (i = 0; i < IDT_TSQE_TAG_SPACE; i++) {
			if ((m = scd->on_card[i]) != NULL) {
				scd->on_card[i] = 0;
				/* the Tx map was stashed in the pkthdr */
				map = m->m_pkthdr.header;

				bus_dmamap_unload(sc->tx_tag, map->map);
				SLIST_INSERT_HEAD(&sc->tx_maps_free, map, link);
				m_freem(m);
			}
		}
		patm_scd_free(sc, scd);
	}
	sc->scd0 = NULL;

	sc->flags &= ~PATM_CLR;

	/* reset raw cell queue */
	sc->rawh = NULL;

	ATMEV_SEND_IFSTATE_CHANGED(IFP2IFATM(sc->ifp),
	    sc->utopia.carrier == UTP_CARR_OK);
}
341
/*
 * Stop the card and reset it
 *
 * Pulses the SAR software-reset bit, clears the status queue head
 * registers and resets the PHY via the general purpose register.  The
 * DELAY()s give the hardware time to settle; the final GP write leaves
 * the EEPROM data-out and chip-select lines high (their idle state).
 * The register order and delays are part of the reset protocol -- do
 * not reorder.
 */
void
patm_reset(struct patm_softc *sc)
{

	patm_debug(sc, ATTACH, "resetting...");

	/* software reset pulse on the configuration register */
	patm_nor_write(sc, IDT_NOR_CFG, IDT_CFG_SWRST);
	DELAY(200);
	patm_nor_write(sc, IDT_NOR_CFG, 0);
	DELAY(200);

	/* clear the status queue heads */
	patm_nor_write(sc, IDT_NOR_RSQH, 0);
	patm_nor_write(sc, IDT_NOR_TSQH, 0);

	/* reset the PHY, then park the EEPROM lines */
	patm_nor_write(sc, IDT_NOR_GP, IDT_GP_PHY_RST);
	DELAY(50);
	patm_nor_write(sc, IDT_NOR_GP, IDT_GP_EEDO | IDT_GP_EECS);
	DELAY(50);
}
364
/*
 * Initialize the soft TST to contain only ABR scheduling and
 * write it to SRAM
 *
 * Two TST copies live back-to-back in SRAM: the active table at
 * tst1base and an "idle" table directly behind it.  Each table has
 * tst_size - 1 data slots (all initialized to the VBR/ABR entry) and a
 * final branch instruction jumping back to the table's own start, so
 * the scheduler loops over one table until it is switched.  The soft
 * copy plus the jump/base bookkeeping in the softc mirror this layout.
 */
static void
patm_tst_init(struct patm_softc *sc)
{
	u_int i;
	u_int base, idle;	/* SRAM start of active and idle table */

	base = sc->mmap->tst1base;
	idle = sc->mmap->tst1base + sc->mmap->tst_size;

	/* soft copy: every data slot starts out as VBR (no CBR booked) */
	for (i = 0; i < sc->mmap->tst_size - 1; i++)
		sc->tst_soft[i] = IDT_TST_VBR;

	sc->tst_state = 0;
	/* addresses of the trailing branch word of each table ... */
	sc->tst_jump[0] = base + sc->mmap->tst_size - 1;
	sc->tst_jump[1] = idle + sc->mmap->tst_size - 1;
	/* ... and of each table's first slot */
	sc->tst_base[0] = base;
	sc->tst_base[1] = idle;

	/* TST1: data slots followed by a branch back to its start */
	for (i = 0; i < sc->mmap->tst_size - 1; i++)
		patm_sram_write(sc, base + i, IDT_TST_VBR);
	patm_sram_write(sc, sc->tst_jump[0], IDT_TST_BR | (base << 2));

	/* TST2: identical layout for the idle table */
	for (i = 0; i < sc->mmap->tst_size - 1; i++)
		patm_sram_write(sc, idle + i, IDT_TST_VBR);
	patm_sram_write(sc, sc->tst_jump[1], IDT_TST_BR | (idle << 2));

	/*
	 * All data slots are free; tst_reserve is PATM_TST_RESERVE
	 * percent of them.  Remaining bandwidth starts at the PHY's PCR.
	 */
	sc->tst_free = sc->mmap->tst_size - 1;
	sc->tst_reserve = sc->tst_free * PATM_TST_RESERVE / 100;
	sc->bwrem = IFP2IFATM(sc->ifp)->mib.pcr;
}
402
403 /*
404 * Initialize the SCDs. This is done by building a list of all free
405 * SCDs in SRAM. The first word of each potential SCD is used as a
406 * link to the next free SCD. The list is rooted in softc.
407 */
408 static void
409 patm_scd_init(struct patm_softc *sc)
410 {
411 u_int s; /* SRAM address of current SCD */
412
413 sc->scd_free = 0;
414 for (s = sc->mmap->scd_base; s + 12 <= sc->mmap->tst1base; s += 12) {
415 patm_sram_write(sc, s, sc->scd_free);
416 sc->scd_free = s;
417 }
418 }
419
/*
 * allocate an SCQ
 *
 * Pops an SCD from the SRAM free list (built by patm_scd_init()),
 * allocates DMA-able host memory holding both the SCQ and the driver's
 * per-SCD state, and links the new SCD into the active list.  Returns
 * NULL when no free SCD is left or when the DMA allocation/load fails;
 * in the failure cases the SRAM SCD remains on the free list.
 */
struct patm_scd *
patm_scd_alloc(struct patm_softc *sc)
{
	u_int sram, next;	/* SRAM address of this and next SCD */
	int error;
	void *p;
	struct patm_scd *scd;
	bus_dmamap_t map;
	bus_addr_t phy;

	/* get an SCD from the free list (0 is the list terminator) */
	if ((sram = sc->scd_free) == 0)
		return (NULL);
	next = patm_sram_read(sc, sram);

	/* allocate memory for the queue and our host stuff */
	error = bus_dmamem_alloc(sc->scd_tag, &p, BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (NULL);
	/*
	 * phy carries the alignment mask into patm_load_callback() and
	 * comes back holding the physical address.
	 * NOTE(review): the load demands 1k alignment (0x3ff) but the
	 * assertion below only checks 512 bytes (0x1ff) -- looks
	 * intentional but worth confirming against the chip docs.
	 */
	phy = 0x3ff;
	error = bus_dmamap_load(sc->scd_tag, map, p, sizeof(scd->scq),
	    patm_load_callback, &phy, BUS_DMA_NOWAIT);
	if (error != 0) {
		/* undo the allocation; the SRAM SCD was not taken yet */
		bus_dmamem_free(sc->scd_tag, p, map);
		return (NULL);
	}
	KASSERT((phy & 0x1ff) == 0, ("SCD not aligned %lx", (u_long)phy));

	scd = p;
	bzero(scd, sizeof(*scd));

	scd->sram = sram;	/* SRAM address of the hardware SCD */
	scd->phy = phy;		/* physical address of the SCQ */
	scd->map = map;
	scd->space = IDT_SCQ_SIZE;
	scd->last_tag = IDT_TSQE_TAG_SPACE - 1;
	scd->q.ifq_maxlen = PATM_TX_IFQLEN;

	/* remove the scd from the free list */
	sc->scd_free = next;
	LIST_INSERT_HEAD(&sc->scd_list, scd, link);

	return (scd);
}
467
468 /*
469 * Free an SCD
470 */
471 void
472 patm_scd_free(struct patm_softc *sc, struct patm_scd *scd)
473 {
474
475 LIST_REMOVE(scd, link);
476
477 /* clear SCD and insert link word */
478 patm_sram_write4(sc, scd->sram, sc->scd_free, 0, 0, 0);
479 patm_sram_write4(sc, scd->sram, 0, 0, 0, 0);
480 patm_sram_write4(sc, scd->sram, 0, 0, 0, 0);
481
482 /* put on free list */
483 sc->scd_free = scd->sram;
484
485 /* free memory */
486 bus_dmamap_unload(sc->scd_tag, scd->map);
487 bus_dmamem_free(sc->scd_tag, scd, scd->map);
488 }
489
490 /*
491 * DMA loading helper function. This function handles the loading of
492 * all one segment DMA maps. The argument is a pointer to a bus_addr_t
493 * which must contain the desired alignment of the address as a bitmap.
494 */
495 void
496 patm_load_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
497 {
498 bus_addr_t *phy = arg;
499
500 if (error)
501 return;
502
503 KASSERT(nsegs == 1,
504 ("too many segments for DMA: %d", nsegs));
505 KASSERT(segs[0].ds_addr <= 0xffffffffUL,
506 ("phys addr too large %lx", (u_long)segs[0].ds_addr));
507 KASSERT((segs[0].ds_addr & *phy) == 0,
508 ("bad alignment %lx:%lx", (u_long)segs[0].ds_addr, (u_long)*phy));
509
510 *phy = segs[0].ds_addr;
511 }
Cache object: 6250b7edbabe56c1f560253fd15a16e6
|