// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2018-2019 Realtek Corporation
 */

#if defined(__FreeBSD__)
#define LINUXKPI_PARAM_PREFIX rtw88_pci_
#endif

#include <linux/module.h>
#include <linux/pci.h>
#include "main.h"
#include "pci.h"
#include "reg.h"
#include "tx.h"
#include "rx.h"
#include "fw.h"
#include "ps.h"
#include "debug.h"
#if defined(__FreeBSD__)
#include <linux/pm.h>
#endif

static bool rtw_disable_msi;
static bool rtw_pci_disable_aspm;
module_param_named(disable_msi, rtw_disable_msi, bool, 0644);
module_param_named(disable_aspm, rtw_pci_disable_aspm, bool, 0644);
MODULE_PARM_DESC(disable_msi, "Set Y to disable MSI interrupt support");
MODULE_PARM_DESC(disable_aspm, "Set Y to disable PCI ASPM support");
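/* Example usage (assumed names; the exact module/tunable name depends on the
 * build): on Linux these are module parameters, e.g.
 *   modprobe rtw88_pci disable_msi=Y disable_aspm=Y
 * while on FreeBSD the LINUXKPI_PARAM_PREFIX above exposes them as tunables
 * along the lines of compat.linuxkpi.rtw88_pci_disable_msi=1.
 */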

static u32 rtw_pci_tx_queue_idx_addr[] = {
	[RTW_TX_QUEUE_BK] = RTK_PCI_TXBD_IDX_BKQ,
	[RTW_TX_QUEUE_BE] = RTK_PCI_TXBD_IDX_BEQ,
	[RTW_TX_QUEUE_VI] = RTK_PCI_TXBD_IDX_VIQ,
	[RTW_TX_QUEUE_VO] = RTK_PCI_TXBD_IDX_VOQ,
	[RTW_TX_QUEUE_MGMT] = RTK_PCI_TXBD_IDX_MGMTQ,
	[RTW_TX_QUEUE_HI0] = RTK_PCI_TXBD_IDX_HI0Q,
	[RTW_TX_QUEUE_H2C] = RTK_PCI_TXBD_IDX_H2CQ,
};

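/* Map a TX ring to the queue-selection value carried in the TX descriptor;
 * ordinary data queues simply reuse the skb priority (TID) as the selection.
 */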
static u8 rtw_pci_get_tx_qsel(struct sk_buff *skb, u8 queue)
{
	switch (queue) {
	case RTW_TX_QUEUE_BCN:
		return TX_DESC_QSEL_BEACON;
	case RTW_TX_QUEUE_H2C:
		return TX_DESC_QSEL_H2C;
	case RTW_TX_QUEUE_MGMT:
		return TX_DESC_QSEL_MGMT;
	case RTW_TX_QUEUE_HI0:
		return TX_DESC_QSEL_HIGH;
	default:
		return skb->priority;
	}
}

static u8 rtw_pci_read8(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readb(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u8 val;

	val = bus_read_1((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R08 (%#010x) -> %#04x\n", addr, val);
	return (val);
#endif
}

static u16 rtw_pci_read16(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readw(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u16 val;

	val = bus_read_2((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R16 (%#010x) -> %#06x\n", addr, val);
	return (val);
#endif
}

static u32 rtw_pci_read32(struct rtw_dev *rtwdev, u32 addr)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	return readl(rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	u32 val;

	val = bus_read_4((struct resource *)rtwpci->mmap, addr);
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "R32 (%#010x) -> %#010x\n", addr, val);
	return (val);
#endif
}

static void rtw_pci_write8(struct rtw_dev *rtwdev, u32 addr, u8 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writeb(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W08 (%#010x) <- %#04x\n", addr, val);
	return (bus_write_1((struct resource *)rtwpci->mmap, addr, val));
#endif
}

static void rtw_pci_write16(struct rtw_dev *rtwdev, u32 addr, u16 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writew(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W16 (%#010x) <- %#06x\n", addr, val);
	return (bus_write_2((struct resource *)rtwpci->mmap, addr, val));
#endif
}

static void rtw_pci_write32(struct rtw_dev *rtwdev, u32 addr, u32 val)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

#if defined(__linux__)
	writel(val, rtwpci->mmap + addr);
#elif defined(__FreeBSD__)
	rtw_dbg(rtwdev, RTW_DBG_IO_RW, "W32 (%#010x) <- %#010x\n", addr, val);
	return (bus_write_4((struct resource *)rtwpci->mmap, addr, val));
#endif
}

#if defined(__linux__) && 0
static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
{
	int offset = tx_ring->r.desc_size * idx;

	return tx_ring->r.head + offset;
}
#endif

static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb, *tmp;
	dma_addr_t dma;

	/* free every skb remaining in the tx list */
	skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
		__skb_unlink(skb, &tx_ring->queue);
		tx_data = rtw_pci_get_tx_data(skb);
		dma = tx_data->dma;

		dma_unmap_single(&pdev->dev, dma, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}
}

static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_tx_ring *tx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = tx_ring->r.head;
	u32 len = tx_ring->r.len;
	int ring_sz = len * tx_ring->r.desc_size;

	rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);

	/* free the ring itself */
	dma_free_coherent(&pdev->dev, ring_sz, head, tx_ring->r.dma);
	tx_ring->r.head = NULL;
}

static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
				      struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;
	int i;

	for (i = 0; i < rx_ring->r.len; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;

		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb(skb);
		rx_ring->buf[i] = NULL;
	}
}

static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
				 struct rtw_pci_rx_ring *rx_ring)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	u8 *head = rx_ring->r.head;
	int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;

	rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);

	dma_free_coherent(&pdev->dev, ring_sz, head, rx_ring->r.dma);
}

static void rtw_pci_free_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	int i;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	for (i = 0; i < RTK_MAX_RX_QUEUE_NUM; i++) {
		rx_ring = &rtwpci->rx_rings[i];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}
}

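/* Allocate one coherent DMA area holding 'len' buffer descriptors of
 * 'desc_size' bytes each and reset the ring's software read/write pointers.
 * 'len' must fit in the hardware's buffer descriptor index field.
 */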
static int rtw_pci_init_tx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_tx_ring *tx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	int ring_sz = desc_size * len;
	dma_addr_t dma;
	u8 *head;

	if (len > TRX_BD_IDX_MASK) {
		rtw_err(rtwdev, "len %d exceeds maximum TX entries\n", len);
		return -EINVAL;
	}

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate tx ring\n");
		return -ENOMEM;
	}

	skb_queue_head_init(&tx_ring->queue);
	tx_ring->r.head = head;
	tx_ring->r.dma = dma;
	tx_ring->r.len = len;
	tx_ring->r.desc_size = desc_size;
	tx_ring->r.wp = 0;
	tx_ring->r.rp = 0;

	return 0;
}

static int rtw_pci_reset_rx_desc(struct rtw_dev *rtwdev, struct sk_buff *skb,
				 struct rtw_pci_rx_ring *rx_ring,
				 u32 idx, u32 desc_sz)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	dma_addr_t dma;

	if (!skb)
		return -EINVAL;

	dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, dma))
		return -EBUSY;

	*((dma_addr_t *)skb->cb) = dma;
	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);

	return 0;
}

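/* Hand an RX buffer back to the device: sync the buffer for device access
 * and rewrite its buffer descriptor (size and DMA address) in place.
 */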
static void rtw_pci_sync_rx_desc_device(struct rtw_dev *rtwdev, dma_addr_t dma,
					struct rtw_pci_rx_ring *rx_ring,
					u32 idx, u32 desc_sz)
{
	struct device *dev = rtwdev->dev;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;

	dma_sync_single_for_device(dev, dma, buf_sz, DMA_FROM_DEVICE);

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	memset(buf_desc, 0, sizeof(*buf_desc));
	buf_desc->buf_size = cpu_to_le16(RTK_PCI_RX_BUF_SIZE);
	buf_desc->dma = cpu_to_le32(dma);
}

static int rtw_pci_init_rx_ring(struct rtw_dev *rtwdev,
				struct rtw_pci_rx_ring *rx_ring,
				u8 desc_size, u32 len)
{
	struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
	struct sk_buff *skb = NULL;
	dma_addr_t dma;
	u8 *head;
	int ring_sz = desc_size * len;
	int buf_sz = RTK_PCI_RX_BUF_SIZE;
	int i, allocated;
	int ret = 0;

	head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
	if (!head) {
		rtw_err(rtwdev, "failed to allocate rx ring\n");
		return -ENOMEM;
	}
	rx_ring->r.head = head;

	for (i = 0; i < len; i++) {
		skb = dev_alloc_skb(buf_sz);
		if (!skb) {
			allocated = i;
			ret = -ENOMEM;
			goto err_out;
		}

		memset(skb->data, 0, buf_sz);
		rx_ring->buf[i] = skb;
		ret = rtw_pci_reset_rx_desc(rtwdev, skb, rx_ring, i, desc_size);
		if (ret) {
			allocated = i;
			dev_kfree_skb_any(skb);
			goto err_out;
		}
	}

	rx_ring->r.dma = dma;
	rx_ring->r.len = len;
	rx_ring->r.desc_size = desc_size;
	rx_ring->r.wp = 0;
	rx_ring->r.rp = 0;

	return 0;

err_out:
	for (i = 0; i < allocated; i++) {
		skb = rx_ring->buf[i];
		if (!skb)
			continue;
		dma = *((dma_addr_t *)skb->cb);
		dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		rx_ring->buf[i] = NULL;
	}
	dma_free_coherent(&pdev->dev, ring_sz, head, dma);

	rtw_err(rtwdev, "failed to init rx buffer\n");

	return ret;
}

static int rtw_pci_init_trx_ring(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	struct rtw_pci_rx_ring *rx_ring;
	struct rtw_chip_info *chip = rtwdev->chip;
	int i = 0, j = 0, tx_alloced = 0, rx_alloced = 0;
	int tx_desc_size, rx_desc_size;
	u32 len;
	int ret;

	tx_desc_size = chip->tx_buf_desc_sz;

	for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		len = max_num_of_tx_queue(i);
		ret = rtw_pci_init_tx_ring(rtwdev, tx_ring, tx_desc_size, len);
		if (ret)
			goto out;
	}

	rx_desc_size = chip->rx_buf_desc_sz;

	for (j = 0; j < RTK_MAX_RX_QUEUE_NUM; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		ret = rtw_pci_init_rx_ring(rtwdev, rx_ring, rx_desc_size,
					   RTK_MAX_RX_DESC_NUM);
		if (ret)
			goto out;
	}

	return 0;

out:
	tx_alloced = i;
	for (i = 0; i < tx_alloced; i++) {
		tx_ring = &rtwpci->tx_rings[i];
		rtw_pci_free_tx_ring(rtwdev, tx_ring);
	}

	rx_alloced = j;
	for (j = 0; j < rx_alloced; j++) {
		rx_ring = &rtwpci->rx_rings[j];
		rtw_pci_free_rx_ring(rtwdev, rx_ring);
	}

	return ret;
}

static void rtw_pci_deinit(struct rtw_dev *rtwdev)
{
	rtw_pci_free_trx_ring(rtwdev);
}

static int rtw_pci_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	int ret = 0;

	rtwpci->irq_mask[0] = IMR_HIGHDOK |
			      IMR_MGNTDOK |
			      IMR_BKDOK |
			      IMR_BEDOK |
			      IMR_VIDOK |
			      IMR_VODOK |
			      IMR_ROK |
			      IMR_BCNDMAINT_E |
			      IMR_C2HCMD |
			      0;
	rtwpci->irq_mask[1] = IMR_TXFOVW |
			      0;
	rtwpci->irq_mask[3] = IMR_H2CDOK |
			      0;
	spin_lock_init(&rtwpci->irq_lock);
	spin_lock_init(&rtwpci->hwirq_lock);
	ret = rtw_pci_init_trx_ring(rtwdev);

	return ret;
}

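/* Program every ring's base DMA address and length into the hardware
 * buffer descriptor registers, zero the software pointers, then clear
 * all hardware read/write indexes.
 */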
static void rtw_pci_reset_buf_desc(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 len;
	u8 tmp;
	dma_addr_t dma;

	tmp = rtw_read8(rtwdev, RTK_PCI_CTRL + 3);
	rtw_write8(rtwdev, RTK_PCI_CTRL + 3, tmp | 0xf7);

	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BCN].r.dma;
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BCNQ, dma);

	if (!rtw_chip_wcpu_11n(rtwdev)) {
		len = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.len;
		dma = rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.dma;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.rp = 0;
		rtwpci->tx_rings[RTW_TX_QUEUE_H2C].r.wp = 0;
		rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_H2CQ, len & TRX_BD_IDX_MASK);
		rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_H2CQ, dma);
	}

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BK].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BKQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BKQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_BE].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_BEQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_BEQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VO].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VOQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VOQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_VI].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_VIQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_VIQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_MGMT].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_MGMTQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_MGMTQ, dma);

	len = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.len;
	dma = rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.dma;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.rp = 0;
	rtwpci->tx_rings[RTW_TX_QUEUE_HI0].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_TXBD_NUM_HI0Q, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_TXBD_DESA_HI0Q, dma);

	len = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.len;
	dma = rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.dma;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.rp = 0;
	rtwpci->rx_rings[RTW_RX_QUEUE_MPDU].r.wp = 0;
	rtw_write16(rtwdev, RTK_PCI_RXBD_NUM_MPDUQ, len & TRX_BD_IDX_MASK);
	rtw_write32(rtwdev, RTK_PCI_RXBD_DESA_MPDUQ, dma);

	/* reset read/write point */
	rtw_write32(rtwdev, RTK_PCI_TXBD_RWPTR_CLR, 0xffffffff);

	/* reset H2C Queue index in a single write */
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32_set(rtwdev, RTK_PCI_TXBD_H2CQ_CSR,
				BIT_CLR_H2CQ_HOST_IDX | BIT_CLR_H2CQ_HW_IDX);
}

static void rtw_pci_reset_trx_ring(struct rtw_dev *rtwdev)
{
	rtw_pci_reset_buf_desc(rtwdev);
}

static void rtw_pci_enable_interrupt(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci, bool exclude_rx)
{
	unsigned long flags;
	u32 imr0_unmask = exclude_rx ? IMR_ROK : 0;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	rtw_write32(rtwdev, RTK_PCI_HIMR0, rtwpci->irq_mask[0] & ~imr0_unmask);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, rtwpci->irq_mask[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, rtwpci->irq_mask[3]);

	rtwpci->irq_enabled = true;

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_disable_interrupt(struct rtw_dev *rtwdev,
				      struct rtw_pci *rtwpci)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	if (!rtwpci->irq_enabled)
		goto out;

	rtw_write32(rtwdev, RTK_PCI_HIMR0, 0);
	rtw_write32(rtwdev, RTK_PCI_HIMR1, 0);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HIMR3, 0);

	rtwpci->irq_enabled = false;

out:
	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	/* reset dma and rx tag */
	rtw_write32_set(rtwdev, RTK_PCI_CTRL,
			BIT_RST_TRXDMA_INTF | BIT_RX_TAG_EN);
	rtwpci->rx_tag = 0;
}

static int rtw_pci_setup(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_reset_trx_ring(rtwdev);
	rtw_pci_dma_reset(rtwdev, rtwpci);

	return 0;
}

static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
{
	struct rtw_pci_tx_ring *tx_ring;
	u8 queue;

	rtw_pci_reset_trx_ring(rtwdev);
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		tx_ring = &rtwpci->tx_rings[queue];
		rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
	}
}

static void rtw_pci_napi_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (test_and_set_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_enable(&rtwpci->napi);
}

static void rtw_pci_napi_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (!test_and_clear_bit(RTW_PCI_FLAG_NAPI_RUNNING, rtwpci->flags))
		return;

	napi_synchronize(&rtwpci->napi);
	napi_disable(&rtwpci->napi);
}

static int rtw_pci_start(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_start(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = true;
	rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static void rtw_pci_stop(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;

	spin_lock_bh(&rtwpci->irq_lock);
	rtwpci->running = false;
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);

	synchronize_irq(pdev->irq);
	rtw_pci_napi_stop(rtwdev);

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_dma_release(rtwdev, rtwpci);
	spin_unlock_bh(&rtwpci->irq_lock);
}

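/* Enter deep power save only when no TX DMA is in flight, unless the
 * firmware can wake the TX path by itself (FW_FEATURE_TX_WAKE).
 */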
static void rtw_pci_deep_ps_enter(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *tx_ring;
	bool tx_empty = true;
	u8 queue;

	if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		goto enter_deep_ps;

	lockdep_assert_held(&rtwpci->irq_lock);

	/* TX DMA is not allowed while in deep PS state */
	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
		/* BCN queue holds reserved pages and has no DMA interrupt;
		 * H2C queue is managed by the firmware
		 */
		if (queue == RTW_TX_QUEUE_BCN ||
		    queue == RTW_TX_QUEUE_H2C)
			continue;

		tx_ring = &rtwpci->tx_rings[queue];

		/* check if any skb is still being DMAed */
		if (skb_queue_len(&tx_ring->queue)) {
			tx_empty = false;
			break;
		}
	}

	if (!tx_empty) {
		rtw_dbg(rtwdev, RTW_DBG_PS,
			"TX path not empty, cannot enter deep power save state\n");
		return;
	}
enter_deep_ps:
	set_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags);
	rtw_power_mode_change(rtwdev, true);
}

static void rtw_pci_deep_ps_leave(struct rtw_dev *rtwdev)
{
#if defined(__linux__)
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	lockdep_assert_held(&rtwpci->irq_lock);
#elif defined(__FreeBSD__)
	lockdep_assert_held(&((struct rtw_pci *)rtwdev->priv)->irq_lock);
#endif

	if (test_and_clear_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_power_mode_change(rtwdev, false);
}

static void rtw_pci_deep_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	spin_lock_bh(&rtwpci->irq_lock);

	if (enter && !test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_enter(rtwdev);

	if (!enter && test_bit(RTW_FLAG_LEISURE_PS_DEEP, rtwdev->flags))
		rtw_pci_deep_ps_leave(rtwdev);

	spin_unlock_bh(&rtwpci->irq_lock);
}

static u8 ac_to_hwq[] = {
	[IEEE80211_AC_VO] = RTW_TX_QUEUE_VO,
	[IEEE80211_AC_VI] = RTW_TX_QUEUE_VI,
	[IEEE80211_AC_BE] = RTW_TX_QUEUE_BE,
	[IEEE80211_AC_BK] = RTW_TX_QUEUE_BK,
};

#if defined(__linux__)
static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
#elif defined(__FreeBSD__)
rtw88_static_assert(ARRAY_SIZE(ac_to_hwq) == IEEE80211_NUM_ACS);
#endif

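/* Map an skb to a PCI TX queue: beacon, management/control and
 * broadcast/multicast frames get dedicated queues; everything else
 * follows its mac80211 access category via ac_to_hwq.
 */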
static u8 rtw_hw_queue_mapping(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	__le16 fc = hdr->frame_control;
	u8 q_mapping = skb_get_queue_mapping(skb);
	u8 queue;

	if (unlikely(ieee80211_is_beacon(fc)))
		queue = RTW_TX_QUEUE_BCN;
	else if (unlikely(ieee80211_is_mgmt(fc) || ieee80211_is_ctl(fc)))
		queue = RTW_TX_QUEUE_MGMT;
	else if (is_broadcast_ether_addr(hdr->addr1) ||
		 is_multicast_ether_addr(hdr->addr1))
		queue = RTW_TX_QUEUE_HI0;
	else if (WARN_ON_ONCE(q_mapping >= ARRAY_SIZE(ac_to_hwq)))
		queue = ac_to_hwq[IEEE80211_AC_BE];
	else
		queue = ac_to_hwq[q_mapping];

	return queue;
}

static void rtw_pci_release_rsvd_page(struct rtw_pci *rtwpci,
				      struct rtw_pci_tx_ring *ring)
{
	struct sk_buff *prev = skb_dequeue(&ring->queue);
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;

	if (!prev)
		return;

	tx_data = rtw_pci_get_tx_data(prev);
	dma = tx_data->dma;
	dma_unmap_single(&rtwpci->pdev->dev, dma, prev->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(prev);
}

static void rtw_pci_dma_check(struct rtw_dev *rtwdev,
			      struct rtw_pci_rx_ring *rx_ring,
			      u32 idx)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_rx_buffer_desc *buf_desc;
	u32 desc_sz = chip->rx_buf_desc_sz;
	u16 total_pkt_size;

	buf_desc = (struct rtw_pci_rx_buffer_desc *)(rx_ring->r.head +
						     idx * desc_sz);
	total_pkt_size = le16_to_cpu(buf_desc->total_pkt_size);

	/* rx tag mismatch, throw a warning */
	if (total_pkt_size != rtwpci->rx_tag)
		rtw_warn(rtwdev, "pci bus timeout, check dma status\n");

	rtwpci->rx_tag = (rtwpci->rx_tag + 1) % RX_TAG_MAX;
}

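/* Read the hardware read pointer of a TX ring; it lives in the upper
 * 16 bits of the queue's buffer descriptor index register.
 */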
static u32 __pci_get_hw_tx_ring_rp(struct rtw_dev *rtwdev, u8 pci_q)
{
	u32 bd_idx_addr = rtw_pci_tx_queue_idx_addr[pci_q];
	u32 bd_idx = rtw_read16(rtwdev, bd_idx_addr + 2);

	return FIELD_GET(TRX_BD_IDX_MASK, bd_idx);
}

static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring = &rtwpci->tx_rings[pci_q];
	u32 cur_rp;
	u8 i;

	/* Because the time taken by the I/O in __pci_get_hw_tx_ring_rp is
	 * somewhat dynamic, it's hard to define a reasonable fixed total
	 * timeout for the read_poll_timeout* helpers. Instead, we can bound
	 * the number of polls, so we just use a for loop with udelay here.
	 */
	for (i = 0; i < 30; i++) {
		cur_rp = __pci_get_hw_tx_ring_rp(rtwdev, pci_q);
		if (cur_rp == ring->r.wp)
			return;

		udelay(1);
	}

	if (!drop)
		rtw_warn(rtwdev, "timed out flushing pci tx ring[%d]\n", pci_q);
}

static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
				   bool drop)
{
	u8 q;

	for (q = 0; q < RTK_MAX_TX_QUEUE_NUM; q++) {
		/* It may not be necessary to flush the BCN and H2C tx queues. */
		if (q == RTW_TX_QUEUE_BCN || q == RTW_TX_QUEUE_H2C)
			continue;

		if (pci_queues & BIT(q))
			__pci_flush_queue(rtwdev, q, drop);
	}
}

static void rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 queues, bool drop)
{
	u32 pci_queues = 0;
	u8 i;

	/* If all of the hardware queues are requested to be flushed,
	 * flush all of the pci queues.
	 */
	if (queues == BIT(rtwdev->hw->queues) - 1) {
		pci_queues = BIT(RTK_MAX_TX_QUEUE_NUM) - 1;
	} else {
		for (i = 0; i < rtwdev->hw->queues; i++)
			if (queues & BIT(i))
				pci_queues |= BIT(ac_to_hwq[i]);
	}

	__rtw_pci_flush_queues(rtwdev, pci_queues, drop);
}

static void rtw_pci_tx_kick_off_queue(struct rtw_dev *rtwdev, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u32 bd_idx;

	ring = &rtwpci->tx_rings[queue];
	bd_idx = rtw_pci_tx_queue_idx_addr[queue];

	spin_lock_bh(&rtwpci->irq_lock);
	if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_TX_WAKE))
		rtw_pci_deep_ps_leave(rtwdev);
	rtw_write16(rtwdev, bd_idx, ring->r.wp & TRX_BD_IDX_MASK);
	spin_unlock_bh(&rtwpci->irq_lock);
}

static void rtw_pci_tx_kick_off(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u8 queue;

	for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++)
		if (test_and_clear_bit(queue, rtwpci->tx_queued))
			rtw_pci_tx_kick_off_queue(rtwdev, queue);
}

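/* Each TX frame uses a pair of buffer descriptor segments: segment 0
 * points at the chip's TX packet descriptor prepended to the skb, and
 * segment 1 at the payload that follows it. The PSB length field holds
 * the total packet size in 128-byte blocks, rounded up.
 */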
static int rtw_pci_tx_write_data(struct rtw_dev *rtwdev,
				 struct rtw_tx_pkt_info *pkt_info,
				 struct sk_buff *skb, u8 queue)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	dma_addr_t dma;
	u32 tx_pkt_desc_sz = chip->tx_pkt_desc_sz;
	u32 tx_buf_desc_sz = chip->tx_buf_desc_sz;
	u32 size;
	u32 psb_len;
	u8 *pkt_desc;
	struct rtw_pci_tx_buffer_desc *buf_desc;

	ring = &rtwpci->tx_rings[queue];

	size = skb->len;

	if (queue == RTW_TX_QUEUE_BCN)
		rtw_pci_release_rsvd_page(rtwpci, ring);
	else if (!avail_desc(ring->r.wp, ring->r.rp, ring->r.len))
		return -ENOSPC;

	pkt_desc = skb_push(skb, chip->tx_pkt_desc_sz);
	memset(pkt_desc, 0, tx_pkt_desc_sz);
	pkt_info->qsel = rtw_pci_get_tx_qsel(skb, queue);
	rtw_tx_fill_tx_desc(pkt_info, skb);
	dma = dma_map_single(&rtwpci->pdev->dev, skb->data, skb->len,
			     DMA_TO_DEVICE);
	if (dma_mapping_error(&rtwpci->pdev->dev, dma))
		return -EBUSY;

	/* after this point the DMA mapping is committed, there is no way back */
	buf_desc = get_tx_buffer_desc(ring, tx_buf_desc_sz);
	memset(buf_desc, 0, tx_buf_desc_sz);
	psb_len = (skb->len - 1) / 128 + 1;
	if (queue == RTW_TX_QUEUE_BCN)
		psb_len |= 1 << RTK_PCI_TXBD_OWN_OFFSET;

	buf_desc[0].psb_len = cpu_to_le16(psb_len);
	buf_desc[0].buf_size = cpu_to_le16(tx_pkt_desc_sz);
	buf_desc[0].dma = cpu_to_le32(dma);
	buf_desc[1].buf_size = cpu_to_le16(size);
	buf_desc[1].dma = cpu_to_le32(dma + tx_pkt_desc_sz);

	tx_data = rtw_pci_get_tx_data(skb);
	tx_data->dma = dma;
	tx_data->sn = pkt_info->sn;

	spin_lock_bh(&rtwpci->irq_lock);

	skb_queue_tail(&ring->queue, skb);

	if (queue == RTW_TX_QUEUE_BCN)
		goto out_unlock;

	/* update write-index, and kick it off later */
	set_bit(queue, rtwpci->tx_queued);
	if (++ring->r.wp >= ring->r.len)
		ring->r.wp = 0;

out_unlock:
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

static int rtw_pci_write_data_rsvd_page(struct rtw_dev *rtwdev, u8 *buf,
					u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	u8 reg_bcn_work;
	int ret;

	skb = rtw_tx_write_data_rsvd_page_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_BCN);
	if (ret) {
#if defined(__FreeBSD__)
		dev_kfree_skb_any(skb);
#endif
		rtw_err(rtwdev, "failed to write rsvd page data\n");
		return ret;
	}

	/* reserved pages go through beacon queue */
	reg_bcn_work = rtw_read8(rtwdev, RTK_PCI_TXBD_BCN_WORK);
	reg_bcn_work |= BIT_PCI_BCNQ_FLAG;
	rtw_write8(rtwdev, RTK_PCI_TXBD_BCN_WORK, reg_bcn_work);

	return 0;
}

static int rtw_pci_write_data_h2c(struct rtw_dev *rtwdev, u8 *buf, u32 size)
{
	struct sk_buff *skb;
	struct rtw_tx_pkt_info pkt_info = {0};
	int ret;

	skb = rtw_tx_write_data_h2c_get(rtwdev, &pkt_info, buf, size);
	if (!skb)
		return -ENOMEM;

	ret = rtw_pci_tx_write_data(rtwdev, &pkt_info, skb, RTW_TX_QUEUE_H2C);
	if (ret) {
#if defined(__FreeBSD__)
		dev_kfree_skb_any(skb);
#endif
		rtw_err(rtwdev, "failed to write h2c data\n");
		return ret;
	}

	rtw_pci_tx_kick_off_queue(rtwdev, RTW_TX_QUEUE_H2C);

	return 0;
}

static int rtw_pci_tx_write(struct rtw_dev *rtwdev,
			    struct rtw_tx_pkt_info *pkt_info,
			    struct sk_buff *skb)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_pci_tx_ring *ring;
	u8 queue = rtw_hw_queue_mapping(skb);
	int ret;

	ret = rtw_pci_tx_write_data(rtwdev, pkt_info, skb, queue);
	if (ret)
		return ret;

	ring = &rtwpci->tx_rings[queue];
	spin_lock_bh(&rtwpci->irq_lock);
	if (avail_desc(ring->r.wp, ring->r.rp, ring->r.len) < 2) {
		ieee80211_stop_queue(rtwdev->hw, skb_get_queue_mapping(skb));
		ring->queue_stopped = true;
	}
	spin_unlock_bh(&rtwpci->irq_lock);

	return 0;
}

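/* TX completion: reclaim every skb between the software read pointer and
 * the hardware read pointer, unmap its DMA buffer, and hand the TX status
 * back to mac80211 (or queue it for a TX report if one was requested).
 */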
static void rtw_pci_tx_isr(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue)
{
	struct ieee80211_hw *hw = rtwdev->hw;
	struct ieee80211_tx_info *info;
	struct rtw_pci_tx_ring *ring;
	struct rtw_pci_tx_data *tx_data;
	struct sk_buff *skb;
	u32 count;
	u32 bd_idx_addr;
	u32 bd_idx, cur_rp, rp_idx;
	u16 q_map;

	ring = &rtwpci->tx_rings[hw_queue];

	bd_idx_addr = rtw_pci_tx_queue_idx_addr[hw_queue];
	bd_idx = rtw_read32(rtwdev, bd_idx_addr);
	cur_rp = bd_idx >> 16;
	cur_rp &= TRX_BD_IDX_MASK;
	rp_idx = ring->r.rp;
	if (cur_rp >= ring->r.rp)
		count = cur_rp - ring->r.rp;
	else
		count = ring->r.len - (ring->r.rp - cur_rp);

	while (count--) {
		skb = skb_dequeue(&ring->queue);
		if (!skb) {
			rtw_err(rtwdev, "failed to dequeue %d skb TX queue %d, BD=0x%08x, rp %d -> %d\n",
				count, hw_queue, bd_idx, ring->r.rp, cur_rp);
			break;
		}
		tx_data = rtw_pci_get_tx_data(skb);
		dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
				 DMA_TO_DEVICE);

		/* just free command packets from host to card */
		if (hw_queue == RTW_TX_QUEUE_H2C) {
			dev_kfree_skb_irq(skb);
			continue;
		}

		if (ring->queue_stopped &&
		    avail_desc(ring->r.wp, rp_idx, ring->r.len) > 4) {
			q_map = skb_get_queue_mapping(skb);
			ieee80211_wake_queue(hw, q_map);
			ring->queue_stopped = false;
		}

		if (++rp_idx >= ring->r.len)
			rp_idx = 0;

		skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);

		info = IEEE80211_SKB_CB(skb);

		/* enqueue to wait for tx report */
		if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
			rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
			continue;
		}

		/* always ACK for others, then they won't be marked as drop */
		if (info->flags & IEEE80211_TX_CTL_NO_ACK)
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		else
			info->flags |= IEEE80211_TX_STAT_ACK;

		ieee80211_tx_info_clear_status(info);
		ieee80211_tx_status_irqsafe(hw, skb);
	}

	ring->r.rp = cur_rp;
}

static void rtw_pci_rx_isr(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct napi_struct *napi = &rtwpci->napi;

	napi_schedule(napi);
}

static int rtw_pci_get_hw_rx_ring_nr(struct rtw_dev *rtwdev,
				     struct rtw_pci *rtwpci)
{
	struct rtw_pci_rx_ring *ring;
	int count = 0;
	u32 tmp, cur_wp;

	ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	tmp = rtw_read32(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ);
	cur_wp = u32_get_bits(tmp, TRX_BD_HW_IDX_MASK);
	if (cur_wp >= ring->r.wp)
		count = cur_wp - ring->r.wp;
	else
		count = ring->r.len - (ring->r.wp - cur_wp);

	return count;
}

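/* RX NAPI poll body: for each completed buffer descriptor, copy the frame
 * (including its RX descriptor) into a freshly allocated skb, pass it to
 * mac80211 or to C2H handling, then re-arm the original DMA buffer for
 * the device.
 */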
static u32 rtw_pci_rx_napi(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci,
			   u8 hw_queue, u32 limit)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct napi_struct *napi = &rtwpci->napi;
	struct rtw_pci_rx_ring *ring = &rtwpci->rx_rings[RTW_RX_QUEUE_MPDU];
	struct rtw_rx_pkt_stat pkt_stat;
	struct ieee80211_rx_status rx_status;
	struct sk_buff *skb, *new;
	u32 cur_rp = ring->r.rp;
	u32 count, rx_done = 0;
	u32 pkt_offset;
	u32 pkt_desc_sz = chip->rx_pkt_desc_sz;
	u32 buf_desc_sz = chip->rx_buf_desc_sz;
	u32 new_len;
	u8 *rx_desc;
	dma_addr_t dma;

	count = rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci);
	count = min(count, limit);

	while (count--) {
		rtw_pci_dma_check(rtwdev, ring, cur_rp);
		skb = ring->buf[cur_rp];
		dma = *((dma_addr_t *)skb->cb);
		dma_sync_single_for_cpu(rtwdev->dev, dma, RTK_PCI_RX_BUF_SIZE,
					DMA_FROM_DEVICE);
		rx_desc = skb->data;
		chip->ops->query_rx_desc(rtwdev, rx_desc, &pkt_stat, &rx_status);

		/* offset from rx_desc to payload */
		pkt_offset = pkt_desc_sz + pkt_stat.drv_info_sz +
			     pkt_stat.shift;

		/* allocate a new skb for this frame,
		 * discard the frame if none available
		 */
		new_len = pkt_stat.pkt_len + pkt_offset;
		new = dev_alloc_skb(new_len);
		if (WARN_ONCE(!new, "rx routine starvation\n"))
			goto next_rp;

		/* put the DMA data including rx_desc from phy to new skb */
		skb_put_data(new, skb->data, new_len);

		if (pkt_stat.is_c2h) {
			rtw_fw_c2h_cmd_rx_irqsafe(rtwdev, pkt_offset, new);
		} else {
			/* remove rx_desc */
			skb_pull(new, pkt_offset);

			rtw_rx_stats(rtwdev, pkt_stat.vif, new);
			memcpy(new->cb, &rx_status, sizeof(rx_status));
			ieee80211_rx_napi(rtwdev->hw, NULL, new, napi);
			rx_done++;
		}

next_rp:
		/* new skb delivered to mac80211, re-enable original skb DMA */
		rtw_pci_sync_rx_desc_device(rtwdev, dma, ring, cur_rp,
					    buf_desc_sz);

		/* host read next element in ring */
		if (++cur_rp >= ring->r.len)
			cur_rp = 0;
	}

	ring->r.rp = cur_rp;
	/* 'rp', the last position we have read, is seen as the previous
	 * position of 'wp' that is used to calculate 'count' next time.
	 */
	ring->r.wp = cur_rp;
	rtw_write16(rtwdev, RTK_PCI_RXBD_IDX_MPDUQ, ring->r.rp);

	return rx_done;
}

static void rtw_pci_irq_recognized(struct rtw_dev *rtwdev,
				   struct rtw_pci *rtwpci, u32 *irq_status)
{
	unsigned long flags;

	spin_lock_irqsave(&rtwpci->hwirq_lock, flags);

	irq_status[0] = rtw_read32(rtwdev, RTK_PCI_HISR0);
	irq_status[1] = rtw_read32(rtwdev, RTK_PCI_HISR1);
	if (rtw_chip_wcpu_11ac(rtwdev))
		irq_status[3] = rtw_read32(rtwdev, RTK_PCI_HISR3);
	else
		irq_status[3] = 0;
	irq_status[0] &= rtwpci->irq_mask[0];
	irq_status[1] &= rtwpci->irq_mask[1];
	irq_status[3] &= rtwpci->irq_mask[3];
	rtw_write32(rtwdev, RTK_PCI_HISR0, irq_status[0]);
	rtw_write32(rtwdev, RTK_PCI_HISR1, irq_status[1]);
	if (rtw_chip_wcpu_11ac(rtwdev))
		rtw_write32(rtwdev, RTK_PCI_HISR3, irq_status[3]);

	spin_unlock_irqrestore(&rtwpci->hwirq_lock, flags);
}

static irqreturn_t rtw_pci_interrupt_handler(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Disable the RTW PCI interrupt to avoid more interrupts arriving
	 * before the end of the threaded handler.
	 *
	 * Disabling HIMR here also avoids a new HISR flag being raised before
	 * the HISRs have been Write-1-cleared for MSI. If not all of the HISRs
	 * are cleared, the edge-triggered interrupt will not be generated when
	 * a new HISR flag is set.
	 */
	rtw_pci_disable_interrupt(rtwdev, rtwpci);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t rtw_pci_interrupt_threadfn(int irq, void *dev)
{
	struct rtw_dev *rtwdev = dev;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	u32 irq_status[4];
	bool rx = false;

	spin_lock_bh(&rtwpci->irq_lock);
	rtw_pci_irq_recognized(rtwdev, rtwpci, irq_status);

	if (irq_status[0] & IMR_MGNTDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_MGMT);
	if (irq_status[0] & IMR_HIGHDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_HI0);
	if (irq_status[0] & IMR_BEDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BE);
	if (irq_status[0] & IMR_BKDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_BK);
	if (irq_status[0] & IMR_VODOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VO);
	if (irq_status[0] & IMR_VIDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_VI);
	if (irq_status[3] & IMR_H2CDOK)
		rtw_pci_tx_isr(rtwdev, rtwpci, RTW_TX_QUEUE_H2C);
	if (irq_status[0] & IMR_ROK) {
		rtw_pci_rx_isr(rtwdev);
		rx = true;
	}
	if (unlikely(irq_status[0] & IMR_C2HCMD))
		rtw_fw_c2h_cmd_isr(rtwdev);

	/* all of the jobs for this interrupt have been done */
	if (rtwpci->running)
		rtw_pci_enable_interrupt(rtwdev, rtwpci, rx);
	spin_unlock_bh(&rtwpci->irq_lock);

	return IRQ_HANDLED;
}

static int rtw_pci_io_mapping(struct rtw_dev *rtwdev,
			      struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	unsigned long len;
	u8 bar_id = 2;
	int ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci regions\n");
		return ret;
	}

#if defined(__FreeBSD__)
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw_err(rtwdev, "failed to set dma mask to 32-bit\n");
		goto err_release_regions;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		rtw_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
		goto err_release_regions;
	}
#endif

	len = pci_resource_len(pdev, bar_id);
#if defined(__FreeBSD__)
	linuxkpi_pcim_want_to_use_bus_functions(pdev);
#endif
	rtwpci->mmap = pci_iomap(pdev, bar_id, len);
	if (!rtwpci->mmap) {
		pci_release_regions(pdev);
		rtw_err(rtwdev, "failed to map pci memory\n");
		return -ENOMEM;
	}

	return 0;
#if defined(__FreeBSD__)
err_release_regions:
	pci_release_regions(pdev);
	return ret;
#endif
}

static void rtw_pci_io_unmapping(struct rtw_dev *rtwdev,
				 struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	if (rtwpci->mmap) {
		pci_iounmap(pdev, rtwpci->mmap);
		pci_release_regions(pdev);
	}
}

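/* DBI accessors: a small indirect register window. Write the data and
 * address, raise the write (or read) flag bit, then poll until the
 * hardware clears it.
 */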
static void rtw_dbi_write8(struct rtw_dev *rtwdev, u16 addr, u8 data)
{
	u16 write_addr;
	u16 remainder = addr & ~(BITS_DBI_WREN | BITS_DBI_ADDR_MASK);
	u8 flag;
	u8 cnt;

	write_addr = addr & BITS_DBI_ADDR_MASK;
	write_addr |= u16_encode_bits(BIT(remainder), BITS_DBI_WREN);
	rtw_write8(rtwdev, REG_DBI_WDATA_V1 + remainder, data);
	rtw_write16(rtwdev, REG_DBI_FLAG_V1, write_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_WFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0)
			return;

		udelay(10);
	}

	WARN(flag, "failed to write to DBI register, addr=0x%04x\n", addr);
}

static int rtw_dbi_read8(struct rtw_dev *rtwdev, u16 addr, u8 *value)
{
	u16 read_addr = addr & BITS_DBI_ADDR_MASK;
	u8 flag;
	u8 cnt;

	rtw_write16(rtwdev, REG_DBI_FLAG_V1, read_addr);
	rtw_write8(rtwdev, REG_DBI_FLAG_V1 + 2, BIT_DBI_RFLAG >> 16);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		flag = rtw_read8(rtwdev, REG_DBI_FLAG_V1 + 2);
		if (flag == 0) {
			read_addr = REG_DBI_RDATA_V1 + (addr & 3);
			*value = rtw_read8(rtwdev, read_addr);
			return 0;
		}

		udelay(10);
	}

	WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
	return -EIO;
}

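/* MDIO write: indirectly programs the interface PHY parameters; the
 * register page is selected from the address and from whether the gen1
 * or gen2 parameter group is being written.
 */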
static void rtw_mdio_write(struct rtw_dev *rtwdev, u8 addr, u16 data, bool g1)
{
	u8 page;
	u8 wflag;
	u8 cnt;

	rtw_write16(rtwdev, REG_MDIO_V1, data);

	page = addr < RTW_PCI_MDIO_PG_SZ ? 0 : 1;
	page += g1 ? RTW_PCI_MDIO_PG_OFFS_G1 : RTW_PCI_MDIO_PG_OFFS_G2;
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG, addr & BITS_MDIO_ADDR_MASK);
	rtw_write8(rtwdev, REG_PCIE_MIX_CFG + 3, page);
	rtw_write32_mask(rtwdev, REG_PCIE_MIX_CFG, BIT_MDIO_WFLAG_V1, 1);

	for (cnt = 0; cnt < RTW_PCI_WR_RETRY_CNT; cnt++) {
		wflag = rtw_read32_mask(rtwdev, REG_PCIE_MIX_CFG,
					BIT_MDIO_WFLAG_V1);
		if (wflag == 0)
			return;

		udelay(10);
	}

	WARN(wflag, "failed to write to MDIO register, addr=0x%02x\n", addr);
}

static void rtw_pci_clkreq_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_CLKREQ_SW_EN;
	else
		value &= ~BIT_CLKREQ_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_clkreq_pad_low(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read CLKREQ_L1, ret=%d", ret);
		return;
	}

	if (enable)
		value &= ~BIT_CLKREQ_N_PAD;
	else
		value |= BIT_CLKREQ_N_PAD;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_aspm_set(struct rtw_dev *rtwdev, bool enable)
{
	u8 value;
	int ret;

	if (rtw_pci_disable_aspm)
		return;

	ret = rtw_dbi_read8(rtwdev, RTK_PCIE_LINK_CFG, &value);
	if (ret) {
		rtw_err(rtwdev, "failed to read ASPM, ret=%d", ret);
		return;
	}

	if (enable)
		value |= BIT_L1_SW_EN;
	else
		value &= ~BIT_L1_SW_EN;

	rtw_dbi_write8(rtwdev, RTK_PCIE_LINK_CFG, value);
}

static void rtw_pci_link_ps(struct rtw_dev *rtwdev, bool enter)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	/* Like CLKREQ, ASPM is also implemented by two HW modules, and can
	 * only be enabled when the host supports it.
	 *
	 * The ASPM mechanism should only be enabled when the driver/firmware
	 * enters power save mode without heavy traffic, because we have seen
	 * interoperability issues where the link tends to enter L1 state on
	 * the fly even while the driver is sustaining high throughput. This
	 * is probably because the ASPM behavior varies slightly across
	 * different SoCs.
	 */
	if (!(rtwpci->link_ctrl & PCI_EXP_LNKCTL_ASPM_L1))
		return;

	if ((enter && atomic_dec_if_positive(&rtwpci->link_usage) == 0) ||
	    (!enter && atomic_inc_return(&rtwpci->link_usage) == 1))
		rtw_pci_aspm_set(rtwdev, enter);
}

static void rtw_pci_link_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct pci_dev *pdev = rtwpci->pdev;
	u16 link_ctrl;
	int ret;

	/* RTL8822CE has REFCLK auto-calibration enabled, so it does not need
	 * added clock delay to cover the REFCLK timing gap.
	 */
	if (chip->id == RTW_CHIP_TYPE_8822C)
		rtw_dbi_write8(rtwdev, RTK_PCIE_CLKDLY_CTRL, 0);

	/* Though the standard PCIE configuration space provides the link
	 * control register, by Realtek's design the driver should check
	 * whether the host supports CLKREQ/ASPM before enabling the HW
	 * module.
	 *
	 * These functions are implemented by two associated HW modules: one
	 * accesses the PCIE configuration space to follow the host settings,
	 * and the other is in charge of doing the CLKREQ/ASPM mechanisms and
	 * is disabled by default, because the host may not support them, and
	 * wrong settings (e.g. CLKREQ# not bi-directional) could lead to
	 * losing the device if the HW misbehaves on the link.
	 *
	 * Hence the driver should first check that the PCIE configuration
	 * space is synced and enabled, and only then turn on the other
	 * module that actually implements the mechanism.
	 */
	ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
	if (ret) {
		rtw_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
		return;
	}

	if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
		rtw_pci_clkreq_set(rtwdev, true);

	rtwpci->link_ctrl = link_ctrl;
}

static void rtw_pci_interface_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_chip_info *chip = rtwdev->chip;

	switch (chip->id) {
	case RTW_CHIP_TYPE_8822C:
		if (rtwdev->hal.cut_version >= RTW_CHIP_VER_CUT_D)
			rtw_write32_mask(rtwdev, REG_HCI_MIX_CFG,
					 BIT_PCIE_EMAC_PDN_AUX_TO_FAST_CLK, 1);
		break;
	default:
		break;
	}
}

static void rtw_pci_phy_cfg(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct pci_dev *pdev = rtwpci->pdev;
	const struct rtw_intf_phy_para *para;
	u16 cut;
	u16 value;
	u16 offset;
	int i;
	int ret;

	cut = BIT(0) << rtwdev->hal.cut_version;

	for (i = 0; i < chip->intf_table->n_gen1_para; i++) {
		para = &chip->intf_table->gen1_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, true);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	for (i = 0; i < chip->intf_table->n_gen2_para; i++) {
		para = &chip->intf_table->gen2_para[i];
		if (!(para->cut_mask & cut))
			continue;
		if (para->offset == 0xffff)
			break;
		offset = para->offset;
		value = para->value;
		if (para->ip_sel == RTW_IP_SEL_PHY)
			rtw_mdio_write(rtwdev, offset, value, false);
		else
			rtw_dbi_write8(rtwdev, offset, value);
	}

	rtw_pci_link_cfg(rtwdev);

	/* Disable 8821ce completion timeout by default */
	if (chip->id == RTW_CHIP_TYPE_8821C) {
		ret = pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
					       PCI_EXP_DEVCTL2_COMP_TMOUT_DIS);
		if (ret)
			rtw_err(rtwdev, "failed to set PCI cap, ret = %d\n",
				ret);
	}
}

static int __maybe_unused rtw_pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, true);
	return 0;
}

static int __maybe_unused rtw_pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rtw_dev *rtwdev = hw->priv;
	struct rtw_chip_info *chip = rtwdev->chip;
	struct rtw_efuse *efuse = &rtwdev->efuse;

	if (chip->id == RTW_CHIP_TYPE_8822C && efuse->rfe_option == 6)
		rtw_pci_clkreq_pad_low(rtwdev, false);
	return 0;
}

SIMPLE_DEV_PM_OPS(rtw_pm_ops, rtw_pci_suspend, rtw_pci_resume);
EXPORT_SYMBOL(rtw_pm_ops);

static int rtw_pci_claim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to enable pci device\n");
		return ret;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, rtwdev->hw);
	SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);

	return 0;
}

static void rtw_pci_declaim(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	pci_clear_master(pdev);
	pci_disable_device(pdev);
}

static int rtw_pci_setup_resource(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	struct rtw_pci *rtwpci;
	int ret;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	rtwpci->pdev = pdev;

	/* after this, the driver can access the hw registers */
	ret = rtw_pci_io_mapping(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request pci io region\n");
		goto err_out;
	}

	ret = rtw_pci_init(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to allocate pci resources\n");
		goto err_io_unmap;
	}

	return 0;

err_io_unmap:
	rtw_pci_io_unmapping(rtwdev, pdev);

err_out:
	return ret;
}

static void rtw_pci_destroy(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	rtw_pci_deinit(rtwdev);
	rtw_pci_io_unmapping(rtwdev, pdev);
}

static struct rtw_hci_ops rtw_pci_ops = {
	.tx_write = rtw_pci_tx_write,
	.tx_kick_off = rtw_pci_tx_kick_off,
	.flush_queues = rtw_pci_flush_queues,
	.setup = rtw_pci_setup,
	.start = rtw_pci_start,
	.stop = rtw_pci_stop,
	.deep_ps = rtw_pci_deep_ps,
	.link_ps = rtw_pci_link_ps,
	.interface_cfg = rtw_pci_interface_cfg,

	.read8 = rtw_pci_read8,
	.read16 = rtw_pci_read16,
	.read32 = rtw_pci_read32,
	.write8 = rtw_pci_write8,
	.write16 = rtw_pci_write16,
	.write32 = rtw_pci_write32,
	.write_data_rsvd_page = rtw_pci_write_data_rsvd_page,
	.write_data_h2c = rtw_pci_write_data_h2c,
};

static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	unsigned int flags = PCI_IRQ_LEGACY;
	int ret;

	if (!rtw_disable_msi)
		flags |= PCI_IRQ_MSI;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
	if (ret < 0) {
		rtw_err(rtwdev, "failed to alloc PCI irq vectors\n");
		return ret;
	}

	ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
					rtw_pci_interrupt_handler,
					rtw_pci_interrupt_threadfn,
					IRQF_SHARED, KBUILD_MODNAME, rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to request irq %d\n", ret);
		pci_free_irq_vectors(pdev);
	}

	return ret;
}

static void rtw_pci_free_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
	devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
	pci_free_irq_vectors(pdev);
}

static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
{
	struct rtw_pci *rtwpci = container_of(napi, struct rtw_pci, napi);
	struct rtw_dev *rtwdev = container_of((void *)rtwpci, struct rtw_dev,
					      priv);
	int work_done = 0;

	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, false);

	while (work_done < budget) {
		u32 work_done_once;

		work_done_once = rtw_pci_rx_napi(rtwdev, rtwpci, RTW_RX_QUEUE_MPDU,
						 budget - work_done);
		if (work_done_once == 0)
			break;
		work_done += work_done_once;
	}
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		spin_lock_bh(&rtwpci->irq_lock);
		if (rtwpci->running)
			rtw_pci_enable_interrupt(rtwdev, rtwpci, false);
		spin_unlock_bh(&rtwpci->irq_lock);
		/* If an ISR happens during polling and before napi_complete()
		 * while no further data is received, data on the dma ring
		 * will not be processed immediately. Check whether the dma
		 * ring is empty and perform napi_schedule accordingly.
		 */
		if (rtw_pci_get_hw_rx_ring_nr(rtwdev, rtwpci))
			napi_schedule(napi);
	}
	if (rtwpci->rx_no_aspm)
		rtw_pci_link_ps(rtwdev, true);

	return work_done;
}

static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	init_dummy_netdev(&rtwpci->netdev);
	netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
}

static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
{
	struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_pci_napi_stop(rtwdev);
	netif_napi_del(&rtwpci->napi);
}

int rtw_pci_probe(struct pci_dev *pdev,
		  const struct pci_device_id *id)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);
	struct ieee80211_hw *hw;
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;
	int drv_data_size;
	int ret;

	drv_data_size = sizeof(struct rtw_dev) + sizeof(struct rtw_pci);
	hw = ieee80211_alloc_hw(drv_data_size, &rtw_ops);
	if (!hw) {
		dev_err(&pdev->dev, "failed to allocate hw\n");
		return -ENOMEM;
	}

	rtwdev = hw->priv;
	rtwdev->hw = hw;
	rtwdev->dev = &pdev->dev;
	rtwdev->chip = (struct rtw_chip_info *)id->driver_data;
	rtwdev->hci.ops = &rtw_pci_ops;
	rtwdev->hci.type = RTW_HCI_TYPE_PCIE;

	rtwpci = (struct rtw_pci *)rtwdev->priv;
	atomic_set(&rtwpci->link_usage, 1);

	ret = rtw_core_init(rtwdev);
	if (ret)
		goto err_release_hw;

	rtw_dbg(rtwdev, RTW_DBG_PCI,
		"rtw88 pci probe: vendor=0x%4.04X device=0x%4.04X rev=%d\n",
		pdev->vendor, pdev->device, pdev->revision);

	ret = rtw_pci_claim(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to claim pci device\n");
		goto err_deinit_core;
	}

	ret = rtw_pci_setup_resource(rtwdev, pdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup pci resources\n");
		goto err_pci_declaim;
	}

	rtw_pci_napi_init(rtwdev);

	ret = rtw_chip_info_setup(rtwdev);
	if (ret) {
		rtw_err(rtwdev, "failed to setup chip information\n");
		goto err_destroy_pci;
	}

	/* Disable PCIe ASPM L1 while doing NAPI poll for 8821CE */
	if (rtwdev->chip->id == RTW_CHIP_TYPE_8821C && bridge->vendor == PCI_VENDOR_ID_INTEL)
		rtwpci->rx_no_aspm = true;

	rtw_pci_phy_cfg(rtwdev);

	ret = rtw_register_hw(rtwdev, hw);
	if (ret) {
		rtw_err(rtwdev, "failed to register hw\n");
		goto err_destroy_pci;
	}

	ret = rtw_pci_request_irq(rtwdev, pdev);
	if (ret) {
		ieee80211_unregister_hw(hw);
		goto err_destroy_pci;
	}

	return 0;

err_destroy_pci:
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);

err_pci_declaim:
	rtw_pci_declaim(rtwdev, pdev);

err_deinit_core:
	rtw_core_deinit(rtwdev);

err_release_hw:
	ieee80211_free_hw(hw);

	return ret;
}
EXPORT_SYMBOL(rtw_pci_probe);

void rtw_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_pci *rtwpci;

	if (!hw)
		return;

	rtwdev = hw->priv;
	rtwpci = (struct rtw_pci *)rtwdev->priv;

	rtw_unregister_hw(rtwdev, hw);
	rtw_pci_disable_interrupt(rtwdev, rtwpci);
	rtw_pci_napi_deinit(rtwdev);
	rtw_pci_destroy(rtwdev, pdev);
	rtw_pci_declaim(rtwdev, pdev);
	rtw_pci_free_irq(rtwdev, pdev);
	rtw_core_deinit(rtwdev);
	ieee80211_free_hw(hw);
}
EXPORT_SYMBOL(rtw_pci_remove);

void rtw_pci_shutdown(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct rtw_dev *rtwdev;
	struct rtw_chip_info *chip;

	if (!hw)
		return;

	rtwdev = hw->priv;
	chip = rtwdev->chip;

	if (chip->ops->shutdown)
		chip->ops->shutdown(rtwdev);

	pci_set_power_state(pdev, PCI_D3hot);
}
EXPORT_SYMBOL(rtw_pci_shutdown);

MODULE_AUTHOR("Realtek Corporation");
MODULE_DESCRIPTION("Realtek 802.11ac wireless PCI driver");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_VERSION(rtw_pci, 1);
MODULE_DEPEND(rtw_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(rtw_pci, linuxkpi_wlan, 1, 1, 1);
#ifdef CONFIG_RTW88_DEBUGFS
MODULE_DEPEND(rtw_pci, lindebugfs, 1, 1, 1);
#endif
#endif