/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007 - 2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "adf_4xxx_hw_data.h"
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>
#include "adf_heartbeat_dbg.h"
#include "adf_cnvnr_freq_counters.h"

static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");

#define ADF_SYSTEM_DEVICE(device_id) \
	{ \
		PCI_VENDOR_ID_INTEL, device_id \
	}

static const struct pci_device_id adf_pci_tbl[] =
    { ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
      ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
      {
	  0,
      } };

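/*
 * Match the device against the PCI ID table above and set a
 * human-readable description. Returning BUS_PROBE_GENERIC leaves room
 * for a more specific driver to claim the device.
 */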
static int
adf_probe(device_t dev)
{
	const struct pci_device_id *id;

	for (id = adf_pci_tbl; id->vendor != 0; id++) {
		if (pci_get_vendor(dev) == id->vendor &&
		    pci_get_device(dev) == id->device) {
			device_set_desc(dev,
					"Intel " ADF_4XXX_DEVICE_NAME
					" QuickAssist");
			return BUS_PROBE_GENERIC;
		}
	}
	return ENXIO;
}

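/*
 * Undo everything adf_attach() set up, in reverse order: destroy the
 * DMA tag, release any mapped BARs, free the hw_data descriptor
 * (dispatching on the device ID so the matching cleanup hook runs),
 * then drop the configuration table and the accel-table entry. Every
 * step is guarded, so this is safe to call from any partially
 * completed attach.
 */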
static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
	int i;

	if (accel_dev->dma_tag)
		bus_dma_tag_destroy(accel_dev->dma_tag);
	for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

		if (bar->virt_addr)
			bus_free_resource(accel_pci_dev->pci_dev,
					  SYS_RES_MEMORY,
					  bar->virt_addr);
	}

	if (accel_dev->hw_device) {
		switch (pci_get_device(accel_pci_dev->pci_dev)) {
		case ADF_4XXX_PCI_DEVICE_ID:
		case ADF_401XX_PCI_DEVICE_ID:
			adf_clean_hw_data_4xxx(accel_dev->hw_device);
			break;
		default:
			break;
		}
		free(accel_dev->hw_device, M_QAT_4XXX);
		accel_dev->hw_device = NULL;
	}
	adf_cfg_dev_remove(accel_dev);
	adf_devmgr_rm_dev(accel_dev, NULL);
}

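/*
 * Bring the device up: register it with the accel-device manager,
 * allocate and initialize the 4xxx hw_data descriptor, read the fuse
 * and capability state, create the DMA tag, map the BARs, and finally
 * run the common configuration/init/start sequence. Any failure
 * unwinds through the labels at the bottom.
 */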
static int
adf_attach(device_t dev)
{
	struct adf_accel_dev *accel_dev;
	struct adf_accel_pci *accel_pci_dev;
	struct adf_hw_device_data *hw_data;
	unsigned int i, bar_nr;
	int ret, rid;
	struct adf_cfg_device *cfg_dev = NULL;
	/*
	 * Set the PCI MaxPayload size to 256 bytes. This works around
	 * PCI passthrough resetting MaxPayload to 128 bytes when the
	 * device is reset.
	 */
	if (pci_get_max_payload(dev) != 256)
		pci_set_max_payload(dev, 256);

	accel_dev = device_get_softc(dev);

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	accel_pci_dev = &accel_dev->accel_pci_dev;
	accel_pci_dev->pci_dev = dev;

	if (bus_get_domain(dev, &accel_pci_dev->node) != 0)
		accel_pci_dev->node = 0;

	/*
	 * Add the accel device to the accel table. This must happen
	 * before adf_cleanup_accel() can be called.
	 */
	if (adf_devmgr_add_dev(accel_dev, NULL)) {
		device_printf(dev, "Failed to add new accelerator device.\n");
		return ENXIO;
	}

	/* Allocate and configure device configuration structure */
	hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO);

	accel_dev->hw_device = hw_data;
	adf_init_hw_data_4xxx(accel_dev->hw_device);
	accel_pci_dev->revid = pci_get_revid(dev);
	hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);
	if (accel_pci_dev->revid == 0x00) {
		device_printf(dev, "A0 stepping is not supported.\n");
		ret = ENODEV;
		goto out_err;
	}

	/* Get PPAERUCM values and store */
	ret = adf_aer_store_ppaerucm_reg(dev, hw_data);
	if (ret)
		goto out_err;

	/* Get Accelerator and Accelerator Engine masks */
	hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
	hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);

	accel_pci_dev->sku = hw_data->get_sku(hw_data);
	/* If the device has no acceleration engines then ignore it. */
	if (!hw_data->accel_mask || !hw_data->ae_mask ||
	    (~hw_data->ae_mask & 0x01)) {
		device_printf(dev, "No acceleration units found\n");
		ret = ENXIO;
		goto out_err;
	}

	/* Create device configuration table */
	ret = adf_cfg_dev_add(accel_dev);
	if (ret)
		goto out_err;
	ret = adf_clock_debugfs_add(accel_dev);
	if (ret)
		goto out_err;

	pci_set_max_read_req(dev, 1024);

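	/*
	 * Create the parent DMA tag for the device: byte alignment, no
	 * boundary restriction, the full bus address range, and a
	 * single segment of up to BUS_SPACE_MAXSIZE bytes.
	 */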
	ret = bus_dma_tag_create(bus_get_dma_tag(dev),
				 1,
				 0,
				 BUS_SPACE_MAXADDR,
				 BUS_SPACE_MAXADDR,
				 NULL,
				 NULL,
				 BUS_SPACE_MAXSIZE,
				 /* BUS_SPACE_UNRESTRICTED */ 1,
				 BUS_SPACE_MAXSIZE,
				 0,
				 NULL,
				 NULL,
				 &accel_dev->dma_tag);
	if (ret)
		goto out_err;

	if (hw_data->get_accel_cap) {
		hw_data->accel_capabilities_mask =
		    hw_data->get_accel_cap(accel_dev);
	}

	/* Find and map all the device's BARS */
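	/*
	 * bus_get_resource() is used to check whether firmware actually
	 * assigned a memory resource at each rid; rids with nothing
	 * behind them (e.g. the upper halves of 64-bit BARs) are
	 * skipped so that pci_bars[] stays densely packed.
	 */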
	i = 0;
	for (bar_nr = 0; i < ADF_PCI_MAX_BARS && bar_nr < PCIR_MAX_BAR_0;
	     bar_nr++) {
		struct adf_bar *bar;

		rid = PCIR_BAR(bar_nr);
		if (bus_get_resource(dev, SYS_RES_MEMORY, rid, NULL, NULL) != 0)
			continue;
		bar = &accel_pci_dev->pci_bars[i++];
		bar->virt_addr = bus_alloc_resource_any(dev,
							SYS_RES_MEMORY,
							&rid,
							RF_ACTIVE);
		if (!bar->virt_addr) {
			device_printf(dev, "Failed to map BAR %d\n", bar_nr);
			ret = ENXIO;
			goto out_err;
		}
		bar->base_addr = rman_get_start(bar->virt_addr);
		bar->size = rman_get_size(bar->virt_addr);
	}
	pci_enable_busmaster(dev);

	if (!accel_dev->hw_device->config_device) {
		ret = EFAULT;
		goto out_err;
	}

	ret = accel_dev->hw_device->config_device(accel_dev);
	if (ret)
		goto out_err;

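	/*
	 * Hand off to the common QAT core: adf_dev_init() and
	 * adf_dev_start() bring the configured device through its
	 * initialization and start-up phases, with dedicated error
	 * labels to unwind each stage.
	 */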
	ret = adf_dev_init(accel_dev);
	if (ret)
		goto out_dev_shutdown;

	ret = adf_dev_start(accel_dev);
	if (ret)
		goto out_dev_stop;

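	/*
	 * Once the device has started, the staged configuration
	 * structure is no longer needed; clear and free it so only the
	 * live configuration table remains.
	 */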
	cfg_dev = accel_dev->cfg->dev;
	adf_cfg_device_clear(cfg_dev, accel_dev);
	free(cfg_dev, M_QAT);
	accel_dev->cfg->dev = NULL;
	return ret;
out_dev_stop:
	adf_dev_stop(accel_dev);
out_dev_shutdown:
	adf_dev_shutdown(accel_dev);
out_err:
	adf_cleanup_accel(accel_dev);
	return ret;
}

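/*
 * Detach mirrors attach: stop the device, shut it down, then release
 * all resources via adf_cleanup_accel(). If the device cannot be
 * stopped, the detach is refused with EBUSY.
 */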
static int
adf_detach(device_t dev)
{
	struct adf_accel_dev *accel_dev = device_get_softc(dev);

	if (adf_dev_stop(accel_dev)) {
		device_printf(dev, "Failed to stop QAT accel dev\n");
		return EBUSY;
	}

	adf_dev_shutdown(accel_dev);

	adf_cleanup_accel(accel_dev);

	return 0;
}

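/*
 * newbus method table and driver descriptor. The softc size tells the
 * bus code to allocate one struct adf_accel_dev per attached device,
 * which is what device_get_softc() returns in the methods above.
 */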
static device_method_t adf_methods[] = { DEVMETHOD(device_probe, adf_probe),
					 DEVMETHOD(device_attach, adf_attach),
					 DEVMETHOD(device_detach, adf_detach),

					 DEVMETHOD_END };

static driver_t adf_driver = { "qat",
			       adf_methods,
			       sizeof(struct adf_accel_dev) };

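/*
 * Module glue: the driver is registered on the pci bus at
 * SI_ORDER_THIRD, and the MODULE_DEPEND entries make the kernel load
 * qat_common, qat_api and linuxkpi before this module.
 */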
DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD);
MODULE_VERSION(qat_4xxx, 1);
MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1);