/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"

#define ADF_ARB_NUM 4
#define ADF_ARB_REG_SIZE 0x4
#define ADF_ARB_WTR_SIZE 0x20
#define ADF_ARB_OFFSET 0x30000
#define ADF_ARB_REG_SLOT 0x1000
#define ADF_ARB_WTR_OFFSET 0x010
#define ADF_ARB_RO_EN_OFFSET 0x090
#define ADF_ARB_WQCFG_OFFSET 0x100
#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C

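/*
 * Helpers for programming the service arbiter CSRs: SARCONFIG registers sit
 * ADF_ARB_REG_SIZE bytes apart from the device-specific arbiter offset, and
 * the per-bank RINGSRVARBEN registers are spaced ADF_ARB_REG_SLOT bytes
 * apart. csr_arb_lock serializes read-modify-write updates of the latter.
 */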
#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
        ADF_CSR_WR(csr_addr, \
                   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)), \
                   value)

#define WRITE_CSR_ARB_SARCONFIG(csr_addr, csr_offset, index, value) \
        ADF_CSR_WR(csr_addr, (csr_offset) + (ADF_ARB_REG_SIZE * (index)), value)

#define READ_CSR_ARB_RINGSRVARBEN(csr_addr, index) \
        ADF_CSR_RD(csr_addr, \
                   ADF_ARB_RINGSRVARBEN_OFFSET + (ADF_ARB_REG_SLOT * (index)))

static DEFINE_MUTEX(csr_arb_lock);

#define WRITE_CSR_ARB_WRK_2_SER_MAP( \
    csr_addr, csr_offset, wrk_to_ser_map_offset, index, value) \
        ADF_CSR_WR(csr_addr, \
                   ((csr_offset) + (wrk_to_ser_map_offset)) + \
                       (ADF_ARB_REG_SIZE * (index)), \
                   value)

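/*
 * adf_init_arb() - program the base service arbiter configuration.
 *
 * Writes the same configuration word to each of the ADF_ARB_NUM SARCONFIG
 * registers, starting at the arbiter offset reported by the hardware layer.
 */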
int
adf_init_arb(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct arb_info info;
        struct resource *csr = accel_dev->transport->banks[0].csr_addr;
        u32 arb_cfg = 0x1 << 31 | 0x4 << 4 | 0x1;
        u32 arb;

        hw_data->get_arb_info(&info);

        /* Service arb configured for 32-byte responses and
         * ring flow control check enabled.
         */
        for (arb = 0; arb < ADF_ARB_NUM; arb++)
                WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, arb, arb_cfg);

        return 0;
}

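/*
 * adf_init_gen2_arb() - gen2 service arbiter initialization.
 *
 * Performs the common arbiter setup, then programs the worker-thread to
 * service-arbiter mapping obtained from the hardware layer, one 32-bit
 * entry per accelerator engine.
 */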
int
adf_init_gen2_arb(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct arb_info info;
        struct resource *csr = accel_dev->transport->banks[0].csr_addr;
        u32 i;
        const u32 *thd_2_arb_cfg;

        /* Invoke the common adf_init_arb() first. */
        adf_init_arb(accel_dev);

        hw_data->get_arb_info(&info);

        /* Map worker threads to service arbiters */
        hw_data->get_arb_mapping(accel_dev, &thd_2_arb_cfg);
        if (!thd_2_arb_cfg)
                return EFAULT;

        for (i = 0; i < hw_data->num_engines; i++)
                WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
                                            info.arbiter_offset,
                                            info.wrk_thd_2_srv_arb_map,
                                            i,
                                            *(thd_2_arb_cfg + i));
        return 0;
}

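/*
 * adf_update_ring_arb() - refresh the arbitration enables for a ring's bank.
 *
 * The lower half of the bank's ring mask covers TX rings and the upper half
 * the paired RX rings; arbitration is enabled only for pairs where both
 * rings are enabled.
 */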
void
adf_update_ring_arb(struct adf_etr_ring_data *ring)
{
        int shift;
        u32 arben, arben_tx, arben_rx, arb_mask;
        struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
        struct adf_hw_csr_info *csr_info = &accel_dev->hw_device->csr_info;
        struct adf_hw_csr_ops *csr_ops = &csr_info->csr_ops;

        arb_mask = csr_info->arb_enable_mask;
        shift = hweight32(arb_mask);

        arben_tx = ring->bank->ring_mask & arb_mask;
        arben_rx = (ring->bank->ring_mask >> shift) & arb_mask;
        arben = arben_tx & arben_rx;
        csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
                                           ring->bank->bank_number,
                                           arben);
}

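/*
 * adf_enable_ring_arb() - set arbitration enable bits for a bank.
 *
 * Read-modify-write of the bank's RINGSRVARBEN register under csr_arb_lock;
 * only the low eight bits of the mask are applied.
 */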
void
adf_enable_ring_arb(struct adf_accel_dev *accel_dev,
                    void *csr_addr,
                    unsigned int bank_nr,
                    unsigned int mask)
{
        struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
        struct resource *csr = csr_addr;
        u32 arbenable;

        if (!csr)
                return;

        mutex_lock(&csr_arb_lock);
        arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
        arbenable |= mask & 0xFF;
        csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
        mutex_unlock(&csr_arb_lock);
}

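/*
 * adf_disable_ring_arb() - clear arbitration enable bits for a bank.
 *
 * Counterpart of adf_enable_ring_arb(); clears the requested bits in the
 * bank's RINGSRVARBEN register under csr_arb_lock.
 */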
void
adf_disable_ring_arb(struct adf_accel_dev *accel_dev,
                     void *csr_addr,
                     unsigned int bank_nr,
                     unsigned int mask)
{
        struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
        struct resource *csr = csr_addr;
        u32 arbenable;

        if (!csr_addr)
                return;

        mutex_lock(&csr_arb_lock);
        arbenable = csr_ops->read_csr_ring_srv_arb_en(csr, bank_nr);
        arbenable &= ~mask & 0xFF;
        csr_ops->write_csr_ring_srv_arb_en(csr, bank_nr, arbenable);
        mutex_unlock(&csr_arb_lock);
}

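/*
 * adf_exit_arb() - undo adf_init_arb()/adf_init_gen2_arb().
 *
 * Zeroes the SARCONFIG registers, clears the worker-thread to service-arbiter
 * map when a mapping callback is provided, and disables ring arbitration on
 * every bank.
 */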
void
adf_exit_arb(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct arb_info info;
        struct resource *csr;
        unsigned int i;

        if (!accel_dev->transport)
                return;

        csr = accel_dev->transport->banks[0].csr_addr;

        hw_data->get_arb_info(&info);

        /* Reset arbiter configuration */
        for (i = 0; i < ADF_ARB_NUM; i++)
                WRITE_CSR_ARB_SARCONFIG(csr, info.arbiter_offset, i, 0);

        /* Unmap worker threads from service arbiters */
        if (hw_data->get_arb_mapping) {
                for (i = 0; i < hw_data->num_engines; i++)
                        WRITE_CSR_ARB_WRK_2_SER_MAP(csr,
                                                    info.arbiter_offset,
                                                    info.wrk_thd_2_srv_arb_map,
                                                    i,
                                                    0);
        }

        /* Disable arbitration on all rings */
        for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
                csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}

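/*
 * adf_disable_arb() - disable ring arbitration on all banks.
 *
 * Leaves the arbiter configuration itself untouched; tolerates a NULL or
 * partially initialized device.
 */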
void
adf_disable_arb(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_csr_ops *csr_ops;
        struct resource *csr;
        unsigned int i;

        if (!accel_dev || !accel_dev->transport)
                return;

        /* Look up the CSR ops only after accel_dev is known to be valid. */
        csr_ops = GET_CSR_OPS(accel_dev);
        csr = accel_dev->transport->banks[0].csr_addr;

        /* Disable arbitration on all rings */
        for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
                csr_ops->write_csr_ring_srv_arb_en(csr, i, 0);
}