// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/delay.h>
#include "adf_accel_devices.h"
#include "adf_transport_internal.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"

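/*
 * Fast modulo for power-of-two divisors: returns data % (1 << shift)
 * without a division instruction.
 */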
static inline u32 adf_modulo(u32 data, u32 shift)
{
	u32 div = data >> shift;
	u32 mult = div << shift;

	return data - mult;
}

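/*
 * The ring size is a power of two, so (size - 1) & addr is non-zero
 * whenever the DMA address is not naturally aligned to the ring size.
 */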
static inline int adf_check_ring_alignment(u64 addr, u64 size)
{
	if (((size - 1) & addr) != 0)
		return -EFAULT;
	return 0;
}

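/*
 * Map the requested msg_size * msg_num bytes to one of the supported
 * encoded ring sizes; fall back to the default if there is no exact match.
 */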
static int adf_verify_ring_size(u32 msg_size, u32 msg_num)
{
	int i = ADF_MIN_RING_SIZE;

	for (; i <= ADF_MAX_RING_SIZE; i++)
		if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
			return i;

	return ADF_DEFAULT_RING_SIZE;
}

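/*
 * Claim (and, in adf_unreserve_ring below, release) a ring slot in the
 * bank's ring_mask under the bank lock; -EFAULT if already taken.
 */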
static int adf_reserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	if (bank->ring_mask & (1 << ring)) {
		spin_unlock(&bank->lock);
		return -EFAULT;
	}
	bank->ring_mask |= (1 << ring);
	spin_unlock(&bank->lock);
	return 0;
}

static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock(&bank->lock);
	bank->ring_mask &= ~(1 << ring);
	spin_unlock(&bank->lock);
}

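/*
 * Add the ring to the bank's IRQ mask and (re)program the interrupt
 * coalescing enable and timer CSRs.
 */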
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask |= (1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
	WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
			      bank->irq_coalesc_timer);
}

static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
	spin_lock_bh(&bank->lock);
	bank->irq_mask &= ~(1 << ring);
	spin_unlock_bh(&bank->lock);
	WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
}

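/*
 * Put one message on the ring: reserve an in-flight slot (bounded by
 * ADF_MAX_INFLIGHTS), copy the message at the current tail, advance the
 * tail modulo the ring size and publish it to the device via the tail
 * CSR. Returns -EAGAIN when the ring is full so the caller can retry.
 */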
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
	if (atomic_add_return(1, ring->inflights) >
	    ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
		atomic_dec(ring->inflights);
		return -EAGAIN;
	}
	spin_lock_bh(&ring->lock);
	memcpy((void *)((uintptr_t)ring->base_addr + ring->tail), msg,
	       ADF_MSG_SIZE_TO_BYTES(ring->msg_size));

	ring->tail = adf_modulo(ring->tail +
				ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
				ADF_RING_SIZE_MODULO(ring->ring_size));
	WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring->tail);
	spin_unlock_bh(&ring->lock);
	return 0;
}

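/*
 * Drain completed responses: invoke the ring callback for each message,
 * overwrite the slot with the empty-ring signature and advance the head.
 * The head CSR is only written once, after the loop.
 */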
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
	u32 msg_counter = 0;
	u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);

	while (*msg != ADF_RING_EMPTY_SIG) {
		ring->callback((u32 *)msg);
		atomic_dec(ring->inflights);
		*msg = ADF_RING_EMPTY_SIG;
		ring->head = adf_modulo(ring->head +
					ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
					ADF_RING_SIZE_MODULO(ring->ring_size));
		msg_counter++;
		msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
	}
	if (msg_counter > 0)
		WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
				    ring->bank->bank_number,
				    ring->ring_number, ring->head);
	return 0;
}

static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}

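/*
 * Response rings are configured with near-watermark levels (here 512
 * and 0) in addition to the ring size.
 */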
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_config =
			BUILD_RESP_RING_CONFIG(ring->ring_size,
					       ADF_RING_NEAR_WATERMARK_512,
					       ADF_RING_NEAR_WATERMARK_0);

	WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
			      ring->ring_number, ring_config);
}

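/*
 * Allocate the DMA-coherent ring buffer, pre-fill it with the empty-ring
 * signature (the 0x7F byte pattern), verify the hardware alignment
 * requirement and program the ring config and base CSRs.
 */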
static int adf_init_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;
	struct adf_accel_dev *accel_dev = bank->accel_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u64 ring_base;
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);

	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
	ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
					     ring_size_bytes, &ring->dma_addr,
					     GFP_KERNEL);
	if (!ring->base_addr)
		return -ENOMEM;

	memset(ring->base_addr, 0x7F, ring_size_bytes);
	/* The base_addr has to be aligned to the size of the buffer */
	if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
		dev_err(&GET_DEV(accel_dev), "Ring address not aligned\n");
		dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
				  ring->base_addr, ring->dma_addr);
		return -EFAULT;
	}

	if (hw_data->tx_rings_mask & (1 << ring->ring_number))
		adf_configure_tx_ring(ring);
	else
		adf_configure_rx_ring(ring);

	ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
	WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
			    ring->ring_number, ring_base);
	spin_lock_init(&ring->lock);
	return 0;
}

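/*
 * Re-mark the ring as empty (0x7F pattern) before freeing the coherent
 * DMA buffer, presumably so stale contents cannot be mistaken for valid
 * messages if the memory is reused.
 */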
static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
{
	u32 ring_size_bytes =
			ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
	ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);

	if (ring->base_addr) {
		memset(ring->base_addr, 0x7F, ring_size_bytes);
		dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
				  ring_size_bytes, ring->base_addr,
				  ring->dma_addr);
	}
}

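/**
 * adf_create_ring() - Allocate and configure a transport ring
 * @accel_dev: Pointer to acceleration device.
 * @section: Device configuration section to look the ring number up in.
 * @bank_num: Bank the ring belongs to.
 * @num_msgs: Number of messages the ring must accommodate.
 * @msg_size: Size of a single message in bytes.
 * @ring_name: Configuration key that maps to the ring number.
 * @callback: Handler invoked for each message on a response ring.
 * @poll_mode: Non-zero to poll for responses instead of enabling the
 *	ring IRQ.
 * @ring_ptr: Output pointer to the initialized ring.
 *
 * A typical call (section, key and sizes illustrative only) might be:
 *	ret = adf_create_ring(accel_dev, "Accelerator0", bank, 64, 128,
 *			      "RingTx0", NULL, 0, &tx_ring);
 *
 * Return: 0 on success, error code otherwise.
 */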
int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
		    u32 bank_num, u32 num_msgs,
		    u32 msg_size, const char *ring_name,
		    adf_callback_fn callback, int poll_mode,
		    struct adf_etr_ring_data **ring_ptr)
{
	struct adf_etr_data *transport_data = accel_dev->transport;
	struct adf_etr_bank_data *bank;
	struct adf_etr_ring_data *ring;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 ring_num;
	int ret;

	if (bank_num >= GET_MAX_BANKS(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Invalid bank number\n");
		return -EFAULT;
	}
	if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
		dev_err(&GET_DEV(accel_dev), "Invalid msg size\n");
		return -EFAULT;
	}
	if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
			      ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
		dev_err(&GET_DEV(accel_dev),
			"Invalid ring size for given msg size\n");
		return -EFAULT;
	}
	if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
		dev_err(&GET_DEV(accel_dev), "Section %s, no such entry : %s\n",
			section, ring_name);
		return -EFAULT;
	}
	if (kstrtouint(val, 10, &ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
		return -EFAULT;
	}
	if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
		dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
		return -EFAULT;
	}

	bank = &transport_data->banks[bank_num];
	if (adf_reserve_ring(bank, ring_num)) {
		dev_err(&GET_DEV(accel_dev), "Ring %d, %s already exists.\n",
			ring_num, ring_name);
		return -EFAULT;
	}
	ring = &bank->rings[ring_num];
	ring->ring_number = ring_num;
	ring->bank = bank;
	ring->callback = callback;
	ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
	ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
	ring->head = 0;
	ring->tail = 0;
	atomic_set(ring->inflights, 0);
	ret = adf_init_ring(ring);
	if (ret)
		goto err;

	/* Enable HW arbitration for the given ring */
	adf_update_ring_arb(ring);

	if (adf_ring_debugfs_add(ring, ring_name)) {
		dev_err(&GET_DEV(accel_dev),
			"Couldn't add ring debugfs entry\n");
		ret = -EFAULT;
		goto err;
	}

	/* Enable interrupts if needed */
	if (callback && (!poll_mode))
		adf_enable_ring_irq(bank, ring->ring_number);
	*ring_ptr = ring;
	return 0;
err:
	adf_cleanup_ring(ring);
	adf_unreserve_ring(bank, ring_num);
	adf_update_ring_arb(ring);
	return ret;
}

void adf_remove_ring(struct adf_etr_ring_data *ring)
{
	struct adf_etr_bank_data *bank = ring->bank;

	/* Disable interrupts for the given ring */
	adf_disable_ring_irq(bank, ring->ring_number);

	/* Clear the ring config and base CSRs */
	WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
			      ring->ring_number, 0);
	WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
			    ring->ring_number, 0);
	adf_ring_debugfs_rm(ring);
	adf_unreserve_ring(bank, ring->ring_number);
	/* Disable HW arbitration for the given ring */
	adf_update_ring_arb(ring);
	adf_cleanup_ring(ring);
}

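/*
 * Service every IRQ-enabled ring in the bank that the empty-ring status
 * CSR reports as non-empty.
 */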
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
	u32 empty_rings, i;

	empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
	empty_rings = ~empty_rings & bank->irq_mask;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
		if (empty_rings & (1 << i))
			adf_handle_response(&bank->rings[i]);
	}
}

void adf_response_handler(uintptr_t bank_addr)
{
	struct adf_etr_bank_data *bank = (void *)bank_addr;

	/* Handle all the responses and reenable IRQs */
	adf_ring_response_handler(bank);
	WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
				   bank->irq_mask);
}

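/*
 * Build a config key from @format and @key, then parse the matching
 * value as a decimal unsigned int.
 */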
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
				  const char *section, const char *format,
				  u32 key, u32 *value)
{
	char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);

	if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
		return -EFAULT;

	if (kstrtouint(val_buf, 10, value))
		return -EFAULT;
	return 0;
}

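/*
 * Read the per-bank coalescing timer from the device config, falling
 * back to the default when the entry is missing or out of range.
 */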
static void adf_get_coalesc_timer(struct adf_etr_bank_data *bank,
				  const char *section,
				  u32 bank_num_in_accel)
{
	if (adf_get_cfg_int(bank->accel_dev, section,
			    ADF_ETRMGR_COALESCE_TIMER_FORMAT,
			    bank_num_in_accel, &bank->irq_coalesc_timer))
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;

	if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
	    ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
		bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
}

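/*
 * Reset all ring CSRs in the bank, allocate in-flight counters for TX
 * rings (each RX ring shares the counter of the TX ring tx_rx_gap slots
 * below it), and set up the interrupt flag and source-select CSRs.
 */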
static int adf_init_bank(struct adf_accel_dev *accel_dev,
			 struct adf_etr_bank_data *bank,
			 u32 bank_num, void __iomem *csr_addr)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_etr_ring_data *ring;
	struct adf_etr_ring_data *tx_ring;
	u32 i, coalesc_enabled = 0;

	memset(bank, 0, sizeof(*bank));
	bank->bank_number = bank_num;
	bank->csr_addr = csr_addr;
	bank->accel_dev = accel_dev;
	spin_lock_init(&bank->lock);

	/* Always enable IRQ coalescing so the optimised flag-and-coalesce
	 * register can be used. If coalescing is disabled in the config
	 * file, fall back to the minimum timer value.
	 */
	if ((adf_get_cfg_int(accel_dev, "Accelerator0",
			     ADF_ETRMGR_COALESCING_ENABLED_FORMAT, bank_num,
			     &coalesc_enabled) == 0) && coalesc_enabled)
		adf_get_coalesc_timer(bank, "Accelerator0", bank_num);
	else
		bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i)) {
			ring->inflights =
				kzalloc_node(sizeof(atomic_t),
					     GFP_KERNEL,
					     dev_to_node(&GET_DEV(accel_dev)));
			if (!ring->inflights)
				goto err;
		} else {
			if (i < hw_data->tx_rx_gap) {
				dev_err(&GET_DEV(accel_dev),
					"Invalid tx rings mask config\n");
				goto err;
			}
			tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
			ring->inflights = tx_ring->inflights;
		}
	}
	if (adf_bank_debugfs_add(bank)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to add bank debugfs entry\n");
		goto err;
	}

	WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
	WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
	return 0;
err:
	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		ring = &bank->rings[i];
		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	return -ENOMEM;
}

/**
 * adf_init_etr_data() - Initialize transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function initializes the communications channels (rings) to the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_init_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr_addr;
	u32 size;
	u32 num_banks = 0;
	int i, ret;

	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
				dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data)
		return -ENOMEM;

	num_banks = GET_MAX_BANKS(accel_dev);
	size = num_banks * sizeof(struct adf_etr_bank_data);
	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
				       dev_to_node(&GET_DEV(accel_dev)));
	if (!etr_data->banks) {
		ret = -ENOMEM;
		goto err_bank;
	}

	accel_dev->transport = etr_data;
	i = hw_data->get_etr_bar_id(hw_data);
	csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;

	/* accel_dev->debugfs_dir should always be non-NULL here */
	etr_data->debug = debugfs_create_dir("transport",
					     accel_dev->debugfs_dir);

	for (i = 0; i < num_banks; i++) {
		ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
				    csr_addr);
		if (ret)
			goto err_bank_all;
	}

	return 0;

err_bank_all:
	debugfs_remove(etr_data->debug);
	kfree(etr_data->banks);
err_bank:
	kfree(etr_data);
	accel_dev->transport = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(adf_init_etr_data);

static void cleanup_bank(struct adf_etr_bank_data *bank)
{
	u32 i;

	for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
		struct adf_accel_dev *accel_dev = bank->accel_dev;
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_etr_ring_data *ring = &bank->rings[i];

		if (bank->ring_mask & (1 << i))
			adf_cleanup_ring(ring);

		if (hw_data->tx_rings_mask & (1 << i))
			kfree(ring->inflights);
	}
	adf_bank_debugfs_rm(bank);
	memset(bank, 0, sizeof(*bank));
}

static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;
	u32 i, num_banks = GET_MAX_BANKS(accel_dev);

	for (i = 0; i < num_banks; i++)
		cleanup_bank(&etr_data->banks[i]);
}

/**
 * adf_cleanup_etr_data() - Clear transport rings for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function clears the communications channels (rings) of the
 * acceleration device accel_dev.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *etr_data = accel_dev->transport;

	if (etr_data) {
		adf_cleanup_etr_handles(accel_dev);
		debugfs_remove(etr_data->debug);
		kfree(etr_data->banks);
		kfree(etr_data);
		accel_dev->transport = NULL;
	}
}
EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);