// SPDX-License-Identifier: GPL-2.0
/*
 * Loopback bridge driver for the Greybus loopback module.
 *
 * Copyright 2014 Google Inc.
 * Copyright 2014 Linaro Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/sizes.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/kfifo.h>
#include <linux/debugfs.h>
#include <linux/list_sort.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include <asm/div64.h>

#include "greybus.h"
#include "connection.h"

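/* One day in nanoseconds; bounds the wrap fallback in __gb_loopback_calc_latency() */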
#define NSEC_PER_DAY 86400000000000ULL

struct gb_loopback_stats {
	u32 min;
	u32 max;
	u64 sum;
	u32 count;
};

struct gb_loopback_device {
	struct dentry *root;
	u32 count;
	size_t size_max;

	/* We need to take a lock in atomic context */
	spinlock_t lock;
	struct list_head list;
	struct list_head list_op_async;
	wait_queue_head_t wq;
};

static struct gb_loopback_device gb_dev;

struct gb_loopback_async_operation {
	struct gb_loopback *gb;
	struct gb_operation *operation;
	ktime_t ts;
	int (*completion)(struct gb_loopback_async_operation *op_async);
};

struct gb_loopback {
	struct gb_connection *connection;

	struct dentry *file;
	struct kfifo kfifo_lat;
	struct mutex mutex;
	struct task_struct *task;
	struct list_head entry;
	struct device *dev;
	wait_queue_head_t wq;
	wait_queue_head_t wq_completion;
	atomic_t outstanding_operations;

	/* Per connection stats */
	ktime_t ts;
	struct gb_loopback_stats latency;
	struct gb_loopback_stats throughput;
	struct gb_loopback_stats requests_per_second;
	struct gb_loopback_stats apbridge_unipro_latency;
	struct gb_loopback_stats gbphy_firmware_latency;

	int type;
	int async;
	int id;
	u32 size;
	u32 iteration_max;
	u32 iteration_count;
	int us_wait;
	u32 error;
	u32 requests_completed;
	u32 requests_timedout;
	u32 timeout;
	u32 jiffy_timeout;
	u32 timeout_min;
	u32 timeout_max;
	u32 outstanding_operations_max;
	u32 lbid;
	u64 elapsed_nsecs;
	u32 apbridge_latency_ts;
	u32 gbphy_latency_ts;

	u32 send_count;
};

static struct class loopback_class = {
	.name		= "gb_loopback",
	.owner		= THIS_MODULE,
};
static DEFINE_IDA(loopback_ida);

/* Min/max values in jiffies */
#define GB_LOOPBACK_TIMEOUT_MIN				1
#define GB_LOOPBACK_TIMEOUT_MAX				10000

#define GB_LOOPBACK_FIFO_DEFAULT			8192

static unsigned int kfifo_depth = GB_LOOPBACK_FIFO_DEFAULT;
module_param(kfifo_depth, uint, 0444);

/* Maximum size of any one send data buffer we support */
#define MAX_PACKET_SIZE (PAGE_SIZE * 2)

#define GB_LOOPBACK_US_WAIT_MAX				1000000

/* interface sysfs attributes */
#define gb_loopback_ro_attr(field)				\
static ssize_t field##_show(struct device *dev,			\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);			\
}									\
static DEVICE_ATTR_RO(field)

#define gb_loopback_ro_stats_attr(name, field, type)		\
static ssize_t name##_##field##_show(struct device *dev,	\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	/* Report 0 for min and max if no transfer succeeded */	\
	if (!gb->requests_completed)					\
		return sprintf(buf, "0\n");				\
	return sprintf(buf, "%"#type"\n", gb->name.field);	\
}									\
static DEVICE_ATTR_RO(name##_##field)

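/*
 * Average = sum / count, printed with six fractional digits; do_div()
 * is used so the 64-bit division also builds on 32-bit architectures.
 */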
#define gb_loopback_ro_avg_attr(name)			\
static ssize_t name##_avg_show(struct device *dev,		\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback_stats *stats;				\
	struct gb_loopback *gb;						\
	u64 avg, rem;							\
	u32 count;							\
	gb = dev_get_drvdata(dev);			\
	stats = &gb->name;					\
	count = stats->count ? stats->count : 1;			\
	avg = stats->sum + count / 2000000; /* round closest */		\
	rem = do_div(avg, count);					\
	rem *= 1000000;							\
	do_div(rem, count);						\
	return sprintf(buf, "%llu.%06u\n", avg, (u32)rem);		\
}									\
static DEVICE_ATTR_RO(name##_avg)

#define gb_loopback_stats_attrs(field)				\
	gb_loopback_ro_stats_attr(field, min, u);		\
	gb_loopback_ro_stats_attr(field, max, u);		\
	gb_loopback_ro_avg_attr(field)

#define gb_loopback_attr(field, type)					\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);			\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

#define gb_dev_loopback_ro_attr(field, conn)				\
static ssize_t field##_show(struct device *dev,		\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%u\n", gb->field);				\
}									\
static DEVICE_ATTR_RO(field)

#define gb_dev_loopback_rw_attr(field, type)				\
static ssize_t field##_show(struct device *dev,				\
			    struct device_attribute *attr,		\
			    char *buf)					\
{									\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	return sprintf(buf, "%"#type"\n", gb->field);			\
}									\
static ssize_t field##_store(struct device *dev,			\
			    struct device_attribute *attr,		\
			    const char *buf,				\
			    size_t len)					\
{									\
	int ret;							\
	struct gb_loopback *gb = dev_get_drvdata(dev);			\
	mutex_lock(&gb->mutex);						\
	ret = sscanf(buf, "%"#type, &gb->field);			\
	if (ret != 1)							\
		len = -EINVAL;						\
	else								\
		gb_loopback_check_attr(gb);		\
	mutex_unlock(&gb->mutex);					\
	return len;							\
}									\
static DEVICE_ATTR_RW(field)

static void gb_loopback_reset_stats(struct gb_loopback *gb);
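
/*
 * Clamp user-supplied values and restart the current test: clear the
 * counters and, for a valid test type, reset the stats and wake the
 * worker thread.
 */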
static void gb_loopback_check_attr(struct gb_loopback *gb)
{
	if (gb->us_wait > GB_LOOPBACK_US_WAIT_MAX)
		gb->us_wait = GB_LOOPBACK_US_WAIT_MAX;
	if (gb->size > gb_dev.size_max)
		gb->size = gb_dev.size_max;
	gb->requests_timedout = 0;
	gb->requests_completed = 0;
	gb->iteration_count = 0;
	gb->send_count = 0;
	gb->error = 0;

	if (kfifo_depth < gb->iteration_max) {
		dev_warn(gb->dev,
			 "cannot log bytes %u kfifo_depth %u\n",
			 gb->iteration_max, kfifo_depth);
	}
	kfifo_reset_out(&gb->kfifo_lat);

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_TRANSFER:
	case GB_LOOPBACK_TYPE_SINK:
		gb->jiffy_timeout = usecs_to_jiffies(gb->timeout);
		if (!gb->jiffy_timeout)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MIN;
		else if (gb->jiffy_timeout > GB_LOOPBACK_TIMEOUT_MAX)
			gb->jiffy_timeout = GB_LOOPBACK_TIMEOUT_MAX;
		gb_loopback_reset_stats(gb);
		wake_up(&gb->wq);
		break;
	default:
		gb->type = 0;
		break;
	}
}

/* Time to send and receive one message */
gb_loopback_stats_attrs(latency);
/* Number of requests sent per second on this cport */
gb_loopback_stats_attrs(requests_per_second);
/* Quantity of data sent and received on this cport */
gb_loopback_stats_attrs(throughput);
/* Latency across the UniPro link from APBridge's perspective */
gb_loopback_stats_attrs(apbridge_unipro_latency);
/* Firmware induced overhead in the GPBridge */
gb_loopback_stats_attrs(gbphy_firmware_latency);

/* Number of errors encountered during loop */
gb_loopback_ro_attr(error);
/* Number of requests successfully completed async */
gb_loopback_ro_attr(requests_completed);
/* Number of requests timed out async */
gb_loopback_ro_attr(requests_timedout);
/* Timeout minimum in useconds */
gb_loopback_ro_attr(timeout_min);
/* Timeout maximum in useconds */
gb_loopback_ro_attr(timeout_max);

/*
 * Type of loopback message to send based on protocol type definitions
 * 0 => Don't send message
 * 2 => Send ping message continuously (message without payload)
 * 3 => Send transfer message continuously (message with payload,
 *					   payload returned in response)
 * 4 => Send a sink message (message with payload, no payload in response)
 */
gb_dev_loopback_rw_attr(type, d);
/* Size of transfer message payload: 0-4096 bytes */
gb_dev_loopback_rw_attr(size, u);
/* Time to wait between two messages: 0-1000 ms */
gb_dev_loopback_rw_attr(us_wait, d);
/* Maximum iterations for a given operation: 1-(2^32-1), 0 implies infinite */
gb_dev_loopback_rw_attr(iteration_max, u);
/* The current index of the for (i = 0; i < iteration_max; i++) loop */
gb_dev_loopback_ro_attr(iteration_count, false);
/* A flag to indicate synchronous or asynchronous operations */
gb_dev_loopback_rw_attr(async, u);
/* Timeout of an individual asynchronous request */
gb_dev_loopback_rw_attr(timeout, u);
/* Maximum number of in-flight operations before back-off */
gb_dev_loopback_rw_attr(outstanding_operations_max, u);
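
/*
 * Typical use from user space (the device node name is illustrative),
 * e.g.:
 *
 *   echo 1000 > /sys/class/gb_loopback/gb_loopback0/iteration_max
 *   echo 2 > /sys/class/gb_loopback/gb_loopback0/type
 *
 * starts a run of 1000 ping operations; writing 0 to "type" stops it.
 */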

static struct attribute *loopback_attrs[] = {
	&dev_attr_latency_min.attr,
	&dev_attr_latency_max.attr,
	&dev_attr_latency_avg.attr,
	&dev_attr_requests_per_second_min.attr,
	&dev_attr_requests_per_second_max.attr,
	&dev_attr_requests_per_second_avg.attr,
	&dev_attr_throughput_min.attr,
	&dev_attr_throughput_max.attr,
	&dev_attr_throughput_avg.attr,
	&dev_attr_apbridge_unipro_latency_min.attr,
	&dev_attr_apbridge_unipro_latency_max.attr,
	&dev_attr_apbridge_unipro_latency_avg.attr,
	&dev_attr_gbphy_firmware_latency_min.attr,
	&dev_attr_gbphy_firmware_latency_max.attr,
	&dev_attr_gbphy_firmware_latency_avg.attr,
	&dev_attr_type.attr,
	&dev_attr_size.attr,
	&dev_attr_us_wait.attr,
	&dev_attr_iteration_count.attr,
	&dev_attr_iteration_max.attr,
	&dev_attr_async.attr,
	&dev_attr_error.attr,
	&dev_attr_requests_completed.attr,
	&dev_attr_requests_timedout.attr,
	&dev_attr_timeout.attr,
	&dev_attr_outstanding_operations_max.attr,
	&dev_attr_timeout_min.attr,
	&dev_attr_timeout_max.attr,
	NULL,
};
ATTRIBUTE_GROUPS(loopback);

static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error);

static u32 gb_loopback_nsec_to_usec_latency(u64 elapsed_nsecs)
{
	do_div(elapsed_nsecs, NSEC_PER_USEC);
	return elapsed_nsecs;
}

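/*
 * Compute t2 - t1 in nanoseconds; the else branch guards against a
 * timestamp wrap within one NSEC_PER_DAY period.
 */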
static u64 __gb_loopback_calc_latency(u64 t1, u64 t2)
{
	if (t2 > t1)
		return t2 - t1;
	else
		return NSEC_PER_DAY - t2 + t1;
}

static u64 gb_loopback_calc_latency(ktime_t ts, ktime_t te)
{
	return __gb_loopback_calc_latency(ktime_to_ns(ts), ktime_to_ns(te));
}

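/*
 * Send a single request and wait for its response, recording the
 * complete round-trip time in gb->elapsed_nsecs.
 */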
static int gb_loopback_operation_sync(struct gb_loopback *gb, int type,
				      void *request, int request_size,
				      void *response, int response_size)
{
	struct gb_operation *operation;
	ktime_t ts, te;
	int ret;

	ts = ktime_get();
	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync(operation);
	if (ret) {
		dev_err(&gb->connection->bundle->dev,
			"synchronous operation failed: %d\n", ret);
		goto out_put_operation;
	} else {
		if (response_size == operation->response->payload_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		} else {
			dev_err(&gb->connection->bundle->dev,
				"response size %zu expected %d\n",
				operation->response->payload_size,
				response_size);
			ret = -EINVAL;
			goto out_put_operation;
		}
	}

	te = ktime_get();

	/* Calculate the total time the message took */
	gb->elapsed_nsecs = gb_loopback_calc_latency(ts, te);

out_put_operation:
	gb_operation_put(operation);

	return ret;
}

static void gb_loopback_async_wait_all(struct gb_loopback *gb)
{
	wait_event(gb->wq_completion,
		   !atomic_read(&gb->outstanding_operations));
}

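/*
 * Completion handler for asynchronous operations: run the per-type
 * completion callback, account latency or errors, wake any waiters and
 * drop the operation reference taken at submission.
 */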
static void gb_loopback_async_operation_callback(struct gb_operation *operation)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_loopback *gb;
	ktime_t te;
	int result;

	te = ktime_get();
	result = gb_operation_result(operation);
	op_async = gb_operation_get_data(operation);
	gb = op_async->gb;

	mutex_lock(&gb->mutex);

	if (!result && op_async->completion)
		result = op_async->completion(op_async);

	if (!result) {
		gb->elapsed_nsecs = gb_loopback_calc_latency(op_async->ts, te);
	} else {
		gb->error++;
		if (result == -ETIMEDOUT)
			gb->requests_timedout++;
	}

	gb->iteration_count++;
	gb_loopback_calculate_stats(gb, result);

	mutex_unlock(&gb->mutex);

	dev_dbg(&gb->connection->bundle->dev, "complete operation %d\n",
		operation->id);

	/* Wake up waiters */
	atomic_dec(&op_async->gb->outstanding_operations);
	wake_up(&gb->wq_completion);

	/* Release resources */
	gb_operation_put(operation);
	kfree(op_async);
}

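/*
 * Fire-and-forget submission: on success the callback above runs later
 * and releases the resources; on failure undo the outstanding-operations
 * count and free everything here.
 */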
static int gb_loopback_async_operation(struct gb_loopback *gb, int type,
				       void *request, int request_size,
				       int response_size,
				       void *completion)
{
	struct gb_loopback_async_operation *op_async;
	struct gb_operation *operation;
	int ret;

	op_async = kzalloc(sizeof(*op_async), GFP_KERNEL);
	if (!op_async)
		return -ENOMEM;

	operation = gb_operation_create(gb->connection, type, request_size,
					response_size, GFP_KERNEL);
	if (!operation) {
		kfree(op_async);
		return -ENOMEM;
	}

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	gb_operation_set_data(operation, op_async);

	op_async->gb = gb;
	op_async->operation = operation;
	op_async->completion = completion;

	op_async->ts = ktime_get();

	atomic_inc(&gb->outstanding_operations);
	ret = gb_operation_request_send(operation,
					gb_loopback_async_operation_callback,
					jiffies_to_msecs(gb->jiffy_timeout),
					GFP_KERNEL);
	if (ret) {
		atomic_dec(&gb->outstanding_operations);
		gb_operation_put(operation);
		kfree(op_async);
	}
	return ret;
}

static int gb_loopback_sync_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_SINK,
					    request, len + sizeof(*request),
					    NULL, 0);
	kfree(request);
	return retval;
}

static int gb_loopback_sync_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	int retval;

	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	response = kmalloc(len + sizeof(*response), GFP_KERNEL);
	if (!response) {
		kfree(request);
		return -ENOMEM;
	}

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	retval = gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_TRANSFER,
					    request, len + sizeof(*request),
					    response, len + sizeof(*response));
	if (retval)
		goto gb_error;

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match\n");
		retval = -EREMOTEIO;
	}
	gb->apbridge_latency_ts = (u32)__le32_to_cpu(response->reserved0);
	gb->gbphy_latency_ts = (u32)__le32_to_cpu(response->reserved1);

gb_error:
	kfree(request);
	kfree(response);

	return retval;
}

static int gb_loopback_sync_ping(struct gb_loopback *gb)
{
	return gb_loopback_operation_sync(gb, GB_LOOPBACK_TYPE_PING,
					  NULL, 0, NULL, 0);
}

static int gb_loopback_async_sink(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->len = cpu_to_le32(len);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_SINK,
					     request, len + sizeof(*request),
					     0, NULL);
	kfree(request);
	return retval;
}

static int gb_loopback_async_transfer_complete(
				struct gb_loopback_async_operation *op_async)
{
	struct gb_loopback *gb;
	struct gb_operation *operation;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	size_t len;
	int retval = 0;

	gb = op_async->gb;
	operation = op_async->operation;
	request = operation->request->payload;
	response = operation->response->payload;
	len = le32_to_cpu(request->len);

	if (memcmp(request->data, response->data, len)) {
		dev_err(&gb->connection->bundle->dev,
			"Loopback Data doesn't match operation id %d\n",
			operation->id);
		retval = -EREMOTEIO;
	} else {
		gb->apbridge_latency_ts =
			(u32)__le32_to_cpu(response->reserved0);
		gb->gbphy_latency_ts =
			(u32)__le32_to_cpu(response->reserved1);
	}

	return retval;
}

static int gb_loopback_async_transfer(struct gb_loopback *gb, u32 len)
{
	struct gb_loopback_transfer_request *request;
	int retval, response_len;

	request = kmalloc(len + sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	memset(request->data, 0x5A, len);

	request->len = cpu_to_le32(len);
	response_len = sizeof(struct gb_loopback_transfer_response);
	retval = gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_TRANSFER,
					     request, len + sizeof(*request),
					     len + response_len,
					     gb_loopback_async_transfer_complete);
	if (retval)
		goto gb_error;

gb_error:
	kfree(request);
	return retval;
}

static int gb_loopback_async_ping(struct gb_loopback *gb)
{
	return gb_loopback_async_operation(gb, GB_LOOPBACK_TYPE_PING,
					   NULL, 0, 0, NULL);
}

static int gb_loopback_request_handler(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	struct gb_loopback_transfer_request *request;
	struct gb_loopback_transfer_response *response;
	struct device *dev = &connection->bundle->dev;
	size_t len;

	/* By convention, the AP initiates the version operation */
	switch (operation->type) {
	case GB_LOOPBACK_TYPE_PING:
	case GB_LOOPBACK_TYPE_SINK:
		return 0;
	case GB_LOOPBACK_TYPE_TRANSFER:
		if (operation->request->payload_size < sizeof(*request)) {
			dev_err(dev, "transfer request too small (%zu < %zu)\n",
				operation->request->payload_size,
				sizeof(*request));
			return -EINVAL;	/* -EMSGSIZE */
		}
		request = operation->request->payload;
		len = le32_to_cpu(request->len);
		if (len > gb_dev.size_max) {
			dev_err(dev, "transfer request too large (%zu > %zu)\n",
				len, gb_dev.size_max);
			return -EINVAL;
		}

		if (!gb_operation_response_alloc(operation,
				len + sizeof(*response), GFP_KERNEL)) {
			dev_err(dev, "error allocating response\n");
			return -ENOMEM;
		}
		response = operation->response->payload;
		response->len = cpu_to_le32(len);
		if (len)
			memcpy(response->data, request->data, len);

		return 0;
	default:
		dev_err(dev, "unsupported request: %u\n", operation->type);
		return -EINVAL;
	}
}

static void gb_loopback_reset_stats(struct gb_loopback *gb)
{
	struct gb_loopback_stats reset = {
		.min = U32_MAX,
	};

	/* Reset per-connection stats */
	memcpy(&gb->latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->throughput, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->requests_per_second, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->apbridge_unipro_latency, &reset,
	       sizeof(struct gb_loopback_stats));
	memcpy(&gb->gbphy_firmware_latency, &reset,
	       sizeof(struct gb_loopback_stats));

	/* Should be initialized at least once per transaction set */
	gb->apbridge_latency_ts = 0;
	gb->gbphy_latency_ts = 0;
	gb->ts = ktime_set(0, 0);
}

static void gb_loopback_update_stats(struct gb_loopback_stats *stats, u32 val)
{
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
	stats->sum += val;
	stats->count++;
}

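/*
 * Windowed variant: sum and count accumulate the raw totals, while
 * min/max are taken from the average over this window (val / count).
 */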
static void gb_loopback_update_stats_window(struct gb_loopback_stats *stats,
					    u64 val, u32 count)
{
	stats->sum += val;
	stats->count += count;

	do_div(val, count);
	if (stats->min > val)
		stats->min = val;
	if (stats->max < val)
		stats->max = val;
}

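/*
 * "latency" below is the window length in microseconds, so scaling the
 * per-window totals by USEC_PER_SEC yields per-second rates.
 */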
static void gb_loopback_requests_update(struct gb_loopback *gb, u32 latency)
{
	u64 req = gb->requests_completed * USEC_PER_SEC;

	gb_loopback_update_stats_window(&gb->requests_per_second, req, latency);
}

static void gb_loopback_throughput_update(struct gb_loopback *gb, u32 latency)
{
	u64 aggregate_size = sizeof(struct gb_operation_msg_hdr) * 2;

	switch (gb->type) {
	case GB_LOOPBACK_TYPE_PING:
		break;
	case GB_LOOPBACK_TYPE_SINK:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  gb->size;
		break;
	case GB_LOOPBACK_TYPE_TRANSFER:
		aggregate_size += sizeof(struct gb_loopback_transfer_request) +
				  sizeof(struct gb_loopback_transfer_response) +
				  gb->size * 2;
		break;
	default:
		return;
	}

	aggregate_size *= gb->requests_completed;
	aggregate_size *= USEC_PER_SEC;
	gb_loopback_update_stats_window(&gb->throughput, aggregate_size,
					latency);
}

static void gb_loopback_calculate_latency_stats(struct gb_loopback *gb)
{
	u32 lat;

	/* Express latency in terms of microseconds */
	lat = gb_loopback_nsec_to_usec_latency(gb->elapsed_nsecs);

	/* Log latency statistic */
	gb_loopback_update_stats(&gb->latency, lat);

	/* Raw latency log on a per thread basis */
	kfifo_in(&gb->kfifo_lat, (unsigned char *)&lat, sizeof(lat));

	/* Log the firmware supplied latency values */
	gb_loopback_update_stats(&gb->apbridge_unipro_latency,
				 gb->apbridge_latency_ts);
	gb_loopback_update_stats(&gb->gbphy_firmware_latency,
				 gb->gbphy_latency_ts);
}

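/*
 * Called with gb->mutex held. Per-operation stats are updated on
 * success; the windowed rate stats roll over once a second (or at the
 * end of the run).
 */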
static void gb_loopback_calculate_stats(struct gb_loopback *gb, bool error)
{
	u64 nlat;
	u32 lat;
	ktime_t te;

	if (!error) {
		gb->requests_completed++;
		gb_loopback_calculate_latency_stats(gb);
	}

	te = ktime_get();
	nlat = gb_loopback_calc_latency(gb->ts, te);
	if (nlat >= NSEC_PER_SEC || gb->iteration_count == gb->iteration_max) {
		lat = gb_loopback_nsec_to_usec_latency(nlat);

		gb_loopback_throughput_update(gb, lat);
		gb_loopback_requests_update(gb, lat);

		if (gb->iteration_count != gb->iteration_max) {
			gb->ts = te;
			gb->requests_completed = 0;
		}
	}
}

static void gb_loopback_async_wait_to_send(struct gb_loopback *gb)
{
	if (!(gb->async && gb->outstanding_operations_max))
		return;
	wait_event_interruptible(gb->wq_completion,
				 (atomic_read(&gb->outstanding_operations) <
				  gb->outstanding_operations_max) ||
				  kthread_should_stop());
}

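/*
 * Worker thread: sleeps (with the bundle runtime-suspended) until a test
 * type is set, then issues operations until send_count reaches
 * iteration_max, optionally pacing by us_wait between sends.
 */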
static int gb_loopback_fn(void *data)
{
	int error = 0;
	int us_wait = 0;
	int type;
	int ret;
	u32 size;

	struct gb_loopback *gb = data;
	struct gb_bundle *bundle = gb->connection->bundle;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		return ret;

	while (1) {
		if (!gb->type) {
			gb_pm_runtime_put_autosuspend(bundle);
			wait_event_interruptible(gb->wq, gb->type ||
						 kthread_should_stop());
			ret = gb_pm_runtime_get_sync(bundle);
			if (ret)
				return ret;
		}

		if (kthread_should_stop())
			break;

		/* Limit the maximum number of in-flight async operations */
		gb_loopback_async_wait_to_send(gb);
		if (kthread_should_stop())
			break;

		mutex_lock(&gb->mutex);

		/* Optionally terminate */
		if (gb->send_count == gb->iteration_max) {
			mutex_unlock(&gb->mutex);

			/* Wait for synchronous and asynchronous completion */
			gb_loopback_async_wait_all(gb);

			/* Mark complete unless user-space has poked us */
			mutex_lock(&gb->mutex);
			if (gb->iteration_count == gb->iteration_max) {
				gb->type = 0;
				gb->send_count = 0;
				sysfs_notify(&gb->dev->kobj, NULL,
						"iteration_count");
				dev_dbg(&bundle->dev, "load test complete\n");
			} else {
				dev_dbg(&bundle->dev,
					"continuing on with new test set\n");
			}
			mutex_unlock(&gb->mutex);
			continue;
		}
		size = gb->size;
		us_wait = gb->us_wait;
		type = gb->type;
		if (ktime_to_ns(gb->ts) == 0)
			gb->ts = ktime_get();

		/* Else operations to perform */
		if (gb->async) {
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_async_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_async_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_async_sink(gb, size);

			if (error) {
				gb->error++;
				gb->iteration_count++;
			}
		} else {
			/* We are effectively single threaded here */
			if (type == GB_LOOPBACK_TYPE_PING)
				error = gb_loopback_sync_ping(gb);
			else if (type == GB_LOOPBACK_TYPE_TRANSFER)
				error = gb_loopback_sync_transfer(gb, size);
			else if (type == GB_LOOPBACK_TYPE_SINK)
				error = gb_loopback_sync_sink(gb, size);

			if (error)
				gb->error++;
			gb->iteration_count++;
			gb_loopback_calculate_stats(gb, !!error);
		}
		gb->send_count++;
		mutex_unlock(&gb->mutex);

		if (us_wait) {
			if (us_wait < 20000)
				usleep_range(us_wait, us_wait + 100);
			else
				msleep(us_wait / 1000);
		}
	}

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;
}

static int gb_loopback_dbgfs_latency_show_common(struct seq_file *s,
						 struct kfifo *kfifo,
						 struct mutex *mutex)
{
	u32 latency;
	int retval;

	if (kfifo_len(kfifo) == 0) {
		retval = -EAGAIN;
		goto done;
	}

	mutex_lock(mutex);
	retval = kfifo_out(kfifo, &latency, sizeof(latency));
	if (retval > 0) {
		seq_printf(s, "%u", latency);
		retval = 0;
	}
	mutex_unlock(mutex);
done:
	return retval;
}

static int gb_loopback_dbgfs_latency_show(struct seq_file *s, void *unused)
{
	struct gb_loopback *gb = s->private;

	return gb_loopback_dbgfs_latency_show_common(s, &gb->kfifo_lat,
						     &gb->mutex);
}

static int gb_loopback_latency_open(struct inode *inode, struct file *file)
{
	return single_open(file, gb_loopback_dbgfs_latency_show,
			   inode->i_private);
}

static const struct file_operations gb_loopback_debugfs_latency_ops = {
	.open		= gb_loopback_latency_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

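/*
 * Total order on loopback devices: by interface id, then bundle id,
 * then cport id.
 */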
static int gb_loopback_bus_id_compare(void *priv, struct list_head *lha,
				      struct list_head *lhb)
{
	struct gb_loopback *a = list_entry(lha, struct gb_loopback, entry);
	struct gb_loopback *b = list_entry(lhb, struct gb_loopback, entry);
	struct gb_connection *ca = a->connection;
	struct gb_connection *cb = b->connection;

	if (ca->bundle->intf->interface_id < cb->bundle->intf->interface_id)
		return -1;
	if (cb->bundle->intf->interface_id < ca->bundle->intf->interface_id)
		return 1;
	if (ca->bundle->id < cb->bundle->id)
		return -1;
	if (cb->bundle->id < ca->bundle->id)
		return 1;
	if (ca->intf_cport_id < cb->intf_cport_id)
		return -1;
	else if (cb->intf_cport_id < ca->intf_cport_id)
		return 1;

	return 0;
}

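/*
 * Renumber the lbid values: each device gets a one-hot bit reflecting
 * its position in the sorted bus order.
 */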
static void gb_loopback_insert_id(struct gb_loopback *gb)
{
	struct gb_loopback *gb_list;
	u32 new_lbid = 0;

	/* perform an insertion sort */
	list_add_tail(&gb->entry, &gb_dev.list);
	list_sort(NULL, &gb_dev.list, gb_loopback_bus_id_compare);
	list_for_each_entry(gb_list, &gb_dev.list, entry) {
		gb_list->lbid = 1 << new_lbid;
		new_lbid++;
	}
}

#define DEBUGFS_NAMELEN 32

static int gb_loopback_probe(struct gb_bundle *bundle,
			     const struct greybus_bundle_id *id)
{
	struct greybus_descriptor_cport *cport_desc;
	struct gb_connection *connection;
	struct gb_loopback *gb;
	struct device *dev;
	int retval;
	char name[DEBUGFS_NAMELEN];
	unsigned long flags;

	if (bundle->num_cports != 1)
		return -ENODEV;

	cport_desc = &bundle->cport_desc[0];
	if (cport_desc->protocol_id != GREYBUS_PROTOCOL_LOOPBACK)
		return -ENODEV;

	gb = kzalloc(sizeof(*gb), GFP_KERNEL);
	if (!gb)
		return -ENOMEM;

	connection = gb_connection_create(bundle, le16_to_cpu(cport_desc->id),
					  gb_loopback_request_handler);
	if (IS_ERR(connection)) {
		retval = PTR_ERR(connection);
		goto out_kzalloc;
	}

	gb->connection = connection;
	greybus_set_drvdata(bundle, gb);

	init_waitqueue_head(&gb->wq);
	init_waitqueue_head(&gb->wq_completion);
	atomic_set(&gb->outstanding_operations, 0);
	gb_loopback_reset_stats(gb);

	/* Reported values to user-space for min/max timeouts */
	gb->timeout_min = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MIN);
	gb->timeout_max = jiffies_to_usecs(GB_LOOPBACK_TIMEOUT_MAX);

	if (!gb_dev.count) {
		/* Calculate maximum payload */
		gb_dev.size_max = gb_operation_get_payload_size_max(connection);
		if (gb_dev.size_max <=
			sizeof(struct gb_loopback_transfer_request)) {
			retval = -EINVAL;
			goto out_connection_destroy;
		}
		gb_dev.size_max -= sizeof(struct gb_loopback_transfer_request);
	}

	/* Create per-connection sysfs and debugfs data-points */
	snprintf(name, sizeof(name), "raw_latency_%s",
		 dev_name(&connection->bundle->dev));
	gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
				       &gb_loopback_debugfs_latency_ops);

	gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
	if (gb->id < 0) {
		retval = gb->id;
		goto out_debugfs_remove;
	}

	retval = gb_connection_enable(connection);
	if (retval)
		goto out_ida_remove;

	dev = device_create_with_groups(&loopback_class,
					&connection->bundle->dev,
					MKDEV(0, 0), gb, loopback_groups,
					"gb_loopback%d", gb->id);
	if (IS_ERR(dev)) {
		retval = PTR_ERR(dev);
		goto out_connection_disable;
	}
	gb->dev = dev;

	/* Allocate kfifo */
	if (kfifo_alloc(&gb->kfifo_lat, kfifo_depth * sizeof(u32),
			  GFP_KERNEL)) {
		retval = -ENOMEM;
		goto out_conn;
	}
	/* Fork worker thread */
	mutex_init(&gb->mutex);
	gb->task = kthread_run(gb_loopback_fn, gb, "gb_loopback");
	if (IS_ERR(gb->task)) {
		retval = PTR_ERR(gb->task);
		goto out_kfifo;
	}

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_loopback_insert_id(gb);
	gb_dev.count++;
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	gb_connection_latency_tag_enable(connection);

	gb_pm_runtime_put_autosuspend(bundle);

	return 0;

out_kfifo:
	kfifo_free(&gb->kfifo_lat);
out_conn:
	device_unregister(dev);
out_connection_disable:
	gb_connection_disable(connection);
out_ida_remove:
	ida_simple_remove(&loopback_ida, gb->id);
out_debugfs_remove:
	debugfs_remove(gb->file);
out_connection_destroy:
	gb_connection_destroy(connection);
out_kzalloc:
	kfree(gb);

	return retval;
}

static void gb_loopback_disconnect(struct gb_bundle *bundle)
{
	struct gb_loopback *gb = greybus_get_drvdata(bundle);
	unsigned long flags;
	int ret;

	ret = gb_pm_runtime_get_sync(bundle);
	if (ret)
		gb_pm_runtime_get_noresume(bundle);

	gb_connection_disable(gb->connection);

	if (!IS_ERR_OR_NULL(gb->task))
		kthread_stop(gb->task);

	kfifo_free(&gb->kfifo_lat);
	gb_connection_latency_tag_disable(gb->connection);
	debugfs_remove(gb->file);

	/*
	 * FIXME: gb_loopback_async_wait_all() is redundant now, as connection
	 * is disabled at the beginning and so we can't have any more
	 * incoming/outgoing requests.
	 */
	gb_loopback_async_wait_all(gb);

	spin_lock_irqsave(&gb_dev.lock, flags);
	gb_dev.count--;
	list_del(&gb->entry);
	spin_unlock_irqrestore(&gb_dev.lock, flags);

	device_unregister(gb->dev);
	ida_simple_remove(&loopback_ida, gb->id);

	gb_connection_destroy(gb->connection);
	kfree(gb);
}

static const struct greybus_bundle_id gb_loopback_id_table[] = {
	{ GREYBUS_DEVICE_CLASS(GREYBUS_CLASS_LOOPBACK) },
	{ }
};
MODULE_DEVICE_TABLE(greybus, gb_loopback_id_table);

static struct greybus_driver gb_loopback_driver = {
	.name		= "loopback",
	.probe		= gb_loopback_probe,
	.disconnect	= gb_loopback_disconnect,
	.id_table	= gb_loopback_id_table,
};

static int loopback_init(void)
{
	int retval;

	INIT_LIST_HEAD(&gb_dev.list);
	INIT_LIST_HEAD(&gb_dev.list_op_async);
	spin_lock_init(&gb_dev.lock);
	gb_dev.root = debugfs_create_dir("gb_loopback", NULL);

	retval = class_register(&loopback_class);
	if (retval)
		goto err;

	retval = greybus_register(&gb_loopback_driver);
	if (retval)
		goto err_unregister;

	return 0;

err_unregister:
	class_unregister(&loopback_class);
err:
	debugfs_remove_recursive(gb_dev.root);
	return retval;
}
module_init(loopback_init);

static void __exit loopback_exit(void)
{
	debugfs_remove_recursive(gb_dev.root);
	greybus_deregister(&gb_loopback_driver);
	class_unregister(&loopback_class);
	ida_destroy(&loopback_ida);
}
module_exit(loopback_exit);

MODULE_LICENSE("GPL v2");