/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

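/*
 * Illustrative sketch only (not part of the driver flow): the intended
 * reference counting discipline around an me client. The lookup helpers
 * below (e.g. mei_me_cl_by_uuid()) already return with the refcount
 * elevated via mei_me_cl_get(), so a caller only has to drop it:
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (me_cl) {
 *		... use me_cl->props ...
 *		mei_me_cl_put(me_cl);
 *	}
 */
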
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *     increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *     increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *     increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *     increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid,
						    u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *     increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching the uuid
 *     and the client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (mei_cl_cmp_id(cl, cb->cl))
			list_del_init(&cb->list);
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (mei_cl_cmp_id(cl, cb->cl))
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(length, GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

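/*
 * Illustrative sketch only: a cb returned by mei_cl_alloc_cb() is owned
 * by the caller until it is queued on one of the device lists; on any
 * early error path it must be released with mei_io_cb_free():
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	...
 *	mei_io_cb_free(cb);
 */
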
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 *
 * Return: the allocated host client or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

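/**
 * mei_host_client_init - mark the device enabled and kick off host side
 *     client setup: schedule a bus rescan and request runtime autosuspend
 *
 * @dev: the device structure
 */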
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 *
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

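/**
 * mei_cl_set_connecting - take a reference on the me client and bind it
 *     to the host client in the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is no longer available,
 *     -EBUSY if a fixed address client is already connected
 */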
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *     interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *     internal function, runtime pm has to be acquired already
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if another
 *     client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client with the same me client id is connecting,
 *     false otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

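/*
 * Illustrative sketch only, assuming dev->device_lock is held and with
 * error handling elided: the typical connect/use/disconnect sequence
 * built from the helpers in this file.
 *
 *	cl = mei_cl_alloc_linked(dev);
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	ret = mei_cl_connect(cl, me_cl, fp);
 *	... I/O via mei_cl_write() / mei_cl_read_start() ...
 *	mei_cl_disconnect(cl);
 *	mei_me_cl_put(me_cl);
 *	mei_cl_unlink(cl);
 */
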
/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *     for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and an error code otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and an error code otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request || !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
}

/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and an error code otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_msg_hdr_init - initialize mei message header
 *
 * @mei_hdr: mei message header
 * @cb: message callback structure
 */
static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
{
	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->length = 0;
	mei_hdr->reserved = 0;
	mei_hdr->msg_complete = 0;
	mei_hdr->dma_ring = 0;
	mei_hdr->internal = cb->internal;
}

/**
 * mei_cl_irq_write - write a message to device
 *     from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len;
	size_t hbuf_len;
	int hbuf_slots;
	int rets;
	bool first_chunk;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	len = buf->size - cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots);

	mei_msg_hdr_init(&mei_hdr, cb);

	/*
	 * Split the message only if we can write the whole host buffer;
	 * otherwise wait for the next time the host buffer is empty.
	 */
	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		mei_hdr.length = hbuf_len - hdr_len;
	} else {
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
	       cb->buf.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, hdr_len,
				 buf->data + cb->buf_idx, mei_hdr.length);
	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	return 0;

err:
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device;
 *     assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t hdr_len = sizeof(mei_hdr);
	size_t len;
	size_t hbuf_len;
	int hbuf_slots;
	ssize_t rets;
	bool blocking;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	len = buf->size;
	blocking = cb->blocking;

	cl_dbg(dev, cl, "len=%zd\n", len);

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_msg_hdr_init(&mei_hdr, cb);

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots);

	if (len + hdr_len <= hbuf_len) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	} else {
		mei_hdr.length = hbuf_len - hdr_len;
	}

	rets = mei_write_message(dev, &mei_hdr, hdr_len,
				 buf->data, mei_hdr.length);
	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
						cl->writing_state == MEI_WRITE_COMPLETE ||
						(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		BUG();
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}