// SPDX-License-Identifier: GPL-2.0
/*
 * z/VM IUCV hypervisor console (HVC) device driver
 *
 * This HVC device driver provides terminal access using
 * z/VM IUCV communication paths.
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * Author(s):	Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
 */
#define KMSG_COMPONENT		"hvc_iucv"
#define pr_fmt(fmt)		KMSG_COMPONENT ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/moduleparam.h>
#include <linux/tty.h>
#include <linux/wait.h>
#include <net/iucv/iucv.h>

#include "hvc_console.h"


/* General device driver settings */
#define HVC_IUCV_MAGIC		0xc9e4c3e5
#define MAX_HVC_IUCV_LINES	HVC_ALLOC_TTY_ADAPTERS
#define MEMPOOL_MIN_NR		(PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)

/* IUCV TTY message */
#define MSG_VERSION		0x02	/* Message version */
#define MSG_TYPE_ERROR		0x01	/* Error message */
#define MSG_TYPE_TERMENV	0x02	/* Terminal environment variable */
#define MSG_TYPE_TERMIOS	0x04	/* Terminal IO struct update */
#define MSG_TYPE_WINSIZE	0x08	/* Terminal window size update */
#define MSG_TYPE_DATA		0x10	/* Terminal data */

struct iucv_tty_msg {
	u8	version;		/* Message version */
	u8	type;			/* Message type */
#define MSG_MAX_DATALEN		((u16)(~0))
	u16	datalen;		/* Payload length */
	u8	data[];			/* Payload buffer */
} __attribute__((packed));
#define MSG_SIZE(s)		((s) + offsetof(struct iucv_tty_msg, data))

enum iucv_state_t {
	IUCV_DISCONN	= 0,
	IUCV_CONNECTED	= 1,
	IUCV_SEVERED	= 2,
};

enum tty_state_t {
	TTY_CLOSED	= 0,
	TTY_OPENED	= 1,
};

struct hvc_iucv_private {
	struct hvc_struct	*hvc;		/* HVC struct reference */
	u8			srv_name[8];	/* IUCV service name (ebcdic) */
	unsigned char		is_console;	/* Linux console usage flag */
	enum iucv_state_t	iucv_state;	/* IUCV connection status */
	enum tty_state_t	tty_state;	/* TTY status */
	struct iucv_path	*path;		/* IUCV path pointer */
	spinlock_t		lock;		/* hvc_iucv_private lock */
#define SNDBUF_SIZE		(PAGE_SIZE)	/* must be < MSG_MAX_DATALEN */
	void			*sndbuf;	/* send buffer */
	size_t			sndbuf_len;	/* length of send buffer */
#define QUEUE_SNDBUF_DELAY	(HZ / 25)
	struct delayed_work	sndbuf_work;	/* work: send iucv msg(s) */
	wait_queue_head_t	sndbuf_waitq;	/* wait for send completion */
	struct list_head	tty_outqueue;	/* outgoing IUCV messages */
	struct list_head	tty_inqueue;	/* incoming IUCV messages */
	struct device		*dev;		/* device structure */
	u8			info_path[16];	/* IUCV path info (dev attr) */
};

struct iucv_tty_buffer {
	struct list_head	list;	/* list pointer */
	struct iucv_message	msg;	/* store an IUCV message */
	size_t			offset;	/* data buffer offset */
	struct iucv_tty_msg	*mbuf;	/* buffer to store input/output data */
};

/* IUCV callback handler */
static int hvc_iucv_path_pending(struct iucv_path *, u8 *, u8 *);
static void hvc_iucv_path_severed(struct iucv_path *, u8 *);
static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);


/* Kernel module parameter: use one terminal device as default */
static unsigned long hvc_iucv_devices = 1;

/* Array of allocated hvc iucv tty lines... */
static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
#define IUCV_HVC_CON_IDX	(0)
/* List of z/VM user ID filter entries (struct iucv_vmid_filter) */
#define MAX_VMID_FILTER		(500)
#define FILTER_WILDCARD_CHAR	'*'
static size_t hvc_iucv_filter_size;
static void *hvc_iucv_filter;
static const char *hvc_iucv_filter_string;
static DEFINE_RWLOCK(hvc_iucv_filter_lock);

/* Kmem cache and mempool for iucv_tty_buffer elements */
static struct kmem_cache *hvc_iucv_buffer_cache;
static mempool_t *hvc_iucv_mempool;

/* IUCV handler callback functions */
static struct iucv_handler hvc_iucv_handler = {
	.path_pending     = hvc_iucv_path_pending,
	.path_severed     = hvc_iucv_path_severed,
	.message_complete = hvc_iucv_msg_complete,
	.message_pending  = hvc_iucv_msg_pending,
};


/**
 * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
 * @num:	The HVC virtual terminal number (vtermno)
 *
 * This function returns the struct hvc_iucv_private instance that corresponds
 * to the HVC virtual terminal number specified as parameter @num.
 */
static struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
{
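	/* vtermno values are offset by HVC_IUCV_MAGIC; map back to an index
	 * into hvc_iucv_table */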
	if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
		return NULL;
	return hvc_iucv_table[num - HVC_IUCV_MAGIC];
}

/**
 * alloc_tty_buffer() - Return a new struct iucv_tty_buffer element.
 * @size:	Size of the internal buffer used to store data.
 * @flags:	Memory allocation flags passed to mempool.
 *
 * This function allocates a new struct iucv_tty_buffer element and, optionally,
 * allocates an internal data buffer with the specified size @size.
 * The internal data buffer is always allocated with GFP_DMA which is
 * required for receiving and sending data with IUCV.
 * Note: The total message size arises from the internal buffer size and the
 *	 members of the iucv_tty_msg structure.
 * The function returns NULL if memory allocation has failed.
 */
static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
{
	struct iucv_tty_buffer *bufp;

	bufp = mempool_alloc(hvc_iucv_mempool, flags);
	if (!bufp)
		return NULL;
	memset(bufp, 0, sizeof(*bufp));

	if (size > 0) {
		bufp->msg.length = MSG_SIZE(size);
		bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
		if (!bufp->mbuf) {
			mempool_free(bufp, hvc_iucv_mempool);
			return NULL;
		}
		bufp->mbuf->version = MSG_VERSION;
		bufp->mbuf->type = MSG_TYPE_DATA;
		bufp->mbuf->datalen = (u16) size;
	}
	return bufp;
}

/**
 * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
 * @bufp:	Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
 */
static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
{
	kfree(bufp->mbuf);
	mempool_free(bufp, hvc_iucv_mempool);
}

/**
 * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
 * @list:	List containing struct iucv_tty_buffer elements.
 */
static void destroy_tty_buffer_list(struct list_head *list)
{
	struct iucv_tty_buffer *ent, *next;

	list_for_each_entry_safe(ent, next, list, list) {
		list_del(&ent->list);
		destroy_tty_buffer(ent);
	}
}

/**
 * hvc_iucv_write() - Receive IUCV message & write data to HVC buffer.
 * @priv:		Pointer to struct hvc_iucv_private
 * @buf:		HVC buffer for writing received terminal data.
 * @count:		HVC buffer size.
 * @has_more_data:	Pointer to an int variable.
 *
 * The function picks up pending messages from the input queue and receives
 * the message data that is then written to the specified buffer @buf.
 * If the buffer size @count is less than the data message size, the
 * message is kept on the input queue and @has_more_data is set to 1.
 * If all message data has been written, the message is removed from
 * the input queue.
 *
 * The function returns the number of bytes written to the terminal, zero if
 * there are no pending data messages available or if there is no established
 * IUCV path.
 * If the IUCV path has been severed, then -EPIPE is returned to cause a
 * hang up (that is issued by the HVC layer).
 */
static int hvc_iucv_write(struct hvc_iucv_private *priv,
			  char *buf, int count, int *has_more_data)
{
	struct iucv_tty_buffer *rb;
	int written;
	int rc;

	/* immediately return if there is no IUCV connection */
	if (priv->iucv_state == IUCV_DISCONN)
		return 0;

	/* if the IUCV path has been severed, return -EPIPE to inform the
	 * HVC layer to hang up the tty device. */
	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	/* check if there are pending messages */
	if (list_empty(&priv->tty_inqueue))
		return 0;

	/* receive an iucv message and flip data to the tty (ldisc) */
	rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);

	written = 0;
	if (!rb->mbuf) { /* message not yet received ... */
		/* allocate mem to store msg data; if no memory is available
		 * then leave the buffer on the list and re-try later */
		rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
		if (!rb->mbuf)
			return -ENOMEM;

		rc = __iucv_message_receive(priv->path, &rb->msg, 0,
					    rb->mbuf, rb->msg.length, NULL);
		switch (rc) {
		case 0:	/* Successful */
			break;
		case 2:	/* No message found */
		case 9:	/* Message purged */
			break;
		default:
			written = -EIO;
		}
		/* remove buffer if an error has occurred or received data
		 * is not correct */
		if (rc || (rb->mbuf->version != MSG_VERSION) ||
			  (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
			goto out_remove_buffer;
	}

	switch (rb->mbuf->type) {
	case MSG_TYPE_DATA:
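		/* copy as much as fits into the HVC buffer; if the message
		 * does not fit completely, keep it queued and remember the
		 * offset for the next call */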
		written = min_t(int, rb->mbuf->datalen - rb->offset, count);
		memcpy(buf, rb->mbuf->data + rb->offset, written);
		if (written < (rb->mbuf->datalen - rb->offset)) {
			rb->offset += written;
			*has_more_data = 1;
			goto out_written;
		}
		break;

	case MSG_TYPE_WINSIZE:
		if (rb->mbuf->datalen != sizeof(struct winsize))
			break;
		/* The caller must ensure that the hvc is locked, which
		 * is the case when called from hvc_iucv_get_chars() */
		__hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
		break;

	case MSG_TYPE_ERROR:	/* ignored ... */
	case MSG_TYPE_TERMENV:	/* ignored ... */
	case MSG_TYPE_TERMIOS:	/* ignored ... */
		break;
	}

out_remove_buffer:
	list_del(&rb->list);
	destroy_tty_buffer(rb);
	*has_more_data = !list_empty(&priv->tty_inqueue);

out_written:
	return written;
}

/**
 * hvc_iucv_get_chars() - HVC get_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to store data
 * @count:	Size of buffer available for writing
 *
 * The HVC thread calls this method to read characters from the back-end.
 * If an IUCV communication path has been established, pending IUCV messages
 * are received and data is copied into buffer @buf up to @count bytes.
 *
 * Locking:	The routine gets called under an irqsave() spinlock; and
 *		the routine locks the struct hvc_iucv_private->lock to call
 *		helper functions.
 */
static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int written;
	int has_more_data;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	has_more_data = 0;
	written = hvc_iucv_write(priv, buf, count, &has_more_data);
	spin_unlock(&priv->lock);

	/* if there are still messages on the queue... schedule another run */
	if (has_more_data)
		hvc_kick();

	return written;
}

/**
 * hvc_iucv_queue() - Buffer terminal data for sending.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 * @buf:	Buffer containing data to send.
 * @count:	Size of buffer and amount of data to send.
 *
 * The function queues data for sending. To actually send the buffered data,
 * a work queue function is scheduled (with QUEUE_SNDBUF_DELAY).
 * The function returns the number of data bytes that have been buffered.
 *
 * If the device is not connected, data is ignored and the function returns
 * @count.
 * If the buffer is full, the function returns 0.
 * If an existing IUCV communication path has been severed, -EPIPE is returned
 * (that can be passed to the HVC layer to cause a tty hangup).
 */
static int hvc_iucv_queue(struct hvc_iucv_private *priv, const char *buf,
			  int count)
{
	size_t len;

	if (priv->iucv_state == IUCV_DISCONN)
		return count;			/* ignore data */

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

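	/* copy only as much data as the remaining send buffer space can hold */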
	len = min_t(size_t, count, SNDBUF_SIZE - priv->sndbuf_len);
	if (!len)
		return 0;

	memcpy(priv->sndbuf + priv->sndbuf_len, buf, len);
	priv->sndbuf_len += len;

	if (priv->iucv_state == IUCV_CONNECTED)
		schedule_delayed_work(&priv->sndbuf_work, QUEUE_SNDBUF_DELAY);

	return len;
}

/**
 * hvc_iucv_send() - Send an IUCV message containing terminal data.
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * If an IUCV communication path has been established, the buffered output data
 * is sent via an IUCV message and the number of bytes sent is returned.
 * Returns 0 if there is no established IUCV communication path or
 * -EPIPE if an existing IUCV communication path has been severed.
 */
static int hvc_iucv_send(struct hvc_iucv_private *priv)
{
	struct iucv_tty_buffer *sb;
	int rc, len;

	if (priv->iucv_state == IUCV_SEVERED)
		return -EPIPE;

	if (priv->iucv_state == IUCV_DISCONN)
		return -EIO;

	if (!priv->sndbuf_len)
		return 0;

	/* allocate internal buffer to store msg data and also compute total
	 * message length */
	sb = alloc_tty_buffer(priv->sndbuf_len, GFP_ATOMIC);
	if (!sb)
		return -ENOMEM;

	memcpy(sb->mbuf->data, priv->sndbuf, priv->sndbuf_len);
	sb->mbuf->datalen = (u16) priv->sndbuf_len;
	sb->msg.length = MSG_SIZE(sb->mbuf->datalen);

	list_add_tail(&sb->list, &priv->tty_outqueue);

	rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
				 (void *) sb->mbuf, sb->msg.length);
	if (rc) {
		/* drop the message here; however we might want to handle
		 * 0x03 (msg limit reached) by trying again... */
		list_del(&sb->list);
		destroy_tty_buffer(sb);
	}
	len = priv->sndbuf_len;
	priv->sndbuf_len = 0;

	return len;
}

/**
 * hvc_iucv_sndbuf_work() - Send buffered data over IUCV
 * @work:	Work structure.
 *
 * This work queue function sends buffered output data over IUCV and,
 * if not all buffered data could be sent, reschedules itself.
 */
static void hvc_iucv_sndbuf_work(struct work_struct *work)
{
	struct hvc_iucv_private *priv;

	priv = container_of(work, struct hvc_iucv_private, sndbuf_work.work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_put_chars() - HVC put_chars operation.
 * @vtermno:	HVC virtual terminal number.
 * @buf:	Pointer to a buffer to read data from
 * @count:	Size of buffer available for reading
 *
 * The HVC thread calls this method to write characters to the back-end.
 * The function calls hvc_iucv_queue() to queue terminal data for sending.
 *
 * Locking:	The method gets called under an irqsave() spinlock; and
 *		locks struct hvc_iucv_private->lock.
 */
static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
{
	struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
	int queued;

	if (count <= 0)
		return 0;

	if (!priv)
		return -ENODEV;

	spin_lock(&priv->lock);
	queued = hvc_iucv_queue(priv, buf, count);
	spin_unlock(&priv->lock);

	return queued;
}

/**
 * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * The function sets the tty state to TTY_OPENED for the struct
 * hvc_iucv_private instance that is derived from @id. Always returns 0.
 *
 * Locking:	struct hvc_iucv_private->lock, spin_lock_bh
 */
static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return 0;

	spin_lock_bh(&priv->lock);
	priv->tty_state = TTY_OPENED;
	spin_unlock_bh(&priv->lock);

	return 0;
}

/**
 * hvc_iucv_cleanup() - Clean up and reset a z/VM IUCV HVC instance.
 * @priv:	Pointer to the struct hvc_iucv_private instance.
 */
static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
{
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);

	priv->tty_state = TTY_CLOSED;
	priv->iucv_state = IUCV_DISCONN;

	priv->sndbuf_len = 0;
}

/**
 * tty_outqueue_empty() - Test if the tty outqueue is empty
 * @priv:	Pointer to struct hvc_iucv_private instance.
 */
static inline int tty_outqueue_empty(struct hvc_iucv_private *priv)
{
	int rc;

	spin_lock_bh(&priv->lock);
	rc = list_empty(&priv->tty_outqueue);
	spin_unlock_bh(&priv->lock);

	return rc;
}

/**
 * flush_sndbuf_sync() - Flush send buffer and wait for completion
 * @priv:	Pointer to struct hvc_iucv_private instance.
 *
 * The routine cancels a pending sndbuf work, calls hvc_iucv_send()
 * to flush any buffered terminal output data and waits for completion.
 */
static void flush_sndbuf_sync(struct hvc_iucv_private *priv)
{
	int sync_wait;

	cancel_delayed_work_sync(&priv->sndbuf_work);

	spin_lock_bh(&priv->lock);
	hvc_iucv_send(priv);		/* force sending buffered data */
	sync_wait = !list_empty(&priv->tty_outqueue); /* anything queued ? */
	spin_unlock_bh(&priv->lock);

	if (sync_wait)
		wait_event_timeout(priv->sndbuf_waitq,
				   tty_outqueue_empty(priv), HZ/10);
}

/**
 * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up
 * @priv:	Pointer to hvc_iucv_private structure
 *
 * This routine severs an existing IUCV communication path and hangs
 * up the underlying HVC terminal device.
 * The hang-up occurs only if an IUCV communication path is established;
 * otherwise there is no need to hang up the terminal device.
 *
 * The IUCV HVC hang-up is separated into two steps:
 * 1.	After the IUCV path has been severed, the iucv_state is set to
 *	IUCV_SEVERED.
 * 2.	Later, when the HVC thread calls hvc_iucv_get_chars(), the
 *	IUCV_SEVERED state causes the tty hang-up in the HVC layer.
 *
 * If the tty has not yet been opened, clean up the hvc_iucv_private
 * structure to allow re-connects.
 * If the tty has been opened, let get_chars() return -EPIPE to signal
 * the HVC layer to hang up the tty and, if so, wake up the HVC thread
 * to call get_chars()...
 *
 * Special notes on hanging up a HVC terminal instantiated as console:
 * Hang-up:	1. do_tty_hangup() replaces file ops (= hung_up_tty_fops)
 *		2. do_tty_hangup() calls tty->ops->close() for console_filp
 *			=> no hangup notifier is called by HVC (default)
 *		3. hvc_close() returns because of tty_hung_up_p(filp)
 *			=> no delete notifier is called!
 * Finally, the back-end is not being notified, thus, the tty session is
 * kept active (TTY_OPEN) to be ready for re-connects.
 *
 * Locking:	spin_lock(&priv->lock) w/o disabling bh
 */
static void hvc_iucv_hangup(struct hvc_iucv_private *priv)
{
	struct iucv_path *path;

	path = NULL;
	spin_lock(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		path = priv->path;
		priv->path = NULL;
		priv->iucv_state = IUCV_SEVERED;
		if (priv->tty_state == TTY_CLOSED)
			hvc_iucv_cleanup(priv);
		else
			/* console is special (see above) */
			if (priv->is_console) {
				hvc_iucv_cleanup(priv);
				priv->tty_state = TTY_OPENED;
			} else
				hvc_kick();
	}
	spin_unlock(&priv->lock);

	/* finally sever path (outside of priv->lock due to lock ordering) */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_hangup() - HVC notifier for TTY hangups.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that a tty hangup (carrier loss,
 * virtual or otherwise) has occurred.
 * The z/VM IUCV HVC device driver ignores virtual hangups (vhangup())
 * to keep an existing IUCV communication path established.
 * (Background: vhangup() is called from user space (by getty or login) to
 *  disable writing to the tty by other applications).
 * If the tty has been opened and an established IUCV path has been severed
 * (we caused the tty hangup), the function calls hvc_iucv_cleanup().
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	/* NOTE: If the hangup was scheduled by ourselves (from the iucv
	 * path_severed callback [IUCV_SEVERED]), we have to clean up
	 * our structure and to set state to TTY_CLOSED.
	 * If the tty was hung up otherwise (e.g. vhangup()), then we
	 * ignore this hangup and keep an established IUCV path open...
	 * (...the reason is that we are not able to connect back to the
	 * client if we disconnect on hang up) */
	priv->tty_state = TTY_CLOSED;

	if (priv->iucv_state == IUCV_SEVERED)
		hvc_iucv_cleanup(priv);
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
 * @hp:		Pointer to the HVC device (struct hvc_struct)
 * @raise:	Non-zero to raise or zero to lower DTR/RTS lines
 *
 * This routine notifies the HVC back-end to raise or lower DTR/RTS
 * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates to
 * drop the IUCV connection (similar to hanging up a modem).
 */
static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
{
	struct hvc_iucv_private *priv;
	struct iucv_path *path;

	/* Raising the DTR/RTS is ignored as IUCV connections can be
	 * established at any time.
	 */
	if (raise)
		return;

	priv = hvc_iucv_get_private(hp->vtermno);
	if (!priv)
		return;

	/* Lowering the DTR/RTS lines disconnects an established IUCV
	 * connection.
	 */
	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	path = priv->path;		/* save reference to IUCV path */
	priv->path = NULL;
	priv->iucv_state = IUCV_DISCONN;
	spin_unlock_bh(&priv->lock);

	/* Sever IUCV path outside of priv->lock due to lock ordering of:
	 * priv->lock <--> iucv_table_lock */
	if (path) {
		iucv_path_sever(path, NULL);
		iucv_path_free(path);
	}
}

/**
 * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
 * @hp:	Pointer to the HVC device (struct hvc_struct)
 * @id:	Additional data (originally passed to hvc_alloc):
 *	the index of a struct hvc_iucv_private instance.
 *
 * This routine notifies the HVC back-end that the last tty device fd has been
 * closed. The function cleans up tty resources. The clean-up of the IUCV
 * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
 * control setting.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
	struct hvc_iucv_private *priv;

	priv = hvc_iucv_get_private(id);
	if (!priv)
		return;

	flush_sndbuf_sync(priv);

	spin_lock_bh(&priv->lock);
	destroy_tty_buffer_list(&priv->tty_outqueue);
	destroy_tty_buffer_list(&priv->tty_inqueue);
	priv->tty_state = TTY_CLOSED;
	priv->sndbuf_len = 0;
	spin_unlock_bh(&priv->lock);
}

/**
 * hvc_iucv_filter_connreq() - Filter connection request based on z/VM user ID
 * @ipvmid:	Originating z/VM user ID (right padded with blanks)
 *
 * Returns 0 if the z/VM user ID that is specified with @ipvmid is permitted to
 * connect, otherwise non-zero.
 */
static int hvc_iucv_filter_connreq(u8 ipvmid[8])
{
	const char *wildcard, *filter_entry;
	size_t i, len;

	/* Note: default policy is ACCEPT if no filter is set */
	if (!hvc_iucv_filter_size)
		return 0;

	for (i = 0; i < hvc_iucv_filter_size; i++) {
		filter_entry = hvc_iucv_filter + (8 * i);

		/* If a filter entry contains the filter wildcard character,
		 * reduce the length to match the leading portion of the user
		 * ID only (wildcard match). Characters following the wildcard
		 * are ignored.
		 */
		wildcard = strnchr(filter_entry, 8, FILTER_WILDCARD_CHAR);
		len = (wildcard) ? wildcard - filter_entry : 8;
		if (memcmp(ipvmid, filter_entry, len) == 0)
			return 0;
	}
	return 1;
}

/**
 * hvc_iucv_path_pending() - IUCV handler to process a connection request.
 * @path:	Pending path (struct iucv_path)
 * @ipvmid:	z/VM system identifier of originator
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * The function uses the @ipuser data to determine if the pending path belongs
 * to a terminal managed by this device driver.
 * If the path belongs to this driver, ensure that the terminal is not accessed
 * multiple times (only one connection to a terminal is allowed).
 * If the terminal is not yet connected, the pending path is accepted and is
 * associated to the appropriate struct hvc_iucv_private instance.
 *
 * Returns 0 if @path belongs to a terminal managed by this device driver;
 * otherwise returns -ENODEV in order to dispatch this path to other handlers.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static int hvc_iucv_path_pending(struct iucv_path *path, u8 *ipvmid,
				 u8 *ipuser)
{
	struct hvc_iucv_private *priv, *tmp;
	u8 wildcard[9] = "lnxhvc  ";	/* 8-char service name, blank padded */
	int i, rc, find_unused;
	u8 nuser_data[16];
	u8 vm_user_id[9];

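	/* the wildcard service name is ASCII here; convert it to EBCDIC so it
	 * can be compared with the EBCDIC ipuser field */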
	ASCEBC(wildcard, sizeof(wildcard));
	find_unused = !memcmp(wildcard, ipuser, 8);

	/* First, check if the pending path request is managed by this
	 * IUCV handler:
	 * - find a disconnected device if ipuser contains the wildcard
	 * - find the device that matches the terminal ID in ipuser
	 */
	priv = NULL;
	for (i = 0; i < hvc_iucv_devices; i++) {
		tmp = hvc_iucv_table[i];
		if (!tmp)
			continue;

		if (find_unused) {
			spin_lock(&tmp->lock);
			if (tmp->iucv_state == IUCV_DISCONN)
				priv = tmp;
			spin_unlock(&tmp->lock);

		} else if (!memcmp(tmp->srv_name, ipuser, 8))
			priv = tmp;
		if (priv)
			break;
	}
	if (!priv)
		return -ENODEV;

	/* Enforce that ipvmid is allowed to connect to us */
	read_lock(&hvc_iucv_filter_lock);
	rc = hvc_iucv_filter_connreq(ipvmid);
	read_unlock(&hvc_iucv_filter_lock);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		memcpy(vm_user_id, ipvmid, 8);
		vm_user_id[8] = 0;
		pr_info("A connection request from z/VM user ID %s "
			"was refused\n", vm_user_id);
		return 0;
	}

	spin_lock(&priv->lock);

	/* If the terminal is already connected or being severed, then sever
	 * this path to enforce that there is only ONE established communication
	 * path per terminal. */
	if (priv->iucv_state != IUCV_DISCONN) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}

	/* accept path */
	memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
	memcpy(nuser_data + 8, ipuser, 8); /* local service  (for af_iucv) */
	path->msglim = 0xffff;		   /* IUCV MSGLIMIT */
	path->flags &= ~IUCV_IPRMDATA;	   /* TODO: use IUCV_IPRMDATA */
	rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
	if (rc) {
		iucv_path_sever(path, ipuser);
		iucv_path_free(path);
		goto out_path_handled;
	}
	priv->path = path;
	priv->iucv_state = IUCV_CONNECTED;

	/* store path information */
	memcpy(priv->info_path, ipvmid, 8);
	memcpy(priv->info_path + 8, ipuser + 8, 8);

	/* flush buffered output data... */
	schedule_delayed_work(&priv->sndbuf_work, 5);

out_path_handled:
	spin_unlock(&priv->lock);
	return 0;
}

/**
 * hvc_iucv_path_severed() - IUCV handler to process a path sever.
 * @path:	Pending path (struct iucv_path)
 * @ipuser:	User specified data for this path
 *		(AF_IUCV: port/service name and originator port)
 *
 * This function calls the hvc_iucv_hangup() function for the
 * respective IUCV HVC terminal.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_path_severed(struct iucv_path *path, u8 *ipuser)
{
	struct hvc_iucv_private *priv = path->private;

	hvc_iucv_hangup(priv);
}

/**
 * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function puts an incoming message on the input queue for later
 * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
 * If the tty has not yet been opened, the message is rejected.
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_pending(struct iucv_path *path,
				 struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *rb;

	/* reject messages that exceed max size of iucv_tty_msg->datalen */
	if (msg->length > MSG_SIZE(MSG_MAX_DATALEN)) {
		iucv_message_reject(path, msg);
		return;
	}

	spin_lock(&priv->lock);

	/* reject messages if tty has not yet been opened */
	if (priv->tty_state == TTY_CLOSED) {
		iucv_message_reject(path, msg);
		goto unlock_return;
	}

	/* allocate tty buffer to save iucv msg only */
	rb = alloc_tty_buffer(0, GFP_ATOMIC);
	if (!rb) {
		iucv_message_reject(path, msg);
		goto unlock_return;	/* -ENOMEM */
	}
	rb->msg = *msg;

	list_add_tail(&rb->list, &priv->tty_inqueue);

	hvc_kick();	/* wake up hvc thread */

unlock_return:
	spin_unlock(&priv->lock);
}

/**
 * hvc_iucv_msg_complete() - IUCV handler to process message completion
 * @path:	Pending path (struct iucv_path)
 * @msg:	Pointer to the IUCV message
 *
 * The function is called upon completion of message delivery to remove the
 * message from the outqueue. Additional delivery information can be found in
 * msg->audit: rejected messages (0x040000 (IPADRJCT)), and
 * purged messages (0x010000 (IPADPGNR)).
 *
 * Locking:	struct hvc_iucv_private->lock
 */
static void hvc_iucv_msg_complete(struct iucv_path *path,
				  struct iucv_message *msg)
{
	struct hvc_iucv_private *priv = path->private;
	struct iucv_tty_buffer *ent, *next;
	LIST_HEAD(list_remove);

	spin_lock(&priv->lock);
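	/* find the completed message in the outqueue by its IUCV message id
	 * and move it to a local list so it can be freed outside the lock */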
	list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
		if (ent->msg.id == msg->id) {
			list_move(&ent->list, &list_remove);
			break;
		}
	wake_up(&priv->sndbuf_waitq);
	spin_unlock(&priv->lock);
	destroy_tty_buffer_list(&list_remove);
}

static ssize_t hvc_iucv_dev_termid_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	size_t len;

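	/* srv_name is stored in EBCDIC; convert to ASCII for the sysfs output */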
	len = sizeof(priv->srv_name);
	memcpy(buf, priv->srv_name, len);
	EBCASC(buf, len);
	buf[len++] = '\n';
	return len;
}

static ssize_t hvc_iucv_dev_state_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);

	return sprintf(buf, "%u:%u\n", priv->iucv_state, priv->tty_state);
}

static ssize_t hvc_iucv_dev_peer_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct hvc_iucv_private *priv = dev_get_drvdata(dev);
	char vmid[9], ipuser[9];

	memset(vmid, 0, sizeof(vmid));
	memset(ipuser, 0, sizeof(ipuser));

	spin_lock_bh(&priv->lock);
	if (priv->iucv_state == IUCV_CONNECTED) {
		memcpy(vmid, priv->info_path, 8);
		memcpy(ipuser, priv->info_path + 8, 8);
	}
	spin_unlock_bh(&priv->lock);
	EBCASC(ipuser, 8);

	return sprintf(buf, "%s:%s\n", vmid, ipuser);
}


/* HVC operations */
static const struct hv_ops hvc_iucv_ops = {
	.get_chars = hvc_iucv_get_chars,
	.put_chars = hvc_iucv_put_chars,
	.notifier_add = hvc_iucv_notifier_add,
	.notifier_del = hvc_iucv_notifier_del,
	.notifier_hangup = hvc_iucv_notifier_hangup,
	.dtr_rts = hvc_iucv_dtr_rts,
};

/* IUCV HVC device attributes */
static DEVICE_ATTR(termid, 0640, hvc_iucv_dev_termid_show, NULL);
static DEVICE_ATTR(state, 0640, hvc_iucv_dev_state_show, NULL);
static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
static struct attribute *hvc_iucv_dev_attrs[] = {
	&dev_attr_termid.attr,
	&dev_attr_state.attr,
	&dev_attr_peer.attr,
	NULL,
};
static struct attribute_group hvc_iucv_dev_attr_group = {
	.attrs = hvc_iucv_dev_attrs,
};
static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
	&hvc_iucv_dev_attr_group,
	NULL,
};


/**
 * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
 * @id:			hvc_iucv_table index
 * @is_console:		Flag if the instance is used as Linux console
 *
 * This function allocates a new hvc_iucv_private structure and stores
 * the instance in hvc_iucv_table at index @id.
 * Returns 0 on success; otherwise non-zero.
 */
static int __init hvc_iucv_alloc(int id, unsigned int is_console)
{
	struct hvc_iucv_private *priv;
	char name[9];
	int rc;

	priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->tty_outqueue);
	INIT_LIST_HEAD(&priv->tty_inqueue);
	INIT_DELAYED_WORK(&priv->sndbuf_work, hvc_iucv_sndbuf_work);
	init_waitqueue_head(&priv->sndbuf_waitq);

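	/* the send buffer is one zeroed page; SNDBUF_SIZE (PAGE_SIZE) must
	 * stay below MSG_MAX_DATALEN */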
	priv->sndbuf = (void *) get_zeroed_page(GFP_KERNEL);
	if (!priv->sndbuf) {
		kfree(priv);
		return -ENOMEM;
	}

	/* set console flag */
	priv->is_console = is_console;

	/* allocate hvc device */
	priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /*		  PAGE_SIZE */
			      HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256);
	if (IS_ERR(priv->hvc)) {
		rc = PTR_ERR(priv->hvc);
		goto out_error_hvc;
	}

	/* notify HVC thread instead of using polling */
	priv->hvc->irq_requested = 1;

	/* setup iucv related information */
	snprintf(name, 9, "lnxhvc%-2d", id);
	memcpy(priv->srv_name, name, 8);
	ASCEBC(priv->srv_name, 8);

	/* create and setup device */
	priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL);
	if (!priv->dev) {
		rc = -ENOMEM;
		goto out_error_dev;
	}
	dev_set_name(priv->dev, "hvc_iucv%d", id);
	dev_set_drvdata(priv->dev, priv);
	priv->dev->bus = &iucv_bus;
	priv->dev->parent = iucv_root;
	priv->dev->groups = hvc_iucv_dev_attr_groups;
	priv->dev->release = (void (*)(struct device *)) kfree;
	rc = device_register(priv->dev);
	if (rc) {
		put_device(priv->dev);
		goto out_error_dev;
	}

	hvc_iucv_table[id] = priv;
	return 0;

out_error_dev:
	hvc_remove(priv->hvc);
out_error_hvc:
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);

	return rc;
}

/**
 * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances
 * @priv:	Pointer to the struct hvc_iucv_private instance to release
 */
static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv)
{
	hvc_remove(priv->hvc);
	device_unregister(priv->dev);
	free_page((unsigned long) priv->sndbuf);
	kfree(priv);
}

/**
 * hvc_iucv_parse_filter() - Parse filter for a single z/VM user ID
 * @filter:	String containing a comma-separated list of z/VM user IDs
 * @dest:	Location where to store the parsed z/VM user ID
 */
static const char *hvc_iucv_parse_filter(const char *filter, char *dest)
{
	const char *nextdelim, *residual;
	size_t len;

	nextdelim = strchr(filter, ',');
	if (nextdelim) {
		len = nextdelim - filter;
		residual = nextdelim + 1;
	} else {
		len = strlen(filter);
		residual = filter + len;
	}

	if (len == 0)
		return ERR_PTR(-EINVAL);

	/* check for '\n' (if called from sysfs) */
	if (filter[len - 1] == '\n')
		len--;

	/* prohibit filter entries containing the wildcard character only */
	if (len == 1 && *filter == FILTER_WILDCARD_CHAR)
		return ERR_PTR(-EINVAL);

	if (len > 8)
		return ERR_PTR(-EINVAL);

	/* pad with blanks and save upper case version of user ID */
	memset(dest, ' ', 8);
	while (len--)
		dest[len] = toupper(filter[len]);
	return residual;
}

/**
 * hvc_iucv_setup_filter() - Set up z/VM user ID filter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 *
 * The function parses the @val string and creates an array containing
 * the list of z/VM user ID filter entries.
 * Return code 0 means success, -EINVAL if the filter is syntactically
 * incorrect, -ENOMEM if there was not enough memory to allocate the
 * filter list array, or -ENOSPC if too many z/VM user IDs have been specified.
 */
static int hvc_iucv_setup_filter(const char *val)
{
	const char *residual;
	int err;
	size_t size, count;
	void *array, *old_filter;

	count = strlen(val);
	if (count == 0 || (count == 1 && val[0] == '\n')) {
		size  = 0;
		array = NULL;
		goto out_replace_filter;	/* clear filter */
	}

	/* count user IDs in order to allocate sufficient memory */
	size = 1;
	residual = val;
	while ((residual = strchr(residual, ',')) != NULL) {
		residual++;
		size++;
	}

	/* check if the specified list exceeds the filter limit */
	if (size > MAX_VMID_FILTER)
		return -ENOSPC;

	array = kcalloc(size, 8, GFP_KERNEL);
	if (!array)
		return -ENOMEM;

	count = size;
	residual = val;
	while (*residual && count) {
		residual = hvc_iucv_parse_filter(residual,
						 array + ((size - count) * 8));
		if (IS_ERR(residual)) {
			err = PTR_ERR(residual);
			kfree(array);
			goto out_err;
		}
		count--;
	}

out_replace_filter:
	write_lock_bh(&hvc_iucv_filter_lock);
	old_filter = hvc_iucv_filter;
	hvc_iucv_filter_size = size;
	hvc_iucv_filter = array;
	write_unlock_bh(&hvc_iucv_filter_lock);
	kfree(old_filter);

	err = 0;
out_err:
	return err;
}

/**
 * param_set_vmidfilter() - Set z/VM user ID filter parameter
 * @val:	String consisting of a comma-separated list of z/VM user IDs
 * @kp:		Kernel parameter pointing to hvc_iucv_filter array
 *
 * The function sets up the z/VM user ID filter specified as comma-separated
 * list of user IDs in @val.
 * Note: If it is called early in the boot process, @val is stored and
 * parsed later in hvc_iucv_init().
 */
static int param_set_vmidfilter(const char *val, const struct kernel_param *kp)
{
	int rc;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	if (!val)
		return -EINVAL;

	rc = 0;
	if (slab_is_available())
		rc = hvc_iucv_setup_filter(val);
	else
		hvc_iucv_filter_string = val;	/* defer... */
	return rc;
}

/**
 * param_get_vmidfilter() - Get z/VM user ID filter
 * @buffer:	Buffer to store z/VM user ID filter,
 *		(buffer size assumption PAGE_SIZE)
 * @kp:		Kernel parameter pointing to the hvc_iucv_filter array
 *
 * The function stores the filter as a comma-separated list of z/VM user IDs
 * in @buffer. Typically, sysfs routines call this function for attr show.
 */
static int param_get_vmidfilter(char *buffer, const struct kernel_param *kp)
{
	int rc;
	size_t index, len;
	void *start, *end;

	if (!MACHINE_IS_VM || !hvc_iucv_devices)
		return -ENODEV;

	rc = 0;
	read_lock_bh(&hvc_iucv_filter_lock);
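	/* emit each 8-character filter entry, trimmed at the first blank
	 * (the padding), as a comma-separated list */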
	for (index = 0; index < hvc_iucv_filter_size; index++) {
		start = hvc_iucv_filter + (8 * index);
		end   = memchr(start, ' ', 8);
		len   = (end) ? end - start : 8;
		memcpy(buffer + rc, start, len);
		rc += len;
		buffer[rc++] = ',';
	}
	read_unlock_bh(&hvc_iucv_filter_lock);
	if (rc)
		buffer[--rc] = '\0';	/* replace last comma and update rc */
	return rc;
}

#define param_check_vmidfilter(name, p) __param_check(name, p, void)

static const struct kernel_param_ops param_ops_vmidfilter = {
	.set = param_set_vmidfilter,
	.get = param_get_vmidfilter,
};

/**
 * hvc_iucv_init() - z/VM IUCV HVC device driver initialization
 */
static int __init hvc_iucv_init(void)
{
	int rc;
	unsigned int i;

	if (!hvc_iucv_devices)
		return -ENODEV;

	if (!MACHINE_IS_VM) {
		pr_notice("The z/VM IUCV HVC device driver cannot "
			  "be used without z/VM\n");
		rc = -ENODEV;
		goto out_error;
	}

	if (hvc_iucv_devices > MAX_HVC_IUCV_LINES) {
		pr_err("%lu is not a valid value for the hvc_iucv= "
		       "kernel parameter\n", hvc_iucv_devices);
		rc = -EINVAL;
		goto out_error;
	}

	/* parse hvc_iucv_allow string and create z/VM user ID filter list */
	if (hvc_iucv_filter_string) {
		rc = hvc_iucv_setup_filter(hvc_iucv_filter_string);
		switch (rc) {
		case 0:
			break;
		case -ENOMEM:
			pr_err("Allocating memory failed with "
			       "reason code=%d\n", 3);
			goto out_error;
		case -EINVAL:
			pr_err("hvc_iucv_allow= does not specify a valid "
			       "z/VM user ID list\n");
			goto out_error;
		case -ENOSPC:
			pr_err("hvc_iucv_allow= specifies too many "
			       "z/VM user IDs\n");
			goto out_error;
		default:
			goto out_error;
		}
	}

	hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
					   sizeof(struct iucv_tty_buffer),
					   0, 0, NULL);
	if (!hvc_iucv_buffer_cache) {
		pr_err("Allocating memory failed with reason code=%d\n", 1);
		rc = -ENOMEM;
		goto out_error;
	}

	hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
						    hvc_iucv_buffer_cache);
	if (!hvc_iucv_mempool) {
		pr_err("Allocating memory failed with reason code=%d\n", 2);
		kmem_cache_destroy(hvc_iucv_buffer_cache);
		rc = -ENOMEM;
		goto out_error;
	}

	/* register the first terminal device as console
	 * (must be done before allocating hvc terminal devices) */
	rc = hvc_instantiate(HVC_IUCV_MAGIC, IUCV_HVC_CON_IDX, &hvc_iucv_ops);
	if (rc) {
		pr_err("Registering HVC terminal device as "
		       "Linux console failed\n");
		goto out_error_memory;
	}

	/* allocate hvc_iucv_private structs */
	for (i = 0; i < hvc_iucv_devices; i++) {
		rc = hvc_iucv_alloc(i, (i == IUCV_HVC_CON_IDX) ? 1 : 0);
		if (rc) {
			pr_err("Creating a new HVC terminal device "
			       "failed with error code=%d\n", rc);
			goto out_error_hvc;
		}
	}

	/* register IUCV callback handler */
	rc = iucv_register(&hvc_iucv_handler, 0);
	if (rc) {
		pr_err("Registering IUCV handlers failed with error code=%d\n",
		       rc);
		goto out_error_hvc;
	}

	return 0;

out_error_hvc:
	for (i = 0; i < hvc_iucv_devices; i++)
		if (hvc_iucv_table[i])
			hvc_iucv_destroy(hvc_iucv_table[i]);
out_error_memory:
	mempool_destroy(hvc_iucv_mempool);
	kmem_cache_destroy(hvc_iucv_buffer_cache);
out_error:
	kfree(hvc_iucv_filter);
	hvc_iucv_devices = 0; /* ensure that we do not provide any device */
	return rc;
}

/**
 * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
 * @val:	Parameter value (numeric)
 */
static int __init hvc_iucv_config(char *val)
{
	return kstrtoul(val, 10, &hvc_iucv_devices);
}


device_initcall(hvc_iucv_init);
__setup("hvc_iucv=", hvc_iucv_config);
core_param(hvc_iucv_allow, hvc_iucv_filter, vmidfilter, 0640);