/* SPDX-License-Identifier: GPL-2.0 */
#ifndef S390_IO_SCH_H
#define S390_IO_SCH_H

#include <linux/types.h>
#include <asm/schid.h>
#include <asm/ccwdev.h>
#include <asm/irq.h>
#include "css.h"
#include "orb.h"

struct io_subchannel_dma_area {
	struct ccw1 sense_ccw;	/* static ccw for sense command */
};

struct io_subchannel_private {
	union orb orb;		/* operation request block */
	struct ccw_device *cdev;/* pointer to the child ccw device */
	struct {
		unsigned int suspend:1;	/* allow suspend */
		unsigned int prefetch:1;/* deny prefetch */
		unsigned int inter:1;	/* suppress intermediate interrupts */
	} __packed options;
	struct io_subchannel_dma_area *dma_area;
	dma_addr_t dma_area_dma;
} __aligned(8);

#define to_io_private(n) ((struct io_subchannel_private *) \
			  dev_get_drvdata(&(n)->dev))
#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
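
/*
 * Illustrative sketch only, not part of this header: one way a probe
 * path could allocate the private area together with its DMA-capable
 * part and attach it to the subchannel via set_io_private(). The
 * helper name and exact allocation flags are assumptions made for the
 * example, not a description of the real io_subchannel_probe().
 *
 *	static int example_alloc_io_priv(struct subchannel *sch)
 *	{
 *		struct io_subchannel_private *io_priv;
 *
 *		io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
 *		if (!io_priv)
 *			return -ENOMEM;
 *
 *		io_priv->dma_area = dma_alloc_coherent(&sch->dev,
 *					sizeof(*io_priv->dma_area),
 *					&io_priv->dma_area_dma, GFP_KERNEL);
 *		if (!io_priv->dma_area) {
 *			kfree(io_priv);
 *			return -ENOMEM;
 *		}
 *
 *		set_io_private(sch, io_priv);
 *		return 0;
 *	}
 */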

static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
{
	struct io_subchannel_private *priv = to_io_private(sch);
	return priv ? priv->cdev : NULL;
}

static inline void sch_set_cdev(struct subchannel *sch,
				struct ccw_device *cdev)
{
	struct io_subchannel_private *priv = to_io_private(sch);
	if (priv)
		priv->cdev = cdev;
}
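
/*
 * Illustrative sketch only: interrupt handling can map a subchannel
 * back to its child ccw device with sch_get_cdev() and must cope with
 * a NULL result, since the device may not (or no longer) exist. The
 * function name below is hypothetical.
 *
 *	static void example_handle_irq(struct subchannel *sch, struct irb *irb)
 *	{
 *		struct ccw_device *cdev = sch_get_cdev(sch);
 *
 *		if (!cdev)
 *			return;	// subchannel has no device attached
 *		// ... forward the interrupt response block to the device ...
 *	}
 */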

#define MAX_CIWS 8

/*
 * Possible status values for a CCW request's I/O.
 */
enum io_status {
	IO_DONE,
	IO_RUNNING,
	IO_STATUS_ERROR,
	IO_PATH_ERROR,
	IO_REJECTED,
	IO_KILLED
};

/**
 * ccw_request - Internal CCW request.
 * @cp: channel program to start
 * @timeout: maximum allowable time in jiffies between start I/O and interrupt
 * @maxretries: number of retries per I/O operation and path
 * @lpm: mask of paths to use
 * @check: optional callback that determines if results are final
 * @filter: optional callback to adjust request status based on IRB data
 * @callback: final callback
 * @data: user-defined pointer passed to all callbacks
 * @singlepath: if set, use only one path from @lpm per start I/O
 * @cancel: non-zero if request was cancelled
 * @done: non-zero if request was finished
 * @mask: current path mask
 * @retries: current number of retries
 * @drc: delayed return code
 */
struct ccw_request {
	struct ccw1 *cp;
	unsigned long timeout;
	u16 maxretries;
	u8 lpm;
	int (*check)(struct ccw_device *, void *);
	enum io_status (*filter)(struct ccw_device *, void *, struct irb *,
				 enum io_status);
	void (*callback)(struct ccw_device *, void *, int);
	void *data;
	unsigned int singlepath:1;
	/* These fields are used internally. */
	unsigned int cancel:1;
	unsigned int done:1;
	u16 mask;
	u16 retries;
	int drc;
} __attribute__((packed));
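
/*
 * Illustrative sketch only, assuming a hypothetical single-CCW
 * operation: how the request embedded in ccw_device_private might be
 * filled in before it is started. The helper and callback names and
 * the values are examples, not the driver's actual recognition or
 * path-grouping code.
 *
 *	static void example_done(struct ccw_device *cdev, void *data, int rc)
 *	{
 *		// evaluate the final return code here
 *	}
 *
 *	static void example_fill_request(struct ccw_device *cdev,
 *					 struct ccw1 *cp)
 *	{
 *		struct ccw_request *req = &cdev->private->req;
 *
 *		memset(req, 0, sizeof(*req));
 *		req->cp		= cp;		// channel program to start
 *		req->timeout	= 10 * HZ;	// per-I/O timeout
 *		req->maxretries	= 5;		// retries per path
 *		req->lpm	= 0x80;		// example path mask
 *		req->singlepath	= 1;		// one path per start I/O
 *		req->callback	= example_done;	// report final status
 *	}
 *
 * Submission, retry and path rotation for such a request are handled
 * by the common request machinery (see ccw_request.c).
 */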

/*
 * sense-id response buffer layout
 */
struct senseid {
	/* common part */
	u8  reserved;	/* always 0x'FF' */
	u16 cu_type;	/* control unit type */
	u8  cu_model;	/* control unit model */
	u16 dev_type;	/* device type */
	u8  dev_model;	/* device model */
	u8  unused;	/* padding byte */
	/* extended part */
	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
}  __attribute__ ((packed, aligned(4)));
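
/*
 * Illustrative sketch only: a SENSE ID channel program reading into the
 * buffer above would roughly look like this. The helper name is
 * hypothetical and the address conversion is simplified; the real setup
 * lives in the device recognition code.
 *
 *	static void example_build_snsid_cp(struct ccw_device *cdev)
 *	{
 *		struct ccw1 *cp = cdev->private->dma_area->iccws;
 *
 *		cp->cmd_code = CCW_CMD_SENSE_ID;
 *		cp->cda	     = (u32)virt_to_phys(&cdev->private->dma_area->senseid);
 *		cp->count    = sizeof(struct senseid);
 *		cp->flags    = CCW_FLAG_SLI;	// suppress incorrect length
 *	}
 */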

enum cdev_todo {
	CDEV_TODO_NOTHING,
	CDEV_TODO_ENABLE_CMF,
	CDEV_TODO_REBIND,
	CDEV_TODO_REGISTER,
	CDEV_TODO_UNREG,
	CDEV_TODO_UNREG_EVAL,
};
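
/*
 * Illustrative sketch only: the state machine records one pending
 * deferred action and lets todo_work (see ccw_device_private below)
 * carry it out in process context. The helper is hypothetical and
 * omits reference counting; it assumes the enum is ordered so that a
 * more important action supersedes a less important pending one.
 *
 *	static void example_sched_todo(struct ccw_device *cdev,
 *				       enum cdev_todo todo)
 *	{
 *		struct ccw_device_private *priv = cdev->private;
 *
 *		if (priv->todo >= todo)
 *			return;		// keep the more important action
 *		priv->todo = todo;
 *		schedule_work(&priv->todo_work);
 *	}
 */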

#define FAKE_CMD_IRB	1
#define FAKE_TM_IRB	2

struct ccw_device_dma_area {
	struct senseid senseid;	/* SenseID info */
	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
	struct irb irb;		/* device status */
	struct pgid pgid[8];	/* path group IDs per chpid */
};
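
/*
 * Illustrative sketch only: the per-device DMA area is carved out of
 * the device's gen_pool (dma_pool in ccw_device_private below), e.g.
 * roughly along these lines; the helper name is hypothetical.
 *
 *	static int example_alloc_dma_area(struct ccw_device *cdev)
 *	{
 *		struct ccw_device_private *priv = cdev->private;
 *
 *		priv->dma_area = cio_gp_dma_zalloc(priv->dma_pool,
 *						   &cdev->dev,
 *						   sizeof(*priv->dma_area));
 *		return priv->dma_area ? 0 : -ENOMEM;
 *	}
 */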

struct ccw_device_private {
	struct ccw_device *cdev;
	struct subchannel *sch;
	int state;		/* device state */
	atomic_t onoff;
	struct ccw_dev_id dev_id;	/* device id */
	struct ccw_request req;		/* internal I/O request */
	int iretry;
	u8 pgid_valid_mask;	/* mask of valid PGIDs */
	u8 pgid_todo_mask;	/* mask of PGIDs to be adjusted */
	u8 pgid_reset_mask;	/* mask of PGIDs which were reset */
	u8 path_noirq_mask;	/* mask of paths for which no irq was
				   received */
	u8 path_notoper_mask;	/* mask of paths which were found
				   not operable */
	u8 path_gone_mask;	/* mask of paths that became unavailable */
	u8 path_new_mask;	/* mask of paths that became available */
	u8 path_broken_mask;	/* mask of paths that were found to be
				   unusable */
	struct {
		unsigned int fast:1;	/* post with "channel end" */
		unsigned int repall:1;	/* report every interrupt status */
		unsigned int pgroup:1;	/* do path grouping */
		unsigned int force:1;	/* allow forced online */
		unsigned int mpath:1;	/* do multipathing */
	} __attribute__ ((packed)) options;
	struct {
		unsigned int esid:1;	    /* Ext. SenseID supported by HW */
		unsigned int dosense:1;	    /* delayed SENSE required */
		unsigned int doverify:1;    /* delayed path verification */
		unsigned int donotify:1;    /* call notify function */
		unsigned int recog_done:1;  /* dev. recog. complete */
		unsigned int fake_irb:2;    /* deliver faked irb */
		unsigned int resuming:1;    /* recognition while resume */
		unsigned int pgroup:1;	    /* pathgroup is set up */
		unsigned int mpath:1;	    /* multipathing is set up */
		unsigned int pgid_unknown:1;/* unknown pgid state */
		unsigned int initialized:1; /* set if initial reference held */
	} __attribute__((packed)) flags;
	unsigned long intparm;	/* user interruption parameter */
	struct qdio_irq *qdio_data;
	int async_kill_io_rc;
	struct work_struct todo_work;
	enum cdev_todo todo;
	wait_queue_head_t wait_q;
	struct timer_list timer;
	void *cmb;			/* measurement information */
	struct list_head cmb_list;	/* list of measured devices */
	u64 cmb_start_time;		/* clock value of cmb reset */
	void *cmb_wait;			/* deferred cmb enable/disable */
	struct gen_pool *dma_pool;
	struct ccw_device_dma_area *dma_area;
	enum interruption_class int_class;
};
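
/*
 * Illustrative sketch only: the option bits above roughly correspond to
 * the CCWDEV_* flags a CCW device driver passes to
 * ccw_device_set_options(); the mapping shown here is an example, see
 * device_ops.c for the authoritative code.
 *
 *	static void example_set_options(struct ccw_device *cdev,
 *					unsigned long flags)
 *	{
 *		struct ccw_device_private *priv = cdev->private;
 *
 *		priv->options.fast   = !!(flags & CCWDEV_EARLY_NOTIFICATION);
 *		priv->options.repall = !!(flags & CCWDEV_REPORT_ALL);
 *		priv->options.pgroup = !!(flags & CCWDEV_DO_PATHGROUP);
 *		priv->options.force  = !!(flags & CCWDEV_ALLOW_FORCE);
 *		priv->options.mpath  = !!(flags & CCWDEV_DO_MULTIPATH);
 *	}
 */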

#endif