// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: dev.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include <linux/fs.h>

#include "rsxx_priv.h"

static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors(partitions)");

/*
 * For now I'm making this tweakable in case any applications hit this limit.
 * If you see a "bio too big" error in the log you will need to raise this
 * value.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
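
/*
 * Since the parameter is 0444 it can only be changed at load time, e.g.
 * (illustrative example, assuming the driver is loaded as the rsxx module):
 *
 *	modprobe rsxx blkdev_max_hw_sectors=2048
 */
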
38
39 static unsigned int enable_blkdev = 1;
40 module_param(enable_blkdev , uint, 0444);
41 MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
42
43
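/*
 * Per-bio completion tracking: pending_dmas counts the DMAs still in flight
 * for the bio, error is set (and stays set) if any of them fails, and
 * start_time feeds the block layer I/O accounting.
 */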
struct rsxx_bio_meta {
	struct bio	*bio;
	atomic_t	pending_dmas;
	atomic_t	error;
	unsigned long	start_time;
};

static struct kmem_cache *bio_meta_pool;

static blk_qc_t rsxx_submit_bio(struct bio *bio);

/*----------------- Block Device Operations -----------------*/
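/*
 * RSXX_GETREG/RSXX_SETREG pass the user buffer through to rsxx_reg_access()
 * (1 = read, 0 = write, as used below); any other command is -ENOTTY.
 */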
static int rsxx_blkdev_ioctl(struct block_device *bdev,
				 fmode_t mode,
				 unsigned int cmd,
				 unsigned long arg)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;

	switch (cmd) {
	case RSXX_GETREG:
		return rsxx_reg_access(card, (void __user *)arg, 1);
	case RSXX_SETREG:
		return rsxx_reg_access(card, (void __user *)arg, 0);
	}

	return -ENOTTY;
}

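/*
 * Fake CHS geometry for HDIO_GETGEO: 64 heads x 16 sectors, with the cylinder
 * count derived from the card size (card->size8, presumably in bytes, shifted
 * down to 512-byte sectors).
 */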
static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
	u64 blocks = card->size8 >> 9;

	/*
	 * get geometry: Fake it. I haven't found any drivers that set
	 * geo->start, so we won't either.
	 */
	if (card->size8) {
		geo->heads = 64;
		geo->sectors = 16;
		do_div(blocks, (geo->heads * geo->sectors));
		geo->cylinders = blocks;
	} else {
		geo->heads = 0;
		geo->sectors = 0;
		geo->cylinders = 0;
	}
	return 0;
}

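/*
 * Bio-based driver: I/O enters through ->submit_bio rather than through a
 * request queue.
 */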
static const struct block_device_operations rsxx_fops = {
	.owner		= THIS_MODULE,
	.submit_bio	= rsxx_submit_bio,
	.getgeo		= rsxx_getgeo,
	.ioctl		= rsxx_blkdev_ioctl,
};

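/*
 * DMA completion callback, invoked once per DMA issued for a bio.  Any
 * failure marks the bio's meta as errored; the final completion ends the
 * I/O accounting, completes the bio, and frees the meta.
 */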
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
			    void *cb_data,
			    unsigned int error)
{
	struct rsxx_bio_meta *meta = cb_data;

	if (error)
		atomic_set(&meta->error, 1);

	if (atomic_dec_and_test(&meta->pending_dmas)) {
		if (!card->eeh_state && card->gendisk)
			bio_end_io_acct(meta->bio, meta->start_time);

		if (atomic_read(&meta->error))
			bio_io_error(meta->bio);
		else
			bio_endio(meta->bio);
		kmem_cache_free(bio_meta_pool, meta);
	}
}

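/*
 * ->submit_bio entry point: split and validate the bio, allocate the per-bio
 * meta, and hand the bio to the DMA layer.  Errors complete the bio
 * immediately with the appropriate bi_status.
 */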
static blk_qc_t rsxx_submit_bio(struct bio *bio)
{
	struct rsxx_cardinfo *card = bio->bi_disk->private_data;
	struct rsxx_bio_meta *bio_meta;
	blk_status_t st = BLK_STS_IOERR;

	blk_queue_split(&bio);

	might_sleep();

	if (!card)
		goto req_err;

	if (bio_end_sector(bio) > get_capacity(card->gendisk))
		goto req_err;

	if (unlikely(card->halt))
		goto req_err;

	if (unlikely(card->dma_fault))
		goto req_err;

	if (bio->bi_iter.bi_size == 0) {
		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
		goto req_err;
	}

	bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
	if (!bio_meta) {
		st = BLK_STS_RESOURCE;
		goto req_err;
	}

	bio_meta->bio = bio;
	atomic_set(&bio_meta->error, 0);
	atomic_set(&bio_meta->pending_dmas, 0);

	if (!unlikely(card->halt))
		bio_meta->start_time = bio_start_io_acct(bio);

	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
		bio_data_dir(bio) ? 'W' : 'R', bio_meta,
		(u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);

	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
				bio_dma_done_cb, bio_meta);
	if (st)
		goto queue_err;

	return BLK_QC_T_NONE;

queue_err:
	kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
	if (st)
		bio->bi_status = st;
	bio_endio(bio);
	return BLK_QC_T_NONE;
}

/*----------------- Device Setup -------------------*/
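/* Discard is advertised only for hardware at or above a known PCI revision. */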
static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
{
	unsigned char pci_rev;

	pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

	return (pci_rev >= RSXX_DISCARD_SUPPORT);
}

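/*
 * Make the disk visible to the system.  Capacity comes from the card config
 * when it is valid; otherwise the disk is added with zero capacity.
 */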
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
	mutex_lock(&card->dev_lock);

	/* The block device requires the stripe size from the config. */
	if (enable_blkdev) {
		if (card->config_valid)
			set_capacity(card->gendisk, card->size8 >> 9);
		else
			set_capacity(card->gendisk, 0);
		device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
		card->bdev_attached = 1;
	}

	mutex_unlock(&card->dev_lock);

	return 0;
}

void rsxx_detach_dev(struct rsxx_cardinfo *card)
{
	mutex_lock(&card->dev_lock);

	if (card->bdev_attached) {
		del_gendisk(card->gendisk);
		card->bdev_attached = 0;
	}

	mutex_unlock(&card->dev_lock);
}

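/*
 * Allocate and configure the block device pieces: major number, bio-based
 * request queue, gendisk, and queue limits (logical/physical block size,
 * max hw sectors, and discard when supported).
 */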
int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
	unsigned short blk_size;

	mutex_init(&card->dev_lock);

	if (!enable_blkdev)
		return 0;

	card->major = register_blkdev(0, DRIVER_NAME);
	if (card->major < 0) {
		dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
		return -ENOMEM;
	}

	card->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!card->queue) {
		dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	card->gendisk = alloc_disk(blkdev_minors);
	if (!card->gendisk) {
		dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
		blk_cleanup_queue(card->queue);
		unregister_blkdev(card->major, DRIVER_NAME);
		return -ENOMEM;
	}

	if (card->config_valid) {
		blk_size = card->config.data.block_size;
		blk_queue_dma_alignment(card->queue, blk_size - 1);
		blk_queue_logical_block_size(card->queue, blk_size);
	}

	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
	if (rsxx_discard_supported(card)) {
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
		blk_queue_max_discard_sectors(card->queue,
						RSXX_HW_BLK_SIZE >> 9);
		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
		card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
	}

	snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
		 "rsxx%d", card->disk_id);
	card->gendisk->major = card->major;
	card->gendisk->first_minor = 0;
	card->gendisk->fops = &rsxx_fops;
	card->gendisk->private_data = card;
	card->gendisk->queue = card->queue;

	return 0;
}

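/* Tear down what rsxx_setup_dev() created. */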
void rsxx_destroy_dev(struct rsxx_cardinfo *card)
{
	if (!enable_blkdev)
		return;

	put_disk(card->gendisk);
	card->gendisk = NULL;

	blk_cleanup_queue(card->queue);
	unregister_blkdev(card->major, DRIVER_NAME);
}

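/* Module-wide init: create the slab cache used for per-bio meta objects. */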
int rsxx_dev_init(void)
{
	bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
	if (!bio_meta_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_dev_cleanup(void)
{
	kmem_cache_destroy(bio_meta_pool);
}