Lines Matching +full:pd +full:- +full:node
 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
 *
 * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
 * DVD-RAM devices.
 *
 * At the lowest level, there is the standard driver for the CD/DVD device,
 * typically ide-cd.c or sr.c. This driver can handle read and write requests,
 * but it doesn't know anything about the special restrictions that apply to
 * packet writing.
 *
 * The purpose of the packet writing driver is to hide these restrictions from
 * higher layers, such as file systems, and present a block device that can be
 * randomly read and written using 2kB-sized blocks.
 *
 * At the top layer there is a custom ->submit_bio function that forwards
 * requests to the lower levels.
#include <linux/backing-dev.h>
#define pkt_err(pd, fmt, ...) \
	pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_notice(pd, fmt, ...) \
	pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_info(pd, fmt, ...) \
	pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)

#define pkt_dbg(level, pd, fmt, ...) \
do { \
	if (level == 2 && PACKET_DEBUG >= 2) \
		pr_notice("%s: %s():" fmt, \
			  pd->name, __func__, ##__VA_ARGS__); \
	else if (level == 1 && PACKET_DEBUG >= 1) \
		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
} while (0)
static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
{
	return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
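
/*
 * Editor's note: a minimal userspace sketch (not from the driver) showing the
 * power-of-two zone arithmetic that get_zone() relies on. The packet size of
 * 32 sectors and offset of 0 are illustrative assumptions only.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 32;	/* pd->settings.size: sectors per packet */
	uint64_t offset = 0;	/* pd->offset: alignment of packet zero */
	uint64_t sectors[] = { 0, 31, 32, 100 };

	for (int i = 0; i < 4; i++) {
		/* same expression as get_zone(): mask off the low bits */
		uint64_t zone = (sectors[i] + offset) & ~(size - 1);
		printf("sector %3llu -> zone %3llu\n",
		       (unsigned long long)sectors[i],
		       (unsigned long long)zone);
	}
	return 0;	/* prints zones 0, 0, 32, 96 */
}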
static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
	...
	p->pd = pd;
	error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
	...
		kobject_put(&p->kobj);
	...
	kobject_uevent(&p->kobj, KOBJ_ADD);

/* in pkt_kobj_remove(): */
		kobject_put(&p->kobj);

/*
 * /sys/class/pktcdvd/pktcdvd[0-7]/
 */
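
/*
 * Editor's note: a userspace sketch of reading one of the statistics
 * attributes exported under the sysfs directory above. The exact path
 * (device 0, the kb_written attribute under stat/) is an assumption made
 * for illustration.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/pktcdvd/pktcdvd0/stat/kb_written", "r");
	unsigned long kb;

	if (f && fscanf(f, "%lu", &kb) == 1)
		printf("written: %lukB\n", kb);
	if (f)
		fclose(f);
	return 0;
}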
/* in kobj_pkt_show(): */
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	...
	if (strcmp(attr->name, "packets_started") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_started);
	} else if (strcmp(attr->name, "packets_finished") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
	} else if (strcmp(attr->name, "kb_written") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
	} else if (strcmp(attr->name, "kb_read") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
	} else if (strcmp(attr->name, "kb_read_gather") == 0) {
		n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
	} else if (strcmp(attr->name, "size") == 0) {
		spin_lock(&pd->lock);
		v = pd->bio_queue_size;
		spin_unlock(&pd->lock);
	} else if (strcmp(attr->name, "congestion_off") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_off;
		spin_unlock(&pd->lock);
	} else if (strcmp(attr->name, "congestion_on") == 0) {
		spin_lock(&pd->lock);
		v = pd->write_congestion_on;
		spin_unlock(&pd->lock);
/* in init_write_congestion_marks(): */
		*lo = *hi - 100;
	...
		*lo = min(*lo, *hi - 100);
	...
		*hi = -1;
		*lo = -1;
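
/*
 * Editor's note: a small userspace sketch of the hysteresis rule the
 * fragments above implement: the "off" mark is forced to sit at least
 * 100 bios below the "on" mark, and a non-positive "on" mark disables
 * congestion handling (both marks become -1). This mirrors the visible
 * fragments only; it is not a verbatim copy of the driver function.
 */
#include <stdio.h>

static void normalize_marks(int *lo, int *hi)
{
	if (*hi > 0) {
		if (*lo <= 0 || *lo > *hi - 100)
			*lo = *hi - 100;	/* keep 100 bios of hysteresis */
	} else {
		*hi = -1;			/* congestion handling disabled */
		*lo = -1;
	}
}

int main(void)
{
	int lo = 0, hi = 500;

	normalize_marks(&lo, &hi);
	printf("off=%d on=%d\n", lo, hi);	/* off=400 on=500 */
	return 0;
}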
/* in kobj_pkt_store(): */
	struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
	...
	if (strcmp(attr->name, "reset") == 0 && len > 0) {
		pd->stats.pkt_started = 0;
		pd->stats.pkt_ended = 0;
		pd->stats.secs_w = 0;
		pd->stats.secs_rg = 0;
		pd->stats.secs_r = 0;
	} else if (strcmp(attr->name, "congestion_off") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_off = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
	} else if (strcmp(attr->name, "congestion_on") == 0
		   && sscanf(data, "%d", &val) == 1) {
		spin_lock(&pd->lock);
		pd->write_congestion_on = val;
		init_write_congestion_marks(&pd->write_congestion_off,
					    &pd->write_congestion_on);
		spin_unlock(&pd->lock);
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
	...
	pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
				"%s", pd->name);
	if (IS_ERR(pd->dev))
		pd->dev = NULL;
	...
	if (pd->dev) {
		pd->kobj_stat = pkt_kobj_create(pd, "stat",
						&pd->dev->kobj,
						&kobj_pkt_type_stat);
		pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
						  &pd->dev->kobj,
						  &kobj_pkt_type_wqueue);

static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
	...
	pkt_kobj_remove(pd->kobj_stat);
	pkt_kobj_remove(pd->kobj_wqueue);
	...
		device_unregister(pd->dev);
/* in device_map_show(): */
		struct pktcdvd_device *pd = pkt_devs[idx];
		if (!pd)
		...
			pd->name,
			MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
			MAJOR(pd->bdev->bd_dev),
			MINOR(pd->bdev->bd_dev));

/* in add_store(): */
		return -ENODEV;
	...
		return -EINVAL;

/* in remove_store(): */
		return -EINVAL;

/* in pkt_sysfs_init(): */
		return -ENOMEM;
	class_pktcdvd->name = DRIVER_NAME;
	class_pktcdvd->owner = THIS_MODULE;
	class_pktcdvd->class_release = class_pktcdvd_release;
	class_pktcdvd->class_groups = class_pktcdvd_groups;
/*
 * /sys/kernel/debug/pktcdvd[0-7]/
 */

/* in pkt_debugfs_fops_open(): */
	return single_open(file, pkt_debugfs_seq_show, inode->i_private);

static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
	...
	pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
	if (!pd->dfs_d_root)
	...
	pd->dfs_f_info = debugfs_create_file("info", 0444,
					     pd->dfs_d_root, pd, &debug_fops);

static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
	...
	debugfs_remove(pd->dfs_f_info);
	debugfs_remove(pd->dfs_d_root);
	pd->dfs_f_info = NULL;
	pd->dfs_d_root = NULL;
/* ----------------------------------------------------------*/

static void pkt_bio_finished(struct pktcdvd_device *pd)
	...
	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
		pkt_dbg(2, pd, "queue empty\n");
		atomic_set(&pd->iosched.attention, 1);
		wake_up(&pd->wqueue);
/* in pkt_alloc_packet_data(): */
	pkt->frames = frames;
	pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
	if (!pkt->w_bio)
	...
		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
		if (!pkt->pages[i])
	...
	spin_lock_init(&pkt->lock);
	bio_list_init(&pkt->orig_bios);
	...
		pkt->r_bios[i] = bio;
	...
		struct bio *bio = pkt->r_bios[i];
	...
		if (pkt->pages[i])
			__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);

/* in pkt_free_packet_data(): */
	for (i = 0; i < pkt->frames; i++) {
		struct bio *bio = pkt->r_bios[i];
	...
	for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
		__free_page(pkt->pages[i]);
	bio_put(pkt->w_bio);
static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
	...
	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
	...
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
	...
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);

static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
	...
	BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
	...
		pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
		...
			pkt_shrink_pktlist(pd);
		...
		pkt->id = nr_packets;
		pkt->pd = pd;
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
		nr_packets--;
static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
	...
	struct rb_node *n = rb_next(&node->rb_node);

static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
	...
	rb_erase(&node->rb_node, &pd->bio_queue);
	mempool_free(node, &pd->rb_pool);
	pd->bio_queue_size--;
	BUG_ON(pd->bio_queue_size < 0);
/*
 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 */
static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
	...
	struct rb_node *n = pd->bio_queue.rb_node;
	...
		BUG_ON(pd->bio_queue_size > 0);
	...
		if (s <= tmp->bio->bi_iter.bi_sector)
			next = n->rb_left;
		else
			next = n->rb_right;
	...
	if (s > tmp->bio->bi_iter.bi_sector) {
	...
	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
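
/*
 * Editor's note: pkt_rbtree_find() is a lower-bound lookup — it returns the
 * queued bio with the smallest starting sector >= s. A minimal userspace
 * sketch of the same idea over a sorted array (illustrative only; the driver
 * walks an rb tree instead):
 */
#include <stdio.h>
#include <stdint.h>

static int lower_bound(const uint64_t *sectors, int n, uint64_t s)
{
	int lo = 0, hi = n;	/* result: first index with sectors[i] >= s, or n */

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (sectors[mid] < s)
			lo = mid + 1;
		else
			hi = mid;
	}
	return lo;
}

int main(void)
{
	uint64_t queue[] = { 8, 32, 96, 128 };

	/* first queued sector >= 33 is 96, at index 2 */
	printf("index %d\n", lower_bound(queue, 4, 33));
	return 0;
}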
/*
 * Insert a node into the pd->bio_queue rb tree.
 */
static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
	...
	struct rb_node **p = &pd->bio_queue.rb_node;
	...
	sector_t s = node->bio->bi_iter.bi_sector;
	...
		if (s < tmp->bio->bi_iter.bi_sector)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	...
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &pd->bio_queue);
	pd->bio_queue_size++;
static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
	...
	struct request_queue *q = bdev_get_queue(pd->bdev);
	...
	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
			     REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
	...
	if (cgc->buflen) {
		ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
				      GFP_NOIO);
	...
	scsi_req(rq)->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
	memcpy(scsi_req(rq)->cmd, cgc->cmd, CDROM_PACKET_SIZE);
	...
	rq->timeout = 60*HZ;
	if (cgc->quiet)
		rq->rq_flags |= RQF_QUIET;
	...
	blk_execute_rq(pd->bdev->bd_disk, rq, 0);
	if (scsi_req(rq)->result)
		ret = -EIO;
static void pkt_dump_sense(struct pktcdvd_device *pd,
			   struct packet_command *cgc)
	...
	struct scsi_sense_hdr *sshdr = cgc->sshdr;
	...
		pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
			CDROM_PACKET_SIZE, cgc->cmd,
			sshdr->sense_key, sshdr->asc, sshdr->ascq,
			sense_key_string(sshdr->sense_key));
	...
		pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
static int pkt_flush_cache(struct pktcdvd_device *pd)
	...
	/*
	 * the IMMED bit -- we default to not setting it, although that
	 * would allow a much faster close, this is safer
	 */
	...
	return pkt_generic_packet(pd, &cgc);
static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
		pkt_dump_sense(pd, &cgc);

/*
 * Queue a bio for processing by the low-level CD device. Must be called
 * from process context.
 */
static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
	...
	spin_lock(&pd->iosched.lock);
		bio_list_add(&pd->iosched.read_queue, bio);
	...
		bio_list_add(&pd->iosched.write_queue, bio);
	spin_unlock(&pd->iosched.lock);

	atomic_set(&pd->iosched.attention, 1);
	wake_up(&pd->wqueue);
 * - A cache flush command must be inserted before a read request if the
 *   previous request was a write.
 * - Switching between reading and writing is slow, so don't do it more often
 *   than necessary.
 * - Optimize for throughput at the expense of latency: prefer to keep
 *   streaming in the current direction rather than switching immediately.
 * - Set the read speed according to current usage pattern. When only reading
 *   from the device, use the highest possible read speed; when switching
 *   often between reading and writing, keep the read speed equal to the
 *   write speed. (A sketch of this switching policy follows the function
 *   below.)
static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
	...
	if (atomic_read(&pd->iosched.attention) == 0)
	...
	atomic_set(&pd->iosched.attention, 0);
	...
		spin_lock(&pd->iosched.lock);
		reads_queued = !bio_list_empty(&pd->iosched.read_queue);
		writes_queued = !bio_list_empty(&pd->iosched.write_queue);
		spin_unlock(&pd->iosched.lock);
		...
		if (pd->iosched.writing) {
			...
			spin_lock(&pd->iosched.lock);
			bio = bio_list_peek(&pd->iosched.write_queue);
			spin_unlock(&pd->iosched.lock);
			if (bio && (bio->bi_iter.bi_sector ==
				    pd->iosched.last_write))
			...
				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
					pkt_dbg(2, pd, "write, waiting\n");
				...
				pkt_flush_cache(pd);
				pd->iosched.writing = 0;
		...
			if (atomic_read(&pd->cdrw.pending_bios) > 0) {
				pkt_dbg(2, pd, "read, waiting\n");
			...
			pd->iosched.writing = 1;
		...
		spin_lock(&pd->iosched.lock);
		if (pd->iosched.writing)
			bio = bio_list_pop(&pd->iosched.write_queue);
		else
			bio = bio_list_pop(&pd->iosched.read_queue);
		spin_unlock(&pd->iosched.lock);
		...
			pd->iosched.successive_reads +=
				bio->bi_iter.bi_size >> 10;
		...
			pd->iosched.successive_reads = 0;
			pd->iosched.last_write = bio_end_sector(bio);
		...
		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
			if (pd->read_speed == pd->write_speed) {
				pd->read_speed = MAX_SPEED;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
		...
			if (pd->read_speed != pd->write_speed) {
				pd->read_speed = pd->write_speed;
				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
		...
		atomic_inc(&pd->cdrw.pending_bios);
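
/*
 * Editor's note: a hypothetical userspace sketch of the scheduling policy
 * described above: keep going in the current direction, and flush the drive
 * cache before switching from writing to reading. All names here are invented
 * for illustration and do not appear in the driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct iosched_state {
	bool writing;	/* current transfer direction */
	int pending;	/* bios still in flight */
};

/* returns true if a cache flush must be issued before the next request */
static bool pick_direction(struct iosched_state *st,
			   bool reads_queued, bool writes_queued)
{
	if (st->writing && reads_queued && st->pending == 0) {
		st->writing = false;
		return true;	/* flush cache before the read */
	}
	if (!st->writing && writes_queued && st->pending == 0)
		st->writing = true;
	return false;
}

int main(void)
{
	struct iosched_state st = { .writing = true, .pending = 0 };

	printf("flush before read: %d\n", pick_direction(&st, true, false));
	return 0;
}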
static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
	...
	if ((pd->settings.size << 9) / CD_FRAMESIZE
	    <= queue_max_segments(q)) {
		...
		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
	...
	} else if ((pd->settings.size << 9) / PAGE_SIZE
		   <= queue_max_segments(q)) {
		...
		set_bit(PACKET_MERGE_SEGS, &pd->flags);
	...
		pkt_err(pd, "cdrom max_phys_segments too small\n");
		return -EIO;
/* in pkt_end_io_read(): */
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
		bio, (unsigned long long)pkt->sector,
		(unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);

	if (bio->bi_status)
		atomic_inc(&pkt->io_errors);
	if (atomic_dec_and_test(&pkt->io_wait)) {
		atomic_inc(&pkt->run_sm);
		wake_up(&pd->wqueue);
	...
	pkt_bio_finished(pd);

/* in pkt_end_io_packet_write(): */
	struct packet_data *pkt = bio->bi_private;
	struct pktcdvd_device *pd = pkt->pd;
	BUG_ON(!pd);

	pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);

	pd->stats.pkt_ended++;

	pkt_bio_finished(pd);
	atomic_dec(&pkt->io_wait);
	atomic_inc(&pkt->run_sm);
	wake_up(&pd->wqueue);
static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
	...
	BUG_ON(bio_list_empty(&pkt->orig_bios));

	atomic_set(&pkt->io_wait, 0);
	atomic_set(&pkt->io_errors, 0);
	...
	spin_lock(&pkt->lock);
	bio_list_for_each(bio, &pkt->orig_bios) {
		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
			(CD_FRAMESIZE >> 9);
		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
		...
		BUG_ON(first_frame + num_frames > pkt->frames);
	...
	spin_unlock(&pkt->lock);

	if (pkt->cache_valid) {
		pkt_dbg(2, pd, "zone %llx cached\n",
			(unsigned long long)pkt->sector);
	...
	for (f = 0; f < pkt->frames; f++) {
		...
		bio = pkt->r_bios[f];
		...
		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
		bio_set_dev(bio, pd->bdev);
		bio->bi_end_io = pkt_end_io_read;
		bio->bi_private = pkt;
		...
		pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
			f, pkt->pages[p], offset);
		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
		...
		atomic_inc(&pkt->io_wait);
		...
		pkt_queue_bio(pd, bio);
	...
	pkt_dbg(2, pd, "need %d frames for zone %llx\n",
		frames_read, (unsigned long long)pkt->sector);
	pd->stats.pkt_started++;
	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
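
/*
 * Editor's note: the 2kB frames of a packet share a page array, so with 4kB
 * pages two frames fit per page. A sketch of the page/offset mapping used
 * when building the read and write bios above (CD_FRAMESIZE = 2048 and a
 * 4096-byte page size assumed):
 */
#include <stdio.h>

int main(void)
{
	const int frame_size = 2048;	/* CD_FRAMESIZE */
	const int page_size = 4096;	/* PAGE_SIZE on most architectures */

	for (int f = 0; f < 4; f++) {
		int p = (f * frame_size) / page_size;		/* page index */
		int offset = (f * frame_size) % page_size;	/* byte offset */

		printf("frame %d -> page %d offset %d\n", f, p, offset);
	}
	return 0;	/* frames 0..3 -> (0,0), (0,2048), (1,0), (1,2048) */
}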
static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
	...
	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
			list_del_init(&pkt->list);
			if (pkt->sector != zone)
				pkt->cache_valid = 0;

static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
	...
	if (pkt->cache_valid) {
		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
	...
		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);

/* in pkt_set_state(): */
	enum packet_data_state old_state = pkt->state;
	pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n",
		pkt->id, (unsigned long long)pkt->sector,
	...
	pkt->state = state;
/*
 * returns non-zero if any work was done.
 */
static int pkt_handle_queue(struct pktcdvd_device *pd)
	...
	struct pkt_rb_node *node, *first_node;
	...
	atomic_set(&pd->scan_queue, 0);

	if (list_empty(&pd->cdrw.pkt_free_list)) {
		pkt_dbg(2, pd, "no pkt\n");
	...
	spin_lock(&pd->lock);
	first_node = pkt_rbtree_find(pd, pd->current_sector);
	...
		n = rb_first(&pd->bio_queue);
	...
	node = first_node;
	while (node) {
		bio = node->bio;
		zone = get_zone(bio->bi_iter.bi_sector, pd);
		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
			if (p->sector == zone) {
		...
		node = pkt_rbtree_next(node);
		if (!node) {
			n = rb_first(&pd->bio_queue);
			...
			node = rb_entry(n, struct pkt_rb_node, rb_node);
		...
		if (node == first_node)
			node = NULL;
	...
	spin_unlock(&pd->lock);
	...
		pkt_dbg(2, pd, "no bio\n");

	pkt = pkt_get_packet_data(pd, zone);

	pd->current_sector = zone + pd->settings.size;
	pkt->sector = zone;
	BUG_ON(pkt->frames != pd->settings.size >> 2);
	pkt->write_size = 0;
	...
	spin_lock(&pd->lock);
	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
		bio = node->bio;
		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
			get_zone(bio->bi_iter.bi_sector, pd));
		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
		...
		pkt_rbtree_erase(pd, node);
		spin_lock(&pkt->lock);
		bio_list_add(&pkt->orig_bios, bio);
		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
		spin_unlock(&pkt->lock);
	...
	wakeup = (pd->write_congestion_on > 0
		&& pd->bio_queue_size <= pd->write_congestion_off);
	spin_unlock(&pd->lock);
	...
		clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);

	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
	...
	atomic_set(&pkt->run_sm, 1);

	spin_lock(&pd->cdrw.active_list_lock);
	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
	spin_unlock(&pd->cdrw.active_list_lock);
/*
 * bio_list_copy_data - copy contents of data buffers from one chain of bios to
 * another
 *
 * Stops when it reaches the end of either the @src list or @dst list - that is,
 * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
 * bios).
 */
/* in bio_list_copy_data(): */
	struct bvec_iter src_iter = src->bi_iter;
	struct bvec_iter dst_iter = dst->bi_iter;
	...
			src = src->bi_next;
			...
			src_iter = src->bi_iter;
		...
			dst = dst->bi_next;
			...
			dst_iter = dst->bi_iter;
static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
	...
	bio_reset(pkt->w_bio);
	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
	bio_set_dev(pkt->w_bio, pd->bdev);
	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
	pkt->w_bio->bi_private = pkt;
	...
	for (f = 0; f < pkt->frames; f++) {
		struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
		...
		if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
	...
	pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);

	/*
	 * Fill-in bvec with data from orig_bios.
	 */
	spin_lock(&pkt->lock);
	bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
	...
	spin_unlock(&pkt->lock);

	pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
		pkt->write_size, (unsigned long long)pkt->sector);

	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
		pkt->cache_valid = 1;
	else
		pkt->cache_valid = 0;

	atomic_set(&pkt->io_wait, 1);
	bio_set_op_attrs(pkt->w_bio, REQ_OP_WRITE, 0);
	pkt_queue_bio(pd, pkt->w_bio);

/* in pkt_finish_packet(): */
		pkt->cache_valid = 0;
	...
	while ((bio = bio_list_pop(&pkt->orig_bios))) {
		bio->bi_status = status;
static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
	...
	pkt_dbg(2, pd, "pkt %d\n", pkt->id);
	...
	switch (pkt->state) {
		...
		if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
		...
		pkt->sleep_time = 0;
		pkt_gather_data(pd, pkt);
		...
		if (atomic_read(&pkt->io_wait) > 0)
		...
		if (atomic_read(&pkt->io_errors) > 0) {
		...
			pkt_start_write(pd, pkt);
		...
		if (atomic_read(&pkt->io_wait) > 0)
		...
		if (!pkt->w_bio->bi_status) {
		...
			pkt_dbg(2, pd, "No recovery possible\n");
		...
		pkt_finish_packet(pkt, pkt->w_bio->bi_status);
static void pkt_handle_packets(struct pktcdvd_device *pd)
	...
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (atomic_read(&pkt->run_sm) > 0) {
			atomic_set(&pkt->run_sm, 0);
			pkt_run_state_machine(pd, pkt);
	...
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
		if (pkt->state == PACKET_FINISHED_STATE) {
			list_del(&pkt->list);
			pkt_put_packet_data(pd, pkt);
			...
			atomic_set(&pd->scan_queue, 1);
	...
	spin_unlock(&pd->cdrw.active_list_lock);

static void pkt_count_states(struct pktcdvd_device *pd, int *states)
	...
	spin_lock(&pd->cdrw.active_list_lock);
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		states[pkt->state]++;
	...
	spin_unlock(&pd->cdrw.active_list_lock);
/* in kcdrwd(): */
	struct pktcdvd_device *pd = foobar;
	...
		add_wait_queue(&pd->wqueue, &wait);
		...
			if (atomic_read(&pd->scan_queue) > 0)
			...
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (atomic_read(&pkt->run_sm) > 0)
			...
			if (atomic_read(&pd->iosched.attention) != 0)
			...
			pkt_count_states(pd, states);
			pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
			...
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
					min_sleep_time = pkt->sleep_time;
			...
			pkt_dbg(2, pd, "sleeping\n");
			...
			pkt_dbg(2, pd, "wake up\n");
			...
			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
				if (!pkt->sleep_time)
				...
				pkt->sleep_time -= min_sleep_time - residue;
				if (pkt->sleep_time <= 0) {
					pkt->sleep_time = 0;
					atomic_inc(&pkt->run_sm);
		...
		remove_wait_queue(&pd->wqueue, &wait);
		...
		while (pkt_handle_queue(pd))
		...
		pkt_handle_packets(pd);
		...
		pkt_iosched_process_queue(pd);
static void pkt_print_settings(struct pktcdvd_device *pd)
	...
	pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
		 pd->settings.fp ? "Fixed" : "Variable",
		 pd->settings.size >> 2,
		 pd->settings.block_mode == 8 ? '1' : '2');
static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc,
			  int page_code, int page_control)
	...
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	...
	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
	cgc->cmd[2] = page_code | (page_control << 6);
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_READ;
	return pkt_generic_packet(pd, cgc);
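
/*
 * Editor's note: a minimal userspace sketch of the MODE SENSE(10) CDB layout
 * built above: opcode 0x5A, page code and page control in byte 2, and the
 * big-endian allocation length split across bytes 7 and 8. The numeric
 * constants mirror the MMC spec values, not driver headers.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	uint8_t cmd[10];
	int page_code = 0x05;		/* write parameters page */
	int page_control = 0;		/* 0 = current values */
	int buflen = 128;		/* allocation length */

	memset(cmd, 0, sizeof(cmd));
	cmd[0] = 0x5A;			/* GPCMD_MODE_SENSE_10 */
	cmd[2] = page_code | (page_control << 6);
	cmd[7] = buflen >> 8;		/* allocation length, MSB */
	cmd[8] = buflen & 0xff;		/* allocation length, LSB */

	for (int i = 0; i < 10; i++)
		printf("%02x ", cmd[i]);
	printf("\n");			/* 5a 00 05 00 00 00 00 00 80 00 */
	return 0;
}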
static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
	...
	memset(cgc->cmd, 0, sizeof(cgc->cmd));
	memset(cgc->buffer, 0, 2);
	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
	cgc->cmd[1] = 0x10;		/* PF */
	cgc->cmd[7] = cgc->buflen >> 8;
	cgc->cmd[8] = cgc->buflen & 0xff;
	cgc->data_direction = CGC_DATA_WRITE;
	return pkt_generic_packet(pd, cgc);
static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
	cgc.buflen = be16_to_cpu(di->disc_information_length) +
		     sizeof(di->disc_information_length);
	...
	return pkt_generic_packet(pd, &cgc);

static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type,
			      track_information *ti)
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
	cgc.buflen = be16_to_cpu(ti->track_information_length) +
		     sizeof(ti->track_information_length);
	...
	return pkt_generic_packet(pd, &cgc);
static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
	...
	ret = pkt_get_disc_info(pd, &di);
	...
	ret = pkt_get_track_info(pd, last_track, 1, &ti);
	...
		last_track--;
		ret = pkt_get_track_info(pd, last_track, 1, &ti);
	...
		*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
/*
 * write mode select package based on pd->settings
 */
static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
	...
	/* doesn't apply to DVD+RW or DVD-RAM */
	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
	...
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	...
		pkt_dump_sense(pd, &cgc);
	...
	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
	...
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
	...
		pkt_dump_sense(pd, &cgc);
	...
	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];

	wp->fp = pd->settings.fp;
	wp->track_mode = pd->settings.track_mode;
	wp->write_type = pd->settings.write_type;
	wp->data_block_type = pd->settings.block_mode;

	wp->multi_session = 0;
	...
	wp->link_size = 7;
	wp->ls_v = 1;
	...
	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
		wp->session_format = 0;
		wp->subhdr2 = 0x20;
	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
		wp->session_format = 0x20;
		wp->subhdr2 = 8;
		...
		wp->mcn[0] = 0x80;
		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
	...
		pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
	...
	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);

	ret = pkt_mode_select(pd, &cgc);
	...
		pkt_dump_sense(pd, &cgc);
	...
	pkt_print_settings(pd);
/*
 * 1 -- we can write to this track, 0 -- we can't
 */
static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
	...
	switch (pd->mmc3_profile) {
		...
		case 0x12: /* DVD-RAM */
			/* The track is always writable on DVD+RW/DVD-RAM */
	...
	if (!ti->packet || !ti->fp)
	...
	if (ti->rt == 0 && ti->blank == 0)
	...
	if (ti->rt == 0 && ti->blank == 1)
	...
	if (ti->rt == 1 && ti->blank == 0)
	...
	pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
/*
 * 1 -- we can write to this disc, 0 -- we can't
 */
static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
	...
	switch (pd->mmc3_profile) {
		case 0x0a: /* CD-RW */
		...
		case 0x13: /* DVD-RW */
		case 0x12: /* DVD-RAM */
		...
			pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
				pd->mmc3_profile);
	...
	if (di->disc_type == 0xff) {
		pkt_notice(pd, "unknown disc - no track?\n");
	...
	if (di->disc_type != 0x20 && di->disc_type != 0) {
		pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
	...
	if (di->erasable == 0) {
		pkt_notice(pd, "disc not erasable\n");
	...
	if (di->border_status == PACKET_SESSION_RESERVED) {
		pkt_err(pd, "can't write to last track (reserved)\n");
static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
	...
	ret = pkt_generic_packet(pd, &cgc);
	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
	...
	ret = pkt_get_disc_info(pd, &di);
	...
		pkt_err(pd, "failed get_disc\n");
	...
	if (!pkt_writable_disc(pd, &di))
		return -EROFS;

	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
	...
	ret = pkt_get_track_info(pd, track, 1, &ti);
	...
		pkt_err(pd, "failed get_track\n");
	...
	if (!pkt_writable_track(pd, &ti)) {
		pkt_err(pd, "can't write to this track\n");
		return -EROFS;
	...
	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
	if (pd->settings.size == 0) {
		pkt_notice(pd, "detected zero packet size!\n");
		return -ENXIO;
	...
	if (pd->settings.size > PACKET_MAX_SECTORS) {
		pkt_err(pd, "packet size is too big\n");
		return -EROFS;
	...
	pd->settings.fp = ti.fp;
	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
	...
		pd->nwa = be32_to_cpu(ti.next_writable);
		set_bit(PACKET_NWA_VALID, &pd->flags);
	...
	/*
	 * in theory we could use lra on -RW media as well and just zero
	 * blocks that haven't been written yet, but in practice that
	 * is just a no-go. we'll use that for -R, naturally.
	 */
	...
		pd->lra = be32_to_cpu(ti.last_rec_address);
		set_bit(PACKET_LRA_VALID, &pd->flags);
	...
		pd->lra = 0xffffffff;
		set_bit(PACKET_LRA_VALID, &pd->flags);
	...
	pd->settings.link_loss = 7;
	pd->settings.write_type = 0;	/* packet */
	pd->settings.track_mode = ti.track_mode;
	...
		pd->settings.block_mode = PACKET_BLOCK_MODE1;
	...
		pd->settings.block_mode = PACKET_BLOCK_MODE2;
	...
		pkt_err(pd, "unknown data mode\n");
		return -EROFS;
static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
	...
	cgc.buflen = pd->mode_offset + 12;
	...
	ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
	...
	buf[pd->mode_offset + 10] |= (!!set << 2);
	...
	ret = pkt_mode_select(pd, &cgc);
	...
		pkt_err(pd, "write caching control failed\n");
		pkt_dump_sense(pd, &cgc);
	...
		pkt_notice(pd, "enabled write caching\n");
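
/*
 * Editor's note: the write-cache enable (WCE) flag is bit 2 of the relevant
 * caching mode page byte; "(!!set << 2)" folds any non-zero `set` down to 1
 * and shifts it into place. A tiny standalone illustration:
 */
#include <stdio.h>

int main(void)
{
	unsigned char byte = 0x00;
	int set = 42;			/* any non-zero value means "enable" */

	byte |= (!!set << 2);		/* !! normalizes 42 to 1; bit 2 is WCE */
	printf("caching byte: 0x%02x\n", byte);		/* 0x04 */
	return 0;
}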
static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
	...
	return pkt_generic_packet(pd, &cgc);

static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
	...
	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
	...
	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
	...
		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
			     sizeof(struct mode_page_header);
		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
		...
			pkt_dump_sense(pd, &cgc);
/* These tables from cdrecord - I don't have orange book */
/* standard speed CD-RW (1-4x) */
/* high speed CD-RW (-10x) */
/* ultra high speed CD-RW */
static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
		pkt_dump_sense(pd, &cgc);
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
		pkt_dump_sense(pd, &cgc);
	...
		pkt_notice(pd, "disc type is not CD-RW\n");
	...
		pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
	...
	st = (buf[6] >> 3) & 0x7;	/* disc sub-type */
	...
		pkt_notice(pd, "unknown disc sub-type %d\n", st);
	...
		pkt_info(pd, "maximum media speed: %d\n", *speed);
	...
		pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
	...
	pkt_dbg(2, pd, "Performing OPC\n");
	...
	ret = pkt_generic_packet(pd, &cgc);
	...
		pkt_dump_sense(pd, &cgc);
static int pkt_open_write(struct pktcdvd_device *pd)
	...
	ret = pkt_probe_settings(pd);
	...
		pkt_dbg(2, pd, "failed probe\n");
	...
	ret = pkt_set_write_settings(pd);
	...
		pkt_dbg(1, pd, "failed saving write settings\n");
		return -EIO;
	...
	pkt_write_caching(pd, USE_WCACHING);

	ret = pkt_get_max_speed(pd, &write_speed);
	...
	switch (pd->mmc3_profile) {
		case 0x13: /* DVD-RW */
		...
		case 0x12: /* DVD-RAM */
			pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
		...
			ret = pkt_media_speed(pd, &media_write_speed);
			...
			pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
	...
	ret = pkt_set_speed(pd, write_speed, read_speed);
	...
		pkt_dbg(1, pd, "couldn't set write speed\n");
		return -EIO;
	...
	pd->write_speed = write_speed;
	pd->read_speed = read_speed;

	ret = pkt_perform_opc(pd);
	...
		pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
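
/*
 * Editor's note: CD speeds are reported by the drive in kB/s, where 1x CD is
 * 176 kB/s (75 frames/s * 2352 raw bytes, rounded down to whole kB). A quick
 * illustration of the kB/s-to-"x" conversion used in the debug message above:
 */
#include <stdio.h>

int main(void)
{
	unsigned write_speed = 1760;	/* kB/s, as reported by the drive */

	printf("%ux\n", write_speed / 176);	/* prints "10x" */
	return 0;
}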
static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
	...
	/*
	 * We need to re-open the cdrom device without O_NONBLOCK to be able
	 * to read/write from/to it.
	 */
	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
	...
	ret = pkt_get_last_written(pd, &lba);
	...
		pkt_err(pd, "pkt_get_last_written failed\n");
	...
	set_capacity(pd->disk, lba << 2);
	set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);

	q = bdev_get_queue(pd->bdev);
	...
		ret = pkt_open_write(pd);
		...
		blk_queue_max_hw_sectors(q, pd->settings.size);
		set_bit(PACKET_WRITABLE, &pd->flags);
	...
		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
		clear_bit(PACKET_WRITABLE, &pd->flags);
	...
	ret = pkt_set_segment_merging(pd, q);
	...
		if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
			pkt_err(pd, "not enough memory for buffers\n");
			ret = -ENOMEM;
	...
	pkt_info(pd, "%lukB available on disc\n", lba << 1);
static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
	...
	if (flush && pkt_flush_cache(pd))
		pkt_dbg(1, pd, "not flushing cache\n");

	pkt_lock_door(pd, 0);
	...
	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);

	pkt_shrink_pktlist(pd);
/* in pkt_open(): */
	struct pktcdvd_device *pd = NULL;
	...
	pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
	if (!pd) {
		ret = -ENODEV;
	...
	BUG_ON(pd->refcnt < 0);

	pd->refcnt++;
	if (pd->refcnt > 1) {
		if ((mode & FMODE_WRITE) &&
		    !test_bit(PACKET_WRITABLE, &pd->flags)) {
			ret = -EBUSY;
	...
		ret = pkt_open_dev(pd, mode & FMODE_WRITE);
	...
	pd->refcnt--;
/* in pkt_close(): */
	struct pktcdvd_device *pd = disk->private_data;
	...
	pd->refcnt--;
	BUG_ON(pd->refcnt < 0);
	if (pd->refcnt == 0) {
		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
		pkt_release_dev(pd, flush);

/* in pkt_end_io_read_cloned(): */
	struct packet_stacked_data *psd = bio->bi_private;
	struct pktcdvd_device *pd = psd->pd;

	psd->bio->bi_status = bio->bi_status;
	...
	bio_endio(psd->bio);
	...
	pkt_bio_finished(pd);
static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
	...
	psd->pd = pd;
	psd->bio = bio;
	bio_set_dev(cloned_bio, pd->bdev);
	cloned_bio->bi_private = psd;
	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
	pd->stats.secs_r += bio_sectors(bio);
	pkt_queue_bio(pd, cloned_bio);
/* in pkt_make_request_write(): */
	struct pktcdvd_device *pd = q->queuedata;
	...
	struct pkt_rb_node *node;
	...
	zone = get_zone(bio->bi_iter.bi_sector, pd);
	...
	spin_lock(&pd->cdrw.active_list_lock);
	...
	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
		if (pkt->sector == zone) {
			spin_lock(&pkt->lock);
			if ((pkt->state == PACKET_WAITING_STATE) ||
			    (pkt->state == PACKET_READ_WAIT_STATE)) {
				bio_list_add(&pkt->orig_bios, bio);
				pkt->write_size +=
					bio->bi_iter.bi_size / CD_FRAMESIZE;
				if ((pkt->write_size >= pkt->frames) &&
				    (pkt->state == PACKET_WAITING_STATE)) {
					atomic_inc(&pkt->run_sm);
					wake_up(&pd->wqueue);
				...
				spin_unlock(&pkt->lock);
				spin_unlock(&pd->cdrw.active_list_lock);
			...
			spin_unlock(&pkt->lock);
	...
	spin_unlock(&pd->cdrw.active_list_lock);
	...
	spin_lock(&pd->lock);
	if (pd->write_congestion_on > 0
	    && pd->bio_queue_size >= pd->write_congestion_on) {
		set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
		...
			spin_unlock(&pd->lock);
			...
			spin_lock(&pd->lock);
		} while(pd->bio_queue_size > pd->write_congestion_off);
	...
	spin_unlock(&pd->lock);
	...
	node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
	node->bio = bio;
	spin_lock(&pd->lock);
	BUG_ON(pd->bio_queue_size < 0);
	was_empty = (pd->bio_queue_size == 0);
	pkt_rbtree_insert(pd, node);
	spin_unlock(&pd->lock);
	...
		atomic_set(&pd->scan_queue, 1);
		...
			wake_up(&pd->wqueue);
	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
		...
		wake_up(&pd->wqueue);
/* in pkt_submit_bio(): */
	struct pktcdvd_device *pd;
	...
	pd = bio->bi_bdev->bd_disk->queue->queuedata;
	if (!pd) {
	...
	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
		(unsigned long long)bio->bi_iter.bi_sector,
	...
		pkt_make_request_read(pd, bio);
	...
	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
		pkt_notice(pd, "WRITE for ro device (%llu)\n",
			   (unsigned long long)bio->bi_iter.bi_sector);
	...
	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
		pkt_err(pd, "wrong bio size\n");
	...
		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
		...
			BUG_ON(last_zone != zone + pd->settings.size);

			split = bio_split(bio, last_zone -
					  bio->bi_iter.bi_sector,
					  GFP_NOIO, &pkt_bio_set);
		...
		pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
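
/*
 * Editor's note: a write that crosses a zone boundary is split so that each
 * piece stays inside a single packet zone. A userspace sketch of the
 * split-size arithmetic above (packet size of 32 sectors assumed):
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t size = 32;			/* sectors per packet */
	uint64_t sector = 40, nsectors = 32;	/* bio covers sectors 40..71 */
	uint64_t zone = sector & ~(size - 1);			/* 32 */
	uint64_t last_zone = (sector + nsectors - 1) & ~(size - 1);	/* 64 */

	if (last_zone != zone) {
		/* 24 sectors stay in zone 32; the rest goes to zone 64 */
		uint64_t split = last_zone - sector;

		printf("split after %llu sectors\n", (unsigned long long)split);
	}
	return 0;
}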
static void pkt_init_queue(struct pktcdvd_device *pd)
	...
	struct request_queue *q = pd->disk->queue;
	...
	q->queuedata = pd;

/* in pkt_seq_show(): */
	struct pktcdvd_device *pd = m->private;
	...
	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
		   bdevname(pd->bdev, bdev_buf));
	...
	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
	...
	if (pd->settings.write_type == 0)
	...
	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
	...
	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
	...
	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
	...
	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
	...
	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
	...
	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
	...
	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
	...
	pkt_count_states(pd, states);
	...
		   pd->write_congestion_off,
		   pd->write_congestion_on);
static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
	...
	if (pd->pkt_dev == dev) {
		pkt_err(pd, "recursive setup not allowed\n");
		return -EBUSY;
	...
		if (pd2->bdev->bd_dev == dev) {
			pkt_err(pd, "%s already setup\n",
				bdevname(pd2->bdev, b));
			return -EBUSY;
		...
		if (pd2->pkt_dev == dev) {
			pkt_err(pd, "can't chain pktcdvd devices\n");
			return -EBUSY;
	...
		return -EINVAL;
	...
	pd->bdev = bdev;
	...
	pkt_init_queue(pd);

	atomic_set(&pd->cdrw.pending_bios, 0);
	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
	if (IS_ERR(pd->cdrw.thread)) {
		pkt_err(pd, "can't start kernel thread\n");
	...
	proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
	pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b));
	...
	return -ENOMEM;
/* in pkt_ioctl(): */
	struct pktcdvd_device *pd = bdev->bd_disk->private_data;
	...
	pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
		cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
	...
		if (pd->refcnt == 1)
			pkt_lock_door(pd, 0);
	...
	/*
	 * forward selected CDROM ioctls to CD-ROM, for UDF
	 */
	...
		if (!bdev->bd_disk->fops->ioctl)
			ret = -ENOTTY;
		else
			ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
	...
		pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
		ret = -ENOTTY;
/* in pkt_check_events(): */
	struct pktcdvd_device *pd = disk->private_data;
	...
	if (!pd)
	...
	if (!pd->bdev)
	...
	attached_disk = pd->bdev->bd_disk;
	if (!attached_disk || !attached_disk->fops->check_events)
	...
	return attached_disk->fops->check_events(attached_disk, clearing);

/* in pkt_devnode(): */
	return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
/*
 * Set up mapping from pktcdvd device to CD-ROM device.
 */
/* in pkt_setup_dev(): */
	int ret = -ENOMEM;
	struct pktcdvd_device *pd;
	...
	ret = -EBUSY;
	...
	pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
	if (!pd)
	...
	ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
					sizeof(struct pkt_rb_node));
	...
	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
	spin_lock_init(&pd->cdrw.active_list_lock);

	spin_lock_init(&pd->lock);
	spin_lock_init(&pd->iosched.lock);
	bio_list_init(&pd->iosched.read_queue);
	bio_list_init(&pd->iosched.write_queue);
	sprintf(pd->name, DRIVER_NAME"%d", idx);
	init_waitqueue_head(&pd->wqueue);
	pd->bio_queue = RB_ROOT;

	pd->write_congestion_on = write_congestion_on;
	pd->write_congestion_off = write_congestion_off;

	ret = -ENOMEM;
	...
	pd->disk = disk;
	disk->major = pktdev_major;
	disk->first_minor = idx;
	disk->minors = 1;
	disk->fops = &pktcdvd_ops;
	disk->flags = GENHD_FL_REMOVABLE;
	strcpy(disk->disk_name, pd->name);
	disk->private_data = pd;

	pd->pkt_dev = MKDEV(pktdev_major, idx);
	ret = pkt_new_dev(pd, dev);
	...
	disk->events = pd->bdev->bd_disk->events;
	...
	pkt_sysfs_dev_new(pd);
	pkt_debugfs_dev_new(pd);

	pkt_devs[idx] = pd;
	...
	*pkt_dev = pd->pkt_dev;
	...
	mempool_exit(&pd->rb_pool);
	kfree(pd);
/*
 * Tear down mapping from pktcdvd device to CD-ROM device.
 */
/* in pkt_remove_dev(): */
	struct pktcdvd_device *pd;
	...
		pd = pkt_devs[idx];
		if (pd && (pd->pkt_dev == pkt_dev))
	...
	ret = -ENXIO;
	...
	if (pd->refcnt > 0) {
		ret = -EBUSY;
	...
	if (!IS_ERR(pd->cdrw.thread))
		kthread_stop(pd->cdrw.thread);
	...
	pkt_debugfs_dev_remove(pd);
	pkt_sysfs_dev_remove(pd);

	blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);

	remove_proc_entry(pd->name, pkt_proc);
	pkt_dbg(1, pd, "writer unmapped\n");

	del_gendisk(pd->disk);
	blk_cleanup_disk(pd->disk);
	...
	mempool_exit(&pd->rb_pool);
	kfree(pd);
/* in pkt_get_status(): */
	struct pktcdvd_device *pd;
	...
	pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
	if (pd) {
		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
	} else {
		ctrl_cmd->dev = 0;
		ctrl_cmd->pkt_dev = 0;
	}
	ctrl_cmd->num_devices = MAX_WRITERS;
/* in pkt_ctl_ioctl(): */
		return -ENOTTY;
	...
		return -EFAULT;
	...
		return -EPERM;
	...
		return -EPERM;
	...
		return -ENOTTY;
	...
		return -EFAULT;