Lines matching full:md (all hits are from drivers/rapidio/devices/rio_mport_cdev.c; the leading number is the source line, the trailing "in ..." names the containing function)
123 struct mport_dev *md; member
187 * @md master port character device object
199 struct mport_dev *md; member
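The two member hits (source lines 123 and 199) and the kernel-doc line at 187 show `md` used as a back-reference to the owning device object from two different structures: each buffer mapping and each open-file private record point back at their `struct mport_dev`. A minimal sketch of that relationship, with field lists limited to what the hits in this listing name; the fields marked "assumed" and the exact layout are guesses, not the file's declaration order:

	#include <linux/atomic.h>
	#include <linux/cdev.h>
	#include <linux/device.h>
	#include <linux/dmaengine.h>
	#include <linux/kref.h>
	#include <linux/list.h>
	#include <linux/mutex.h>
	#include <linux/rio.h>
	#include <linux/rio_mport_cdev.h>
	#include <linux/spinlock.h>

	/* One instance per RapidIO master port character device; every
	 * field below is named somewhere in the hits in this listing. */
	struct mport_dev {
		atomic_t active;             /* gates ioctls after removal */
		struct list_head node;       /* link in global device list */
		struct cdev cdev;
		struct device dev;           /* embedded; release frees md */
		struct rio_mport *mport;     /* underlying master port */
		struct mutex buf_mutex;      /* protects 'mappings' */
		struct mutex file_mutex;     /* protects 'file_list' */
		struct list_head file_list;  /* open files */
		struct rio_mport_properties properties;
		struct list_head doorbells;  /* filters, under db_lock */
		spinlock_t db_lock;
		struct list_head portwrites; /* filters, under pw_lock */
		spinlock_t pw_lock;
		struct list_head mappings;   /* DMA/inbound/outbound maps */
		struct dma_chan *dma_chan;   /* shared default DMA channel */
		struct kref dma_ref;
	};

	/* Per-mapping record: back-references its device via 'md'. */
	struct rio_mport_mapping {
		struct list_head node;       /* link in mport_dev::mappings */
		struct mport_dev *md;        /* hit at source line 123 */
		u64 rio_addr;                /* assumed */
		dma_addr_t phys_addr;        /* assumed */
		void *virt_addr;             /* assumed */
		u64 size;                    /* assumed */
		struct kref ref;             /* assumed; see the 2147 hit */
		struct file *filp;           /* assumed owner */
	};

	/* Per-open-file private data (abbreviated). */
	struct mport_cdev_priv {
		struct mport_dev *md;        /* hit at source line 199 */
		struct dma_chan *dmach;      /* per-file DMA channel */
	};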
261 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_rd()
306 struct rio_mport *mport = priv->md->mport; in rio_mport_maint_wr()
359 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_outbound_mapping() argument
363 struct rio_mport *mport = md->mport; in rio_mport_create_outbound_mapping()
383 map->md = md; in rio_mport_create_outbound_mapping()
385 list_add_tail(&map->node, &md->mappings); in rio_mport_create_outbound_mapping()
393 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_outbound_mapping() argument
400 mutex_lock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
401 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_outbound_mapping()
419 err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr, in rio_mport_get_outbound_mapping()
421 mutex_unlock(&md->buf_mutex); in rio_mport_get_outbound_mapping()
428 struct mport_dev *data = priv->md; in rio_mport_obw_map()
462 struct mport_dev *md = priv->md; in rio_mport_obw_free() local
466 if (!md->mport->ops->unmap_outb) in rio_mport_obw_free()
474 mutex_lock(&md->buf_mutex); in rio_mport_obw_free()
475 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_obw_free()
485 mutex_unlock(&md->buf_mutex); in rio_mport_obw_free()
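The outbound-window hits (source lines 359-485) all follow one locking convention: every access to md->mappings happens under md->buf_mutex, creation appends with list_add_tail(), lookup walks the list under the same mutex, and teardown uses the _safe iterator because entries are unlinked mid-walk. A condensed sketch of that pattern; the match criterion and the hardware-window release step are assumptions:

	#include <linux/fs.h>

	/* Create path: record the back-reference, then publish the
	 * mapping on the device list under the buffer mutex. */
	static void obw_add_mapping(struct mport_dev *md,
				    struct rio_mport_mapping *map)
	{
		map->md = md;               /* as at source line 383 */
		mutex_lock(&md->buf_mutex);
		list_add_tail(&map->node, &md->mappings);
		mutex_unlock(&md->buf_mutex);
	}

	/* Free path: _safe iteration permits list_del() mid-walk. */
	static void obw_free_for_file(struct mport_dev *md, struct file *filp)
	{
		struct rio_mport_mapping *map, *_map;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry_safe(map, _map, &md->mappings, node) {
			if (map->filp == filp) {    /* assumed criterion */
				list_del(&map->node);
				/* release window resources here */
			}
		}
		mutex_unlock(&md->buf_mutex);
	}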
497 struct mport_dev *md = priv->md; in maint_hdid_set() local
503 md->mport->host_deviceid = hdid; in maint_hdid_set()
504 md->properties.hdid = hdid; in maint_hdid_set()
505 rio_local_set_device_id(md->mport, hdid); in maint_hdid_set()
519 struct mport_dev *md = priv->md; in maint_comptag_set() local
525 rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag); in maint_comptag_set()
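The maintenance hits (source lines 497-525) update software and hardware state together: maint_hdid_set() writes the new host device ID to the mport's cached copy, to the properties block later reported to user space, and to hardware via rio_local_set_device_id(); maint_comptag_set() is a single local config register write. A sketch of the same sequence, with argument validation elided:

	#include <linux/rio_drv.h>
	#include <linux/rio_regs.h>

	static void set_host_deviceid(struct mport_dev *md, u16 hdid)
	{
		md->mport->host_deviceid = hdid;   /* cached on the mport */
		md->properties.hdid = hdid;        /* reported to user space */
		rio_local_set_device_id(md->mport, hdid); /* hardware CSR */
	}

	static int set_comptag(struct mport_dev *md, u32 comptag)
	{
		/* The component tag lives in a single local CSR. */
		return rio_local_write_config_32(md->mport,
						 RIO_COMPONENT_TAG_CSR,
						 comptag);
	}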
553 struct mport_dev *md = in mport_release_def_dma() local
556 rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id); in mport_release_def_dma()
557 rio_release_dma(md->dma_chan); in mport_release_def_dma()
558 md->dma_chan = NULL; in mport_release_def_dma()
585 mutex_lock(&req->map->md->buf_mutex); in dma_req_free()
587 mutex_unlock(&req->map->md->buf_mutex); in dma_req_free()
655 priv->dmach = rio_request_mport_dma(priv->md->mport); in get_dma_channel()
658 if (priv->md->dma_chan) { in get_dma_channel()
659 priv->dmach = priv->md->dma_chan; in get_dma_channel()
660 kref_get(&priv->md->dma_ref); in get_dma_channel()
666 } else if (!priv->md->dma_chan) { in get_dma_channel()
668 priv->md->dma_chan = priv->dmach; in get_dma_channel()
669 kref_init(&priv->md->dma_ref); in get_dma_channel()
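The get_dma_channel() and mport_release_def_dma() hits (source lines 553-669) describe a shared "default" DMA channel: the first open file that obtains a channel publishes it as md->dma_chan and starts the refcount with kref_init(), later files that cannot get a channel of their own borrow it with kref_get(), and the kref release callback recovers the mport_dev with container_of() and hands the channel back. A sketch of that reference flow, with the locking and debug calls trimmed:

	/* kref release: runs when the last borrower drops md->dma_ref. */
	static void release_def_dma(struct kref *dma_ref)
	{
		struct mport_dev *md =
			container_of(dma_ref, struct mport_dev, dma_ref);

		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}

	static struct dma_chan *acquire_channel(struct mport_cdev_priv *priv)
	{
		struct mport_dev *md = priv->md;

		priv->dmach = rio_request_mport_dma(md->mport);
		if (!priv->dmach && md->dma_chan) {
			/* No private channel: borrow the shared default. */
			priv->dmach = md->dma_chan;
			kref_get(&md->dma_ref);
		} else if (priv->dmach && !md->dma_chan) {
			/* First channel obtained becomes the default. */
			md->dma_chan = priv->dmach;
			kref_init(&md->dma_ref);
		}
		return priv->dmach;
	}

	/* Per-file teardown later drops the borrowed reference with
	 *	kref_put(&md->dma_ref, release_def_dma);
	 * matching the hit at source line 1995. */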
814 struct mport_dev *md = priv->md; in rio_dma_transfer() local
902 mutex_lock(&md->buf_mutex); in rio_dma_transfer()
903 list_for_each_entry(map, &md->mappings, node) { in rio_dma_transfer()
911 mutex_unlock(&md->buf_mutex); in rio_dma_transfer()
977 priv->md->properties.transfer_mode) == 0) in rio_mport_transfer_ioctl()
1086 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_dma_mapping() argument
1095 map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size, in rio_mport_create_dma_mapping()
1105 map->md = md; in rio_mport_create_dma_mapping()
1107 mutex_lock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1108 list_add_tail(&map->node, &md->mappings); in rio_mport_create_dma_mapping()
1109 mutex_unlock(&md->buf_mutex); in rio_mport_create_dma_mapping()
1118 struct mport_dev *md = priv->md; in rio_mport_alloc_dma() local
1126 ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping); in rio_mport_alloc_dma()
1133 mutex_lock(&md->buf_mutex); in rio_mport_alloc_dma()
1135 mutex_unlock(&md->buf_mutex); in rio_mport_alloc_dma()
1145 struct mport_dev *md = priv->md; in rio_mport_free_dma() local
1154 mutex_lock(&md->buf_mutex); in rio_mport_free_dma()
1155 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_free_dma()
1163 mutex_unlock(&md->buf_mutex); in rio_mport_free_dma()
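The alloc/free hits (source lines 1086-1163) pair dma_alloc_coherent() with the same mappings list: a successful allocation is recorded in a mapping, back-referenced to md, and appended under buf_mutex; the free path locates the entry and unlinks it with the _safe iterator. A sketch of the allocation side, assuming the virt_addr/phys_addr/size members sketched earlier:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	static int alloc_dma_buffer(struct mport_dev *md, size_t size,
				    struct rio_mport_mapping **mapping)
	{
		struct rio_mport_mapping *map;

		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map)
			return -ENOMEM;

		/* Coherent memory comes from the mport's parent device,
		 * matching the hit at source line 1095. */
		map->virt_addr = dma_alloc_coherent(md->mport->dev.parent,
						    size, &map->phys_addr,
						    GFP_KERNEL);
		if (!map->virt_addr) {
			kfree(map);
			return -ENOMEM;
		}

		map->size = size;
		map->md = md;               /* as at source line 1105 */

		mutex_lock(&md->buf_mutex);
		list_add_tail(&map->node, &md->mappings);
		mutex_unlock(&md->buf_mutex);

		*mapping = map;
		return 0;
	}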
1199 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_create_inbound_mapping() argument
1203 struct rio_mport *mport = md->mport; in rio_mport_create_inbound_mapping()
1232 map->md = md; in rio_mport_create_inbound_mapping()
1234 mutex_lock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1235 list_add_tail(&map->node, &md->mappings); in rio_mport_create_inbound_mapping()
1236 mutex_unlock(&md->buf_mutex); in rio_mport_create_inbound_mapping()
1249 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp, in rio_mport_get_inbound_mapping() argument
1259 mutex_lock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1260 list_for_each_entry(map, &md->mappings, node) { in rio_mport_get_inbound_mapping()
1274 mutex_unlock(&md->buf_mutex); in rio_mport_get_inbound_mapping()
1280 return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping); in rio_mport_get_inbound_mapping()
1286 struct mport_dev *md = priv->md; in rio_mport_map_inbound() local
1291 if (!md->mport->ops->map_inb) in rio_mport_map_inbound()
1296 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_map_inbound()
1298 ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr, in rio_mport_map_inbound()
1309 mutex_lock(&md->buf_mutex); in rio_mport_map_inbound()
1311 mutex_unlock(&md->buf_mutex); in rio_mport_map_inbound()
1328 struct mport_dev *md = priv->md; in rio_mport_inbound_free() local
1332 rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp); in rio_mport_inbound_free()
1334 if (!md->mport->ops->unmap_inb) in rio_mport_inbound_free()
1340 mutex_lock(&md->buf_mutex); in rio_mport_inbound_free()
1341 list_for_each_entry_safe(map, _map, &md->mappings, node) { in rio_mport_inbound_free()
1350 mutex_unlock(&md->buf_mutex); in rio_mport_inbound_free()
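The inbound-window hits (source lines 1199-1350) add a get-or-create step on top of the same list discipline: rio_mport_get_inbound_mapping() first scans md->mappings under buf_mutex for a window that already covers the requested RapidIO address range, and only on a miss falls through to rio_mport_create_inbound_mapping(), which takes the mutex itself to append the new entry. A sketch of that flow; the match test is a simplified stand-in, and the real code also reference-counts shared windows:

	/* Body elided: maps the hardware window, sets map->md = md, and
	 * appends under buf_mutex (source lines 1232-1236). */
	static int create_ibw(struct mport_dev *md, struct file *filp,
			      u64 raddr, u64 size,
			      struct rio_mport_mapping **mapping);

	static int get_or_create_ibw(struct mport_dev *md, struct file *filp,
				     u64 raddr, u64 size,
				     struct rio_mport_mapping **mapping)
	{
		struct rio_mport_mapping *map;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (map->rio_addr == raddr && map->size >= size) {
				*mapping = map;  /* share existing window */
				mutex_unlock(&md->buf_mutex);
				return 0;
			}
		}
		mutex_unlock(&md->buf_mutex);

		/* Miss: create a new window outside the list walk. */
		return create_ibw(md, filp, raddr, size, mapping);
	}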
1362 struct mport_dev *md = priv->md; in maint_port_idx_get() local
1363 u32 port_idx = md->mport->index; in maint_port_idx_get()
1390 dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n"); in rio_mport_add_event()
1433 struct mport_dev *md = priv->md; in rio_mport_add_db_filter() local
1445 ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high, in rio_mport_add_db_filter()
1449 dev_name(&md->dev), ret); in rio_mport_add_db_filter()
1455 rio_release_inb_dbell(md->mport, filter.low, filter.high); in rio_mport_add_db_filter()
1461 spin_lock_irqsave(&md->db_lock, flags); in rio_mport_add_db_filter()
1463 list_add_tail(&db_filter->data_node, &md->doorbells); in rio_mport_add_db_filter()
1464 spin_unlock_irqrestore(&md->db_lock, flags); in rio_mport_add_db_filter()
1490 spin_lock_irqsave(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1500 spin_unlock_irqrestore(&priv->md->db_lock, flags); in rio_mport_remove_db_filter()
1503 rio_release_inb_dbell(priv->md->mport, filter.low, filter.high); in rio_mport_remove_db_filter()
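The doorbell hits (source lines 1433-1503) show a two-step registration: the inbound doorbell range is claimed from the mport first with rio_request_inb_dbell(), passing md as the callback context, and only on success is the filter linked into md->doorbells under the db_lock spinlock, taken with irqsave because the doorbell callback can run in interrupt context; removal reverses the order before calling rio_release_inb_dbell(). A sketch of the add path, with an abbreviated filter type:

	struct rio_mport_db_filter {        /* abbreviated */
		struct list_head data_node; /* link in md->doorbells */
		u16 low, high;              /* doorbell info range */
	};

	static int add_db_filter(struct mport_dev *md,
				 struct rio_mport_db_filter *db_filter,
				 u16 low, u16 high,
				 void (*dinb)(struct rio_mport *, void *,
					      u16, u16, u16))
	{
		unsigned long flags;
		int ret;

		/* Claim the hardware range before publishing the filter. */
		ret = rio_request_inb_dbell(md->mport, md, low, high, dinb);
		if (ret)
			return ret;

		db_filter->low = low;
		db_filter->high = high;

		spin_lock_irqsave(&md->db_lock, flags);
		list_add_tail(&db_filter->data_node, &md->doorbells);
		spin_unlock_irqrestore(&md->db_lock, flags);
		return 0;
	}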
1520 struct mport_dev *md = context; in rio_mport_pw_handler() local
1530 spin_lock(&md->pw_lock); in rio_mport_pw_handler()
1531 list_for_each_entry(pw_filter, &md->portwrites, md_node) { in rio_mport_pw_handler()
1538 spin_unlock(&md->pw_lock); in rio_mport_pw_handler()
1552 struct mport_dev *md = priv->md; in rio_mport_add_pw_filter() local
1567 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1568 if (list_empty(&md->portwrites)) in rio_mport_add_pw_filter()
1571 list_add_tail(&pw_filter->md_node, &md->portwrites); in rio_mport_add_pw_filter()
1572 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_add_pw_filter()
1577 ret = rio_add_mport_pw_handler(md->mport, md, in rio_mport_add_pw_filter()
1580 dev_err(&md->dev, in rio_mport_add_pw_filter()
1585 rio_pw_enable(md->mport, 1); in rio_mport_add_pw_filter()
1609 struct mport_dev *md = priv->md; in rio_mport_remove_pw_filter() local
1619 spin_lock_irqsave(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1628 if (list_empty(&md->portwrites)) in rio_mport_remove_pw_filter()
1630 spin_unlock_irqrestore(&md->pw_lock, flags); in rio_mport_remove_pw_filter()
1633 rio_del_mport_pw_handler(md->mport, priv->md, in rio_mport_remove_pw_filter()
1635 rio_pw_enable(md->mport, 0); in rio_mport_remove_pw_filter()
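The port-write hits (source lines 1520-1635) show lazy handler registration: rio_mport_pw_handler() receives md back as its context and walks md->portwrites under the plain spin_lock() form, while adding the first filter to the previously empty list is what installs the mport-level handler via rio_add_mport_pw_handler() and switches capture on with rio_pw_enable(mport, 1); removing the last filter unhooks the handler and disables capture again. A sketch of the add path:

	struct rio_mport_pw_filter {        /* abbreviated */
		struct list_head md_node;   /* link in md->portwrites */
		/* ... filter range ... */
	};

	static int add_pw_filter(struct mport_dev *md,
				 struct rio_mport_pw_filter *pw_filter,
				 int (*pwcback)(struct rio_mport *, void *,
						union rio_pw_msg *, int))
	{
		unsigned long flags;
		int hw_init = 0, ret = 0;

		spin_lock_irqsave(&md->pw_lock, flags);
		if (list_empty(&md->portwrites))
			hw_init = 1;     /* first filter arms the hardware */
		list_add_tail(&pw_filter->md_node, &md->portwrites);
		spin_unlock_irqrestore(&md->pw_lock, flags);

		if (hw_init) {
			/* Install the handler with md as its context,
			 * then enable port-write capture. */
			ret = rio_add_mport_pw_handler(md->mport, md, pwcback);
			if (!ret)
				rio_pw_enable(md->mport, 1);
		}
		return ret;
	}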
1679 struct mport_dev *md = priv->md; in rio_mport_add_riodev() local
1707 mport = md->mport; in rio_mport_add_riodev()
1829 mport = priv->md->mport; in rio_mport_del_riodev()
1902 priv->md = chdev; in mport_cdev_open()
1946 struct mport_dev *md; in mport_cdev_release_dma() local
1960 md = priv->md; in mport_cdev_release_dma()
1989 if (priv->dmach != priv->md->dma_chan) { in mport_cdev_release_dma()
1995 kref_put(&md->dma_ref, mport_release_def_dma); in mport_cdev_release_dma()
2016 rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp); in mport_cdev_release()
2018 chdev = priv->md; in mport_cdev_release()
2068 struct mport_dev *md = data->md; in mport_cdev_ioctl() local
2070 if (atomic_read(&md->active) == 0) in mport_cdev_ioctl()
2089 md->properties.hdid = md->mport->host_deviceid; in mport_cdev_ioctl()
2090 if (copy_to_user((void __user *)arg, &(md->properties), in mport_cdev_ioctl()
2091 sizeof(md->properties))) in mport_cdev_ioctl()
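The ioctl hits (source lines 2068-2091) show the entry-point gate and the properties query: every request is first checked against the device's active flag, which mport_cdev_remove() clears (see the hits at the end of this listing), so ioctls racing with removal fail cleanly; the properties command refreshes the cached hdid from the mport before copying the whole block out. A simplified stand-in for that dispatch, using the uapi command name from linux/rio_mport_cdev.h:

	#include <linux/uaccess.h>

	static long cdev_ioctl_sketch(struct mport_cdev_priv *priv,
				      unsigned int cmd, unsigned long arg)
	{
		struct mport_dev *md = priv->md;

		/* Fail fast once removal has marked the device inactive. */
		if (atomic_read(&md->active) == 0)
			return -ENODEV;

		switch (cmd) {
		case RIO_MPORT_GET_PROPERTIES:
			/* host_deviceid may have changed: re-sync first. */
			md->properties.hdid = md->mport->host_deviceid;
			if (copy_to_user((void __user *)arg, &md->properties,
					 sizeof(md->properties)))
				return -EFAULT;
			return 0;
		default:
			return -EINVAL;
		}
	}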
2147 struct rio_mport *mport = map->md->mport; in mport_release_mapping()
2183 mutex_lock(&map->md->buf_mutex); in mport_mm_close()
2185 mutex_unlock(&map->md->buf_mutex); in mport_mm_close()
2196 struct mport_dev *md; in mport_cdev_mmap() local
2206 md = priv->md; in mport_cdev_mmap()
2209 mutex_lock(&md->buf_mutex); in mport_cdev_mmap()
2210 list_for_each_entry(map, &md->mappings, node) { in mport_cdev_mmap()
2217 mutex_unlock(&md->buf_mutex); in mport_cdev_mmap()
2231 ret = dma_mmap_coherent(md->mport->dev.parent, vma, in mport_cdev_mmap()
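The mmap hits (source lines 2147-2231) resolve vma->vm_pgoff against the recorded mappings under buf_mutex and then delegate the page-table work to dma_mmap_coherent() on the mport's parent device; mport_mm_close() later drops the mapping's reference under the same mutex. A sketch of the lookup, simplified to an exact-start match on the buffer's DMA address:

	#include <linux/mm.h>

	static int cdev_mmap_sketch(struct mport_cdev_priv *priv,
				    struct vm_area_struct *vma)
	{
		struct mport_dev *md = priv->md;
		struct rio_mport_mapping *map, *found = NULL;
		size_t size = vma->vm_end - vma->vm_start;
		u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

		mutex_lock(&md->buf_mutex);
		list_for_each_entry(map, &md->mappings, node) {
			if (map->phys_addr == offset && map->size >= size) {
				found = map;   /* simplified criterion */
				break;
			}
		}
		mutex_unlock(&md->buf_mutex);

		if (!found)
			return -ENOMEM;

		/* Offset already resolved to a buffer: map from its start. */
		vma->vm_pgoff = 0;
		return dma_mmap_coherent(md->mport->dev.parent, vma,
					 found->virt_addr, found->phys_addr,
					 found->size);
	}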
2300 struct rio_mport *mport = priv->md->mport; in mport_write()
2349 struct mport_dev *md; in mport_device_release() local
2352 md = container_of(dev, struct mport_dev, dev); in mport_device_release()
2353 kfree(md); in mport_device_release()
2363 struct mport_dev *md; in mport_cdev_add() local
2366 md = kzalloc(sizeof(*md), GFP_KERNEL); in mport_cdev_add()
2367 if (!md) { in mport_cdev_add()
2372 md->mport = mport; in mport_cdev_add()
2373 mutex_init(&md->buf_mutex); in mport_cdev_add()
2374 mutex_init(&md->file_mutex); in mport_cdev_add()
2375 INIT_LIST_HEAD(&md->file_list); in mport_cdev_add()
2377 device_initialize(&md->dev); in mport_cdev_add()
2378 md->dev.devt = MKDEV(MAJOR(dev_number), mport->id); in mport_cdev_add()
2379 md->dev.class = dev_class; in mport_cdev_add()
2380 md->dev.parent = &mport->dev; in mport_cdev_add()
2381 md->dev.release = mport_device_release; in mport_cdev_add()
2382 dev_set_name(&md->dev, DEV_NAME "%d", mport->id); in mport_cdev_add()
2383 atomic_set(&md->active, 1); in mport_cdev_add()
2385 cdev_init(&md->cdev, &mport_fops); in mport_cdev_add()
2386 md->cdev.owner = THIS_MODULE; in mport_cdev_add()
2388 INIT_LIST_HEAD(&md->doorbells); in mport_cdev_add()
2389 spin_lock_init(&md->db_lock); in mport_cdev_add()
2390 INIT_LIST_HEAD(&md->portwrites); in mport_cdev_add()
2391 spin_lock_init(&md->pw_lock); in mport_cdev_add()
2392 INIT_LIST_HEAD(&md->mappings); in mport_cdev_add()
2394 md->properties.id = mport->id; in mport_cdev_add()
2395 md->properties.sys_size = mport->sys_size; in mport_cdev_add()
2396 md->properties.hdid = mport->host_deviceid; in mport_cdev_add()
2397 md->properties.index = mport->index; in mport_cdev_add()
2403 md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED; in mport_cdev_add()
2405 md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER; in mport_cdev_add()
2408 ret = cdev_device_add(&md->cdev, &md->dev); in mport_cdev_add()
2416 md->properties.flags = attr.flags; in mport_cdev_add()
2417 md->properties.link_speed = attr.link_speed; in mport_cdev_add()
2418 md->properties.link_width = attr.link_width; in mport_cdev_add()
2419 md->properties.dma_max_sge = attr.dma_max_sge; in mport_cdev_add()
2420 md->properties.dma_max_size = attr.dma_max_size; in mport_cdev_add()
2421 md->properties.dma_align = attr.dma_align; in mport_cdev_add()
2422 md->properties.cap_sys_size = 0; in mport_cdev_add()
2423 md->properties.cap_transfer_mode = 0; in mport_cdev_add()
2424 md->properties.cap_addr_size = 0; in mport_cdev_add()
2430 list_add_tail(&md->node, &mport_devs); in mport_cdev_add()
2436 return md; in mport_cdev_add()
2439 put_device(&md->dev); in mport_cdev_add()
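mport_cdev_add() (source lines 2363-2439) follows the standard embedded-device pattern: device_initialize() starts refcounting before anything can fail, the embedded cdev and device are registered together with cdev_device_add(), and every later error path must go through put_device(), whose release callback (mport_device_release(), source lines 2349-2353) recovers the container with container_of() and frees it; a bare kfree() there would bypass the refcount. A condensed sketch, with the properties and link-attribute setup trimmed and the dev_number/dev_class/fops values passed in as parameters:

	#include <linux/kdev_t.h>
	#include <linux/module.h>

	#define DEV_NAME "rio_mport"          /* value assumed */

	static void mdev_release(struct device *dev)
	{
		/* dev is embedded: recover the container and free it. */
		kfree(container_of(dev, struct mport_dev, dev));
	}

	static struct mport_dev *mdev_add(struct rio_mport *mport,
					  dev_t dev_number,
					  struct class *dev_class,
					  const struct file_operations *fops)
	{
		struct mport_dev *md;

		md = kzalloc(sizeof(*md), GFP_KERNEL);
		if (!md)
			return NULL;

		md->mport = mport;
		mutex_init(&md->buf_mutex);
		mutex_init(&md->file_mutex);
		INIT_LIST_HEAD(&md->file_list);
		INIT_LIST_HEAD(&md->mappings);
		INIT_LIST_HEAD(&md->doorbells);
		spin_lock_init(&md->db_lock);
		INIT_LIST_HEAD(&md->portwrites);
		spin_lock_init(&md->pw_lock);

		device_initialize(&md->dev);  /* refcounting starts here */
		md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
		md->dev.class = dev_class;
		md->dev.parent = &mport->dev;
		md->dev.release = mdev_release;
		dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
		atomic_set(&md->active, 1);

		cdev_init(&md->cdev, fops);
		md->cdev.owner = THIS_MODULE;

		/* Registers the cdev and the device as one unit. */
		if (cdev_device_add(&md->cdev, &md->dev)) {
			put_device(&md->dev); /* frees md via mdev_release */
			return NULL;
		}
		return md;
	}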
2447 static void mport_cdev_terminate_dma(struct mport_dev *md) in mport_cdev_terminate_dma() argument
2452 rmcd_debug(DMA, "%s", dev_name(&md->dev)); in mport_cdev_terminate_dma()
2454 mutex_lock(&md->file_mutex); in mport_cdev_terminate_dma()
2455 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_terminate_dma()
2461 mutex_unlock(&md->file_mutex); in mport_cdev_terminate_dma()
2463 if (md->dma_chan) { in mport_cdev_terminate_dma()
2464 dmaengine_terminate_all(md->dma_chan); in mport_cdev_terminate_dma()
2465 rio_release_dma(md->dma_chan); in mport_cdev_terminate_dma()
2466 md->dma_chan = NULL; in mport_cdev_terminate_dma()
2476 static int mport_cdev_kill_fasync(struct mport_dev *md) in mport_cdev_kill_fasync() argument
2481 mutex_lock(&md->file_mutex); in mport_cdev_kill_fasync()
2482 list_for_each_entry(client, &md->file_list, list) { in mport_cdev_kill_fasync()
2487 mutex_unlock(&md->file_mutex); in mport_cdev_kill_fasync()
2495 static void mport_cdev_remove(struct mport_dev *md) in mport_cdev_remove() argument
2499 rmcd_debug(EXIT, "Remove %s cdev", md->mport->name); in mport_cdev_remove()
2500 atomic_set(&md->active, 0); in mport_cdev_remove()
2501 mport_cdev_terminate_dma(md); in mport_cdev_remove()
2502 rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler); in mport_cdev_remove()
2503 cdev_device_del(&md->cdev, &md->dev); in mport_cdev_remove()
2504 mport_cdev_kill_fasync(md); in mport_cdev_remove()
2514 mutex_lock(&md->buf_mutex); in mport_cdev_remove()
2515 list_for_each_entry_safe(map, _map, &md->mappings, node) { in mport_cdev_remove()
2518 mutex_unlock(&md->buf_mutex); in mport_cdev_remove()
2520 if (!list_empty(&md->mappings)) in mport_cdev_remove()
2522 md->mport->name); in mport_cdev_remove()
2524 rio_release_inb_dbell(md->mport, 0, 0x0fff); in mport_cdev_remove()
2526 put_device(&md->dev); in mport_cdev_remove()
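mport_cdev_remove() (source lines 2495-2526) spells out the teardown order: mark the device inactive first so racing ioctls bail out, terminate DMA, unhook the port-write handler, unregister the cdev/device pair so no new opens arrive, signal fasync listeners, drop the references on any leftover mappings under buf_mutex, release the entire inbound doorbell info range (0 through 0x0fff), and only then put the last device reference, which may free md. A sketch of that ordering, forward-declaring the helpers the hits name:

	/* Helpers named in the hits above; bodies elided. */
	static void mport_cdev_terminate_dma(struct mport_dev *md);
	static int mport_cdev_kill_fasync(struct mport_dev *md);
	static void mport_release_mapping(struct kref *ref);
	static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
					union rio_pw_msg *msg, int step);

	static void mdev_remove(struct mport_dev *md)
	{
		struct rio_mport_mapping *map, *_map;

		atomic_set(&md->active, 0);     /* new ioctls now fail */
		mport_cdev_terminate_dma(md);
		rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
		cdev_device_del(&md->cdev, &md->dev);
		mport_cdev_kill_fasync(md);

		/* Drop whatever mappings user space left behind. */
		mutex_lock(&md->buf_mutex);
		list_for_each_entry_safe(map, _map, &md->mappings, node)
			kref_put(&map->ref, mport_release_mapping);
		mutex_unlock(&md->buf_mutex);

		if (!list_empty(&md->mappings))
			dev_warn(&md->dev, "%s: mappings still in use\n",
				 md->mport->name);

		rio_release_inb_dbell(md->mport, 0, 0x0fff);
		put_device(&md->dev);           /* may free md */
	}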