Lines Matching refs:mdev_state

157 	struct mdev_state *mdev_state;  member
163 struct mdev_state { struct
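
The struct behind these references can be reconstructed from the fields the matches below touch. A sketch; field order, the edid_blob size, and VBE_DISPI_INDEX_COUNT are assumptions:

	struct mdev_state {
		struct vfio_device vdev;	/* container_of() target in the vfio ops */
		struct mdev_device *mdev;
		const struct mbochs_type *type;	/* per-type limits: mbytes, max_x, max_y */

		u8 *vconfig;			/* virtual PCI config space */
		u64 bar_mask[3];		/* BAR sizing masks */
		u64 memsize;
		u32 pagecount;
		struct page **pages;		/* lazily allocated framebuffer pages */

		struct mutex ops_lock;		/* serializes the device state below */

		struct vfio_region_gfx_edid edid_regs;
		u8 edid_blob[0x400];		/* assumed size */

		struct list_head dmabufs;	/* exported mbochs_dmabuf objects */
		u32 active_id;
		u32 next_id;

		u16 vbe[VBE_DISPI_INDEX_COUNT];	/* bochs dispi registers */
	};
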
205 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
207 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
210 static void mbochs_create_config_space(struct mdev_state *mdev_state) in mbochs_create_config_space() argument
212 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID], in mbochs_create_config_space()
214 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID], in mbochs_create_config_space()
216 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID], in mbochs_create_config_space()
218 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID], in mbochs_create_config_space()
221 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND], in mbochs_create_config_space()
223 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE], in mbochs_create_config_space()
225 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01; in mbochs_create_config_space()
227 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0], in mbochs_create_config_space()
231 mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1; in mbochs_create_config_space()
233 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2], in mbochs_create_config_space()
236 mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1; in mbochs_create_config_space()
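
Filled out, mbochs_create_config_space() is straight-line initialization of the virtual config space; the literal ID values below are assumptions (the bochs dispi interface is traditionally paired with the QEMU stdvga IDs):

	static void mbochs_create_config_space(struct mdev_state *mdev_state)
	{
		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
			   0x1234);				/* assumed */
		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
			   0x1111);				/* assumed */
		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
			   PCI_SUBVENDOR_ID_REDHAT_QUMRANET);	/* assumed */
		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
			   PCI_SUBDEVICE_ID_QEMU);		/* assumed */

		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
			   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
		STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
			   PCI_CLASS_DISPLAY_OTHER);
		mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

		/* BAR 0: the framebuffer, 32-bit prefetchable memory */
		STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
			   PCI_BASE_ADDRESS_SPACE_MEMORY |
			   PCI_BASE_ADDRESS_MEM_TYPE_32 |
			   PCI_BASE_ADDRESS_MEM_PREFETCH);
		/* ~size + 1 == -size: the writable address bits used for BAR sizing */
		mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1;

		/* BAR 2: the MMIO register bar */
		STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2],
			   PCI_BASE_ADDRESS_SPACE_MEMORY |
			   PCI_BASE_ADDRESS_MEM_TYPE_32);
		mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1;
	}
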
239 static int mbochs_check_framebuffer(struct mdev_state *mdev_state, in mbochs_check_framebuffer() argument
242 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_check_framebuffer()
243 u16 *vbe = mdev_state->vbe; in mbochs_check_framebuffer()
246 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_check_framebuffer()
278 if (mode->offset + mode->size > mdev_state->memsize) { in mbochs_check_framebuffer()
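
The shape of mbochs_check_framebuffer(), sketched from the fragments above: bail out unless the guest enabled the dispi interface, derive the mode from the vbe registers, and reject framebuffers that overrun guest video memory. Register and flag names here are assumptions, and the mode derivation is elided:

	static int mbochs_check_framebuffer(struct mdev_state *mdev_state,
					    struct mbochs_mode *mode)
	{
		struct device *dev = mdev_dev(mdev_state->mdev);
		u16 *vbe = mdev_state->vbe;

		WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

		if (!(vbe[VBE_DISPI_INDEX_ENABLE] & VBE_DISPI_ENABLED))
			goto nofb;

		/* fill mode->width/height/stride/offset/size from vbe[] (elided) */

		if (mode->offset + mode->size > mdev_state->memsize) {
			dev_dbg(dev, "%s: framebuffer memory overflow\n", __func__);
			goto nofb;
		}
		return 0;

	nofb:
		memset(mode, 0, sizeof(*mode));
		return -EINVAL;
	}
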
297 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, in handle_pci_cfg_write() argument
300 struct device *dev = mdev_dev(mdev_state->mdev); in handle_pci_cfg_write()
310 cfg_addr = (cfg_addr & mdev_state->bar_mask[index]); in handle_pci_cfg_write()
318 cfg_addr |= (mdev_state->vconfig[offset] & in handle_pci_cfg_write()
320 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); in handle_pci_cfg_write()
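
The BAR writes above implement the standard PCI sizing handshake: the guest's value is masked with bar_mask[] so only size-aligned address bits stick, then the read-only flag bits from the template are OR'ed back in. A sketch of that switch arm (the buf decode and the dev_dbg are assumptions):

	case PCI_BASE_ADDRESS_0:
	case PCI_BASE_ADDRESS_2:
		cfg_addr = *(u32 *)buf;
		index = (offset - PCI_BASE_ADDRESS_0) / 0x04;
		/* keep only the writable, size-aligned address bits */
		cfg_addr = (cfg_addr & mdev_state->bar_mask[index]);
		if (cfg_addr)
			dev_dbg(dev, "BAR #%d @ 0x%x\n", index, cfg_addr);
		/* preserve the read-only flag bits */
		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
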
325 static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset, in handle_mmio_write() argument
328 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_write()
340 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_write()
341 mdev_state->vbe[index] = reg16; in handle_mmio_write()
355 static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset, in handle_mmio_read() argument
358 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_read()
365 edid = &mdev_state->edid_regs; in handle_mmio_read()
371 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_mmio_read()
377 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_read()
378 reg16 = mdev_state->vbe[index]; in handle_mmio_read()
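
Both MMIO handlers map 16-bit accesses in the dispi register window onto the vbe[] array, with the ARRAY_SIZE() check guarding the index. A sketch of the write arm; the window base 0x500 and the unhandled label are assumptions, and the read arm mirrors it with reg16 = mdev_state->vbe[index]:

	/* inside the offset switch of handle_mmio_write() */
	case 0x500 ... 0x515:			/* assumed dispi register window */
		if (count != 2)
			goto unhandled;		/* hypothetical label */
		index = (offset - 0x500) / 2;
		reg16 = *(u16 *)buf;
		if (index < ARRAY_SIZE(mdev_state->vbe))
			mdev_state->vbe[index] = reg16;
		break;
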
392 static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset, in handle_edid_regs() argument
395 char *regs = (void *)&mdev_state->edid_regs; in handle_edid_regs()
397 if (offset + count > sizeof(mdev_state->edid_regs)) in handle_edid_regs()
419 static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset, in handle_edid_blob() argument
422 if (offset + count > mdev_state->edid_regs.edid_max_size) in handle_edid_blob()
425 memcpy(mdev_state->edid_blob + offset, buf, count); in handle_edid_blob()
427 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_edid_blob()
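
handle_edid_regs() (above) gates accesses to a 4-byte-aligned register window; handle_edid_blob() is the simplest region handler: bounds-check against the advertised maximum, then memcpy in the requested direction. Completed from the fragments above:

	static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset,
				     char *buf, u32 count, bool is_write)
	{
		if (offset + count > mdev_state->edid_regs.edid_max_size)
			return;
		if (is_write)
			memcpy(mdev_state->edid_blob + offset, buf, count);
		else
			memcpy(buf, mdev_state->edid_blob + offset, count);
	}
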
430 static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf, in mdev_access() argument
438 mutex_lock(&mdev_state->ops_lock); in mdev_access()
442 handle_pci_cfg_write(mdev_state, pos, buf, count); in mdev_access()
444 memcpy(buf, (mdev_state->vconfig + pos), count); in mdev_access()
451 handle_mmio_write(mdev_state, pos, buf, count); in mdev_access()
453 handle_mmio_read(mdev_state, pos, buf, count); in mdev_access()
460 handle_edid_regs(mdev_state, pos, buf, count, is_write); in mdev_access()
463 handle_edid_blob(mdev_state, pos, buf, count, is_write); in mdev_access()
468 MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { in mdev_access()
471 pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); in mdev_access()
481 dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n", in mdev_access()
491 mutex_unlock(&mdev_state->ops_lock); in mdev_access()
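
mdev_access() is the single dispatcher behind the read, write, and ioctl paths: it takes ops_lock, routes the access by absolute offset to config space, the MMIO bar, the EDID ranges, or guest memory, and logs anything else. A condensed sketch; bounds macros not visible above and the out label are assumptions, and the MMIO/EDID branches are elided:

	static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
				   size_t count, loff_t pos, bool is_write)
	{
		struct page *pg;
		loff_t poff;
		char *map;
		int ret = 0;

		mutex_lock(&mdev_state->ops_lock);

		if (pos < MBOCHS_CONFIG_SPACE_SIZE) {
			if (is_write)
				handle_pci_cfg_write(mdev_state, pos, buf, count);
			else
				memcpy(buf, (mdev_state->vconfig + pos), count);
		}
		/* MMIO bar -> handle_mmio_write()/_read(),
		 * EDID ranges -> handle_edid_regs()/_blob() (elided) */
		else if (pos >= MBOCHS_MEMORY_BAR_OFFSET &&
			 pos + count <=
			 MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
			pos -= MBOCHS_MEMORY_BAR_OFFSET;
			poff = pos & ~PAGE_MASK;
			pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
			map = kmap(pg);
			if (is_write)
				memcpy(map + poff, buf, count);
			else
				memcpy(buf, map + poff, count);
			kunmap(pg);
			put_page(pg);
		} else {
			dev_dbg(mdev_state->vdev.dev, "%s: %s @0x%llx (unhandled)\n",
				__func__, is_write ? "WR" : "RD", pos);
			ret = -1;
			goto out;			/* hypothetical label */
		}
		ret = count;

	out:
		mutex_unlock(&mdev_state->ops_lock);
		return ret;
	}
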
496 static int mbochs_reset(struct mdev_state *mdev_state) in mbochs_reset() argument
498 u32 size64k = mdev_state->memsize / (64 * 1024); in mbochs_reset()
501 for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++) in mbochs_reset()
502 mdev_state->vbe[i] = 0; in mbochs_reset()
503 mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5; in mbochs_reset()
504 mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k; in mbochs_reset()
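
Completed, the reset handler just clears the dispi registers and restores the two fixed values, the interface ID and the video memory size in 64k units:

	static int mbochs_reset(struct mdev_state *mdev_state)
	{
		u32 size64k = mdev_state->memsize / (64 * 1024);
		int i;

		for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++)
			mdev_state->vbe[i] = 0;
		mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5;
		mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k;
		return 0;
	}
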
514 struct mdev_state *mdev_state; in mbochs_probe() local
523 mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); in mbochs_probe()
524 if (mdev_state == NULL) in mbochs_probe()
526 vfio_init_group_dev(&mdev_state->vdev, &mdev->dev, &mbochs_dev_ops); in mbochs_probe()
528 mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL); in mbochs_probe()
529 if (mdev_state->vconfig == NULL) in mbochs_probe()
532 mdev_state->memsize = type->mbytes * 1024 * 1024; in mbochs_probe()
533 mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT; in mbochs_probe()
534 mdev_state->pages = kcalloc(mdev_state->pagecount, in mbochs_probe()
537 if (!mdev_state->pages) in mbochs_probe()
541 type->name, type->mbytes, mdev_state->pagecount); in mbochs_probe()
543 mutex_init(&mdev_state->ops_lock); in mbochs_probe()
544 mdev_state->mdev = mdev; in mbochs_probe()
545 INIT_LIST_HEAD(&mdev_state->dmabufs); in mbochs_probe()
546 mdev_state->next_id = 1; in mbochs_probe()
548 mdev_state->type = type; in mbochs_probe()
549 mdev_state->edid_regs.max_xres = type->max_x; in mbochs_probe()
550 mdev_state->edid_regs.max_yres = type->max_y; in mbochs_probe()
551 mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET; in mbochs_probe()
552 mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob); in mbochs_probe()
553 mbochs_create_config_space(mdev_state); in mbochs_probe()
554 mbochs_reset(mdev_state); in mbochs_probe()
556 ret = vfio_register_group_dev(&mdev_state->vdev); in mbochs_probe()
559 dev_set_drvdata(&mdev->dev, mdev_state); in mbochs_probe()
562 vfio_uninit_group_dev(&mdev_state->vdev); in mbochs_probe()
563 kfree(mdev_state->pages); in mbochs_probe()
564 kfree(mdev_state->vconfig); in mbochs_probe()
565 kfree(mdev_state); in mbochs_probe()
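
The tail of mbochs_probe() registers the vfio device and, on failure, unwinds in reverse allocation order, exactly the sequence visible above; a condensed sketch with a hypothetical label name:

		ret = vfio_register_group_dev(&mdev_state->vdev);
		if (ret)
			goto err_mem;			/* hypothetical label */
		dev_set_drvdata(&mdev->dev, mdev_state);
		return 0;

	err_mem:
		vfio_uninit_group_dev(&mdev_state->vdev);
		kfree(mdev_state->pages);
		kfree(mdev_state->vconfig);
		kfree(mdev_state);
		return ret;
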
573 struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev); in mbochs_remove() local
575 vfio_unregister_group_dev(&mdev_state->vdev); in mbochs_remove()
576 vfio_uninit_group_dev(&mdev_state->vdev); in mbochs_remove()
577 atomic_add(mdev_state->type->mbytes, &mbochs_avail_mbytes); in mbochs_remove()
578 kfree(mdev_state->pages); in mbochs_remove()
579 kfree(mdev_state->vconfig); in mbochs_remove()
580 kfree(mdev_state); in mbochs_remove()
586 struct mdev_state *mdev_state = in mbochs_read() local
587 container_of(vdev, struct mdev_state, vdev); in mbochs_read()
597 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
609 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
621 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_read()
647 struct mdev_state *mdev_state = in mbochs_write() local
648 container_of(vdev, struct mdev_state, vdev); in mbochs_write()
661 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
673 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
685 ret = mdev_access(mdev_state, (char *)&val, sizeof(val), in mbochs_write()
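
Both mbochs_read() and mbochs_write() split the user buffer into naturally aligned 4-, 2-, and 1-byte chunks and push each through mdev_access(), which explains the three call sites per function above. A sketch of the read side (the write side mirrors it with copy_from_user() and is_write = true):

	static ssize_t mbochs_read(struct vfio_device *vdev, char __user *buf,
				   size_t count, loff_t *ppos)
	{
		struct mdev_state *mdev_state =
			container_of(vdev, struct mdev_state, vdev);
		unsigned int done = 0;
		int ret;

		while (count) {
			size_t filled;

			if (count >= 4 && !(*ppos % 4)) {
				u32 val;

				ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
						  *ppos, false);
				if (ret <= 0)
					goto read_err;
				if (copy_to_user(buf, &val, sizeof(val)))
					goto read_err;
				filled = 4;
			} else if (count >= 2 && !(*ppos % 2)) {
				u16 val;

				ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
						  *ppos, false);
				if (ret <= 0)
					goto read_err;
				if (copy_to_user(buf, &val, sizeof(val)))
					goto read_err;
				filled = 2;
			} else {
				u8 val;

				ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
						  *ppos, false);
				if (ret <= 0)
					goto read_err;
				if (copy_to_user(buf, &val, sizeof(val)))
					goto read_err;
				filled = 1;
			}

			count -= filled;
			done += filled;
			*ppos += filled;
			buf += filled;
		}
		return done;

	read_err:
		return -EFAULT;
	}
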
703 static struct page *__mbochs_get_page(struct mdev_state *mdev_state, in __mbochs_get_page() argument
706 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in __mbochs_get_page()
708 if (!mdev_state->pages[pgoff]) { in __mbochs_get_page()
709 mdev_state->pages[pgoff] = in __mbochs_get_page()
711 if (!mdev_state->pages[pgoff]) in __mbochs_get_page()
715 get_page(mdev_state->pages[pgoff]); in __mbochs_get_page()
716 return mdev_state->pages[pgoff]; in __mbochs_get_page()
719 static struct page *mbochs_get_page(struct mdev_state *mdev_state, in mbochs_get_page() argument
724 if (WARN_ON(pgoff >= mdev_state->pagecount)) in mbochs_get_page()
727 mutex_lock(&mdev_state->ops_lock); in mbochs_get_page()
728 page = __mbochs_get_page(mdev_state, pgoff); in mbochs_get_page()
729 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_page()
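
The two getters implement lazy framebuffer allocation: a page is allocated on first touch and every caller gets its own reference. The locked/unlocked split lets code already holding ops_lock (the dmabuf paths) call __mbochs_get_page() directly. Completed; the allocation call and GFP flags are an assumption:

	static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
					      pgoff_t pgoff)
	{
		WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

		if (!mdev_state->pages[pgoff]) {
			mdev_state->pages[pgoff] =
				alloc_pages(GFP_HIGHUSER | __GFP_ZERO, 0);
			if (!mdev_state->pages[pgoff])
				return NULL;
		}

		get_page(mdev_state->pages[pgoff]);
		return mdev_state->pages[pgoff];
	}

	static struct page *mbochs_get_page(struct mdev_state *mdev_state,
					    pgoff_t pgoff)
	{
		struct page *page;

		if (WARN_ON(pgoff >= mdev_state->pagecount))
			return NULL;

		mutex_lock(&mdev_state->ops_lock);
		page = __mbochs_get_page(mdev_state, pgoff);
		mutex_unlock(&mdev_state->ops_lock);

		return page;
	}
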
734 static void mbochs_put_pages(struct mdev_state *mdev_state) in mbochs_put_pages() argument
736 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_put_pages()
739 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_put_pages()
741 for (i = 0; i < mdev_state->pagecount; i++) { in mbochs_put_pages()
742 if (!mdev_state->pages[i]) in mbochs_put_pages()
744 put_page(mdev_state->pages[i]); in mbochs_put_pages()
745 mdev_state->pages[i] = NULL; in mbochs_put_pages()
754 struct mdev_state *mdev_state = vma->vm_private_data; in mbochs_region_vm_fault() local
757 if (page_offset >= mdev_state->pagecount) in mbochs_region_vm_fault()
760 vmf->page = mbochs_get_page(mdev_state, page_offset); in mbochs_region_vm_fault()
773 struct mdev_state *mdev_state = in mbochs_mmap() local
774 container_of(vdev, struct mdev_state, vdev); in mbochs_mmap()
780 if (vma->vm_end - vma->vm_start > mdev_state->memsize) in mbochs_mmap()
786 vma->vm_private_data = mdev_state; in mbochs_mmap()
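
mmap of the memory bar populates nothing up front: the fault handler resolves each page on demand through mbochs_get_page(), whose extra reference the core MM then owns. A sketch, including an assumed vm_operations_struct that mbochs_mmap() would install alongside vm_private_data:

	static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;
		struct mdev_state *mdev_state = vma->vm_private_data;
		pgoff_t page_offset = vmf->pgoff;

		if (page_offset >= mdev_state->pagecount)
			return VM_FAULT_SIGBUS;

		vmf->page = mbochs_get_page(mdev_state, page_offset);
		if (!vmf->page)
			return VM_FAULT_SIGBUS;

		return 0;
	}

	static const struct vm_operations_struct mbochs_region_vm_ops = {
		.fault = mbochs_region_vm_fault,
	};
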
810 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_mmap_dmabuf()
825 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_print_dmabuf()
842 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_map_dmabuf()
871 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_unmap_dmabuf()
883 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_release_dmabuf() local
884 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_release_dmabuf()
892 mutex_lock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
896 mutex_unlock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
906 static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state, in mbochs_dmabuf_alloc() argument
912 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_alloc()
919 dmabuf->id = mdev_state->next_id++; in mbochs_dmabuf_alloc()
928 dmabuf->pages[pg] = __mbochs_get_page(mdev_state, in mbochs_dmabuf_alloc()
934 dmabuf->mdev_state = mdev_state; in mbochs_dmabuf_alloc()
935 list_add(&dmabuf->next, &mdev_state->dmabufs); in mbochs_dmabuf_alloc()
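
mbochs_dmabuf_alloc() pins the framebuffer range by taking an extra reference on each page through __mbochs_get_page() (hence the locked variant: the caller already holds ops_lock), then links the buffer into the per-device list. A condensed sketch; the pagecount math and error labels are assumptions:

	static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state,
							 struct mbochs_mode *mode)
	{
		struct mbochs_dmabuf *dmabuf;
		pgoff_t page_offset, pg;

		WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

		dmabuf = kzalloc(sizeof(struct mbochs_dmabuf), GFP_KERNEL);
		if (!dmabuf)
			return NULL;

		dmabuf->mode = *mode;
		dmabuf->id = mdev_state->next_id++;
		dmabuf->pagecount = DIV_ROUND_UP(mode->size, PAGE_SIZE);
		dmabuf->pages = kcalloc(dmabuf->pagecount, sizeof(struct page *),
					GFP_KERNEL);
		if (!dmabuf->pages)
			goto err_free_dmabuf;		/* hypothetical label */

		page_offset = dmabuf->mode.offset >> PAGE_SHIFT;
		for (pg = 0; pg < dmabuf->pagecount; pg++) {
			dmabuf->pages[pg] = __mbochs_get_page(mdev_state,
							      page_offset + pg);
			if (!dmabuf->pages[pg])
				goto err_free_pages;	/* hypothetical label */
		}

		dmabuf->mdev_state = mdev_state;
		list_add(&dmabuf->next, &mdev_state->dmabufs);
		return dmabuf;

	err_free_pages:
		while (pg > 0)
			put_page(dmabuf->pages[--pg]);
		kfree(dmabuf->pages);
	err_free_dmabuf:
		kfree(dmabuf);
		return NULL;
	}
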
950 mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state, in mbochs_dmabuf_find_by_mode() argument
955 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_mode()
957 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_mode()
965 mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id) in mbochs_dmabuf_find_by_id() argument
969 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_id()
971 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_id()
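
The two finders are straight list walks that expect the caller to hold ops_lock; completed for the by-id case (the by-mode variant compares the stored mode instead):

	static struct mbochs_dmabuf *
	mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id)
	{
		struct mbochs_dmabuf *dmabuf;

		WARN_ON(!mutex_is_locked(&mdev_state->ops_lock));

		list_for_each_entry(dmabuf, &mdev_state->dmabufs, next)
			if (dmabuf->id == id)
				return dmabuf;

		return NULL;
	}
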
980 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_dmabuf_export() local
981 struct device *dev = mdev_state->vdev.dev; in mbochs_dmabuf_export()
985 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_export()
1009 static int mbochs_get_region_info(struct mdev_state *mdev_state, in mbochs_get_region_info() argument
1026 region_info->size = mdev_state->memsize; in mbochs_get_region_info()
1074 static int mbochs_query_gfx_plane(struct mdev_state *mdev_state, in mbochs_query_gfx_plane() argument
1097 mutex_lock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1101 ret = mbochs_check_framebuffer(mdev_state, &mode); in mbochs_query_gfx_plane()
1112 dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode); in mbochs_query_gfx_plane()
1114 mbochs_dmabuf_alloc(mdev_state, &mode); in mbochs_query_gfx_plane()
1116 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1129 mdev_state->active_id != plane->dmabuf_id) { in mbochs_query_gfx_plane()
1130 dev_dbg(mdev_state->vdev.dev, "%s: primary: %d => %d\n", in mbochs_query_gfx_plane()
1131 __func__, mdev_state->active_id, plane->dmabuf_id); in mbochs_query_gfx_plane()
1132 mdev_state->active_id = plane->dmabuf_id; in mbochs_query_gfx_plane()
1134 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1138 static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id) in mbochs_get_gfx_dmabuf() argument
1142 mutex_lock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1144 dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id); in mbochs_get_gfx_dmabuf()
1146 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1153 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
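
mbochs_get_gfx_dmabuf() looks the buffer up under ops_lock, exports it on first use, and hands a file descriptor back to userspace. A sketch; the export-on-demand detail, the buf field name, and the error values are assumptions:

	static int mbochs_get_gfx_dmabuf(struct mdev_state *mdev_state, u32 id)
	{
		struct mbochs_dmabuf *dmabuf;

		mutex_lock(&mdev_state->ops_lock);

		dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id);
		if (!dmabuf) {
			mutex_unlock(&mdev_state->ops_lock);
			return -ENOENT;
		}

		if (!dmabuf->buf)
			mbochs_dmabuf_export(dmabuf);

		mutex_unlock(&mdev_state->ops_lock);

		if (!dmabuf->buf)
			return -EINVAL;

		return dma_buf_fd(dmabuf->buf, 0);
	}
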
1164 struct mdev_state *mdev_state = in mbochs_ioctl() local
1165 container_of(vdev, struct mdev_state, vdev); in mbochs_ioctl()
1206 ret = mbochs_get_region_info(mdev_state, &info); in mbochs_ioctl()
1252 ret = mbochs_query_gfx_plane(mdev_state, &plane); in mbochs_ioctl()
1269 return mbochs_get_gfx_dmabuf(mdev_state, dmabuf_id); in mbochs_ioctl()
1276 return mbochs_reset(mdev_state); in mbochs_ioctl()
1283 struct mdev_state *mdev_state = in mbochs_close_device() local
1284 container_of(vdev, struct mdev_state, vdev); in mbochs_close_device()
1287 mutex_lock(&mdev_state->ops_lock); in mbochs_close_device()
1289 list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) { in mbochs_close_device()
1298 mbochs_put_pages(mdev_state); in mbochs_close_device()
1300 mutex_unlock(&mdev_state->ops_lock); in mbochs_close_device()
1307 struct mdev_state *mdev_state = dev_get_drvdata(dev); in memory_show() local
1309 return sprintf(buf, "%d MB\n", mdev_state->type->mbytes); in memory_show()
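
memory_show() is the standard drvdata sysfs pattern; completed, with an assumed read-only attribute declaration:

	static ssize_t memory_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		struct mdev_state *mdev_state = dev_get_drvdata(dev);

		return sprintf(buf, "%d MB\n", mdev_state->type->mbytes);
	}
	static DEVICE_ATTR_RO(memory);
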