Lines matching refs: mdev_state (cross-reference hits from the mbochs sample mdev driver, samples/vfio-mdev/mbochs.c; each entry gives the source line number, the matching code, and the enclosing function)

156 	struct mdev_state *mdev_state;  member
162 struct mdev_state { struct
203 static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
205 static struct page *mbochs_get_page(struct mdev_state *mdev_state,
218 static void mbochs_create_config_space(struct mdev_state *mdev_state) in mbochs_create_config_space() argument
220 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID], in mbochs_create_config_space()
222 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID], in mbochs_create_config_space()
224 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID], in mbochs_create_config_space()
226 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID], in mbochs_create_config_space()
229 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND], in mbochs_create_config_space()
231 STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE], in mbochs_create_config_space()
233 mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01; in mbochs_create_config_space()
235 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0], in mbochs_create_config_space()
239 mdev_state->bar_mask[0] = ~(mdev_state->memsize) + 1; in mbochs_create_config_space()
241 STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_2], in mbochs_create_config_space()
244 mdev_state->bar_mask[2] = ~(MBOCHS_MMIO_BAR_SIZE) + 1; in mbochs_create_config_space()
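
The two bar_mask assignments above are two's-complement negation: ~size + 1 equals -size, which for a power-of-two size leaves exactly the low log2(size) address bits clear. That is the mask a guest recovers during PCI BAR sizing. A minimal user-space sketch of the arithmetic, assuming an illustrative 16 MB region (the driver derives the real size from the mdev type's mbytes):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t memsize = 16u << 20;      /* illustrative 16 MB BAR 0 */
        uint32_t bar_mask = ~memsize + 1;  /* == -memsize; low bits clear */

        /* BAR sizing: the guest writes all-ones, reads the register
         * back, and recovers the size from the zeroed low bits. */
        uint32_t readback = 0xffffffffu & bar_mask;

        printf("mask=0x%08x decoded size=%u\n", readback, ~readback + 1);
        return 0;
    }
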
247 static int mbochs_check_framebuffer(struct mdev_state *mdev_state, in mbochs_check_framebuffer() argument
250 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_check_framebuffer()
251 u16 *vbe = mdev_state->vbe; in mbochs_check_framebuffer()
254 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_check_framebuffer()
286 if (mode->offset + mode->size > mdev_state->memsize) { in mbochs_check_framebuffer()
305 static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset, in handle_pci_cfg_write() argument
308 struct device *dev = mdev_dev(mdev_state->mdev); in handle_pci_cfg_write()
318 cfg_addr = (cfg_addr & mdev_state->bar_mask[index]); in handle_pci_cfg_write()
326 cfg_addr |= (mdev_state->vconfig[offset] & in handle_pci_cfg_write()
328 STORE_LE32(&mdev_state->vconfig[offset], cfg_addr); in handle_pci_cfg_write()
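
In the BAR-write path above, the guest-supplied address is clamped to bar_mask[index], merged with the flag bits already stored in vconfig, and written back little-endian via STORE_LE32(). A hedged stand-alone sketch of that merge; FLAG_MASK is an assumption, since the constant after the & on source line 326 continues onto a line not shown here:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_MASK 0x3u  /* assumed: low memory-BAR flag bits to keep */

    static uint32_t bar_update(uint32_t bar_mask, uint32_t cur, uint32_t val)
    {
        uint32_t addr = val & bar_mask;   /* clamp to addressable bits */

        return addr | (cur & FLAG_MASK);  /* re-apply existing flag bits */
    }

    int main(void)
    {
        uint32_t mask = ~(16u << 20) + 1;

        /* A sizing write of all-ones reads back as 0xff000003 here. */
        printf("0x%08x\n", bar_update(mask, 0x3u, 0xffffffffu));
        return 0;
    }
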
333 static void handle_mmio_write(struct mdev_state *mdev_state, u16 offset, in handle_mmio_write() argument
336 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_write()
348 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_write()
349 mdev_state->vbe[index] = reg16; in handle_mmio_write()
363 static void handle_mmio_read(struct mdev_state *mdev_state, u16 offset, in handle_mmio_read() argument
366 struct device *dev = mdev_dev(mdev_state->mdev); in handle_mmio_read()
373 edid = &mdev_state->edid_regs; in handle_mmio_read()
379 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_mmio_read()
385 if (index < ARRAY_SIZE(mdev_state->vbe)) in handle_mmio_read()
386 reg16 = mdev_state->vbe[index]; in handle_mmio_read()
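
Taken together, handle_mmio_write() and handle_mmio_read() treat vbe[] as a bounds-checked VBE DISPI register file: out-of-range indices are silently dropped on write and read back as a default. A small sketch of that pattern, with an illustrative register count standing in for ARRAY_SIZE(mdev_state->vbe):

    #include <stdint.h>
    #include <stdio.h>

    #define VBE_REG_COUNT 16  /* illustrative register-file size */

    static uint16_t vbe[VBE_REG_COUNT];

    static void vbe_write(unsigned int index, uint16_t reg16)
    {
        if (index < VBE_REG_COUNT)  /* out-of-range writes are dropped */
            vbe[index] = reg16;
    }

    static uint16_t vbe_read(unsigned int index)
    {
        uint16_t reg16 = 0;         /* unknown registers read as zero */

        if (index < VBE_REG_COUNT)
            reg16 = vbe[index];
        return reg16;
    }

    int main(void)
    {
        vbe_write(1, 1024);         /* e.g. an XRES-style index register */
        printf("%u %u\n", vbe_read(1), vbe_read(99));
        return 0;
    }
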
400 static void handle_edid_regs(struct mdev_state *mdev_state, u16 offset, in handle_edid_regs() argument
403 char *regs = (void *)&mdev_state->edid_regs; in handle_edid_regs()
405 if (offset + count > sizeof(mdev_state->edid_regs)) in handle_edid_regs()
427 static void handle_edid_blob(struct mdev_state *mdev_state, u16 offset, in handle_edid_blob() argument
430 if (offset + count > mdev_state->edid_regs.edid_max_size) in handle_edid_blob()
433 memcpy(mdev_state->edid_blob + offset, buf, count); in handle_edid_blob()
435 memcpy(buf, mdev_state->edid_blob + offset, count); in handle_edid_blob()
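
handle_edid_blob() reduces to one bounds check followed by a memcpy() whose direction is selected by is_write. A user-space sketch of the same shape; in the driver the offset is a u16 and the bound is edid_regs.edid_max_size, so the addition cannot wrap:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    static void edid_blob_access(uint8_t *blob, size_t max_size, size_t offset,
                                 uint8_t *buf, size_t count, bool is_write)
    {
        if (offset + count > max_size)
            return;                             /* out of range: ignore */
        if (is_write)
            memcpy(blob + offset, buf, count);  /* guest updates the blob */
        else
            memcpy(buf, blob + offset, count);  /* guest reads it back */
    }

    int main(void)
    {
        uint8_t blob[128] = { 0 }, byte = 0xff;

        edid_blob_access(blob, sizeof(blob), 10, &byte, 1, true);
        edid_blob_access(blob, sizeof(blob), 10, &byte, 1, false);
        return byte == 0xff ? 0 : 1;
    }
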
441 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mdev_access() local
448 mutex_lock(&mdev_state->ops_lock); in mdev_access()
452 handle_pci_cfg_write(mdev_state, pos, buf, count); in mdev_access()
454 memcpy(buf, (mdev_state->vconfig + pos), count); in mdev_access()
461 handle_mmio_write(mdev_state, pos, buf, count); in mdev_access()
463 handle_mmio_read(mdev_state, pos, buf, count); in mdev_access()
470 handle_edid_regs(mdev_state, pos, buf, count, is_write); in mdev_access()
473 handle_edid_blob(mdev_state, pos, buf, count, is_write); in mdev_access()
478 MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) { in mdev_access()
481 pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT); in mdev_access()
501 mutex_unlock(&mdev_state->ops_lock); in mdev_access()
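
mdev_access() serializes every access with ops_lock and dispatches on the offset: PCI config space, the MMIO BAR with its EDID sub-ranges, and finally the memory BAR, whose backing pages come from __mbochs_get_page(). A compressed sketch of that dispatch; the offsets and stub handlers are illustrative stand-ins for the driver's MBOCHS_*_OFFSET constants:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    #define CFG_SIZE  0x100u   /* illustrative region layout */
    #define MMIO_BASE 0x1000u
    #define MMIO_SIZE 0x1000u

    static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;

    static void cfg_access(size_t pos, uint8_t *buf, size_t count, bool w)
    {
        (void)pos; (void)buf; (void)count; (void)w;  /* stub handler */
    }

    static void mmio_access(size_t pos, uint8_t *buf, size_t count, bool w)
    {
        (void)pos; (void)buf; (void)count; (void)w;  /* stub handler */
    }

    static int dev_access(size_t pos, uint8_t *buf, size_t count, bool is_write)
    {
        int ret = -1;

        pthread_mutex_lock(&ops_lock);  /* one lock covers every region */
        if (pos + count <= CFG_SIZE) {
            cfg_access(pos, buf, count, is_write);
            ret = 0;
        } else if (pos >= MMIO_BASE && pos + count <= MMIO_BASE + MMIO_SIZE) {
            mmio_access(pos - MMIO_BASE, buf, count, is_write);
            ret = 0;
        }
        pthread_mutex_unlock(&ops_lock);
        return ret;                     /* unhandled ranges fail */
    }

    int main(void)
    {
        uint8_t b = 0;

        return dev_access(0x1004, &b, 1, false);
    }
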
508 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_reset() local
509 u32 size64k = mdev_state->memsize / (64 * 1024); in mbochs_reset()
512 for (i = 0; i < ARRAY_SIZE(mdev_state->vbe); i++) in mbochs_reset()
513 mdev_state->vbe[i] = 0; in mbochs_reset()
514 mdev_state->vbe[VBE_DISPI_INDEX_ID] = VBE_DISPI_ID5; in mbochs_reset()
515 mdev_state->vbe[VBE_DISPI_INDEX_VIDEO_MEMORY_64K] = size64k; in mbochs_reset()
523 struct mdev_state *mdev_state; in mbochs_create() local
530 mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL); in mbochs_create()
531 if (mdev_state == NULL) in mbochs_create()
534 mdev_state->vconfig = kzalloc(MBOCHS_CONFIG_SPACE_SIZE, GFP_KERNEL); in mbochs_create()
535 if (mdev_state->vconfig == NULL) in mbochs_create()
538 mdev_state->memsize = type->mbytes * 1024 * 1024; in mbochs_create()
539 mdev_state->pagecount = mdev_state->memsize >> PAGE_SHIFT; in mbochs_create()
540 mdev_state->pages = kcalloc(mdev_state->pagecount, in mbochs_create()
543 if (!mdev_state->pages) in mbochs_create()
547 kobj->name, type->mbytes, mdev_state->pagecount); in mbochs_create()
549 mutex_init(&mdev_state->ops_lock); in mbochs_create()
550 mdev_state->mdev = mdev; in mbochs_create()
551 mdev_set_drvdata(mdev, mdev_state); in mbochs_create()
552 INIT_LIST_HEAD(&mdev_state->dmabufs); in mbochs_create()
553 mdev_state->next_id = 1; in mbochs_create()
555 mdev_state->type = type; in mbochs_create()
556 mdev_state->edid_regs.max_xres = type->max_x; in mbochs_create()
557 mdev_state->edid_regs.max_yres = type->max_y; in mbochs_create()
558 mdev_state->edid_regs.edid_offset = MBOCHS_EDID_BLOB_OFFSET; in mbochs_create()
559 mdev_state->edid_regs.edid_max_size = sizeof(mdev_state->edid_blob); in mbochs_create()
560 mbochs_create_config_space(mdev_state); in mbochs_create()
567 kfree(mdev_state->vconfig); in mbochs_create()
568 kfree(mdev_state); in mbochs_create()
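
mbochs_create() follows the usual kernel allocate-and-unwind shape: each failed allocation branches to cleanup code that frees everything acquired so far, which is what the trailing pair of kfree() calls above is. A user-space stand-in for the pattern, with calloc/free replacing kzalloc/kfree:

    #include <stdlib.h>
    #include <stddef.h>

    struct state {
        char *vconfig;
        void **pages;
    };

    static struct state *state_create(size_t cfg_size, size_t pagecount)
    {
        struct state *s = calloc(1, sizeof(*s));

        if (!s)
            return NULL;
        s->vconfig = calloc(cfg_size, 1);
        if (!s->vconfig)
            goto err_free_state;
        s->pages = calloc(pagecount, sizeof(*s->pages));
        if (!s->pages)
            goto err_free_config;
        return s;

    err_free_config:                  /* unwind in reverse order */
        free(s->vconfig);
    err_free_state:
        free(s);
        return NULL;
    }

    int main(void)
    {
        struct state *s = state_create(256, 4096);

        if (!s)
            return 1;
        free(s->pages);               /* mirrors mbochs_remove()'s order */
        free(s->vconfig);
        free(s);
        return 0;
    }
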
574 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_remove() local
576 mbochs_used_mbytes -= mdev_state->type->mbytes; in mbochs_remove()
578 kfree(mdev_state->pages); in mbochs_remove()
579 kfree(mdev_state->vconfig); in mbochs_remove()
580 kfree(mdev_state); in mbochs_remove()
700 static struct page *__mbochs_get_page(struct mdev_state *mdev_state, in __mbochs_get_page() argument
703 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in __mbochs_get_page()
705 if (!mdev_state->pages[pgoff]) { in __mbochs_get_page()
706 mdev_state->pages[pgoff] = in __mbochs_get_page()
708 if (!mdev_state->pages[pgoff]) in __mbochs_get_page()
712 get_page(mdev_state->pages[pgoff]); in __mbochs_get_page()
713 return mdev_state->pages[pgoff]; in __mbochs_get_page()
716 static struct page *mbochs_get_page(struct mdev_state *mdev_state, in mbochs_get_page() argument
721 if (WARN_ON(pgoff >= mdev_state->pagecount)) in mbochs_get_page()
724 mutex_lock(&mdev_state->ops_lock); in mbochs_get_page()
725 page = __mbochs_get_page(mdev_state, pgoff); in mbochs_get_page()
726 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_page()
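
The __mbochs_get_page()/mbochs_get_page() pair is the classic locked-helper split: the double-underscore helper asserts ops_lock is held (the WARN_ON above) and allocates the backing page on first touch, while the public wrapper bounds-checks against pagecount, takes the lock, and delegates. A pthread-based sketch that keeps the structure but drops the driver's get_page() reference counting:

    #include <pthread.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define PAGE_SZ 4096u

    struct state {
        pthread_mutex_t ops_lock;
        void **pages;
        size_t pagecount;
    };

    /* Caller must hold ops_lock; allocate lazily and cache. */
    static void *__get_page(struct state *s, size_t pgoff)
    {
        if (!s->pages[pgoff])
            s->pages[pgoff] = calloc(1, PAGE_SZ);
        return s->pages[pgoff];
    }

    /* Locking wrapper: bounds-check first, then take the lock. */
    static void *get_page(struct state *s, size_t pgoff)
    {
        void *page;

        if (pgoff >= s->pagecount)
            return NULL;
        pthread_mutex_lock(&s->ops_lock);
        page = __get_page(s, pgoff);
        pthread_mutex_unlock(&s->ops_lock);
        return page;
    }

    int main(void)
    {
        void *pages[8] = { NULL };
        struct state s = {
            .ops_lock = PTHREAD_MUTEX_INITIALIZER,
            .pages = pages,
            .pagecount = 8,
        };
        void *p = get_page(&s, 3);

        free(p);
        return p ? 0 : 1;
    }
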
731 static void mbochs_put_pages(struct mdev_state *mdev_state) in mbochs_put_pages() argument
733 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_put_pages()
736 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_put_pages()
738 for (i = 0; i < mdev_state->pagecount; i++) { in mbochs_put_pages()
739 if (!mdev_state->pages[i]) in mbochs_put_pages()
741 put_page(mdev_state->pages[i]); in mbochs_put_pages()
742 mdev_state->pages[i] = NULL; in mbochs_put_pages()
751 struct mdev_state *mdev_state = vma->vm_private_data; in mbochs_region_vm_fault() local
754 if (page_offset >= mdev_state->pagecount) in mbochs_region_vm_fault()
757 vmf->page = mbochs_get_page(mdev_state, page_offset); in mbochs_region_vm_fault()
770 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_mmap() local
776 if (vma->vm_end - vma->vm_start > mdev_state->memsize) in mbochs_mmap()
782 vma->vm_private_data = mdev_state; in mbochs_mmap()
806 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_mmap_dmabuf()
821 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_print_dmabuf()
838 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_map_dmabuf()
867 struct device *dev = mdev_dev(dmabuf->mdev_state->mdev); in mbochs_unmap_dmabuf()
879 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_release_dmabuf() local
880 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_release_dmabuf()
888 mutex_lock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
892 mutex_unlock(&mdev_state->ops_lock); in mbochs_release_dmabuf()
902 static struct mbochs_dmabuf *mbochs_dmabuf_alloc(struct mdev_state *mdev_state, in mbochs_dmabuf_alloc() argument
908 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_alloc()
915 dmabuf->id = mdev_state->next_id++; in mbochs_dmabuf_alloc()
924 dmabuf->pages[pg] = __mbochs_get_page(mdev_state, in mbochs_dmabuf_alloc()
930 dmabuf->mdev_state = mdev_state; in mbochs_dmabuf_alloc()
931 list_add(&dmabuf->next, &mdev_state->dmabufs); in mbochs_dmabuf_alloc()
946 mbochs_dmabuf_find_by_mode(struct mdev_state *mdev_state, in mbochs_dmabuf_find_by_mode() argument
951 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_mode()
953 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_mode()
961 mbochs_dmabuf_find_by_id(struct mdev_state *mdev_state, u32 id) in mbochs_dmabuf_find_by_id() argument
965 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_find_by_id()
967 list_for_each_entry(dmabuf, &mdev_state->dmabufs, next) in mbochs_dmabuf_find_by_id()
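
mbochs_dmabuf_alloc() and the two find helpers all run under ops_lock: allocation hands out ids from next_id (seeded to 1 in mbochs_create() above) and links the new buffer into the per-device dmabufs list; lookup is a plain list walk. A sketch with a singly linked list standing in for list_head, omitting the page references the real allocator takes:

    #include <stdlib.h>

    struct buf {
        struct buf *next;
        unsigned int id;
    };

    struct state {
        struct buf *bufs;        /* per-device buffer list */
        unsigned int next_id;    /* monotonically increasing handle */
    };

    /* In the driver the caller holds ops_lock around both helpers. */
    static struct buf *buf_alloc(struct state *s)
    {
        struct buf *b = calloc(1, sizeof(*b));

        if (!b)
            return NULL;
        b->id = s->next_id++;
        b->next = s->bufs;       /* link in, as list_add() does */
        s->bufs = b;
        return b;
    }

    static struct buf *buf_find_by_id(struct state *s, unsigned int id)
    {
        struct buf *b;

        for (b = s->bufs; b; b = b->next)
            if (b->id == id)
                return b;
        return NULL;
    }

    int main(void)
    {
        struct state s = { .bufs = NULL, .next_id = 1 };

        buf_alloc(&s);
        return buf_find_by_id(&s, 1) ? 0 : 1;
    }
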
976 struct mdev_state *mdev_state = dmabuf->mdev_state; in mbochs_dmabuf_export() local
977 struct device *dev = mdev_dev(mdev_state->mdev); in mbochs_dmabuf_export()
981 WARN_ON(!mutex_is_locked(&mdev_state->ops_lock)); in mbochs_dmabuf_export()
1009 struct mdev_state *mdev_state; in mbochs_get_region_info() local
1011 mdev_state = mdev_get_drvdata(mdev); in mbochs_get_region_info()
1012 if (!mdev_state) in mbochs_get_region_info()
1027 region_info->size = mdev_state->memsize; in mbochs_get_region_info()
1080 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_query_gfx_plane() local
1102 mutex_lock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1106 ret = mbochs_check_framebuffer(mdev_state, &mode); in mbochs_query_gfx_plane()
1117 dmabuf = mbochs_dmabuf_find_by_mode(mdev_state, &mode); in mbochs_query_gfx_plane()
1119 mbochs_dmabuf_alloc(mdev_state, &mode); in mbochs_query_gfx_plane()
1121 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1134 mdev_state->active_id != plane->dmabuf_id) { in mbochs_query_gfx_plane()
1136 mdev_state->active_id, plane->dmabuf_id); in mbochs_query_gfx_plane()
1137 mdev_state->active_id = plane->dmabuf_id; in mbochs_query_gfx_plane()
1139 mutex_unlock(&mdev_state->ops_lock); in mbochs_query_gfx_plane()
1146 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_get_gfx_dmabuf() local
1149 mutex_lock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1151 dmabuf = mbochs_dmabuf_find_by_id(mdev_state, id); in mbochs_get_gfx_dmabuf()
1153 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1160 mutex_unlock(&mdev_state->ops_lock); in mbochs_get_gfx_dmabuf()
1296 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in mbochs_close() local
1299 mutex_lock(&mdev_state->ops_lock); in mbochs_close()
1301 list_for_each_entry_safe(dmabuf, tmp, &mdev_state->dmabufs, next) { in mbochs_close()
1310 mbochs_put_pages(mdev_state); in mbochs_close()
1312 mutex_unlock(&mdev_state->ops_lock); in mbochs_close()
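
mbochs_close() tears the device down in two steps under ops_lock: release every entry on the dmabufs list with the _safe iterator, which tolerates entries being unlinked and freed mid-walk, then drop the cached pages via mbochs_put_pages(). A user-space sketch of the safe-iteration idea:

    #include <stdlib.h>

    struct buf {
        struct buf *next;
    };

    /* Save the next pointer before freeing each entry, the analogue
     * of list_for_each_entry_safe() in the close path above. */
    static void close_all(struct buf **head)
    {
        struct buf *b = *head, *tmp;

        while (b) {
            tmp = b->next;
            free(b);
            b = tmp;
        }
        *head = NULL;   /* after this, drop the page cache */
    }

    int main(void)
    {
        struct buf *head = calloc(1, sizeof(*head));

        close_all(&head);
        return head ? 1 : 0;
    }
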
1321 struct mdev_state *mdev_state = mdev_get_drvdata(mdev); in memory_show() local
1323 return sprintf(buf, "%d MB\n", mdev_state->type->mbytes); in memory_show()
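
memory_show() is a plain read-only sysfs attribute: format the value into the buffer sysfs supplies and return the byte count. A user-space stand-in, with snprintf replacing the kernel sprintf (safe in the driver because sysfs hands ->show() a page-sized buffer):

    #include <stdio.h>

    static int memory_show_sketch(char *buf, size_t len, int mbytes)
    {
        return snprintf(buf, len, "%d MB\n", mbytes);
    }

    int main(void)
    {
        char buf[32];
        int n = memory_show_sketch(buf, sizeof(buf), 16);

        fwrite(buf, 1, (size_t)n, stdout);
        return 0;
    }
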