Lines Matching +full:100 +full:base +full:- +full:fx

2  * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved.
3 * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved.
25 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
208 return readl((void __iomem *)(dev_priv->mmio->handle + reg)); in via_read()
214 writel(val, (void __iomem *)(dev_priv->mmio->handle + reg)); in via_write()
220 writeb(val, (void __iomem *)(dev_priv->mmio->handle + reg)); in via_write8()
228 tmp = readb((void __iomem *)(dev_priv->mmio->handle + reg)); in via_write8_mask()
230 writeb(tmp, (void __iomem *)(dev_priv->mmio->handle + reg)); in via_write8_mask()
241 * ret = -EBUSY if timeout happens
242 * ret = -EINTR if a signal interrupted the waiting period
255 ret = -EBUSY; \
258 schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
260 ret = -EINTR; \
291 dev_priv->dma_low += 8; \
299 dev_priv->dma_low += 8; \
303 #define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
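The VIA_WAIT_ON fragments above (255, 258, 260) outline a poll-and-sleep wait: -EBUSY on timeout, -EINTR on a pending signal, sleeping about 10 ms (HZ/100 jiffies, at least one) per pass. A minimal sketch of such a macro, reconstructed from those fragments rather than quoted from the header:

    #define VIA_WAIT_ON(ret, queue, timeout, condition)        \
    do {                                                       \
        DECLARE_WAITQUEUE(entry, current);                     \
        unsigned long end = jiffies + (timeout);               \
        add_wait_queue(&(queue), &entry);                      \
        for (;;) {                                             \
            __set_current_state(TASK_INTERRUPTIBLE);           \
            if (condition)                                     \
                break;                                         \
            if (time_after_eq(jiffies, end)) {                 \
                ret = -EBUSY;   /* timed out */                \
                break;                                         \
            }                                                  \
            /* sleep ~10 ms (HZ/100 jiffies), at least one */  \
            schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);       \
            if (signal_pending(current)) {                     \
                ret = -EINTR;   /* signal delivered */         \
                break;                                         \
            }                                                  \
        }                                                      \
        __set_current_state(TASK_RUNNING);                     \
        remove_wait_queue(&(queue), &entry);                   \
    } while (0)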
386 * Device-specific IRQs go here. This type might need to be extended with the register if there are multiple IRQ control registers.
402 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
411 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
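A -1 entry in either map means that logical IRQ has no line on the chipset, so lookups such as the one at 2347 must be range-checked before use. A plausible shape for that check (the error text here is illustrative, not quoted from the driver):

    real_irq = dev_priv->irq_map[irq];
    if (real_irq < 0) {
        DRM_ERROR("Video IRQ %d not available on this hardware.\n", irq);
        return -EINVAL;
    }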
471 * Associates each hazard above with a possible multi-command sequence; for example, an address that is split over multiple commands and that needs to be checked at the first command that does not include any part of the address.
633 if ((buf_end - *buf) >= num_words) { in eat_words()
651 drm_local_map_t *map = seq->map_cache; in via_drm_lookup_agp_map()
653 if (map && map->offset <= offset in via_drm_lookup_agp_map()
654 && (offset + size) <= (map->offset + map->size)) { in via_drm_lookup_agp_map()
658 list_for_each_entry(r_list, &dev->maplist, head) { in via_drm_lookup_agp_map()
659 map = r_list->map; in via_drm_lookup_agp_map()
662 if (map->offset <= offset in via_drm_lookup_agp_map()
663 && (offset + size) <= (map->offset + map->size) in via_drm_lookup_agp_map()
664 && !(map->flags & _DRM_RESTRICTED) in via_drm_lookup_agp_map()
665 && (map->type == _DRM_AGP)) { in via_drm_lookup_agp_map()
666 seq->map_cache = map; in via_drm_lookup_agp_map()
684 switch (cur_seq->unfinished) { in finish_current_sequence()
686 DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr); in finish_current_sequence()
690 cur_seq->d_addr); in finish_current_sequence()
693 if (cur_seq->agp_texture) { in finish_current_sequence()
695 cur_seq->tex_level_lo[cur_seq->texture]; in finish_current_sequence()
696 unsigned end = cur_seq->tex_level_hi[cur_seq->texture]; in finish_current_sequence()
708 &(cur_seq->t_addr[tex = cur_seq->texture][start]); in finish_current_sequence()
709 pitch = &(cur_seq->pitch[tex][start]); in finish_current_sequence()
710 height = &(cur_seq->height[tex][start]); in finish_current_sequence()
711 npot = cur_seq->tex_npot[tex]; in finish_current_sequence()
725 (cur_seq, lo, hi - lo, cur_seq->dev)) { in finish_current_sequence()
735 cur_seq->unfinished = no_sequence; in finish_current_sequence()
744 if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) { in investigate_hazard()
785 cur_seq->unfinished = z_address; in investigate_hazard()
786 cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) | in investigate_hazard()
790 cur_seq->unfinished = z_address; in investigate_hazard()
791 cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) | in investigate_hazard()
795 cur_seq->unfinished = z_address; in investigate_hazard()
801 cur_seq->unfinished = dest_address; in investigate_hazard()
802 cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) | in investigate_hazard()
806 cur_seq->unfinished = dest_address; in investigate_hazard()
807 cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) | in investigate_hazard()
811 cur_seq->unfinished = dest_address; in investigate_hazard()
818 cur_seq->unfinished = tex_address; in investigate_hazard()
820 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; in investigate_hazard()
824 cur_seq->unfinished = tex_address; in investigate_hazard()
825 tmp = ((cmd >> 24) - 0x20); in investigate_hazard()
827 tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp]; in investigate_hazard()
835 cur_seq->unfinished = tex_address; in investigate_hazard()
836 cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F; in investigate_hazard()
837 cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6; in investigate_hazard()
840 cur_seq->unfinished = tex_address; in investigate_hazard()
841 tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); in investigate_hazard()
844 cur_seq->pitch[cur_seq->texture][tmp] = in investigate_hazard()
846 cur_seq->tex_npot[cur_seq->texture] = 1; in investigate_hazard()
848 cur_seq->pitch[cur_seq->texture][tmp] = in investigate_hazard()
850 cur_seq->tex_npot[cur_seq->texture] = 0; in investigate_hazard()
859 cur_seq->unfinished = tex_address; in investigate_hazard()
860 tmp_addr = &cur_seq->t_addr[cur_seq->texture][9]; in investigate_hazard()
865 cur_seq->unfinished = tex_address; in investigate_hazard()
871 cur_seq->unfinished = tex_address; in investigate_hazard()
872 tmp_addr = &(cur_seq->height[cur_seq->texture][0]); in investigate_hazard()
881 cur_seq->unfinished = tex_address; in investigate_hazard()
882 tmp_addr = &(cur_seq->height[cur_seq->texture][0]); in investigate_hazard()
889 cur_seq->unfinished = tex_address; in investigate_hazard()
895 cur_seq->agp_texture = (tmp == 3); in investigate_hazard()
896 cur_seq->tex_palette_size[cur_seq->texture] = in investigate_hazard()
900 cur_seq->vertex_count = cmd & 0x0000FFFF; in investigate_hazard()
903 cur_seq->multitex = (cmd >> 3) & 1; in investigate_hazard()
917 (drm_via_private_t *) cur_seq->dev->dev_private; in via_check_prim_list()
925 if ((buf_end - buf) < 2) { in via_check_prim_list()
948 if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) { in via_check_prim_list()
956 dw_count += (cur_seq->multitex) ? 2 : 1; in via_check_prim_list()
958 dw_count += (cur_seq->multitex) ? 2 : 1; in via_check_prim_list()
974 if (dev_priv->num_fire_offsets >= in via_check_prim_list()
980 dev_priv->fire_offsets[dev_priv->num_fire_offsets++] = buf; in via_check_prim_list()
1006 if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) { in via_check_prim_list()
1026 if ((buf_end - buf) < 2) { in via_check_header2()
1044 hc_state->texture = 0; in via_check_header2()
1048 hc_state->texture = 1; in via_check_header2()
1082 cmd, *(buf - 2)); in via_check_header2()
1092 buf--; in via_check_header2()
1097 } else if (hc_state->unfinished && in via_check_header2()
1102 if (hc_state->unfinished && finish_current_sequence(hc_state)) in via_check_header2()
1117 next_fire = dev_priv->fire_offsets[*fire_count]; in via_parse_header2()
1124 (*fire_count < dev_priv->num_fire_offsets) && in via_parse_header2()
1135 if (++(*fire_count) < dev_priv->num_fire_offsets) in via_parse_header2()
1136 next_fire = dev_priv->fire_offsets[*fire_count]; in via_parse_header2()
1161 "Attempt to access 3D- or command burst area.\n"); in verify_mmio_address()
1181 if (buf_end - buf < dwords) { in verify_video_tail()
1185 while (dwords--) { in verify_video_tail()
1209 "Attempt to access 3D- or command burst area.\n"); in via_check_header1()
1251 if (buf_end - buf < 4) { in via_check_vheader5()
1271 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) in via_check_vheader5()
1288 while (i--) in via_parse_vheader5()
1291 buf += 4 - (count & 3); in via_parse_vheader5()
1303 if (buf_end - buf < 4) { in via_check_vheader6()
1317 if ((buf_end - buf) < (data << 1)) { in via_check_vheader6()
1327 if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3))) in via_check_vheader6()
1343 while (i--) { in via_parse_vheader6()
1349 buf += 4 - (count & 3); in via_parse_vheader6()
1359 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_verify_command_stream()
1360 drm_via_state_t *hc_state = &dev_priv->hc_state; in via_verify_command_stream()
1368 cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || in via_verify_command_stream()
1369 dev_priv->chipset == VIA_DX9_0); in via_verify_command_stream()
1371 supported_3d = dev_priv->chipset != VIA_DX9_0; in via_verify_command_stream()
1373 hc_state->dev = dev; in via_verify_command_stream()
1374 hc_state->unfinished = no_sequence; in via_verify_command_stream()
1375 hc_state->map_cache = NULL; in via_verify_command_stream()
1376 hc_state->agp = agp; in via_verify_command_stream()
1377 hc_state->buf_start = buf; in via_verify_command_stream()
1378 dev_priv->num_fire_offsets = 0; in via_verify_command_stream()
1420 return -EINVAL; in via_verify_command_stream()
1425 return -EINVAL; in via_verify_command_stream()
1435 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_parse_command_stream()
1477 return -EINVAL; in via_parse_command_stream()
1481 return -EINVAL; in via_parse_command_stream()
1509 int num_desc = vsg->num_desc; in via_unmap_blit_from_device()
1510 unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page; in via_unmap_blit_from_device()
1511 unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; in via_unmap_blit_from_device()
1512 drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + in via_unmap_blit_from_device()
1514 dma_addr_t next = vsg->chain_start; in via_unmap_blit_from_device()
1516 while (num_desc--) { in via_unmap_blit_from_device()
1517 if (descriptor_this_page-- == 0) { in via_unmap_blit_from_device()
1518 cur_descriptor_page--; in via_unmap_blit_from_device()
1519 descriptor_this_page = vsg->descriptors_per_page - 1; in via_unmap_blit_from_device()
1520 desc_ptr = vsg->desc_pages[cur_descriptor_page] + in via_unmap_blit_from_device()
1523 dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); in via_unmap_blit_from_device()
1524 dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction); in via_unmap_blit_from_device()
1525 next = (dma_addr_t) desc_ptr->next; in via_unmap_blit_from_device()
1526 desc_ptr--; in via_unmap_blit_from_device()
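via_unmap_blit_from_device walks the descriptor chain tail-to-head, unmapping each descriptor (dma_unmap_single) and the page chunk it points at (dma_unmap_page). The fields used above (mem_addr, dev_addr, size, next) suggest a descriptor of roughly this shape; treat it as a sketch, not the header's exact definition:

    typedef struct _drm_via_descriptor {
        uint32_t mem_addr;   /* bus address of the system-memory chunk */
        uint32_t dev_addr;   /* device (frame-buffer) address */
        uint32_t size;       /* length of this chunk in bytes */
        uint32_t next;       /* bus address of the next descriptor */
    } drm_via_descriptor_t;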
1544 unsigned char *mem_addr = xfer->mem_addr; in via_map_blit_for_device()
1547 uint32_t fb_addr = xfer->fb_addr; in via_map_blit_for_device()
1557 desc_ptr = vsg->desc_pages[cur_descriptor_page]; in via_map_blit_for_device()
1559 for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) { in via_map_blit_for_device()
1561 line_len = xfer->line_length; in via_map_blit_for_device()
1567 remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len); in via_map_blit_for_device()
1568 line_len -= remaining_len; in via_map_blit_for_device()
1571 desc_ptr->mem_addr = in via_map_blit_for_device()
1572 dma_map_page(&pdev->dev, in via_map_blit_for_device()
1573 vsg->pages[VIA_PFN(cur_mem) - in via_map_blit_for_device()
1576 vsg->direction); in via_map_blit_for_device()
1577 desc_ptr->dev_addr = cur_fb; in via_map_blit_for_device()
1579 desc_ptr->size = remaining_len; in via_map_blit_for_device()
1580 desc_ptr->next = (uint32_t) next; in via_map_blit_for_device()
1581 next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), in via_map_blit_for_device()
1584 if (++num_descriptors_this_page >= vsg->descriptors_per_page) { in via_map_blit_for_device()
1586 desc_ptr = vsg->desc_pages[++cur_descriptor_page]; in via_map_blit_for_device()
1595 mem_addr += xfer->mem_stride; in via_map_blit_for_device()
1596 fb_addr += xfer->fb_stride; in via_map_blit_for_device()
1600 vsg->chain_start = next; in via_map_blit_for_device()
1601 vsg->state = dr_via_device_mapped; in via_map_blit_for_device()
1603 vsg->num_desc = num_desc; in via_map_blit_for_device()
1616 switch (vsg->state) { in via_free_sg_info()
1621 for (i = 0; i < vsg->num_desc_pages; ++i) { in via_free_sg_info()
1622 if (vsg->desc_pages[i] != NULL) in via_free_sg_info()
1623 free_page((unsigned long)vsg->desc_pages[i]); in via_free_sg_info()
1625 kfree(vsg->desc_pages); in via_free_sg_info()
1628 unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages, in via_free_sg_info()
1629 (vsg->direction == DMA_FROM_DEVICE)); in via_free_sg_info()
1632 vfree(vsg->pages); in via_free_sg_info()
1635 vsg->state = dr_via_sg_init; in via_free_sg_info()
1637 vfree(vsg->bounce_buffer); in via_free_sg_info()
1638 vsg->bounce_buffer = NULL; in via_free_sg_info()
1639 vsg->free_on_sequence = 0; in via_free_sg_info()
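The via_free_sg_info fragments read naturally as one fall-through switch over vsg->state, tearing resources down in reverse order of construction. Reassembled as a sketch from the lines above (case labels inferred from the dr_via_* state names):

    switch (vsg->state) {
    case dr_via_device_mapped:
        via_unmap_blit_from_device(pdev, vsg);
        fallthrough;
    case dr_via_desc_pages_alloc:
        for (i = 0; i < vsg->num_desc_pages; ++i) {
            if (vsg->desc_pages[i] != NULL)
                free_page((unsigned long)vsg->desc_pages[i]);
        }
        kfree(vsg->desc_pages);
        fallthrough;
    case dr_via_pages_locked:
        unpin_user_pages_dirty_lock(vsg->pages, vsg->num_pages,
                                    (vsg->direction == DMA_FROM_DEVICE));
        fallthrough;
    case dr_via_pages_alloc:
        vfree(vsg->pages);
        fallthrough;
    default:
        vsg->state = dr_via_sg_init;
    }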
1648 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_fire_dmablit()
1656 via_write(dev_priv, VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); in via_fire_dmablit()
1670 unsigned long first_pfn = VIA_PFN(xfer->mem_addr); in via_lock_all_dma_pages()
1671 vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) - in via_lock_all_dma_pages()
1674 vsg->pages = vzalloc(array_size(sizeof(struct page *), vsg->num_pages)); in via_lock_all_dma_pages()
1675 if (NULL == vsg->pages) in via_lock_all_dma_pages()
1676 return -ENOMEM; in via_lock_all_dma_pages()
1677 ret = pin_user_pages_fast((unsigned long)xfer->mem_addr, in via_lock_all_dma_pages()
1678 vsg->num_pages, in via_lock_all_dma_pages()
1679 vsg->direction == DMA_FROM_DEVICE ? FOLL_WRITE : 0, in via_lock_all_dma_pages()
1680 vsg->pages); in via_lock_all_dma_pages()
1681 if (ret != vsg->num_pages) { in via_lock_all_dma_pages()
1684 vsg->state = dr_via_pages_locked; in via_lock_all_dma_pages()
1685 return -EINVAL; in via_lock_all_dma_pages()
1687 vsg->state = dr_via_pages_locked; in via_lock_all_dma_pages()
1702 vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t); in via_alloc_desc_pages()
1703 vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / in via_alloc_desc_pages()
1704 vsg->descriptors_per_page; in via_alloc_desc_pages()
1706 if (NULL == (vsg->desc_pages = kcalloc(vsg->num_desc_pages, sizeof(void *), GFP_KERNEL))) in via_alloc_desc_pages()
1707 return -ENOMEM; in via_alloc_desc_pages()
1709 vsg->state = dr_via_desc_pages_alloc; in via_alloc_desc_pages()
1710 for (i = 0; i < vsg->num_desc_pages; ++i) { in via_alloc_desc_pages()
1711 if (NULL == (vsg->desc_pages[i] = in via_alloc_desc_pages()
1713 return -ENOMEM; in via_alloc_desc_pages()
1715 DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, in via_alloc_desc_pages()
1716 vsg->num_desc); in via_alloc_desc_pages()
1723 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_abort_dmablit()
1731 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_dmablit_engine_off()
1745 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_dmablit_handler()
1746 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; in via_dmablit_handler()
1756 spin_lock(&blitq->blit_lock); in via_dmablit_handler()
1758 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_handler()
1760 done_transfer = blitq->is_active && in via_dmablit_handler()
1762 done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE)); in via_dmablit_handler()
1764 cur = blitq->cur; in via_dmablit_handler()
1767 blitq->blits[cur]->aborted = blitq->aborting; in via_dmablit_handler()
1768 blitq->done_blit_handle++; in via_dmablit_handler()
1769 wake_up(blitq->blit_queue + cur); in via_dmablit_handler()
1774 blitq->cur = cur; in via_dmablit_handler()
1782 blitq->is_active = 0; in via_dmablit_handler()
1783 blitq->aborting = 0; in via_dmablit_handler()
1784 schedule_work(&blitq->wq); in via_dmablit_handler()
1786 } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { in via_dmablit_handler()
1793 blitq->aborting = 1; in via_dmablit_handler()
1794 blitq->end = jiffies + HZ; in via_dmablit_handler()
1797 if (!blitq->is_active) { in via_dmablit_handler()
1798 if (blitq->num_outstanding) { in via_dmablit_handler()
1799 via_fire_dmablit(dev, blitq->blits[cur], engine); in via_dmablit_handler()
1800 blitq->is_active = 1; in via_dmablit_handler()
1801 blitq->cur = cur; in via_dmablit_handler()
1802 blitq->num_outstanding--; in via_dmablit_handler()
1803 blitq->end = jiffies + HZ; in via_dmablit_handler()
1804 if (!timer_pending(&blitq->poll_timer)) in via_dmablit_handler()
1805 mod_timer(&blitq->poll_timer, jiffies + 1); in via_dmablit_handler()
1807 if (timer_pending(&blitq->poll_timer)) in via_dmablit_handler()
1808 del_timer(&blitq->poll_timer); in via_dmablit_handler()
1814 spin_unlock(&blitq->blit_lock); in via_dmablit_handler()
1816 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_handler()
1829 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_active()
1835 active = ((blitq->done_blit_handle - handle) > (1 << 23)) && in via_dmablit_active()
1836 ((blitq->cur_blit_handle - handle) <= (1 << 23)); in via_dmablit_active()
1839 slot = handle - blitq->done_blit_handle + blitq->cur - 1; in via_dmablit_active()
1841 slot -= VIA_NUM_BLIT_SLOTS; in via_dmablit_active()
1842 *queue = blitq->blit_queue + slot; in via_dmablit_active()
1845 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_active()
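The test at 1835-1836 is wraparound-safe sequence arithmetic: a handle counts as active when it sits in the window already handed out (at or before cur_blit_handle) but not yet completed (after done_blit_handle), with 1 << 23 as the guard band for 32-bit wrap. The same idiom in isolation (helper name hypothetical):

    /* Active iff done < handle <= cur, modulo 2^32. */
    static int blit_handle_active(uint32_t done, uint32_t cur, uint32_t handle)
    {
        return ((done - handle) > (1u << 23)) &&
               ((cur - handle) <= (1u << 23));
    }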
1857 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_dmablit_sync()
1858 drm_via_blitq_t *blitq = dev_priv->blit_queues + engine; in via_dmablit_sync()
1883 struct drm_device *dev = blitq->dev; in via_dmablit_timer()
1885 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); in via_dmablit_timer()
1892 if (!timer_pending(&blitq->poll_timer)) { in via_dmablit_timer()
1893 mod_timer(&blitq->poll_timer, jiffies + 1); in via_dmablit_timer()
1914 struct drm_device *dev = blitq->dev; in via_dmablit_workqueue()
1915 struct pci_dev *pdev = to_pci_dev(dev->dev); in via_dmablit_workqueue()
1922 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues)); in via_dmablit_workqueue()
1924 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_workqueue()
1926 while (blitq->serviced != blitq->cur) { in via_dmablit_workqueue()
1928 cur_released = blitq->serviced++; in via_dmablit_workqueue()
1932 if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) in via_dmablit_workqueue()
1933 blitq->serviced = 0; in via_dmablit_workqueue()
1935 cur_sg = blitq->blits[cur_released]; in via_dmablit_workqueue()
1936 blitq->num_free++; in via_dmablit_workqueue()
1938 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_workqueue()
1940 wake_up(&blitq->busy_queue); in via_dmablit_workqueue()
1945 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_workqueue()
1948 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_workqueue()
1958 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_init_dmablit()
1959 struct pci_dev *pdev = to_pci_dev(dev->dev); in via_init_dmablit()
1965 blitq = dev_priv->blit_queues + i; in via_init_dmablit()
1966 blitq->dev = dev; in via_init_dmablit()
1967 blitq->cur_blit_handle = 0; in via_init_dmablit()
1968 blitq->done_blit_handle = 0; in via_init_dmablit()
1969 blitq->head = 0; in via_init_dmablit()
1970 blitq->cur = 0; in via_init_dmablit()
1971 blitq->serviced = 0; in via_init_dmablit()
1972 blitq->num_free = VIA_NUM_BLIT_SLOTS - 1; in via_init_dmablit()
1973 blitq->num_outstanding = 0; in via_init_dmablit()
1974 blitq->is_active = 0; in via_init_dmablit()
1975 blitq->aborting = 0; in via_init_dmablit()
1976 spin_lock_init(&blitq->blit_lock); in via_init_dmablit()
1978 init_waitqueue_head(blitq->blit_queue + j); in via_init_dmablit()
1979 init_waitqueue_head(&blitq->busy_queue); in via_init_dmablit()
1980 INIT_WORK(&blitq->wq, via_dmablit_workqueue); in via_init_dmablit()
1981 timer_setup(&blitq->poll_timer, via_dmablit_timer, 0); in via_init_dmablit()
1991 struct pci_dev *pdev = to_pci_dev(dev->dev); in via_build_sg_info()
1992 int draw = xfer->to_fb; in via_build_sg_info()
1995 vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE; in via_build_sg_info()
1996 vsg->bounce_buffer = NULL; in via_build_sg_info()
1998 vsg->state = dr_via_sg_init; in via_build_sg_info()
2000 if (xfer->num_lines <= 0 || xfer->line_length <= 0) { in via_build_sg_info()
2002 return -EINVAL; in via_build_sg_info()
2012 if ((xfer->mem_stride - xfer->line_length) > 2*PAGE_SIZE) { in via_build_sg_info()
2014 "Length: %d\n", xfer->mem_stride, xfer->line_length); in via_build_sg_info()
2015 return -EINVAL; in via_build_sg_info()
2018 if ((xfer->mem_stride == xfer->line_length) && in via_build_sg_info()
2019 (xfer->fb_stride == xfer->line_length)) { in via_build_sg_info()
2020 xfer->mem_stride *= xfer->num_lines; in via_build_sg_info()
2021 xfer->line_length = xfer->mem_stride; in via_build_sg_info()
2022 xfer->fb_stride = xfer->mem_stride; in via_build_sg_info()
2023 xfer->num_lines = 1; in via_build_sg_info()
2031 if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { in via_build_sg_info()
2033 return -EINVAL; in via_build_sg_info()
2041 if (xfer->mem_stride < xfer->line_length || in via_build_sg_info()
2042 abs(xfer->fb_stride) < xfer->line_length) { in via_build_sg_info()
2043 DRM_ERROR("Invalid frame-buffer / memory stride.\n"); in via_build_sg_info()
2044 return -EINVAL; in via_build_sg_info()
2054 if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || in via_build_sg_info()
2055 ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { in via_build_sg_info()
2057 return -EINVAL; in via_build_sg_info()
2060 if ((((unsigned long)xfer->mem_addr & 15) || in via_build_sg_info()
2061 ((unsigned long)xfer->fb_addr & 3)) || in via_build_sg_info()
2062 ((xfer->num_lines > 1) && in via_build_sg_info()
2063 ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { in via_build_sg_info()
2065 return -EINVAL; in via_build_sg_info()
2088 * to become available. Otherwise -EBUSY is returned.
2096 DRM_DEBUG("Num free is %d\n", blitq->num_free); in via_dmablit_grab_slot()
2097 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_grab_slot()
2098 while (blitq->num_free == 0) { in via_dmablit_grab_slot()
2099 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_grab_slot()
2101 VIA_WAIT_ON(ret, blitq->busy_queue, HZ, blitq->num_free > 0); in via_dmablit_grab_slot()
2103 return (-EINTR == ret) ? -EAGAIN : ret; in via_dmablit_grab_slot()
2105 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_grab_slot()
2108 blitq->num_free--; in via_dmablit_grab_slot()
2109 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_grab_slot()
2122 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit_release_slot()
2123 blitq->num_free++; in via_dmablit_release_slot()
2124 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit_release_slot()
2125 wake_up(&blitq->busy_queue); in via_dmablit_release_slot()
2134 drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; in via_dmablit()
2143 return -EINVAL; in via_dmablit()
2146 engine = (xfer->to_fb) ? 0 : 1; in via_dmablit()
2147 blitq = dev_priv->blit_queues + engine; in via_dmablit()
2152 return -ENOMEM; in via_dmablit()
2159 spin_lock_irqsave(&blitq->blit_lock, irqsave); in via_dmablit()
2161 blitq->blits[blitq->head++] = vsg; in via_dmablit()
2162 if (blitq->head >= VIA_NUM_BLIT_SLOTS) in via_dmablit()
2163 blitq->head = 0; in via_dmablit()
2164 blitq->num_outstanding++; in via_dmablit()
2165 xfer->sync.sync_handle = ++blitq->cur_blit_handle; in via_dmablit()
2167 spin_unlock_irqrestore(&blitq->blit_lock, irqsave); in via_dmablit()
2168 xfer->sync.engine = engine; in via_dmablit()
2178 * case it returns with -EAGAIN for the signal to be delivered.
2187 if (sync->engine >= VIA_NUM_BLIT_ENGINES) in via_dma_blit_sync()
2188 return -EINVAL; in via_dma_blit_sync()
2190 err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); in via_dma_blit_sync()
2192 if (-EINTR == err) in via_dma_blit_sync()
2193 err = -EAGAIN; in via_dma_blit_sync()
2200 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should be retried.
2216 drm_via_private_t *dev_priv = dev->dev_private; in via_get_vblank_counter()
2221 return atomic_read(&dev_priv->vbl_received); in via_get_vblank_counter()
2227 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_driver_irq_handler()
2231 drm_via_irq_t *cur_irq = dev_priv->via_irqs; in via_driver_irq_handler()
2236 atomic_inc(&dev_priv->vbl_received); in via_driver_irq_handler()
2237 if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { in via_driver_irq_handler()
2239 if (dev_priv->last_vblank_valid) { in via_driver_irq_handler()
2240 dev_priv->nsec_per_vblank = in via_driver_irq_handler()
2242 dev_priv->last_vblank) >> 4; in via_driver_irq_handler()
2244 dev_priv->last_vblank = cur_vblank; in via_driver_irq_handler()
2245 dev_priv->last_vblank_valid = 1; in via_driver_irq_handler()
2247 if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { in via_driver_irq_handler()
2249 ktime_to_ns(dev_priv->nsec_per_vblank)); in via_driver_irq_handler()
2255 for (i = 0; i < dev_priv->num_irqs; ++i) { in via_driver_irq_handler()
2256 if (status & cur_irq->pending_mask) { in via_driver_irq_handler()
2257 atomic_inc(&cur_irq->irq_received); in via_driver_irq_handler()
2258 wake_up(&cur_irq->irq_queue); in via_driver_irq_handler()
2260 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) in via_driver_irq_handler()
2262 else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) in via_driver_irq_handler()
2286 dev_priv->irq_pending_mask); in viadrv_acknowledge_irqs()
2292 drm_via_private_t *dev_priv = dev->dev_private; in via_enable_vblank()
2297 return -EINVAL; in via_enable_vblank()
2311 drm_via_private_t *dev_priv = dev->dev_private; in via_disable_vblank()
2328 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_driver_irq_wait()
2339 return -EINVAL; in via_driver_irq_wait()
2344 return -EINVAL; in via_driver_irq_wait()
2347 real_irq = dev_priv->irq_map[irq]; in via_driver_irq_wait()
2352 return -EINVAL; in via_driver_irq_wait()
2355 masks = dev_priv->irq_masks; in via_driver_irq_wait()
2356 cur_irq = dev_priv->via_irqs + real_irq; in via_driver_irq_wait()
2359 VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, in via_driver_irq_wait()
2362 cur_irq_sequence = atomic_read(&cur_irq->irq_received); in via_driver_irq_wait()
2364 VIA_WAIT_ON(ret, cur_irq->irq_queue, 3 * HZ, in via_driver_irq_wait()
2366 (((cur_irq_sequence = atomic_read(&cur_irq->irq_received)) - *sequence) <= (1 << 23))); in via_driver_irq_wait()
2380 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_driver_irq_preinstall()
2387 cur_irq = dev_priv->via_irqs; in via_driver_irq_preinstall()
2389 dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; in via_driver_irq_preinstall()
2390 dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; in via_driver_irq_preinstall()
2392 if (dev_priv->chipset == VIA_PRO_GROUP_A || in via_driver_irq_preinstall()
2393 dev_priv->chipset == VIA_DX9_0) { in via_driver_irq_preinstall()
2394 dev_priv->irq_masks = via_pro_group_a_irqs; in via_driver_irq_preinstall()
2395 dev_priv->num_irqs = via_num_pro_group_a; in via_driver_irq_preinstall()
2396 dev_priv->irq_map = via_irqmap_pro_group_a; in via_driver_irq_preinstall()
2398 dev_priv->irq_masks = via_unichrome_irqs; in via_driver_irq_preinstall()
2399 dev_priv->num_irqs = via_num_unichrome; in via_driver_irq_preinstall()
2400 dev_priv->irq_map = via_irqmap_unichrome; in via_driver_irq_preinstall()
2403 for (i = 0; i < dev_priv->num_irqs; ++i) { in via_driver_irq_preinstall()
2404 atomic_set(&cur_irq->irq_received, 0); in via_driver_irq_preinstall()
2405 cur_irq->enable_mask = dev_priv->irq_masks[i][0]; in via_driver_irq_preinstall()
2406 cur_irq->pending_mask = dev_priv->irq_masks[i][1]; in via_driver_irq_preinstall()
2407 init_waitqueue_head(&cur_irq->irq_queue); in via_driver_irq_preinstall()
2408 dev_priv->irq_enable_mask |= cur_irq->enable_mask; in via_driver_irq_preinstall()
2409 dev_priv->irq_pending_mask |= cur_irq->pending_mask; in via_driver_irq_preinstall()
2415 dev_priv->last_vblank_valid = 0; in via_driver_irq_preinstall()
2420 ~(dev_priv->irq_enable_mask)); in via_driver_irq_preinstall()
2429 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_driver_irq_postinstall()
2434 return -EINVAL; in via_driver_irq_postinstall()
2438 | dev_priv->irq_enable_mask); in via_driver_irq_postinstall()
2449 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_driver_irq_uninstall()
2462 ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask)); in via_driver_irq_uninstall()
2471 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_wait_irq()
2472 drm_via_irq_t *cur_irq = dev_priv->via_irqs; in via_wait_irq()
2475 if (irqwait->request.irq >= dev_priv->num_irqs) { in via_wait_irq()
2477 irqwait->request.irq); in via_wait_irq()
2478 return -EINVAL; in via_wait_irq()
2481 cur_irq += irqwait->request.irq; in via_wait_irq()
2483 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { in via_wait_irq()
2485 irqwait->request.sequence += in via_wait_irq()
2486 atomic_read(&cur_irq->irq_received); in via_wait_irq()
2487 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; in via_wait_irq()
2492 return -EINVAL; in via_wait_irq()
2495 if (irqwait->request.type & VIA_IRQ_SIGNAL) { in via_wait_irq()
2497 return -EINVAL; in via_wait_irq()
2500 force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE); in via_wait_irq()
2502 ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence, in via_wait_irq()
2503 &irqwait->request.sequence); in via_wait_irq()
2505 irqwait->reply.tval_sec = now.tv_sec; in via_wait_irq()
2506 irqwait->reply.tval_usec = now.tv_nsec / NSEC_PER_USEC; in via_wait_irq()
2518 init_waitqueue_head(&(dev_priv->decoder_queue[i])); in via_init_futex()
2519 XVMCLOCKPTR(dev_priv->sarea_priv, i)->lock = 0; in via_init_futex()
2532 if (!dev_priv->sarea_priv) in via_release_futex()
2536 lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i); in via_release_futex()
2540 wake_up(&(dev_priv->decoder_queue[i])); in via_release_futex()
2549 drm_via_futex_t *fx = data; in via_decoder_futex() local
2551 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_decoder_futex()
2552 drm_via_sarea_t *sAPriv = dev_priv->sarea_priv; in via_decoder_futex()
2557 if (fx->lock >= VIA_NR_XVMC_LOCKS) in via_decoder_futex()
2558 return -EFAULT; in via_decoder_futex()
2560 lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock); in via_decoder_futex()
2562 switch (fx->func) { in via_decoder_futex()
2564 VIA_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock], in via_decoder_futex()
2565 (fx->ms / 10) * (HZ / 100), *lock != fx->val); in via_decoder_futex()
2568 wake_up(&(dev_priv->decoder_queue[fx->lock])); in via_decoder_futex()
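The timeout at 2565, (fx->ms / 10) * (HZ / 100), converts milliseconds to jiffies in two truncating steps: with HZ = 250 (an assumed configuration) and fx->ms = 50 it yields 5 * 2 = 10 jiffies, i.e. 40 ms rather than 50 ms, because HZ / 100 rounds 2.5 down to 2.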
2577 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_agp_init()
2579 mutex_lock(&dev->struct_mutex); in via_agp_init()
2580 drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT); in via_agp_init()
2582 dev_priv->agp_initialized = 1; in via_agp_init()
2583 dev_priv->agp_offset = agp->offset; in via_agp_init()
2584 mutex_unlock(&dev->struct_mutex); in via_agp_init()
2586 DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size); in via_agp_init()
2593 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_fb_init()
2595 mutex_lock(&dev->struct_mutex); in via_fb_init()
2596 drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT); in via_fb_init()
2598 dev_priv->vram_initialized = 1; in via_fb_init()
2599 dev_priv->vram_offset = fb->offset; in via_fb_init()
2601 mutex_unlock(&dev->struct_mutex); in via_fb_init()
2602 DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size); in via_fb_init()
2610 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_final_context()
2616 if (list_is_singular(&dev->ctxlist)) { in via_final_context()
2627 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_lastclose()
2632 mutex_lock(&dev->struct_mutex); in via_lastclose()
2633 if (dev_priv->vram_initialized) { in via_lastclose()
2634 drm_mm_takedown(&dev_priv->vram_mm); in via_lastclose()
2635 dev_priv->vram_initialized = 0; in via_lastclose()
2637 if (dev_priv->agp_initialized) { in via_lastclose()
2638 drm_mm_takedown(&dev_priv->agp_mm); in via_lastclose()
2639 dev_priv->agp_initialized = 0; in via_lastclose()
2641 mutex_unlock(&dev->struct_mutex); in via_lastclose()
2650 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_mem_alloc()
2651 struct via_file_private *file_priv = file->driver_priv; in via_mem_alloc()
2654 if (mem->type > VIA_MEM_AGP) { in via_mem_alloc()
2656 return -EINVAL; in via_mem_alloc()
2658 mutex_lock(&dev->struct_mutex); in via_mem_alloc()
2659 if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : in via_mem_alloc()
2660 dev_priv->agp_initialized)) { in via_mem_alloc()
2661 mutex_unlock(&dev->struct_mutex); in via_mem_alloc()
2664 return -EINVAL; in via_mem_alloc()
2669 retval = -ENOMEM; in via_mem_alloc()
2673 tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; in via_mem_alloc()
2674 if (mem->type == VIA_MEM_AGP) in via_mem_alloc()
2675 retval = drm_mm_insert_node(&dev_priv->agp_mm, in via_mem_alloc()
2676 &item->mm_node, in via_mem_alloc()
2679 retval = drm_mm_insert_node(&dev_priv->vram_mm, in via_mem_alloc()
2680 &item->mm_node, in via_mem_alloc()
2685 retval = idr_alloc(&dev_priv->object_idr, item, 1, 0, GFP_KERNEL); in via_mem_alloc()
2690 list_add(&item->owner_list, &file_priv->obj_list); in via_mem_alloc()
2691 mutex_unlock(&dev->struct_mutex); in via_mem_alloc()
2693 mem->offset = ((mem->type == VIA_MEM_VIDEO) ? in via_mem_alloc()
2694 dev_priv->vram_offset : dev_priv->agp_offset) + in via_mem_alloc()
2695 ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT); in via_mem_alloc()
2696 mem->index = user_key; in via_mem_alloc()
2701 drm_mm_remove_node(&item->mm_node); in via_mem_alloc()
2704 mutex_unlock(&dev->struct_mutex); in via_mem_alloc()
2706 mem->offset = 0; in via_mem_alloc()
2707 mem->size = 0; in via_mem_alloc()
2708 mem->index = 0; in via_mem_alloc()
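The expression at 2673 pairs with the mask defined at 303: round the byte count up to the allocator's granularity, then shift it into drm_mm units. Assuming VIA_MM_ALIGN_SHIFT is 4 (16-byte units; an assumption, since the define itself is not in this listing):

    /* e.g. mem->size = 100: (100 + 15) >> 4 = 7 units, 112 bytes reserved */
    tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;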
2716 drm_via_private_t *dev_priv = dev->dev_private; in via_mem_free()
2720 mutex_lock(&dev->struct_mutex); in via_mem_free()
2721 obj = idr_find(&dev_priv->object_idr, mem->index); in via_mem_free()
2723 mutex_unlock(&dev->struct_mutex); in via_mem_free()
2724 return -EINVAL; in via_mem_free()
2727 idr_remove(&dev_priv->object_idr, mem->index); in via_mem_free()
2728 list_del(&obj->owner_list); in via_mem_free()
2729 drm_mm_remove_node(&obj->mm_node); in via_mem_free()
2731 mutex_unlock(&dev->struct_mutex); in via_mem_free()
2733 DRM_DEBUG("free = 0x%lx\n", mem->index); in via_mem_free()
2742 struct via_file_private *file_priv = file->driver_priv; in via_reclaim_buffers_locked()
2745 if (!(dev->master && file->master->lock.hw_lock)) in via_reclaim_buffers_locked()
2748 drm_legacy_idlelock_take(&file->master->lock); in via_reclaim_buffers_locked()
2750 mutex_lock(&dev->struct_mutex); in via_reclaim_buffers_locked()
2751 if (list_empty(&file_priv->obj_list)) { in via_reclaim_buffers_locked()
2752 mutex_unlock(&dev->struct_mutex); in via_reclaim_buffers_locked()
2753 drm_legacy_idlelock_release(&file->master->lock); in via_reclaim_buffers_locked()
2760 list_for_each_entry_safe(entry, next, &file_priv->obj_list, in via_reclaim_buffers_locked()
2762 list_del(&entry->owner_list); in via_reclaim_buffers_locked()
2763 drm_mm_remove_node(&entry->mm_node); in via_reclaim_buffers_locked()
2766 mutex_unlock(&dev->struct_mutex); in via_reclaim_buffers_locked()
2768 drm_legacy_idlelock_release(&file->master->lock); in via_reclaim_buffers_locked()
2775 drm_via_private_t *dev_priv = dev->dev_private; in via_do_init_map()
2779 dev_priv->sarea = drm_legacy_getsarea(dev); in via_do_init_map()
2780 if (!dev_priv->sarea) { in via_do_init_map()
2782 dev->dev_private = (void *)dev_priv; in via_do_init_map()
2784 return -EINVAL; in via_do_init_map()
2787 dev_priv->fb = drm_legacy_findmap(dev, init->fb_offset); in via_do_init_map()
2788 if (!dev_priv->fb) { in via_do_init_map()
2790 dev->dev_private = (void *)dev_priv; in via_do_init_map()
2792 return -EINVAL; in via_do_init_map()
2794 dev_priv->mmio = drm_legacy_findmap(dev, init->mmio_offset); in via_do_init_map()
2795 if (!dev_priv->mmio) { in via_do_init_map()
2797 dev->dev_private = (void *)dev_priv; in via_do_init_map()
2799 return -EINVAL; in via_do_init_map()
2802 dev_priv->sarea_priv = in via_do_init_map()
2803 (drm_via_sarea_t *) ((u8 *) dev_priv->sarea->handle + in via_do_init_map()
2804 init->sarea_priv_offset); in via_do_init_map()
2806 dev_priv->agpAddr = init->agpAddr; in via_do_init_map()
2812 dev->dev_private = (void *)dev_priv; in via_do_init_map()
2829 switch (init->func) { in via_map_init()
2836 return -EINVAL; in via_map_init()
2841 struct pci_dev *pdev = to_pci_dev(dev->dev); in via_driver_load()
2847 return -ENOMEM; in via_driver_load()
2849 idr_init_base(&dev_priv->object_idr, 1); in via_driver_load()
2850 dev->dev_private = (void *)dev_priv; in via_driver_load()
2852 dev_priv->chipset = chipset; in via_driver_load()
2867 drm_via_private_t *dev_priv = dev->dev_private; in via_driver_unload()
2869 idr_destroy(&dev_priv->object_idr); in via_driver_unload()
2887 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; in via_cmdbuf_space()
2888 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; in via_cmdbuf_space()
2890 return ((hw_addr <= dev_priv->dma_low) ? in via_cmdbuf_space()
2891 (dev_priv->dma_high + hw_addr - dev_priv->dma_low) : in via_cmdbuf_space()
2892 (hw_addr - dev_priv->dma_low)); in via_cmdbuf_space()
2901 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; in via_cmdbuf_lag()
2902 uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base; in via_cmdbuf_lag()
2904 return ((hw_addr <= dev_priv->dma_low) ? in via_cmdbuf_lag()
2905 (dev_priv->dma_low - hw_addr) : in via_cmdbuf_lag()
2906 (dev_priv->dma_wrap + dev_priv->dma_low - hw_addr)); in via_cmdbuf_lag()
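via_cmdbuf_space and via_cmdbuf_lag are complementary views of the ring: bytes free ahead of the CPU write pointer (dma_low) versus bytes the GPU read pointer (hw_addr) has yet to consume. A worked example with illustrative numbers, assuming a 64 KiB ring (dma_high = dma_wrap = 0x10000):

    /* dma_low = 0x3000 (CPU), hw_addr = 0x1000 (GPU), hw_addr <= dma_low:
     *   space = dma_high + hw_addr - dma_low = 0xE000 bytes free
     *   lag   = dma_low - hw_addr            = 0x2000 bytes pending
     * After the CPU wraps: dma_low = 0x0800, hw_addr = 0xF000:
     *   space = hw_addr - dma_low            = 0xE800 bytes free
     *   lag   = dma_wrap + dma_low - hw_addr = 0x1800 bytes pending
     */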
2916 uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; in via_cmdbuf_wait()
2920 hw_addr_ptr = dev_priv->hw_addr_ptr; in via_cmdbuf_wait()
2921 cur_addr = dev_priv->dma_low; in via_cmdbuf_wait()
2925 hw_addr = *hw_addr_ptr - agp_base; in via_cmdbuf_wait()
2926 if (count-- == 0) { in via_cmdbuf_wait()
2930 return -1; in via_cmdbuf_wait()
2948 if ((dev_priv->dma_low + size + 4 * CMDBUF_ALIGNMENT_SIZE) > in via_check_dma()
2949 dev_priv->dma_high) { in via_check_dma()
2955 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); in via_check_dma()
2960 if (dev->dev_private) { in via_dma_cleanup()
2962 (drm_via_private_t *) dev->dev_private; in via_dma_cleanup()
2964 if (dev_priv->ring.virtual_start && dev_priv->mmio) { in via_dma_cleanup()
2967 drm_legacy_ioremapfree(&dev_priv->ring.map, dev); in via_dma_cleanup()
2968 dev_priv->ring.virtual_start = NULL; in via_dma_cleanup()
2980 if (!dev_priv || !dev_priv->mmio) { in via_initialize()
2982 return -EFAULT; in via_initialize()
2985 if (dev_priv->ring.virtual_start != NULL) { in via_initialize()
2987 return -EFAULT; in via_initialize()
2990 if (!dev->agp || !dev->agp->base) { in via_initialize()
2992 return -EFAULT; in via_initialize()
2995 if (dev_priv->chipset == VIA_DX9_0) { in via_initialize()
2997 return -EINVAL; in via_initialize()
3000 dev_priv->ring.map.offset = dev->agp->base + init->offset; in via_initialize()
3001 dev_priv->ring.map.size = init->size; in via_initialize()
3002 dev_priv->ring.map.type = 0; in via_initialize()
3003 dev_priv->ring.map.flags = 0; in via_initialize()
3004 dev_priv->ring.map.mtrr = 0; in via_initialize()
3006 drm_legacy_ioremap(&dev_priv->ring.map, dev); in via_initialize()
3008 if (dev_priv->ring.map.handle == NULL) { in via_initialize()
3012 return -ENOMEM; in via_initialize()
3015 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; in via_initialize()
3017 dev_priv->dma_ptr = dev_priv->ring.virtual_start; in via_initialize()
3018 dev_priv->dma_low = 0; in via_initialize()
3019 dev_priv->dma_high = init->size; in via_initialize()
3020 dev_priv->dma_wrap = init->size; in via_initialize()
3021 dev_priv->dma_offset = init->offset; in via_initialize()
3022 dev_priv->last_pause_ptr = NULL; in via_initialize()
3023 dev_priv->hw_addr_ptr = in via_initialize()
3024 (volatile uint32_t *)((char *)dev_priv->mmio->handle + in via_initialize()
3025 init->reg_pause_addr); in via_initialize()
3034 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; in via_dma_init()
3038 switch (init->func) { in via_dma_init()
3041 retcode = -EPERM; in via_dma_init()
3047 retcode = -EPERM; in via_dma_init()
3052 retcode = (dev_priv->ring.virtual_start != NULL) ? in via_dma_init()
3053 0 : -EFAULT; in via_dma_init()
3056 retcode = -EINVAL; in via_dma_init()
3069 dev_priv = (drm_via_private_t *) dev->dev_private; in via_dispatch_cmdbuffer()
3071 if (dev_priv->ring.virtual_start == NULL) { in via_dispatch_cmdbuffer()
3073 return -EFAULT; in via_dispatch_cmdbuffer()
3076 if (cmd->size > VIA_PCI_BUF_SIZE) in via_dispatch_cmdbuffer()
3077 return -ENOMEM; in via_dispatch_cmdbuffer()
3079 if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size)) in via_dispatch_cmdbuffer()
3080 return -EFAULT; in via_dispatch_cmdbuffer()
3089 via_verify_command_stream((uint32_t *) dev_priv->pci_buf, in via_dispatch_cmdbuffer()
3090 cmd->size, dev, 1))) { in via_dispatch_cmdbuffer()
3094 vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size); in via_dispatch_cmdbuffer()
3096 return -EAGAIN; in via_dispatch_cmdbuffer()
3098 memcpy(vb, dev_priv->pci_buf, cmd->size); in via_dispatch_cmdbuffer()
3100 dev_priv->dma_low += cmd->size; in via_dispatch_cmdbuffer()
3107 if (cmd->size < 0x100) in via_dispatch_cmdbuffer()
3108 via_pad_cache(dev_priv, (0x100 - cmd->size) >> 3); in via_dispatch_cmdbuffer()
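The pad at 3108 tops small submissions up to 0x100 bytes (matching the 0x102 reservation at 3094), converting the shortfall into quadwords: for cmd->size = 0x40, (0x100 - 0x40) >> 3 = 24 quadwords, i.e. 192 bytes of filler.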
3116 drm_via_private_t *dev_priv = dev->dev_private; in via_driver_dma_quiescent()
3119 return -EBUSY; in via_driver_dma_quiescent()
3138 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); in via_cmdbuffer()
3147 drm_via_private_t *dev_priv = dev->dev_private; in via_dispatch_pci_cmdbuffer()
3150 if (cmd->size > VIA_PCI_BUF_SIZE) in via_dispatch_pci_cmdbuffer()
3151 return -ENOMEM; in via_dispatch_pci_cmdbuffer()
3152 if (copy_from_user(dev_priv->pci_buf, cmd->buf, cmd->size)) in via_dispatch_pci_cmdbuffer()
3153 return -EFAULT; in via_dispatch_pci_cmdbuffer()
3156 via_verify_command_stream((uint32_t *) dev_priv->pci_buf, in via_dispatch_pci_cmdbuffer()
3157 cmd->size, dev, 0))) { in via_dispatch_pci_cmdbuffer()
3162 via_parse_command_stream(dev, (const uint32_t *)dev_priv->pci_buf, in via_dispatch_pci_cmdbuffer()
3163 cmd->size); in via_dispatch_pci_cmdbuffer()
3174 DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size); in via_pci_cmdbuffer()
3183 for (; qw_count > 0; --qw_count) in via_align_buffer()
3195 return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low); in via_get_dma()
3199 * Hooks a segment of data into the tail of the ring-buffer by
3208 volatile uint32_t *paused_at = dev_priv->last_pause_ptr; in via_hook_segment()
3214 (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1); in via_hook_segment()
3220 reader = *(dev_priv->hw_addr_ptr); in via_hook_segment()
3221 ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) + in via_hook_segment()
3222 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; in via_hook_segment()
3224 dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1; in via_hook_segment()
3233 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; in via_hook_segment()
3235 while (diff == 0 && count--) { in via_hook_segment()
3239 reader = *(dev_priv->hw_addr_ptr); in via_hook_segment()
3240 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; in via_hook_segment()
3246 reader = *(dev_priv->hw_addr_ptr); in via_hook_segment()
3247 diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff; in via_hook_segment()
3248 diff &= (dev_priv->dma_high - 1); in via_hook_segment()
3249 if (diff != 0 && diff < (dev_priv->dma_high >> 1)) { in via_hook_segment()
3252 ptr, reader, dev_priv->dma_diff); in via_hook_segment()
3273 while (!(via_read(dev_priv, VIA_REG_STATUS) & VIA_VR_QUEUE_BUSY) && --count) in via_wait_idle()
3279 --count; in via_wait_idle()
3298 agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; in via_align_cmd()
3299 qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) - in via_align_cmd()
3300 ((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3); in via_align_cmd()
3303 agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3); in via_align_cmd()
3308 vb = via_align_buffer(dev_priv, vb, qw_pad_count - 1); in via_align_cmd()
3324 dev_priv->dma_low = 0; in via_cmdbuf_start()
3326 agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr; in via_cmdbuf_start()
3328 end_addr = agp_base + dev_priv->dma_high; in via_cmdbuf_start()
3335 dev_priv->last_pause_ptr = in via_cmdbuf_start()
3337 &pause_addr_hi, &pause_addr_lo, 1) - 1; in via_cmdbuf_start()
3340 (void) *(volatile uint32_t *)dev_priv->last_pause_ptr; in via_cmdbuf_start()
3353 dev_priv->dma_diff = 0; in via_cmdbuf_start()
3356 while (!(via_read(dev_priv, 0x41c) & 0x80000000) && count--); in via_cmdbuf_start()
3358 reader = *(dev_priv->hw_addr_ptr); in via_cmdbuf_start()
3359 ptr = ((volatile char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) + in via_cmdbuf_start()
3360 dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4; in via_cmdbuf_start()
3369 dev_priv->dma_diff = ptr - reader; in via_cmdbuf_start()
3400 dev_priv->dma_wrap = dev_priv->dma_low; in via_cmdbuf_jump()
3406 dev_priv->dma_low = 0; in via_cmdbuf_jump()
3415 &pause_addr_lo, 0) - 1; in via_cmdbuf_jump()
3420 dma_low_save1 = dev_priv->dma_low; in via_cmdbuf_jump()
3433 &pause_addr_lo, 0) - 1; in via_cmdbuf_jump()
3438 dma_low_save2 = dev_priv->dma_low; in via_cmdbuf_jump()
3439 dev_priv->dma_low = dma_low_save1; in via_cmdbuf_jump()
3441 dev_priv->dma_low = dma_low_save2; in via_cmdbuf_jump()
3484 dev_priv = (drm_via_private_t *) dev->dev_private; in via_cmdbuf_size()
3486 if (dev_priv->ring.virtual_start == NULL) { in via_cmdbuf_size()
3488 return -EFAULT; in via_cmdbuf_size()
3492 tmp_size = d_siz->size; in via_cmdbuf_size()
3493 switch (d_siz->func) { in via_cmdbuf_size()
3495 while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size) in via_cmdbuf_size()
3496 && --count) { in via_cmdbuf_size()
3497 if (!d_siz->wait) in via_cmdbuf_size()
3502 ret = -EAGAIN; in via_cmdbuf_size()
3506 while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size) in via_cmdbuf_size()
3507 && --count) { in via_cmdbuf_size()
3508 if (!d_siz->wait) in via_cmdbuf_size()
3513 ret = -EAGAIN; in via_cmdbuf_size()
3517 ret = -EFAULT; in via_cmdbuf_size()
3519 d_siz->size = tmp_size; in via_cmdbuf_size()
3549 return -ENOMEM; in via_driver_open()
3551 file->driver_priv = file_priv; in via_driver_open()
3553 INIT_LIST_HEAD(&file_priv->obj_list); in via_driver_open()
3560 struct via_file_private *file_priv = file->driver_priv; in via_driver_postclose()