Lines matching refs: its_dev

107 	u64			(*get_msi_base)(struct its_device *its_dev);
179 static struct its_collection *dev_event_to_col(struct its_device *its_dev, in dev_event_to_col() argument
182 struct its_node *its = its_dev->its; in dev_event_to_col()
184 return its->collections + its_dev->event_map.col_map[event]; in dev_event_to_col()
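
The dev_event_to_col() lines above (179-184) resolve an event to its target collection by using col_map[] as an index into the ITS node's collections array. A minimal userspace sketch of that lookup, with hypothetical trimmed-down types rather than the driver's real structures:

    #include <stdio.h>

    /* Hypothetical, trimmed-down stand-ins for the driver's structures. */
    struct collection { int target_cpu; };

    struct node {
        struct collection collections[4];   /* one entry per CPU/collection */
    };

    struct device {
        struct node *its;
        int col_map[8];                      /* event -> collection index */
    };

    /* Same shape as dev_event_to_col(): col_map[event] picks an entry
     * in the ITS node's collections array. */
    static struct collection *dev_event_to_col(struct device *dev, int event)
    {
        return dev->its->collections + dev->col_map[event];
    }

    int main(void)
    {
        struct node n = { .collections = { {0}, {1}, {2}, {3} } };
        struct device d = { .its = &n, .col_map = { [3] = 2 } };

        printf("event 3 -> CPU %d\n", dev_event_to_col(&d, 3)->target_cpu);
        return 0;
    }
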
1024 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_get_event_id() local
1025 return d->hwirq - its_dev->event_map.lpi_base; in its_get_event_id()
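
its_get_event_id() (1024-1025) turns a Linux hwirq back into the device-local event number by subtracting the device's LPI base. Illustrative arithmetic only, with made-up values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed example: the device was allocated 32 LPIs starting at 8192. */
        uint32_t lpi_base = 8192;
        uint32_t hwirq    = 8197;

        /* Same arithmetic as its_get_event_id(): event IDs are 0-based
         * offsets within the device's LPI range. */
        uint32_t event = hwirq - lpi_base;

        printf("hwirq %u -> event %u\n", hwirq, event);   /* event 5 */
        return 0;
    }
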
1035 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in lpi_write_config() local
1039 prop_page = its_dev->event_map.vm->vprop_page; in lpi_write_config()
1040 map = &its_dev->event_map.vlpi_maps[event]; in lpi_write_config()
1068 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in lpi_update_config() local
1071 its_send_inv(its_dev, its_get_event_id(d)); in lpi_update_config()
1076 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_vlpi_set_doorbell() local
1079 if (its_dev->event_map.vlpi_maps[event].db_enabled == enable) in its_vlpi_set_doorbell()
1082 its_dev->event_map.vlpi_maps[event].db_enabled = enable; in its_vlpi_set_doorbell()
1094 its_send_vmovi(its_dev, event); in its_vlpi_set_doorbell()
1118 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_set_affinity() local
1127 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { in its_set_affinity()
1128 if (its_dev->its->numa_node >= 0) { in its_set_affinity()
1129 cpu_mask = cpumask_of_node(its_dev->its->numa_node); in its_set_affinity()
1141 if (cpu != its_dev->event_map.col_map[id]) { in its_set_affinity()
1142 target_col = &its_dev->its->collections[cpu]; in its_set_affinity()
1143 its_send_movi(its_dev, target_col, id); in its_set_affinity()
1144 its_dev->event_map.col_map[id] = cpu; in its_set_affinity()
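
its_set_affinity() (1118-1144) issues a MOVI command only when the chosen CPU differs from the cached col_map[] entry, and then refreshes the cache. A rough sketch of that compare-then-move pattern, with a stub standing in for the ITS command layer:

    #include <stdio.h>

    #define NR_EVENTS 8

    struct device {
        int col_map[NR_EVENTS];   /* cached target CPU per event */
    };

    /* Stub for its_send_movi(): the driver would queue a MOVI command here. */
    static void send_movi(struct device *dev, int cpu, int event)
    {
        (void)dev;
        printf("MOVI: event %d -> CPU %d\n", event, cpu);
    }

    static void set_affinity(struct device *dev, int event, int cpu)
    {
        /* Skip the command entirely if the event already targets this CPU. */
        if (cpu != dev->col_map[event]) {
            send_movi(dev, cpu, event);
            dev->col_map[event] = cpu;
        }
    }

    int main(void)
    {
        struct device d = { .col_map = { 0 } };

        set_affinity(&d, 2, 3);   /* target changed: issues MOVI */
        set_affinity(&d, 2, 3);   /* already there: no command   */
        return 0;
    }
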
1151 static u64 its_irq_get_msi_base(struct its_device *its_dev) in its_irq_get_msi_base() argument
1153 struct its_node *its = its_dev->its; in its_irq_get_msi_base()
1160 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_compose_msi_msg() local
1164 its = its_dev->its; in its_irq_compose_msi_msg()
1165 addr = its->get_msi_base(its_dev); in its_irq_compose_msi_msg()
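
its_irq_compose_msi_msg() (1160-1165) obtains the doorbell address through the get_msi_base hook (line 107) and uses the event ID as the MSI payload. A standalone sketch of the address split and payload, with an invented base address:

    #include <stdio.h>
    #include <stdint.h>

    struct msi_msg {
        uint32_t address_lo;
        uint32_t address_hi;
        uint32_t data;
    };

    int main(void)
    {
        /* Invented ITS base; 0x10040 is the GITS_TRANSLATER offset. */
        uint64_t doorbell = 0x8080000ULL + 0x10040;
        uint32_t event    = 5;              /* from its_get_event_id() */

        struct msi_msg msg = {
            .address_lo = (uint32_t)doorbell,           /* lower_32_bits() */
            .address_hi = (uint32_t)(doorbell >> 32),   /* upper_32_bits() */
            .data       = event,                        /* event ID is the payload */
        };

        printf("addr %#x:%#x data %u\n", msg.address_hi, msg.address_lo, msg.data);
        return 0;
    }
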
1178 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_set_irqchip_state() local
1185 its_send_int(its_dev, event); in its_irq_set_irqchip_state()
1187 its_send_clear(its_dev, event); in its_irq_set_irqchip_state()
1248 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_vlpi_map() local
1255 mutex_lock(&its_dev->event_map.vlpi_lock); in its_vlpi_map()
1257 if (!its_dev->event_map.vm) { in its_vlpi_map()
1260 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), in its_vlpi_map()
1267 its_dev->event_map.vm = info->map->vm; in its_vlpi_map()
1268 its_dev->event_map.vlpi_maps = maps; in its_vlpi_map()
1269 } else if (its_dev->event_map.vm != info->map->vm) { in its_vlpi_map()
1275 its_dev->event_map.vlpi_maps[event] = *info->map; in its_vlpi_map()
1279 its_send_vmovi(its_dev, event); in its_vlpi_map()
1282 its_map_vm(its_dev->its, info->map->vm); in its_vlpi_map()
1294 its_send_discard(its_dev, event); in its_vlpi_map()
1297 its_send_vmapti(its_dev, event); in its_vlpi_map()
1300 its_dev->event_map.nr_vlpis++; in its_vlpi_map()
1304 mutex_unlock(&its_dev->event_map.vlpi_lock); in its_vlpi_map()
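
its_vlpi_map() (1248-1304) allocates the per-device vlpi_maps[] array lazily, under event_map.vlpi_lock, the first time one of the device's events is forwarded to a VM, and counts mapped VLPIs in nr_vlpis. A simplified sketch of that lazy-allocation pattern (plain calloc and a pthread mutex stand in for the kernel primitives, and the VMOVI/VMAPTI distinction is elided):

    #include <pthread.h>
    #include <stdlib.h>

    struct vlpi_map { int vintid; };

    struct event_map {
        pthread_mutex_t  vlpi_lock;
        struct vlpi_map *vlpi_maps;   /* allocated on first mapping     */
        int              nr_lpis;     /* size of the device's LPI range */
        int              nr_vlpis;    /* how many events are VM-mapped  */
    };

    static int vlpi_map_one(struct event_map *em, int event, struct vlpi_map *map)
    {
        int ret = 0;

        pthread_mutex_lock(&em->vlpi_lock);

        if (!em->vlpi_maps) {
            /* First mapping for this device: allocate the whole array. */
            em->vlpi_maps = calloc(em->nr_lpis, sizeof(*em->vlpi_maps));
            if (!em->vlpi_maps) {
                ret = -1;
                goto out;
            }
        }

        em->vlpi_maps[event] = *map;   /* record the virtual mapping     */
        em->nr_vlpis++;                /* undone again by the unmap path */
    out:
        pthread_mutex_unlock(&em->vlpi_lock);
        return ret;
    }

    int main(void)
    {
        struct event_map em = { .nr_lpis = 8 };
        struct vlpi_map m = { .vintid = 42 };

        pthread_mutex_init(&em.vlpi_lock, NULL);
        return vlpi_map_one(&em, 3, &m);
    }
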
1310 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_vlpi_get() local
1314 mutex_lock(&its_dev->event_map.vlpi_lock); in its_vlpi_get()
1316 if (!its_dev->event_map.vm || in its_vlpi_get()
1317 !its_dev->event_map.vlpi_maps[event].vm) { in its_vlpi_get()
1323 *info->map = its_dev->event_map.vlpi_maps[event]; in its_vlpi_get()
1326 mutex_unlock(&its_dev->event_map.vlpi_lock); in its_vlpi_get()
1332 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_vlpi_unmap() local
1336 mutex_lock(&its_dev->event_map.vlpi_lock); in its_vlpi_unmap()
1338 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { in its_vlpi_unmap()
1344 its_send_discard(its_dev, event); in its_vlpi_unmap()
1348 its_send_mapti(its_dev, d->hwirq, event); in its_vlpi_unmap()
1354 its_unmap_vm(its_dev->its, its_dev->event_map.vm); in its_vlpi_unmap()
1360 if (!--its_dev->event_map.nr_vlpis) { in its_vlpi_unmap()
1361 its_dev->event_map.vm = NULL; in its_vlpi_unmap()
1362 kfree(its_dev->event_map.vlpi_maps); in its_vlpi_unmap()
1366 mutex_unlock(&its_dev->event_map.vlpi_lock); in its_vlpi_unmap()
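
its_vlpi_unmap() (1332-1362) tears one mapping down and, when nr_vlpis drops to zero, clears the per-device VM pointer and frees vlpi_maps[]. A small self-contained sketch of that free-on-last-unmap step:

    #include <stdlib.h>

    struct event_map {
        void *vm;            /* non-NULL while any event is VM-mapped */
        void *vlpi_maps;
        int   nr_vlpis;
    };

    /* Mirrors the tail of its_vlpi_unmap(): the per-device VM state only
     * goes away when the last mapped event is torn down. */
    static void vlpi_unmap_one(struct event_map *em)
    {
        if (!--em->nr_vlpis) {
            em->vm = NULL;
            free(em->vlpi_maps);
            em->vlpi_maps = NULL;
        }
    }

    int main(void)
    {
        struct event_map em = { .vm = (void *)1,
                                .vlpi_maps = malloc(16),
                                .nr_vlpis = 2 };

        vlpi_unmap_one(&em);   /* one mapping left: state kept     */
        vlpi_unmap_one(&em);   /* last one: vm cleared, maps freed */
        return 0;
    }
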
1372 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_vlpi_prop_update() local
1374 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) in its_vlpi_prop_update()
1388 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_set_vcpu_affinity() local
1392 if (!its_dev->its->is_v4) in its_irq_set_vcpu_affinity()
2078 struct its_device *its_dev = NULL, *tmp; in its_find_device() local
2085 its_dev = tmp; in its_find_device()
2092 return its_dev; in its_find_device()
2259 static void its_free_device(struct its_device *its_dev) in its_free_device() argument
2263 raw_spin_lock_irqsave(&its_dev->its->lock, flags); in its_free_device()
2264 list_del(&its_dev->entry); in its_free_device()
2265 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); in its_free_device()
2266 kfree(its_dev->itt); in its_free_device()
2267 kfree(its_dev); in its_free_device()
2289 struct its_device *its_dev; in its_msi_prepare() local
2314 its_dev = its_find_device(its, dev_id); in its_msi_prepare()
2315 if (its_dev) { in its_msi_prepare()
2325 its_dev = its_create_device(its, dev_id, nvec, true); in its_msi_prepare()
2326 if (!its_dev) in its_msi_prepare()
2331 info->scratchpad[0].ptr = its_dev; in its_msi_prepare()
2367 struct its_device *its_dev = info->scratchpad[0].ptr; in its_irq_domain_alloc() local
2373 err = its_alloc_device_irq(its_dev, &hwirq); in its_irq_domain_alloc()
2382 hwirq, &its_irq_chip, its_dev); in its_irq_domain_alloc()
2385 (int)(hwirq - its_dev->event_map.lpi_base), in its_irq_domain_alloc()
2395 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_domain_activate() local
2401 if (its_dev->its->numa_node >= 0) in its_irq_domain_activate()
2402 cpu_mask = cpumask_of_node(its_dev->its->numa_node); in its_irq_domain_activate()
2407 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) in its_irq_domain_activate()
2413 its_dev->event_map.col_map[event] = cpu; in its_irq_domain_activate()
2417 its_send_mapti(its_dev, d->hwirq, event); in its_irq_domain_activate()
2424 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_domain_deactivate() local
2428 its_send_discard(its_dev, event); in its_irq_domain_deactivate()
2435 struct its_device *its_dev = irq_data_get_irq_chip_data(d); in its_irq_domain_free() local
2444 clear_bit(event, its_dev->event_map.lpi_map); in its_irq_domain_free()
2451 if (bitmap_empty(its_dev->event_map.lpi_map, in its_irq_domain_free()
2452 its_dev->event_map.nr_lpis)) { in its_irq_domain_free()
2453 its_lpi_free(its_dev->event_map.lpi_map, in its_irq_domain_free()
2454 its_dev->event_map.lpi_base, in its_irq_domain_free()
2455 its_dev->event_map.nr_lpis); in its_irq_domain_free()
2456 kfree(its_dev->event_map.col_map); in its_irq_domain_free()
2459 its_send_mapd(its_dev, 0); in its_irq_domain_free()
2460 its_free_device(its_dev); in its_irq_domain_free()
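
its_irq_domain_free() (2435-2460) clears the event's bit in lpi_map and, once the bitmap is empty, releases the LPI range, the collection map and finally the device itself (after unmapping it with MAPD). A compact sketch of the bitmap-driven teardown, using a single unsigned long as the map and assuming at most 64 events:

    #include <stdio.h>
    #include <stdlib.h>

    struct device {
        unsigned long lpi_map;   /* one bit per allocated event */
        int          *col_map;
    };

    static void domain_free(struct device **devp, int event)
    {
        struct device *dev = *devp;

        dev->lpi_map &= ~(1UL << event);   /* clear_bit(event, lpi_map) */

        /* Only when no events remain does the device itself go away;
         * the driver also releases the LPI range and sends MAPD here. */
        if (!dev->lpi_map) {
            free(dev->col_map);
            free(dev);
            *devp = NULL;
        }
    }

    int main(void)
    {
        struct device *dev = malloc(sizeof(*dev));

        dev->lpi_map = (1UL << 1) | (1UL << 4);
        dev->col_map = calloc(64, sizeof(int));

        domain_free(&dev, 1);   /* events remain: device survives */
        domain_free(&dev, 4);   /* bitmap empty: device released  */
        printf("device %s\n", dev ? "alive" : "freed");
        return 0;
    }
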
3031 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) in its_irq_get_msi_base_pre_its() argument
3033 struct its_node *its = its_dev->its; in its_irq_get_msi_base_pre_its()
3042 return its->pre_its_base + (its_dev->device_id << 2); in its_irq_get_msi_base_pre_its()
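
its_irq_get_msi_base_pre_its() (3031-3042) is the pre-ITS flavour of the get_msi_base hook (line 107): each DeviceID gets its own 4-byte doorbell slot inside the pre-ITS window, hence the shift by 2. The arithmetic, with an invented window base:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Assumed pre-ITS window base; the real one comes from firmware. */
        uint64_t pre_its_base = 0x100000000ULL;
        uint32_t device_id    = 0x23;

        /* Same formula as the driver: one 4-byte doorbell per DeviceID,
         * so each device's MSI writes land at a distinct address. */
        uint64_t doorbell = pre_its_base + ((uint64_t)device_id << 2);

        printf("DeviceID %#x -> doorbell %#llx\n", device_id,
               (unsigned long long)doorbell);
        return 0;
    }
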