Lines matching full:hw in drivers/vdpa/ifcvf/ifcvf_base.c (Intel IFC VF vDPA driver); each match keeps its original source line number and the name of its enclosing function.

13 struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)  in vf_to_adapter()  argument
15 return container_of(hw, struct ifcvf_adapter, vf); in vf_to_adapter()
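vf_to_adapter() recovers the adapter that embeds the hardware state: container_of() subtracts the offset of the vf member, so it only works because struct ifcvf_hw is embedded (not pointed to) in struct ifcvf_adapter. A minimal sketch of that relationship, with the adapter reduced to the two members this listing actually shows (pdev and vf):

struct ifcvf_adapter {
	struct pci_dev *pdev;
	struct ifcvf_hw vf;	/* embedded, so container_of() is valid */
};

struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
{
	/* Walk back from the embedded member to its containing struct. */
	return container_of(hw, struct ifcvf_adapter, vf);
}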
18 u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector) in ifcvf_set_vq_vector() argument
20 struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; in ifcvf_set_vq_vector()
28 u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector) in ifcvf_set_config_vector() argument
30 struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; in ifcvf_set_config_vector()
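Both vector helpers follow the virtio-pci modern contract: program an MSI-X vector, then read it back, since the device answers VIRTIO_MSI_NO_VECTOR when it cannot honour the mapping. A sketch of the queue variant under that assumption, using only struct virtio_pci_common_cfg fields; the config variant is the same exchange against cfg->msix_config, without the queue_select step:

u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	/* Select the queue, write its MSI-X vector, then read it back. */
	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}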
37 static void __iomem *get_cap_addr(struct ifcvf_hw *hw, in get_cap_addr() argument
49 ifcvf = vf_to_adapter(hw); in get_cap_addr()
65 return hw->base[bar] + offset; in get_cap_addr()
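get_cap_addr() turns a struct virtio_pci_cap read from PCI config space into a mapped address: the capability names a BAR plus an offset, and hw->base[] holds the already-ioremapped BARs. A sketch of the validation implied by the elided body; the bound macro and the messages are assumptions, only the final translation comes from the listing:

static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	struct ifcvf_adapter *ifcvf = vf_to_adapter(hw);
	struct pci_dev *pdev = ifcvf->pdev;
	u32 length = le32_to_cpu(cap->length);
	u32 offset = le32_to_cpu(cap->offset);
	u8 bar = cap->bar;

	/* Assumed check: the capability must reference a valid BAR. */
	if (bar >= PCI_STD_NUM_BARS) {
		IFCVF_DBG(pdev, "invalid bar number %u in capability\n", bar);
		return NULL;
	}

	/* Assumed check: the window must fit inside that BAR. */
	if (offset + length > pci_resource_len(pdev, bar)) {
		IFCVF_DBG(pdev, "offset(%u) + len(%u) overflows bar%u\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}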
82 int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) in ifcvf_init_hw() argument
110 hw->common_cfg = get_cap_addr(hw, &cap); in ifcvf_init_hw()
111 IFCVF_DBG(pdev, "hw->common_cfg = %p\n", in ifcvf_init_hw()
112 hw->common_cfg); in ifcvf_init_hw()
116 &hw->notify_off_multiplier); in ifcvf_init_hw()
117 hw->notify_bar = cap.bar; in ifcvf_init_hw()
118 hw->notify_base = get_cap_addr(hw, &cap); in ifcvf_init_hw()
119 hw->notify_base_pa = pci_resource_start(pdev, cap.bar) + in ifcvf_init_hw()
121 IFCVF_DBG(pdev, "hw->notify_base = %p\n", in ifcvf_init_hw()
122 hw->notify_base); in ifcvf_init_hw()
125 hw->isr = get_cap_addr(hw, &cap); in ifcvf_init_hw()
126 IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr); in ifcvf_init_hw()
129 hw->dev_cfg = get_cap_addr(hw, &cap); in ifcvf_init_hw()
130 hw->cap_dev_config_size = le32_to_cpu(cap.length); in ifcvf_init_hw()
131 IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg); in ifcvf_init_hw()
139 if (hw->common_cfg == NULL || hw->notify_base == NULL || in ifcvf_init_hw()
140 hw->isr == NULL || hw->dev_cfg == NULL) { in ifcvf_init_hw()
145 hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues); in ifcvf_init_hw()
147 for (i = 0; i < hw->nr_vring; i++) { in ifcvf_init_hw()
148 vp_iowrite16(i, &hw->common_cfg->queue_select); in ifcvf_init_hw()
149 notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off); in ifcvf_init_hw()
150 hw->vring[i].notify_addr = hw->notify_base + in ifcvf_init_hw()
151 notify_off * hw->notify_off_multiplier; in ifcvf_init_hw()
152 hw->vring[i].notify_pa = hw->notify_base_pa + in ifcvf_init_hw()
153 notify_off * hw->notify_off_multiplier; in ifcvf_init_hw()
154 hw->vring[i].irq = -EINVAL; in ifcvf_init_hw()
157 hw->lm_cfg = hw->base[IFCVF_LM_BAR]; in ifcvf_init_hw()
161 hw->common_cfg, hw->notify_base, hw->isr, in ifcvf_init_hw()
162 hw->dev_cfg, hw->notify_off_multiplier); in ifcvf_init_hw()
164 hw->vqs_reused_irq = -EINVAL; in ifcvf_init_hw()
165 hw->config_irq = -EINVAL; in ifcvf_init_hw()
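Before the assignments above, ifcvf_init_hw() has to locate the common, notify, ISR and device-config windows by walking the PCI capability list and matching vendor capabilities by cfg_type; the listing keeps only the lines that mention hw. A hedged sketch of that walk, split into its own helper for readability: ifcvf_probe_caps() and read_virtio_cap() are hypothetical names (the driver does this inline, and reads the capability bytes its own way), everything else is standard PCI and virtio-pci API.

static void ifcvf_probe_caps(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u8 pos;

	pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);

	while (pos) {
		if (read_virtio_cap(pdev, pos, &cap) < 0)	/* assumed helper */
			break;

		/* Only virtio vendor-specific capabilities are interesting. */
		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			/* The 32-bit multiplier sits right after the cap header. */
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					     le32_to_cpu(cap.offset);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			hw->cap_dev_config_size = le32_to_cpu(cap.length);
			break;
		}
next:
		pos = cap.cap_next;
	}
}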
170 u8 ifcvf_get_status(struct ifcvf_hw *hw) in ifcvf_get_status() argument
172 return vp_ioread8(&hw->common_cfg->device_status); in ifcvf_get_status()
175 void ifcvf_set_status(struct ifcvf_hw *hw, u8 status) in ifcvf_set_status() argument
177 vp_iowrite8(status, &hw->common_cfg->device_status); in ifcvf_set_status()
180 void ifcvf_reset(struct ifcvf_hw *hw) in ifcvf_reset() argument
182 hw->config_cb.callback = NULL; in ifcvf_reset()
183 hw->config_cb.private = NULL; in ifcvf_reset()
185 ifcvf_set_status(hw, 0); in ifcvf_reset()
187 ifcvf_get_status(hw); in ifcvf_reset()
190 static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status) in ifcvf_add_status() argument
193 status |= ifcvf_get_status(hw); in ifcvf_add_status()
195 ifcvf_set_status(hw, status); in ifcvf_add_status()
196 ifcvf_get_status(hw); in ifcvf_add_status()
199 u64 ifcvf_get_hw_features(struct ifcvf_hw *hw) in ifcvf_get_hw_features() argument
201 struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; in ifcvf_get_hw_features()
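Device features are 64 bits wide, but the common config window exposes them through a 32-bit select/read pair, so the elided body has to fetch the two halves separately. A sketch of that exchange (caching the result in hw->hw_features is inferred from ifcvf_get_features() just below):

u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 lo, hi;

	/* Select word 0 and read the low half, then word 1 for the high half. */
	vp_iowrite32(0, &cfg->device_feature_select);
	lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	hi = vp_ioread32(&cfg->device_feature);

	hw->hw_features = ((u64)hi << 32) | lo;

	return hw->hw_features;
}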
216 u64 ifcvf_get_features(struct ifcvf_hw *hw) in ifcvf_get_features() argument
218 return hw->hw_features; in ifcvf_get_features()
221 int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features) in ifcvf_verify_min_features() argument
223 struct ifcvf_adapter *ifcvf = vf_to_adapter(hw); in ifcvf_verify_min_features()
233 u32 ifcvf_get_config_size(struct ifcvf_hw *hw) in ifcvf_get_config_size() argument
238 u32 cap_size = hw->cap_dev_config_size; in ifcvf_get_config_size()
241 adapter = vf_to_adapter(hw); in ifcvf_get_config_size()
247 switch (hw->dev_type) { in ifcvf_get_config_size()
256 IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type); in ifcvf_get_config_size()
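The elided branches clamp the reported size: assuming the supported types are net and block (suggested by hw->dev_type plus the unsupported-ID error), the result is the smaller of the size advertised by the device-config capability and the matching virtio config struct, and 0 for anything else. A sketch under that reading:

u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	struct ifcvf_adapter *adapter = vf_to_adapter(hw);
	u32 cap_size = hw->cap_dev_config_size;
	u32 config_size;

	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = min_t(u32, cap_size, sizeof(struct virtio_net_config));
		break;
	case VIRTIO_ID_BLOCK:
		config_size = min_t(u32, cap_size, sizeof(struct virtio_blk_config));
		break;
	default:
		config_size = 0;
		IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}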
262 void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset, in ifcvf_read_dev_config() argument
268 WARN_ON(offset + length > hw->config_size); in ifcvf_read_dev_config()
270 old_gen = vp_ioread8(&hw->common_cfg->config_generation); in ifcvf_read_dev_config()
273 *p++ = vp_ioread8(hw->dev_cfg + offset + i); in ifcvf_read_dev_config()
275 new_gen = vp_ioread8(&hw->common_cfg->config_generation); in ifcvf_read_dev_config()
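The two config_generation reads bracket the copy: if the device changed its config space mid-read, the generation values differ and the whole read must be retried, which is the loop hidden by the elided lines. A sketch (the destination parameter names are assumptions):

void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);

	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		/* Byte-wise copy out of the device config window. */
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);	/* retry on a torn read */
}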
279 void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset, in ifcvf_write_dev_config() argument
286 WARN_ON(offset + length > hw->config_size); in ifcvf_write_dev_config()
288 vp_iowrite8(*p++, hw->dev_cfg + offset + i); in ifcvf_write_dev_config()
291 static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features) in ifcvf_set_features() argument
293 struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; in ifcvf_set_features()
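ifcvf_set_features() mirrors the feature read: the accepted 64-bit mask is written through the 32-bit guest_feature_select/guest_feature pair. A sketch:

static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	/* Low half, then high half of the driver-accepted feature bits. */
	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}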
302 static int ifcvf_config_features(struct ifcvf_hw *hw) in ifcvf_config_features() argument
306 ifcvf = vf_to_adapter(hw); in ifcvf_config_features()
307 ifcvf_set_features(hw, hw->req_features); in ifcvf_config_features()
308 ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK); in ifcvf_config_features()
310 if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) { in ifcvf_config_features()
318 u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) in ifcvf_get_vq_state() argument
325 ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; in ifcvf_get_vq_state()
333 int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num) in ifcvf_set_vq_state() argument
339 ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; in ifcvf_set_vq_state()
342 hw->vring[qid].last_avail_idx = num; in ifcvf_set_vq_state()
348 static int ifcvf_hw_enable(struct ifcvf_hw *hw) in ifcvf_hw_enable() argument
353 cfg = hw->common_cfg; in ifcvf_hw_enable()
354 for (i = 0; i < hw->nr_vring; i++) { in ifcvf_hw_enable()
355 if (!hw->vring[i].ready) in ifcvf_hw_enable()
359 vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo, in ifcvf_hw_enable()
361 vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo, in ifcvf_hw_enable()
363 vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo, in ifcvf_hw_enable()
365 vp_iowrite16(hw->vring[i].size, &cfg->queue_size); in ifcvf_hw_enable()
366 ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx); in ifcvf_hw_enable()
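Two steps are elided around the address and size programming shown: each ring must be selected through queue_select before its registers are touched, and it only goes live once queue_enable is written at the end. A sketch of the loop with those standard virtio-pci steps filled in (stopping at the first unready ring is an assumption):

static int ifcvf_hw_enable(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 i;

	for (i = 0; i < hw->nr_vring; i++) {
		if (!hw->vring[i].ready)
			break;	/* assumed: stop at the first ring that is not ready */

		vp_iowrite16(i, &cfg->queue_select);
		vp_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
				     &cfg->queue_desc_hi);
		vp_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
				     &cfg->queue_avail_hi);
		vp_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
				     &cfg->queue_used_hi);
		vp_iowrite16(hw->vring[i].size, &cfg->queue_size);
		ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);

		/* Only now mark the queue live. */
		vp_iowrite16(1, &cfg->queue_enable);
	}

	return 0;
}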
373 static void ifcvf_hw_disable(struct ifcvf_hw *hw) in ifcvf_hw_disable() argument
377 ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR); in ifcvf_hw_disable()
378 for (i = 0; i < hw->nr_vring; i++) { in ifcvf_hw_disable()
379 ifcvf_set_vq_vector(hw, i, VIRTIO_MSI_NO_VECTOR); in ifcvf_hw_disable()
383 int ifcvf_start_hw(struct ifcvf_hw *hw) in ifcvf_start_hw() argument
385 ifcvf_reset(hw); in ifcvf_start_hw()
386 ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE); in ifcvf_start_hw()
387 ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER); in ifcvf_start_hw()
389 if (ifcvf_config_features(hw) < 0) in ifcvf_start_hw()
392 if (ifcvf_hw_enable(hw) < 0) in ifcvf_start_hw()
395 ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK); in ifcvf_start_hw()
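Read top to bottom, ifcvf_start_hw() is the standard virtio status handshake: reset, ACKNOWLEDGE, DRIVER, feature negotiation (ifcvf_config_features() checks that FEATURES_OK stuck), ring programming, then DRIVER_OK. A sketch with the elided error returns filled in (the specific error codes are assumptions):

int ifcvf_start_hw(struct ifcvf_hw *hw)
{
	ifcvf_reset(hw);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);

	/* Features must be accepted before the rings are touched. */
	if (ifcvf_config_features(hw) < 0)
		return -EINVAL;		/* assumed error code */

	if (ifcvf_hw_enable(hw) < 0)
		return -EINVAL;		/* assumed error code */

	/* Device is fully operational from the driver's point of view. */
	ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);

	return 0;
}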
400 void ifcvf_stop_hw(struct ifcvf_hw *hw) in ifcvf_stop_hw() argument
402 ifcvf_hw_disable(hw); in ifcvf_stop_hw()
403 ifcvf_reset(hw); in ifcvf_stop_hw()
406 void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid) in ifcvf_notify_queue() argument
408 vp_iowrite16(qid, hw->vring[qid].notify_addr); in ifcvf_notify_queue()
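The notify address written here is the per-queue doorbell computed in ifcvf_init_hw() (notify_base plus queue_notify_off times notify_off_multiplier), so kicking a queue is a single 16-bit write of the queue index. A usage sketch from a vDPA callback's point of view; vdpa_to_vf() is a hypothetical accessor, not part of this listing:

/* Hypothetical kick_vq callback wiring: look up the hw state behind
 * the vdpa device and ring that queue's doorbell. */
static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
{
	struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);	/* assumed helper */

	ifcvf_notify_queue(vf, qid);
}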