Lines matching refs:alias_guid (references to alias_guid in drivers/infiniband/hw/mlx4/alias_GUID.c)

85 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_update_cache_on_guid_change()
135 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
136 if (dev->sriov.alias_guid.ports_guid[port_index].state_flags & in mlx4_ib_slave_alias_guid_event()
141 alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
153 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
156 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
159 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
163 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
165 dev->sriov.alias_guid.ports_guid[port_index]. in mlx4_ib_slave_alias_guid_event()
170 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_slave_alias_guid_event()
203 rec = &dev->sriov.alias_guid.ports_guid[port_num - 1]. in mlx4_ib_notify_slaves_on_guid_change()
205 guid_indexes = be64_to_cpu((__force __be64) dev->sriov.alias_guid. in mlx4_ib_notify_slaves_on_guid_change()
235 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in mlx4_ib_notify_slaves_on_guid_change()
248 alias_guid.ag_work_lock, flags); in mlx4_ib_notify_slaves_on_guid_change()
252 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, in mlx4_ib_notify_slaves_on_guid_change()
306 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
326 rec = &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
329 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
423 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in aliasguid_query_handler()
434 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
437 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, in aliasguid_query_handler()
438 &dev->sriov.alias_guid.ports_guid[port_index]. in aliasguid_query_handler()
447 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in aliasguid_query_handler()
457 dev->sriov.alias_guid.ports_guid[port - 1].all_rec_per_port[index].status in invalidate_guid_record()
463 *(u64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
476 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
478 if (dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
480 dev->sriov.alias_guid.ports_guid[port - 1]. in invalidate_guid_record()
499 &dev->sriov.alias_guid.ports_guid[port - 1].cb_list; in set_guid_rec()
539 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
541 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
544 ib_sa_guid_info_rec_query(dev->sriov.alias_guid.sa_client, in set_guid_rec()
554 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
557 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
567 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
570 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in set_guid_rec()
571 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in set_guid_rec()
574 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in set_guid_rec()
595 *(__be64 *)&dev->sriov.alias_guid.ports_guid[port - 1]. in mlx4_ib_guid_port_init()
613 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
615 if (dev->sriov.alias_guid.ports_guid[port - 1].state_flags & in mlx4_ib_invalidate_all_guid_record()
618 dev->sriov.alias_guid.ports_guid[port - 1].state_flags &= in mlx4_ib_invalidate_all_guid_record()
630 cancel_delayed_work(&dev->sriov.alias_guid. in mlx4_ib_invalidate_all_guid_record()
632 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in mlx4_ib_invalidate_all_guid_record()
633 &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work, in mlx4_ib_invalidate_all_guid_record()
636 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_invalidate_all_guid_record()
650 &dev->sriov.alias_guid.ports_guid[port]. in set_required_record()
699 rec = dev->sriov.alias_guid.ports_guid[port]. in get_low_record_time_index()
729 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
739 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags); in get_next_record_to_update()
754 alias_guid); in alias_guid_work()
781 spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
787 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port]. in mlx4_ib_init_alias_guid_work()
789 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, in mlx4_ib_init_alias_guid_work()
790 &dev->sriov.alias_guid.ports_guid[port].alias_guid_work, 0); in mlx4_ib_init_alias_guid_work()
792 spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags1); in mlx4_ib_init_alias_guid_work()
806 det = &sriov->alias_guid.ports_guid[i]; in mlx4_ib_destroy_alias_guid_service()
808 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); in mlx4_ib_destroy_alias_guid_service()
816 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags); in mlx4_ib_destroy_alias_guid_service()
820 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); in mlx4_ib_destroy_alias_guid_service()
822 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags); in mlx4_ib_destroy_alias_guid_service()
825 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service()
826 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
827 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_destroy_alias_guid_service()
839 dev->sriov.alias_guid.sa_client = in mlx4_ib_init_alias_guid_service()
840 kzalloc(sizeof *dev->sriov.alias_guid.sa_client, GFP_KERNEL); in mlx4_ib_init_alias_guid_service()
841 if (!dev->sriov.alias_guid.sa_client) in mlx4_ib_init_alias_guid_service()
844 ib_sa_register_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
846 spin_lock_init(&dev->sriov.alias_guid.ag_work_lock); in mlx4_ib_init_alias_guid_service()
856 memset(&dev->sriov.alias_guid.ports_guid[i], 0, in mlx4_ib_init_alias_guid_service()
858 dev->sriov.alias_guid.ports_guid[i].state_flags |= in mlx4_ib_init_alias_guid_service()
862 memset(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
864 sizeof(dev->sriov.alias_guid.ports_guid[i]. in mlx4_ib_init_alias_guid_service()
867 INIT_LIST_HEAD(&dev->sriov.alias_guid.ports_guid[i].cb_list); in mlx4_ib_init_alias_guid_service()
875 dev->sriov.alias_guid.ports_guid[i].parent = &dev->sriov.alias_guid; in mlx4_ib_init_alias_guid_service()
876 dev->sriov.alias_guid.ports_guid[i].port = i; in mlx4_ib_init_alias_guid_service()
879 dev->sriov.alias_guid.ports_guid[i].wq = in mlx4_ib_init_alias_guid_service()
881 if (!dev->sriov.alias_guid.ports_guid[i].wq) { in mlx4_ib_init_alias_guid_service()
885 INIT_DELAYED_WORK(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work, in mlx4_ib_init_alias_guid_service()
892 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_init_alias_guid_service()
893 dev->sriov.alias_guid.ports_guid[i].wq = NULL; in mlx4_ib_init_alias_guid_service()
897 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
898 kfree(dev->sriov.alias_guid.sa_client); in mlx4_ib_init_alias_guid_service()
899 dev->sriov.alias_guid.sa_client = NULL; in mlx4_ib_init_alias_guid_service()
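
Nearly every reference above follows one pattern: per-port alias-GUID state is touched only while holding ag_work_lock, and changes are propagated by cancelling and requeuing that port's alias_guid_work on its dedicated workqueue (see the fragments from set_guid_rec(), mlx4_ib_invalidate_all_guid_record() and mlx4_ib_init_alias_guid_work()). The sketch below illustrates that pattern only; requeue_alias_guid_work is a hypothetical helper name, and the layout of struct mlx4_ib_dev is assumed from the field accesses listed above rather than taken from the driver's headers.

	/*
	 * Sketch of the lock-then-requeue pattern visible in the references
	 * above; illustrative only, not the driver's actual code.
	 */
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	static void requeue_alias_guid_work(struct mlx4_ib_dev *dev, int port)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags);

		/* drop any pending run so the work is not queued twice */
		cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work);

		/* schedule an immediate pass on the per-port workqueue */
		queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
				   &dev->sriov.alias_guid.ports_guid[port - 1].alias_guid_work,
				   0);

		spin_unlock_irqrestore(&dev->sriov.alias_guid.ag_work_lock, flags);
	}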