Lines Matching full:pg

Whole-word occurrences of the identifier pg, evidently from the Linux SCSI ALUA device handler (drivers/scsi/device_handler/scsi_dh_alua.c, judging by the alua_* functions and ALUA_DH_NAME). Each entry gives the source line number, the matched line, and the enclosing function; "local", "member", and "argument" are the indexer's annotations.

85 	struct alua_port_group __rcu *pg;  member
103 static bool alua_rtpg_queue(struct alua_port_group *pg,
110 struct alua_port_group *pg; in release_port_group() local
112 pg = container_of(kref, struct alua_port_group, kref); in release_port_group()
113 if (pg->rtpg_sdev) in release_port_group()
114 flush_delayed_work(&pg->rtpg_work); in release_port_group()
116 list_del(&pg->node); in release_port_group()
118 kfree_rcu(pg, rcu); in release_port_group()
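
These six matches are essentially all of release_port_group(), the kref release callback: flush any pending RTPG work, unlink the group from the global list, and free it after an RCU grace period. A sketch assembled from the fragments; the lock taken around list_del() does not appear in the matches, so the name port_group_lock is an assumption:

        static void release_port_group(struct kref *kref)
        {
                struct alua_port_group *pg;

                pg = container_of(kref, struct alua_port_group, kref);
                /* Make sure no worker still references the group. */
                if (pg->rtpg_sdev)
                        flush_delayed_work(&pg->rtpg_work);
                spin_lock(&port_group_lock);            /* assumed lock name */
                list_del(&pg->node);
                spin_unlock(&port_group_lock);
                /* RCU readers may still hold pg; defer the actual free. */
                kfree_rcu(pg, rcu);
        }
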
181 struct alua_port_group *pg; in alua_find_get_pg() local
186 list_for_each_entry(pg, &port_group_list, node) { in alua_find_get_pg()
187 if (pg->group_id != group_id) in alua_find_get_pg()
189 if (!pg->device_id_len || pg->device_id_len != id_size) in alua_find_get_pg()
191 if (strncmp(pg->device_id_str, id_str, id_size)) in alua_find_get_pg()
193 if (!kref_get_unless_zero(&pg->kref)) in alua_find_get_pg()
195 return pg; in alua_find_get_pg()
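
alua_find_get_pg() scans the global port group list for a matching device id and group id, and only returns a group whose refcount could still be raised: kref_get_unless_zero() skips groups already on their way to release_port_group(). A sketch of the loop, with the continue statements and the final NULL return filled in from context (callers are assumed to hold the list lock):

        static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
                                                        int group_id)
        {
                struct alua_port_group *pg;

                list_for_each_entry(pg, &port_group_list, node) {
                        if (pg->group_id != group_id)
                                continue;
                        if (!pg->device_id_len || pg->device_id_len != id_size)
                                continue;
                        if (strncmp(pg->device_id_str, id_str, id_size))
                                continue;
                        /* Skip groups whose last reference is already gone. */
                        if (!kref_get_unless_zero(&pg->kref))
                                continue;
                        return pg;
                }
                return NULL;
        }
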
213 struct alua_port_group *pg, *tmp_pg; in alua_alloc_pg() local
215 pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL); in alua_alloc_pg()
216 if (!pg) in alua_alloc_pg()
219 pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str, in alua_alloc_pg()
220 sizeof(pg->device_id_str)); in alua_alloc_pg()
221 if (pg->device_id_len <= 0) { in alua_alloc_pg()
229 pg->device_id_str[0] = '\0'; in alua_alloc_pg()
230 pg->device_id_len = 0; in alua_alloc_pg()
232 pg->group_id = group_id; in alua_alloc_pg()
233 pg->tpgs = tpgs; in alua_alloc_pg()
234 pg->state = SCSI_ACCESS_STATE_OPTIMAL; in alua_alloc_pg()
235 pg->valid_states = TPGS_SUPPORT_ALL; in alua_alloc_pg()
237 pg->flags |= ALUA_OPTIMIZE_STPG; in alua_alloc_pg()
238 kref_init(&pg->kref); in alua_alloc_pg()
239 INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work); in alua_alloc_pg()
240 INIT_LIST_HEAD(&pg->rtpg_list); in alua_alloc_pg()
241 INIT_LIST_HEAD(&pg->node); in alua_alloc_pg()
242 INIT_LIST_HEAD(&pg->dh_list); in alua_alloc_pg()
243 spin_lock_init(&pg->lock); in alua_alloc_pg()
246 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, in alua_alloc_pg()
250 kfree(pg); in alua_alloc_pg()
254 list_add(&pg->node, &port_group_list); in alua_alloc_pg()
257 return pg; in alua_alloc_pg()
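
alua_alloc_pg() zero-allocates the group, fills in the device id from the VPD data (falling back to an empty id on error, lines 229-230), initializes the kref, work item, lists and lock, and then repeats the lookup under the list lock so that two racing allocations for the same device collapse to one group. A sketch of that tail (lines 246-257); the lock name is assumed:

        spin_lock(&port_group_lock);            /* assumed lock name */
        tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
                                  group_id);
        if (tmp_pg) {
                /* Lost the race: drop our copy, use the existing group. */
                spin_unlock(&port_group_lock);
                kfree(pg);
                return tmp_pg;
        }
        list_add(&pg->node, &port_group_list);
        spin_unlock(&port_group_lock);
        return pg;
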
323 struct alua_port_group *pg, *old_pg = NULL; in alua_check_vpd() local
340 pg = alua_alloc_pg(sdev, group_id, tpgs); in alua_check_vpd()
341 if (IS_ERR(pg)) { in alua_check_vpd()
342 if (PTR_ERR(pg) == -ENOMEM) in alua_check_vpd()
346 if (pg->device_id_len) in alua_check_vpd()
349 ALUA_DH_NAME, pg->device_id_str, in alua_check_vpd()
358 old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); in alua_check_vpd()
359 if (old_pg != pg) { in alua_check_vpd()
361 if (h->pg) { in alua_check_vpd()
366 rcu_assign_pointer(h->pg, pg); in alua_check_vpd()
370 spin_lock_irqsave(&pg->lock, flags); in alua_check_vpd()
372 list_add_rcu(&h->node, &pg->dh_list); in alua_check_vpd()
373 spin_unlock_irqrestore(&pg->lock, flags); in alua_check_vpd()
375 alua_rtpg_queue(rcu_dereference_protected(h->pg, in alua_check_vpd()
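
alua_check_vpd() publishes the new group through h->pg with RCU: the old pointer is read under h->pg_lock, the handler is moved onto the new group's dh_list, and an RTPG is queued to learn the real access state. A sketch of the swap; unlinking from the old group's dh_list is inferred from the list_add_rcu() on line 372 and is an assumption:

        spin_lock(&h->pg_lock);
        old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
        if (old_pg != pg) {
                if (h->pg) {
                        /* Assumed: unlink from the previous group's dh_list. */
                        spin_lock_irqsave(&old_pg->lock, flags);
                        list_del_rcu(&h->node);
                        spin_unlock_irqrestore(&old_pg->lock, flags);
                }
                rcu_assign_pointer(h->pg, pg);

                spin_lock_irqsave(&pg->lock, flags);
                list_add_rcu(&h->node, &pg->dh_list);
                spin_unlock_irqrestore(&pg->lock, flags);
        }
        alua_rtpg_queue(rcu_dereference_protected(h->pg,
                        lockdep_is_held(&h->pg_lock)), sdev, NULL, true);
        spin_unlock(&h->pg_lock);
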
505 static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) in alua_rtpg() argument
517 if (!pg->expiry) { in alua_rtpg()
520 if (pg->transition_tmo) in alua_rtpg()
521 transition_tmo = pg->transition_tmo * HZ; in alua_rtpg()
523 pg->expiry = round_jiffies_up(jiffies + transition_tmo); in alua_rtpg()
532 retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags); in alua_rtpg()
544 if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) { in alua_rtpg()
569 if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) && in alua_rtpg()
572 pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP; in alua_rtpg()
591 pg->expiry != 0 && time_before(jiffies, pg->expiry)) { in alua_rtpg()
602 pg->expiry = 0; in alua_rtpg()
617 pg->expiry = 0; in alua_rtpg()
623 orig_transition_tmo = pg->transition_tmo; in alua_rtpg()
625 pg->transition_tmo = buff[5]; in alua_rtpg()
627 pg->transition_tmo = ALUA_FAILOVER_TIMEOUT; in alua_rtpg()
629 if (orig_transition_tmo != pg->transition_tmo) { in alua_rtpg()
632 ALUA_DH_NAME, pg->transition_tmo); in alua_rtpg()
633 pg->expiry = jiffies + pg->transition_tmo * HZ; in alua_rtpg()
647 tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len, in alua_rtpg()
652 if ((tmp_pg == pg) || in alua_rtpg()
667 if (tmp_pg == pg) in alua_rtpg()
677 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg()
679 pg->state = SCSI_ACCESS_STATE_TRANSITIONING; in alua_rtpg()
683 ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), in alua_rtpg()
684 pg->pref ? "preferred" : "non-preferred", in alua_rtpg()
685 pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', in alua_rtpg()
686 pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', in alua_rtpg()
687 pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', in alua_rtpg()
688 pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', in alua_rtpg()
689 pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s', in alua_rtpg()
690 pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', in alua_rtpg()
691 pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); in alua_rtpg()
693 switch (pg->state) { in alua_rtpg()
695 if (time_before(jiffies, pg->expiry)) { in alua_rtpg()
697 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg()
704 pg->state = SCSI_ACCESS_STATE_STANDBY; in alua_rtpg()
705 pg->expiry = 0; in alua_rtpg()
707 list_for_each_entry_rcu(h, &pg->dh_list, node) { in alua_rtpg()
711 (pg->state & SCSI_ACCESS_STATE_MASK); in alua_rtpg()
712 if (pg->pref) in alua_rtpg()
722 pg->expiry = 0; in alua_rtpg()
727 pg->expiry = 0; in alua_rtpg()
730 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg()
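
alua_rtpg() is the core poller: it arms a transition deadline the first time it runs (lines 517-523), issues REPORT TARGET PORT GROUPS, and refreshes the deadline when the response carries an implicit transition timeout (lines 623-633). A condensed sketch of just that timing bookkeeping; the extended-header test is paraphrased through a hypothetical helper:

        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

                if (pg->transition_tmo)
                        transition_tmo = pg->transition_tmo * HZ;

                pg->expiry = round_jiffies_up(jiffies + transition_tmo);
        }

        retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
        /* ... error and sense handling elided ... */

        orig_transition_tmo = pg->transition_tmo;
        if (rtpg_has_ext_hdr(buff))             /* hypothetical: ext-header check */
                pg->transition_tmo = buff[5];   /* seconds, from the response */
        else
                pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

        if (orig_transition_tmo != pg->transition_tmo)
                pg->expiry = jiffies + pg->transition_tmo * HZ;

The single-letter dump on lines 685-691 prints the valid_states bitmask: each supported state ('T', 'O', 'L', 'U', 'S', 'N', 'A') is shown in upper case, unsupported ones in lower case.
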
743 static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg) in alua_stpg() argument
748 if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) { in alua_stpg()
752 switch (pg->state) { in alua_stpg()
756 if ((pg->flags & ALUA_OPTIMIZE_STPG) && in alua_stpg()
757 !pg->pref && in alua_stpg()
758 (pg->tpgs & TPGS_MODE_IMPLICIT)) in alua_stpg()
771 ALUA_DH_NAME, pg->state); in alua_stpg()
774 retval = submit_stpg(sdev, pg->group_id, &sense_hdr); in alua_stpg()
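
alua_stpg() decides whether an explicit SET TARGET PORT GROUPS is needed. With only implicit ALUA there is nothing to send, and with ALUA_OPTIMIZE_STPG set a non-preferred path is left for the array to promote implicitly. A sketch of that decision; the state switch is collapsed into a comment, and the return codes are filled in from the usual scsi_dh conventions:

        if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
                /* Only implicit ALUA supported: nothing to send. */
                return SCSI_DH_RETRY;
        }
        /* In the active/non-optimized state (one arm of the switch on
         * pg->state), a non-preferred path can be left to implicit ALUA: */
        if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
            !pg->pref &&
            (pg->tpgs & TPGS_MODE_IMPLICIT))
                return SCSI_DH_OK;              /* let the array promote it */

        retval = submit_stpg(sdev, pg->group_id, &sense_hdr);
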
795 struct alua_port_group *pg = in alua_rtpg_work() local
803 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
804 sdev = pg->rtpg_sdev; in alua_rtpg_work()
806 WARN_ON(pg->flags & ALUA_PG_RUN_RTPG); in alua_rtpg_work()
807 WARN_ON(pg->flags & ALUA_PG_RUN_STPG); in alua_rtpg_work()
808 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
809 kref_put(&pg->kref, release_port_group); in alua_rtpg_work()
812 pg->flags |= ALUA_PG_RUNNING; in alua_rtpg_work()
813 if (pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
814 int state = pg->state; in alua_rtpg_work()
816 pg->flags &= ~ALUA_PG_RUN_RTPG; in alua_rtpg_work()
817 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
820 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
821 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
822 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
823 if (!pg->interval) in alua_rtpg_work()
824 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg_work()
825 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
826 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
827 pg->interval * HZ); in alua_rtpg_work()
832 err = alua_rtpg(sdev, pg); in alua_rtpg_work()
833 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
834 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
835 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
836 if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG)) in alua_rtpg_work()
837 pg->interval = ALUA_RTPG_RETRY_DELAY; in alua_rtpg_work()
838 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
839 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
840 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
841 pg->interval * HZ); in alua_rtpg_work()
845 pg->flags &= ~ALUA_PG_RUN_STPG; in alua_rtpg_work()
847 if (pg->flags & ALUA_PG_RUN_STPG) { in alua_rtpg_work()
848 pg->flags &= ~ALUA_PG_RUN_STPG; in alua_rtpg_work()
849 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
850 err = alua_stpg(sdev, pg); in alua_rtpg_work()
851 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
852 if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) { in alua_rtpg_work()
853 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_work()
854 pg->interval = 0; in alua_rtpg_work()
855 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
856 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
857 queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_work()
858 pg->interval * HZ); in alua_rtpg_work()
863 list_splice_init(&pg->rtpg_list, &qdata_list); in alua_rtpg_work()
864 pg->rtpg_sdev = NULL; in alua_rtpg_work()
865 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
873 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_work()
874 pg->flags &= ~ALUA_PG_RUNNING; in alua_rtpg_work()
875 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_work()
877 kref_put(&pg->kref, release_port_group); in alua_rtpg_work()
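
alua_rtpg_work() drives the actual RTPG/STPG from the workqueue, and the same requeue pattern appears three times above: clear ALUA_PG_RUNNING, set ALUA_PG_RUN_RTPG again, pick a retry interval if none is pending, drop the lock, and requeue itself. A condensed sketch of the retry path after alua_rtpg() (lines 832-841):

        err = alua_rtpg(sdev, pg);
        spin_lock_irqsave(&pg->lock, flags);
        if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
                pg->flags &= ~ALUA_PG_RUNNING;
                if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
                        pg->interval = ALUA_RTPG_RETRY_DELAY;
                pg->flags |= ALUA_PG_RUN_RTPG;
                spin_unlock_irqrestore(&pg->lock, flags);
                queue_delayed_work(kaluad_wq, &pg->rtpg_work,
                                   pg->interval * HZ);
                /* Assumed: the worker returns here, keeping its sdev and
                 * kref for the requeued run instead of the final kref_put. */
                return;
        }
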
882 * @pg: ALUA port group associated with @sdev.
891 static bool alua_rtpg_queue(struct alua_port_group *pg, in alua_rtpg_queue() argument
897 if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev)) in alua_rtpg_queue()
900 spin_lock_irqsave(&pg->lock, flags); in alua_rtpg_queue()
902 list_add_tail(&qdata->entry, &pg->rtpg_list); in alua_rtpg_queue()
903 pg->flags |= ALUA_PG_RUN_STPG; in alua_rtpg_queue()
906 if (pg->rtpg_sdev == NULL) { in alua_rtpg_queue()
907 pg->interval = 0; in alua_rtpg_queue()
908 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_queue()
909 kref_get(&pg->kref); in alua_rtpg_queue()
910 pg->rtpg_sdev = sdev; in alua_rtpg_queue()
912 } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) { in alua_rtpg_queue()
913 pg->flags |= ALUA_PG_RUN_RTPG; in alua_rtpg_queue()
915 if (!(pg->flags & ALUA_PG_RUNNING)) { in alua_rtpg_queue()
916 kref_get(&pg->kref); in alua_rtpg_queue()
921 spin_unlock_irqrestore(&pg->lock, flags); in alua_rtpg_queue()
924 if (queue_delayed_work(kaluad_wq, &pg->rtpg_work, in alua_rtpg_queue()
928 kref_put(&pg->kref, release_port_group); in alua_rtpg_queue()
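
alua_rtpg_queue() is the single entry point that arms the worker. Under pg->lock it queues any STPG payload, then either claims the group for a new worker run (rtpg_sdev was NULL) or merely flags a re-run on an already active worker. A sketch of the whole function; the start_queue bookkeeping and the initial delay constant are assumptions:

        static bool alua_rtpg_queue(struct alua_port_group *pg,
                                    struct scsi_device *sdev,
                                    struct alua_queue_data *qdata, bool force)
        {
                int start_queue = 0;    /* assumed local */
                unsigned long flags;

                if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
                        return false;

                spin_lock_irqsave(&pg->lock, flags);
                if (qdata) {
                        list_add_tail(&qdata->entry, &pg->rtpg_list);
                        pg->flags |= ALUA_PG_RUN_STPG;
                        force = true;
                }
                if (pg->rtpg_sdev == NULL) {
                        /* No worker active: claim the group and start one. */
                        pg->interval = 0;
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        kref_get(&pg->kref);
                        pg->rtpg_sdev = sdev;
                        start_queue = 1;
                } else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
                        pg->flags |= ALUA_PG_RUN_RTPG;
                        /* Only restart the work item if no worker is running. */
                        if (!(pg->flags & ALUA_PG_RUNNING)) {
                                kref_get(&pg->kref);
                                start_queue = 1;
                        }
                }
                spin_unlock_irqrestore(&pg->lock, flags);

                if (start_queue) {
                        if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
                                               msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
                                sdev = NULL;    /* reference handed to the worker */
                        else
                                kref_put(&pg->kref, release_port_group);
                }
                if (sdev)
                        scsi_device_put(sdev);
                return true;
        }
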
967 struct alua_port_group *pg = NULL; in alua_set_params() local
982 pg = rcu_dereference(h->pg); in alua_set_params()
983 if (!pg) { in alua_set_params()
987 spin_lock_irqsave(&pg->lock, flags); in alua_set_params()
989 pg->flags |= ALUA_OPTIMIZE_STPG; in alua_set_params()
991 pg->flags &= ~ALUA_OPTIMIZE_STPG; in alua_set_params()
992 spin_unlock_irqrestore(&pg->lock, flags); in alua_set_params()
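
alua_set_params() flips ALUA_OPTIMIZE_STPG at runtime: the group is read under rcu_read_lock(), then the flag is toggled under pg->lock. A sketch; the error code for a missing group is an assumption:

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (!pg) {
                rcu_read_unlock();
                return -ENXIO;          /* assumed error code */
        }
        spin_lock_irqsave(&pg->lock, flags);
        if (optimize)
                pg->flags |= ALUA_OPTIMIZE_STPG;
        else
                pg->flags &= ~ALUA_OPTIMIZE_STPG;
        spin_unlock_irqrestore(&pg->lock, flags);
        rcu_read_unlock();
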
1014 struct alua_port_group *pg; in alua_activate() local
1026 pg = rcu_dereference(h->pg); in alua_activate()
1027 if (!pg || !kref_get_unless_zero(&pg->kref)) { in alua_activate()
1037 if (alua_rtpg_queue(pg, sdev, qdata, true)) in alua_activate()
1041 kref_put(&pg->kref, release_port_group); in alua_activate()
1057 struct alua_port_group *pg; in alua_check() local
1060 pg = rcu_dereference(h->pg); in alua_check()
1061 if (!pg || !kref_get_unless_zero(&pg->kref)) { in alua_check()
1067 alua_rtpg_queue(pg, sdev, NULL, force); in alua_check()
1068 kref_put(&pg->kref, release_port_group); in alua_check()
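
alua_activate() and alua_check() share the same lookup idiom: dereference h->pg under RCU, pin it with kref_get_unless_zero() so the group cannot be released mid-use, queue the RTPG, then drop the reference. A sketch of the alua_check() body (lines 1060-1068):

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (!pg || !kref_get_unless_zero(&pg->kref)) {
                rcu_read_unlock();
                return;                 /* no live port group attached */
        }
        rcu_read_unlock();

        alua_rtpg_queue(pg, sdev, NULL, force);
        kref_put(&pg->kref, release_port_group);
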
1080 struct alua_port_group *pg; in alua_prep_fn() local
1084 pg = rcu_dereference(h->pg); in alua_prep_fn()
1085 if (pg) in alua_prep_fn()
1086 state = pg->state; in alua_prep_fn()
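
alua_prep_fn() only needs a consistent snapshot of the access state, so a plain RCU read suffices and no reference is taken. A sketch; the optimal-state default when no group is attached is an assumption:

        int state = SCSI_ACCESS_STATE_OPTIMAL; /* assumed default */

        rcu_read_lock();
        pg = rcu_dereference(h->pg);
        if (pg)
                state = pg->state;
        rcu_read_unlock();
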
1122 rcu_assign_pointer(h->pg, NULL); in alua_bus_attach()
1146 struct alua_port_group *pg; in alua_bus_detach() local
1149 pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); in alua_bus_detach()
1150 rcu_assign_pointer(h->pg, NULL); in alua_bus_detach()
1152 if (pg) { in alua_bus_detach()
1153 spin_lock_irq(&pg->lock); in alua_bus_detach()
1155 spin_unlock_irq(&pg->lock); in alua_bus_detach()
1156 kref_put(&pg->kref, release_port_group); in alua_bus_detach()
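
alua_bus_detach() reverses alua_check_vpd(): clear h->pg under h->pg_lock so new readers see NULL, unlink the handler from the group's dh_list, and drop the handler's reference. A sketch; the list_del_rcu() inside the pg->lock section is inferred from the attach path, not among the matches:

        spin_lock(&h->pg_lock);
        pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
        rcu_assign_pointer(h->pg, NULL);
        spin_unlock(&h->pg_lock);
        if (pg) {
                spin_lock_irq(&pg->lock);
                list_del_rcu(&h->node);         /* assumed */
                spin_unlock_irq(&pg->lock);
                kref_put(&pg->kref, release_port_group);
        }
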