Search results: lines matching the full identifier "part" (each entry shows the original line number, the matched source line, and the enclosing function)
16 * partition This part detects the presence/absence of other
20 * channel This part manages the channels and sends/receives
68 .init_name = "", /* set to "part" at xpc_init() time */
178 struct xpc_partition *part = from_timer(part, t, disengage_timer); in xpc_timeout_partition_disengage() local
180 DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); in xpc_timeout_partition_disengage()
182 xpc_partition_disengaged_from_timer(part); in xpc_timeout_partition_disengage()
184 DBUG_ON(part->disengage_timeout != 0); in xpc_timeout_partition_disengage()
185 DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part))); in xpc_timeout_partition_disengage()
227 struct xpc_partition *part; in xpc_check_remote_hb() local
239 part = &xpc_partitions[partid]; in xpc_check_remote_hb()
241 if (part->act_state == XPC_P_AS_INACTIVE || in xpc_check_remote_hb()
242 part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_check_remote_hb()
246 ret = xpc_arch_ops.get_remote_heartbeat(part); in xpc_check_remote_hb()
248 XPC_DEACTIVATE_PARTITION(part, ret); in xpc_check_remote_hb()
340 xpc_channel_mgr(struct xpc_partition *part) in xpc_channel_mgr() argument
342 while (part->act_state != XPC_P_AS_DEACTIVATING || in xpc_channel_mgr()
343 atomic_read(&part->nchannels_active) > 0 || in xpc_channel_mgr()
344 !xpc_partition_disengaged(part)) { in xpc_channel_mgr()
346 xpc_process_sent_chctl_flags(part); in xpc_channel_mgr()
361 atomic_dec(&part->channel_mgr_requests); in xpc_channel_mgr()
362 (void)wait_event_interruptible(part->channel_mgr_wq, in xpc_channel_mgr()
363 (atomic_read(&part->channel_mgr_requests) > 0 || in xpc_channel_mgr()
364 part->chctl.all_flags != 0 || in xpc_channel_mgr()
365 (part->act_state == XPC_P_AS_DEACTIVATING && in xpc_channel_mgr()
366 atomic_read(&part->nchannels_active) == 0 && in xpc_channel_mgr()
367 xpc_partition_disengaged(part)))); in xpc_channel_mgr()
368 atomic_set(&part->channel_mgr_requests, 1); in xpc_channel_mgr()
401 xpc_setup_ch_structures(struct xpc_partition *part) in xpc_setup_ch_structures() argument
406 short partid = XPC_PARTID(part); in xpc_setup_ch_structures()
412 DBUG_ON(part->channels != NULL); in xpc_setup_ch_structures()
413 part->channels = kcalloc(XPC_MAX_NCHANNELS, in xpc_setup_ch_structures()
416 if (part->channels == NULL) { in xpc_setup_ch_structures()
423 part->remote_openclose_args = in xpc_setup_ch_structures()
425 GFP_KERNEL, &part-> in xpc_setup_ch_structures()
427 if (part->remote_openclose_args == NULL) { in xpc_setup_ch_structures()
433 part->chctl.all_flags = 0; in xpc_setup_ch_structures()
434 spin_lock_init(&part->chctl_lock); in xpc_setup_ch_structures()
436 atomic_set(&part->channel_mgr_requests, 1); in xpc_setup_ch_structures()
437 init_waitqueue_head(&part->channel_mgr_wq); in xpc_setup_ch_structures()
439 part->nchannels = XPC_MAX_NCHANNELS; in xpc_setup_ch_structures()
441 atomic_set(&part->nchannels_active, 0); in xpc_setup_ch_structures()
442 atomic_set(&part->nchannels_engaged, 0); in xpc_setup_ch_structures()
444 for (ch_number = 0; ch_number < part->nchannels; ch_number++) { in xpc_setup_ch_structures()
445 ch = &part->channels[ch_number]; in xpc_setup_ch_structures()
466 ret = xpc_arch_ops.setup_ch_structures(part); in xpc_setup_ch_structures()
474 part->setup_state = XPC_P_SS_SETUP; in xpc_setup_ch_structures()
480 kfree(part->remote_openclose_args_base); in xpc_setup_ch_structures()
481 part->remote_openclose_args = NULL; in xpc_setup_ch_structures()
483 kfree(part->channels); in xpc_setup_ch_structures()
484 part->channels = NULL; in xpc_setup_ch_structures()
493 xpc_teardown_ch_structures(struct xpc_partition *part) in xpc_teardown_ch_structures() argument
495 DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); in xpc_teardown_ch_structures()
496 DBUG_ON(atomic_read(&part->nchannels_active) != 0); in xpc_teardown_ch_structures()
503 DBUG_ON(part->setup_state != XPC_P_SS_SETUP); in xpc_teardown_ch_structures()
504 part->setup_state = XPC_P_SS_WTEARDOWN; in xpc_teardown_ch_structures()
506 wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); in xpc_teardown_ch_structures()
510 xpc_arch_ops.teardown_ch_structures(part); in xpc_teardown_ch_structures()
512 kfree(part->remote_openclose_args_base); in xpc_teardown_ch_structures()
513 part->remote_openclose_args = NULL; in xpc_teardown_ch_structures()
514 kfree(part->channels); in xpc_teardown_ch_structures()
515 part->channels = NULL; in xpc_teardown_ch_structures()
517 part->setup_state = XPC_P_SS_TORNDOWN; in xpc_teardown_ch_structures()
535 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_activating() local
540 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activating()
542 if (part->act_state == XPC_P_AS_DEACTIVATING) { in xpc_activating()
543 part->act_state = XPC_P_AS_INACTIVE; in xpc_activating()
544 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activating()
545 part->remote_rp_pa = 0; in xpc_activating()
550 DBUG_ON(part->act_state != XPC_P_AS_ACTIVATION_REQ); in xpc_activating()
551 part->act_state = XPC_P_AS_ACTIVATING; in xpc_activating()
553 XPC_SET_REASON(part, 0, 0); in xpc_activating()
554 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activating()
560 if (xpc_setup_ch_structures(part) == xpSuccess) { in xpc_activating()
561 (void)xpc_part_ref(part); /* this will always succeed */ in xpc_activating()
563 if (xpc_arch_ops.make_first_contact(part) == xpSuccess) { in xpc_activating()
564 xpc_mark_partition_active(part); in xpc_activating()
565 xpc_channel_mgr(part); in xpc_activating()
569 xpc_part_deref(part); in xpc_activating()
570 xpc_teardown_ch_structures(part); in xpc_activating()
574 xpc_mark_partition_inactive(part); in xpc_activating()
576 if (part->reason == xpReactivating) { in xpc_activating()
578 xpc_arch_ops.request_partition_reactivation(part); in xpc_activating()
585 xpc_activate_partition(struct xpc_partition *part) in xpc_activate_partition() argument
587 short partid = XPC_PARTID(part); in xpc_activate_partition()
591 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activate_partition()
593 DBUG_ON(part->act_state != XPC_P_AS_INACTIVE); in xpc_activate_partition()
595 part->act_state = XPC_P_AS_ACTIVATION_REQ; in xpc_activate_partition()
596 XPC_SET_REASON(part, xpCloneKThread, __LINE__); in xpc_activate_partition()
598 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activate_partition()
603 spin_lock_irqsave(&part->act_lock, irq_flags); in xpc_activate_partition()
604 part->act_state = XPC_P_AS_INACTIVE; in xpc_activate_partition()
605 XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); in xpc_activate_partition()
606 spin_unlock_irqrestore(&part->act_lock, irq_flags); in xpc_activate_partition()
649 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) in xpc_kthread_waitmsgs() argument
686 struct xpc_partition *part = &xpc_partitions[partid]; in xpc_kthread_start() local
696 ch = &part->channels[ch_number]; in xpc_kthread_start()
728 xpc_kthread_waitmsgs(part, ch); in xpc_kthread_start()
747 atomic_dec_return(&part->nchannels_engaged) == 0) { in xpc_kthread_start()
748 xpc_arch_ops.indicate_partition_disengaged(part); in xpc_kthread_start()
756 xpc_part_deref(part); in xpc_kthread_start()
778 struct xpc_partition *part = &xpc_partitions[ch->partid]; in xpc_create_kthreads() local
802 atomic_inc_return(&part->nchannels_engaged) == 1) { in xpc_create_kthreads()
803 xpc_arch_ops.indicate_partition_engaged(part); in xpc_create_kthreads()
805 (void)xpc_part_ref(part); in xpc_create_kthreads()
824 atomic_dec_return(&part->nchannels_engaged) == 0) { in xpc_create_kthreads()
825 indicate_partition_disengaged(part); in xpc_create_kthreads()
828 xpc_part_deref(part); in xpc_create_kthreads()
852 struct xpc_partition *part; in xpc_disconnect_wait() local
858 part = &xpc_partitions[partid]; in xpc_disconnect_wait()
860 if (!xpc_part_ref(part)) in xpc_disconnect_wait()
863 ch = &part->channels[ch_number]; in xpc_disconnect_wait()
866 xpc_part_deref(part); in xpc_disconnect_wait()
877 if (part->act_state != XPC_P_AS_DEACTIVATING) { in xpc_disconnect_wait()
878 spin_lock(&part->chctl_lock); in xpc_disconnect_wait()
879 part->chctl.flags[ch->number] |= in xpc_disconnect_wait()
881 spin_unlock(&part->chctl_lock); in xpc_disconnect_wait()
891 xpc_wakeup_channel_mgr(part); in xpc_disconnect_wait()
893 xpc_part_deref(part); in xpc_disconnect_wait()
901 struct xpc_partition *part; in xpc_setup_partitions() local
920 part = &xpc_partitions[partid]; in xpc_setup_partitions()
922 DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); in xpc_setup_partitions()
924 part->activate_IRQ_rcvd = 0; in xpc_setup_partitions()
925 spin_lock_init(&part->act_lock); in xpc_setup_partitions()
926 part->act_state = XPC_P_AS_INACTIVE; in xpc_setup_partitions()
927 XPC_SET_REASON(part, 0, 0); in xpc_setup_partitions()
929 timer_setup(&part->disengage_timer, in xpc_setup_partitions()
932 part->setup_state = XPC_P_SS_UNSET; in xpc_setup_partitions()
933 init_waitqueue_head(&part->teardown_wq); in xpc_setup_partitions()
934 atomic_set(&part->references, 0); in xpc_setup_partitions()
952 struct xpc_partition *part; in xpc_do_exit() local
984 part = &xpc_partitions[partid]; in xpc_do_exit()
986 if (xpc_partition_disengaged(part) && in xpc_do_exit()
987 part->act_state == XPC_P_AS_INACTIVE) { in xpc_do_exit()
993 XPC_DEACTIVATE_PARTITION(part, reason); in xpc_do_exit()
995 if (part->disengage_timeout > disengage_timeout) in xpc_do_exit()
996 disengage_timeout = part->disengage_timeout; in xpc_do_exit()
1087 struct xpc_partition *part; in xpc_die_deactivate() local
1102 part = &xpc_partitions[partid]; in xpc_die_deactivate()
1105 part->act_state != XPC_P_AS_INACTIVE) { in xpc_die_deactivate()
1106 xpc_arch_ops.request_partition_deactivation(part); in xpc_die_deactivate()
1107 xpc_arch_ops.indicate_partition_disengaged(part); in xpc_die_deactivate()
1227 dev_set_name(xpc_part, "part"); in xpc_init()