Lines Matching refs:bcp

143 struct bau_control *bcp; in set_bau_on() local
151 bcp = &per_cpu(bau_control, cpu); in set_bau_on()
152 bcp->nobau = false; in set_bau_on()
162 struct bau_control *bcp; in set_bau_off() local
166 bcp = &per_cpu(bau_control, cpu); in set_bau_off()
167 bcp->nobau = true; in set_bau_off()
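The two toggles above, set_bau_on() and set_bau_off(), show the recurring pattern in this file: look up each CPU's bau_control with per_cpu() and flip a field. Below is a minimal userspace sketch of that pattern, with a plain array standing in for the kernel's per-CPU storage; NR_CPUS, the array, and main() are illustrative assumptions, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4			/* illustrative; not the kernel constant */

struct bau_control {
	bool nobau;			/* true: BAU disabled on this CPU */
};

/* Plain array standing in for the kernel's per-CPU area. */
static struct bau_control bau_control[NR_CPUS];

/* Mirrors set_bau_on(): clear nobau on every CPU's control block. */
static void set_bau_on(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		bau_control[cpu].nobau = false;
}

/* Mirrors set_bau_off(): set nobau on every CPU's control block. */
static void set_bau_off(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		bau_control[cpu].nobau = true;
}

int main(void)
{
	set_bau_off();
	printf("cpu0 nobau=%d\n", bau_control[0].nobau);
	set_bau_on();
	printf("cpu0 nobau=%d\n", bau_control[0].nobau);
	return 0;
}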
210 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp, in reply_to_message() argument
229 struct bau_control *bcp) in bau_process_retry_msg() argument
237 struct ptc_stats *stat = bcp->statp; in bau_process_retry_msg()
285 static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, in bau_process_message() argument
291 struct ptc_stats *stat = bcp->statp; in bau_process_message()
293 struct bau_control *smaster = bcp->socket_master; in bau_process_message()
313 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master) in bau_process_message()
314 bau_process_retry_msg(mdp, bcp); in bau_process_message()
325 if (socket_ack_count == bcp->cpus_in_socket) { in bau_process_message()
335 if (msg_ack_count == bcp->cpus_in_uvhub) { in bau_process_message()
340 reply_to_message(mdp, bcp, do_acknowledge); in bau_process_message()
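bau_process_message() acknowledges in two stages: each CPU bumps a socket-level count, and when socket_ack_count reaches bcp->cpus_in_socket the socket's share is folded into a hub-level count; only when msg_ack_count reaches bcp->cpus_in_uvhub does reply_to_message() run. Here is a simplified single-threaded model of that two-stage count; all names and sizes are illustrative, and the real code uses atomics and per-message counters.

#include <stdio.h>

#define CPUS_PER_SOCKET 2	/* illustrative sizes, not kernel values */
#define SOCKETS_PER_HUB 2
#define CPUS_PER_HUB (CPUS_PER_SOCKET * SOCKETS_PER_HUB)

static int socket_ack_count[SOCKETS_PER_HUB];
static int msg_ack_count;

/* Called once per CPU that has flushed its TLB for this message. */
static void ack_from_cpu(int socket)
{
	socket_ack_count[socket]++;
	if (socket_ack_count[socket] == CPUS_PER_SOCKET) {
		/* Last CPU in this socket: fold the socket into the hub count. */
		msg_ack_count += CPUS_PER_SOCKET;
		if (msg_ack_count == CPUS_PER_HUB)
			printf("all %d hub CPUs acked; reply to sender\n",
			       msg_ack_count);
	}
}

int main(void)
{
	for (int s = 0; s < SOCKETS_PER_HUB; s++)
		for (int c = 0; c < CPUS_PER_SOCKET; c++)
			ack_from_cpu(s);
	return 0;
}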
376 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); in do_reset() local
379 struct ptc_stats *stat = bcp->statp; in do_reset()
388 for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { in do_reset()
422 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) in reset_with_ipi() argument
427 int sender = bcp->cpu; in reset_with_ipi()
428 cpumask_t *mask = bcp->uvhub_master->cpumask; in reset_with_ipi()
429 struct bau_control *smaster = bcp->socket_master; in reset_with_ipi()
441 apnode = pnode + bcp->partition_base_pnode; in reset_with_ipi()
531 struct bau_control *bcp, long try) in uv1_wait_completion() argument
535 u64 mmr_offset = bcp->status_mmr; in uv1_wait_completion()
536 int right_shift = bcp->status_index; in uv1_wait_completion()
537 struct ptc_stats *stat = bcp->statp; in uv1_wait_completion()
561 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { in uv1_wait_completion()
562 bcp->conseccompletes = 0; in uv1_wait_completion()
566 bcp->conseccompletes = 0; in uv1_wait_completion()
576 bcp->conseccompletes++; in uv1_wait_completion()
594 static int handle_uv2_busy(struct bau_control *bcp) in handle_uv2_busy() argument
596 struct ptc_stats *stat = bcp->statp; in handle_uv2_busy()
599 bcp->busy = 1; in handle_uv2_busy()
604 struct bau_control *bcp, long try) in uv2_3_wait_completion() argument
608 u64 mmr_offset = bcp->status_mmr; in uv2_3_wait_completion()
609 int right_shift = bcp->status_index; in uv2_3_wait_completion()
610 int desc = bcp->uvhub_cpu; in uv2_3_wait_completion()
612 struct ptc_stats *stat = bcp->statp; in uv2_3_wait_completion()
640 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { in uv2_3_wait_completion()
641 bcp->conseccompletes = 0; in uv2_3_wait_completion()
647 bcp->conseccompletes = 0; in uv2_3_wait_completion()
656 if ((ttm - bcp->send_message) > bcp->timeout_interval) in uv2_3_wait_completion()
657 return handle_uv2_busy(bcp); in uv2_3_wait_completion()
666 bcp->conseccompletes++; in uv2_3_wait_completion()
685 struct bau_control *bcp, long try) in uv4_wait_completion() argument
687 struct ptc_stats *stat = bcp->statp; in uv4_wait_completion()
689 u64 mmr = bcp->status_mmr; in uv4_wait_completion()
690 int index = bcp->status_index; in uv4_wait_completion()
691 int desc = bcp->uvhub_cpu; in uv4_wait_completion()
704 bcp->conseccompletes = 0; in uv4_wait_completion()
709 bcp->conseccompletes = 0; in uv4_wait_completion()
713 bcp->conseccompletes = 0; in uv4_wait_completion()
722 bcp->conseccompletes++; in uv4_wait_completion()
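All three wait routines (uv1_wait_completion(), uv2_3_wait_completion(), uv4_wait_completion()) share one shape: poll a status field carved out of an MMR at bcp->status_index, reset bcp->conseccompletes on any error or timeout, and increment it on success so the caller can judge whether the interconnect is healthy. A schematic version of that loop follows; the status codes and the poll_hw_status() helper are invented for illustration.

#include <stdio.h>

enum desc_status { DS_IDLE, DS_ACTIVE, DS_DESTINATION_TIMEOUT, DS_SOURCE_TIMEOUT };

struct bau_control_model {
	int conseccompletes;	/* consecutive clean completions */
};

/* Stand-in for reading the activation-status MMR; here we just
 * replay a scripted sequence of hardware states. */
static enum desc_status poll_hw_status(void)
{
	static const enum desc_status script[] =
		{ DS_ACTIVE, DS_ACTIVE, DS_IDLE };
	static unsigned int i;
	return script[i++ % 3];
}

/* Returns 0 on completion, -1 on a timeout; mirrors the way the
 * real routines gate bcp->conseccompletes. */
static int wait_completion(struct bau_control_model *bcp)
{
	for (;;) {
		enum desc_status s = poll_hw_status();
		if (s == DS_IDLE) {
			bcp->conseccompletes++;
			return 0;
		}
		if (s == DS_DESTINATION_TIMEOUT || s == DS_SOURCE_TIMEOUT) {
			bcp->conseccompletes = 0;	/* broken streak */
			return -1;
		}
		/* DS_ACTIVE: still busy, keep polling. */
	}
}

int main(void)
{
	struct bau_control_model bcp = { 0 };
	printf("rc=%d conseccompletes=%d\n",
	       wait_completion(&bcp), bcp.conseccompletes);
	return 0;
}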
732 struct bau_control *bcp, in destination_plugged() argument
735 udelay(bcp->plugged_delay); in destination_plugged()
736 bcp->plugged_tries++; in destination_plugged()
738 if (bcp->plugged_tries >= bcp->plugsb4reset) { in destination_plugged()
739 bcp->plugged_tries = 0; in destination_plugged()
744 reset_with_ipi(&bau_desc->distribution, bcp); in destination_plugged()
749 bcp->ipi_attempts++; in destination_plugged()
755 struct bau_control *bcp, struct bau_control *hmaster, in destination_timeout() argument
759 bcp->timeout_tries++; in destination_timeout()
760 if (bcp->timeout_tries >= bcp->timeoutsb4reset) { in destination_timeout()
761 bcp->timeout_tries = 0; in destination_timeout()
766 reset_with_ipi(&bau_desc->distribution, bcp); in destination_timeout()
771 bcp->ipi_attempts++; in destination_timeout()
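destination_plugged() and destination_timeout() are the two escalation paths: each failure bumps a per-cause try counter, and once plugsb4reset (or timeoutsb4reset) tries accumulate, the counter is cleared and reset_with_ipi() becomes the heavyweight fallback, with bcp->ipi_attempts recording how often that hammer has been swung. A compact model of the threshold logic; the threshold value and the do_ipi_reset() stub are illustrative.

#include <stdio.h>

struct escalation {
	int tries;		/* failures since the last IPI reset */
	int tries_b4reset;	/* threshold, like plugsb4reset/timeoutsb4reset */
	int ipi_attempts;	/* how many times we fell back to IPIs */
};

static void do_ipi_reset(void)
{
	puts("reset_with_ipi(): clear stale software acks via IPIs");
}

/* One failed send attempt; escalate after the threshold. */
static void on_failure(struct escalation *e)
{
	if (++e->tries >= e->tries_b4reset) {
		e->tries = 0;
		do_ipi_reset();
		e->ipi_attempts++;
	}
}

int main(void)
{
	struct escalation plugged = { .tries_b4reset = 3 };

	for (int i = 0; i < 7; i++)
		on_failure(&plugged);
	printf("ipi_attempts=%d\n", plugged.ipi_attempts);
	return 0;
}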
780 static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) in disable_for_period() argument
787 hmaster = bcp->uvhub_master; in disable_for_period()
789 if (!bcp->baudisabled) { in disable_for_period()
797 tm1 + bcp->disabled_period; in disable_for_period()
804 static void count_max_concurr(int stat, struct bau_control *bcp, in count_max_concurr() argument
807 bcp->plugged_tries = 0; in count_max_concurr()
808 bcp->timeout_tries = 0; in count_max_concurr()
811 if (bcp->conseccompletes <= bcp->complete_threshold) in count_max_concurr()
819 struct bau_control *bcp, struct ptc_stats *stat, in record_send_stats() argument
829 bcp->period_requests++; in record_send_stats()
830 bcp->period_time += elapsed; in record_send_stats()
831 if ((elapsed > usec_2_cycles(bcp->cong_response_us)) && in record_send_stats()
832 (bcp->period_requests > bcp->cong_reps) && in record_send_stats()
833 ((bcp->period_time / bcp->period_requests) > in record_send_stats()
834 usec_2_cycles(bcp->cong_response_us))) { in record_send_stats()
836 disable_for_period(bcp, stat); in record_send_stats()
846 if (get_cycles() > bcp->period_end) in record_send_stats()
847 bcp->period_giveups = 0; in record_send_stats()
848 bcp->period_giveups++; in record_send_stats()
849 if (bcp->period_giveups == 1) in record_send_stats()
850 bcp->period_end = get_cycles() + bcp->disabled_period; in record_send_stats()
851 if (bcp->period_giveups > bcp->giveup_limit) { in record_send_stats()
852 disable_for_period(bcp, stat); in record_send_stats()
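record_send_stats() disables the BAU only when three conditions hold at once: the latest send took longer than cong_response_us, more than cong_reps sends have been sampled this period, and the running average (period_time / period_requests) also exceeds the threshold. A worked model of that test; the cycle conversion is faked with a fixed cycles-per-microsecond constant where the real code uses usec_2_cycles().

#include <stdbool.h>
#include <stdio.h>

#define FAKE_CYCLES_PER_US 1000ULL	/* stand-in for usec_2_cycles() */

struct period_stats {
	unsigned long long period_time;	/* cycles spent this period */
	unsigned long period_requests;	/* sends sampled this period */
	unsigned long cong_reps;	/* min samples before judging */
	unsigned long cong_response_us;	/* per-send latency threshold */
};

/* Returns true when the BAU should be disabled for a period,
 * mirroring the three-way test in record_send_stats(). */
static bool congested(struct period_stats *p, unsigned long long elapsed)
{
	unsigned long long thresh = p->cong_response_us * FAKE_CYCLES_PER_US;

	p->period_requests++;
	p->period_time += elapsed;

	return elapsed > thresh &&
	       p->period_requests > p->cong_reps &&
	       (p->period_time / p->period_requests) > thresh;
}

int main(void)
{
	struct period_stats p = { .cong_reps = 2, .cong_response_us = 50 };

	/* Three slow sends of 80us each: only the third trips the check,
	 * because the sample count must first exceed cong_reps. */
	for (int i = 0; i < 3; i++)
		printf("send %d congested=%d\n", i,
		       congested(&p, 80 * FAKE_CYCLES_PER_US));
	return 0;
}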
880 struct bau_control *bcp, struct bau_control *hmaster, in handle_cmplt() argument
884 destination_plugged(bau_desc, bcp, hmaster, stat); in handle_cmplt()
886 destination_timeout(bau_desc, bcp, hmaster, stat); in handle_cmplt()
900 struct bau_control *bcp, in uv_flush_send_and_wait() argument
910 struct ptc_stats *stat = bcp->statp; in uv_flush_send_and_wait()
911 struct bau_control *hmaster = bcp->uvhub_master; in uv_flush_send_and_wait()
915 if (bcp->uvhub_version == UV_BAU_V1) { in uv_flush_send_and_wait()
936 seq_number = bcp->message_number++; in uv_flush_send_and_wait()
949 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu; in uv_flush_send_and_wait()
950 bcp->send_message = get_cycles(); in uv_flush_send_and_wait()
955 completion_stat = ops.wait_completion(bau_desc, bcp, try); in uv_flush_send_and_wait()
957 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); in uv_flush_send_and_wait()
959 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { in uv_flush_send_and_wait()
960 bcp->ipi_attempts = 0; in uv_flush_send_and_wait()
971 count_max_concurr(completion_stat, bcp, hmaster); in uv_flush_send_and_wait()
978 record_send_stats(time1, time2, bcp, stat, completion_stat, try); in uv_flush_send_and_wait()
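uv_flush_send_and_wait() is the sender's main loop: stamp bcp->send_message, push the descriptor via the index write, wait through ops.wait_completion(), route any failure into handle_cmplt(), and bail out to the IPI fallback once bcp->ipi_attempts hits ipi_reset_limit. A skeleton of that loop; the completion codes and both stubs are invented for illustration, not the kernel's interfaces.

#include <stdbool.h>
#include <stdio.h>

enum cmplt { CMPLT_OK, CMPLT_PLUGGED, CMPLT_TIMEOUT };

struct sender {
	int ipi_attempts;
	int ipi_reset_limit;
};

/* Stub: pretend the hardware fails twice, then succeeds. */
static enum cmplt wait_completion(void)
{
	static int calls;
	return ++calls < 3 ? CMPLT_TIMEOUT : CMPLT_OK;
}

/* Stub: each escalation counts as one IPI attempt, loosely following
 * handle_cmplt()'s plugged/timeout paths once thresholds are crossed. */
static void handle_cmplt(struct sender *s)
{
	s->ipi_attempts++;
}

/* Returns true if the flush was delivered, false on give-up. */
static bool send_and_wait(struct sender *s)
{
	enum cmplt status;

	do {
		/* push descriptor to hardware here */
		status = wait_completion();
		if (status != CMPLT_OK)
			handle_cmplt(s);
		if (s->ipi_attempts >= s->ipi_reset_limit) {
			s->ipi_attempts = 0;
			return false;	/* caller falls back to IPIs */
		}
	} while (status != CMPLT_OK);
	return true;
}

int main(void)
{
	struct sender s = { .ipi_reset_limit = 5 };

	printf("delivered=%d\n", send_and_wait(&s));
	return 0;
}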
991 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) in check_enable() argument
997 hmaster = bcp->uvhub_master; in check_enable()
999 if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { in check_enable()
1051 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, in set_distrib_bits() argument
1066 hpp = &bcp->socket_master->thp[cpu]; in set_distrib_bits()
1067 pnode = hpp->pnode - bcp->partition_base_pnode; in set_distrib_bits()
1070 if (hpp->uvhub == bcp->uvhub) in set_distrib_bits()
1113 struct bau_control *bcp; in uv_flush_tlb_others() local
1116 bcp = &per_cpu(bau_control, cpu); in uv_flush_tlb_others()
1118 if (bcp->nobau) in uv_flush_tlb_others()
1121 stat = bcp->statp; in uv_flush_tlb_others()
1124 if (bcp->busy) { in uv_flush_tlb_others()
1127 status = ((descriptor_status >> (bcp->uvhub_cpu * in uv_flush_tlb_others()
1131 bcp->busy = 0; in uv_flush_tlb_others()
1135 if (bcp->baudisabled) { in uv_flush_tlb_others()
1136 if (check_enable(bcp, stat)) { in uv_flush_tlb_others()
1154 bau_desc = bcp->descriptor_base; in uv_flush_tlb_others()
1155 bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu); in uv_flush_tlb_others()
1157 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes)) in uv_flush_tlb_others()
1167 switch (bcp->uvhub_version) { in uv_flush_tlb_others()
1185 if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc)) in uv_flush_tlb_others()
1196 struct bau_control *bcp) in find_another_by_swack() argument
1201 if (msg_next > bcp->queue_last) in find_another_by_swack()
1202 msg_next = bcp->queue_first; in find_another_by_swack()
1208 if (msg_next > bcp->queue_last) in find_another_by_swack()
1209 msg_next = bcp->queue_first; in find_another_by_swack()
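find_another_by_swack() walks the payload queue as a ring: advancing past bcp->queue_last wraps back to bcp->queue_first, which is the wraparound visible at lines 1201-1209 above. A minimal ring walk over a plain array; DEST_Q_SIZE and the swack_vec "live" test are illustrative simplifications.

#include <stdio.h>

#define DEST_Q_SIZE 8		/* illustrative queue depth */

struct bau_pq_entry {
	int swack_vec;		/* software-ack vector; 0 = unused slot */
};

static struct bau_pq_entry queue[DEST_Q_SIZE];
static struct bau_pq_entry *const queue_first = &queue[0];
static struct bau_pq_entry *const queue_last  = &queue[DEST_Q_SIZE - 1];

/* Find another live entry with the same swack vector, scanning the
 * queue as a ring the way find_another_by_swack() does. */
static struct bau_pq_entry *find_another(struct bau_pq_entry *msg)
{
	struct bau_pq_entry *next = msg + 1;

	if (next > queue_last)
		next = queue_first;		/* wrap around */
	while (next != msg) {
		if (next->swack_vec && next->swack_vec == msg->swack_vec)
			return next;
		if (++next > queue_last)
			next = queue_first;	/* wrap around */
	}
	return NULL;
}

int main(void)
{
	struct bau_pq_entry *other;

	queue[6].swack_vec = 0x4;
	queue[1].swack_vec = 0x4;		/* duplicate, past the wrap */
	other = find_another(&queue[6]);
	printf("duplicate at slot %td\n", other - queue_first);
	return 0;
}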
1219 static void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) in process_uv2_message() argument
1239 other_msg = find_another_by_swack(msg, bcp); in process_uv2_message()
1245 bau_process_message(mdp, bcp, 0); in process_uv2_message()
1259 bau_process_message(mdp, bcp, 1); in process_uv2_message()
1283 struct bau_control *bcp; in uv_bau_message_interrupt() local
1291 bcp = &per_cpu(bau_control, smp_processor_id()); in uv_bau_message_interrupt()
1292 stat = bcp->statp; in uv_bau_message_interrupt()
1294 msgdesc.queue_first = bcp->queue_first; in uv_bau_message_interrupt()
1295 msgdesc.queue_last = bcp->queue_last; in uv_bau_message_interrupt()
1297 msg = bcp->bau_msg_head; in uv_bau_message_interrupt()
1303 if (bcp->uvhub_version == UV_BAU_V2) in uv_bau_message_interrupt()
1304 process_uv2_message(&msgdesc, bcp); in uv_bau_message_interrupt()
1307 bau_process_message(&msgdesc, bcp, 1); in uv_bau_message_interrupt()
1312 bcp->bau_msg_head = msg; in uv_bau_message_interrupt()
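uv_bau_message_interrupt() drains the payload queue starting at bcp->bau_msg_head, routing each message through the UV2-specific path or the generic bau_process_message(), then saves where it stopped for the next interrupt. A stripped-down drain loop; the message layout and "live" test are stand-ins, and the real loop wraps around the queue rather than scanning linearly as this sketch does.

#include <stdio.h>

#define DEST_Q_SIZE 4		/* illustrative */

struct bau_pq_entry {
	int swack_vec;		/* nonzero while the message is live */
};

static struct bau_pq_entry queue[DEST_Q_SIZE];
static struct bau_pq_entry *bau_msg_head = queue;
static int uvhub_version = 2;	/* pretend this hub is UV2 */

static void process_uv2_message(struct bau_pq_entry *msg)
{
	printf("uv2 path: msg %td\n", msg - queue);
	msg->swack_vec = 0;
}

static void bau_process_message(struct bau_pq_entry *msg)
{
	printf("generic path: msg %td\n", msg - queue);
	msg->swack_vec = 0;
}

/* Drain live messages from the saved head, as the interrupt does. */
static void message_interrupt(void)
{
	struct bau_pq_entry *msg = bau_msg_head;

	while (msg < &queue[DEST_Q_SIZE] && msg->swack_vec) {
		if (uvhub_version == 2)
			process_uv2_message(msg);
		else
			bau_process_message(msg);
		msg++;
	}
	bau_msg_head = msg;	/* resume here on the next interrupt */
}

int main(void)
{
	queue[0].swack_vec = queue[1].swack_vec = 1;
	message_interrupt();
	return 0;
}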
1402 struct bau_control *bcp; in ptc_seq_show() local
1421 bcp = &per_cpu(bau_control, cpu); in ptc_seq_show()
1422 if (bcp->nobau) { in ptc_seq_show()
1426 stat = bcp->statp; in ptc_seq_show()
1430 cpu, bcp->nobau, stat->s_requestor, in ptc_seq_show()
1561 static int parse_tunables_write(struct bau_control *bcp, char *instr, in parse_tunables_write() argument
1595 if (val < 1 || val > bcp->cpus_in_uvhub) { in parse_tunables_write()
1624 struct bau_control *bcp; in tunables_write() local
1634 bcp = &per_cpu(bau_control, cpu); in tunables_write()
1635 ret = parse_tunables_write(bcp, instr, count); in tunables_write()
1641 bcp = &per_cpu(bau_control, cpu); in tunables_write()
1642 bcp->max_concurr = max_concurr; in tunables_write()
1643 bcp->max_concurr_const = max_concurr; in tunables_write()
1644 bcp->plugged_delay = plugged_delay; in tunables_write()
1645 bcp->plugsb4reset = plugsb4reset; in tunables_write()
1646 bcp->timeoutsb4reset = timeoutsb4reset; in tunables_write()
1647 bcp->ipi_reset_limit = ipi_reset_limit; in tunables_write()
1648 bcp->complete_threshold = complete_threshold; in tunables_write()
1649 bcp->cong_response_us = congested_respns_us; in tunables_write()
1650 bcp->cong_reps = congested_reps; in tunables_write()
1651 bcp->disabled_period = sec_2_cycles(disabled_period); in tunables_write()
1652 bcp->giveup_limit = giveup_limit; in tunables_write()
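tunables_write() parses the new values once, against a single CPU's bcp so bounds checks like val <= cpus_in_uvhub can run, and then fans the accepted module-level settings out to every CPU's control block so all CPUs stay in agreement. A reduced model of that parse-then-broadcast split; the tunable set is trimmed to two fields for brevity and the parsing is a plain sscanf().

#include <stdio.h>

#define NR_CPUS 4		/* illustrative */

/* Module-level "current settings", as with max_concurr etc. */
static int max_concurr = 2;
static int plugged_delay = 10;

struct bau_control {
	int max_concurr;
	int plugged_delay;
	int cpus_in_uvhub;
};

static struct bau_control bau_control[NR_CPUS];

/* Parse and bounds-check against one CPU's view, like
 * parse_tunables_write(); returns -1 on a bad value. */
static int parse_tunables(struct bau_control *bcp, const char *instr)
{
	int mc, pd;

	if (sscanf(instr, "%d %d", &mc, &pd) != 2)
		return -1;
	if (mc < 1 || mc > bcp->cpus_in_uvhub)
		return -1;
	max_concurr = mc;
	plugged_delay = pd;
	return 0;
}

/* Fan the accepted values out to every CPU, as tunables_write() does. */
static void broadcast_tunables(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		bau_control[cpu].max_concurr = max_concurr;
		bau_control[cpu].plugged_delay = plugged_delay;
	}
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		bau_control[cpu].cpus_in_uvhub = NR_CPUS;

	if (parse_tunables(&bau_control[0], "3 20") == 0)
		broadcast_tunables();
	printf("cpu2 max_concurr=%d\n", bau_control[2].max_concurr);
	return 0;
}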
1736 struct bau_control *bcp; in activation_descriptor_init() local
1797 bcp = &per_cpu(bau_control, cpu); in activation_descriptor_init()
1798 bcp->descriptor_base = bau_desc; in activation_descriptor_init()
1816 struct bau_control *bcp; in pq_init() local
1830 bcp = &per_cpu(bau_control, cpu); in pq_init()
1831 bcp->queue_first = pqp; in pq_init()
1832 bcp->bau_msg_head = pqp; in pq_init()
1833 bcp->queue_last = pqp + (DEST_Q_SIZE - 1); in pq_init()
1843 bcp = &per_cpu(bau_control, smp_processor_id()); in pq_init()
1844 if (bcp->uvhub_version <= UV_BAU_V3) { in pq_init()
1923 struct bau_control *bcp; in init_per_cpu_tunables() local
1926 bcp = &per_cpu(bau_control, cpu); in init_per_cpu_tunables()
1927 bcp->baudisabled = 0; in init_per_cpu_tunables()
1929 bcp->nobau = true; in init_per_cpu_tunables()
1930 bcp->statp = &per_cpu(ptcstats, cpu); in init_per_cpu_tunables()
1932 bcp->timeout_interval = usec_2_cycles(2*timeout_us); in init_per_cpu_tunables()
1933 bcp->max_concurr = max_concurr; in init_per_cpu_tunables()
1934 bcp->max_concurr_const = max_concurr; in init_per_cpu_tunables()
1935 bcp->plugged_delay = plugged_delay; in init_per_cpu_tunables()
1936 bcp->plugsb4reset = plugsb4reset; in init_per_cpu_tunables()
1937 bcp->timeoutsb4reset = timeoutsb4reset; in init_per_cpu_tunables()
1938 bcp->ipi_reset_limit = ipi_reset_limit; in init_per_cpu_tunables()
1939 bcp->complete_threshold = complete_threshold; in init_per_cpu_tunables()
1940 bcp->cong_response_us = congested_respns_us; in init_per_cpu_tunables()
1941 bcp->cong_reps = congested_reps; in init_per_cpu_tunables()
1942 bcp->disabled_period = sec_2_cycles(disabled_period); in init_per_cpu_tunables()
1943 bcp->giveup_limit = giveup_limit; in init_per_cpu_tunables()
1944 spin_lock_init(&bcp->queue_lock); in init_per_cpu_tunables()
1945 spin_lock_init(&bcp->uvhub_lock); in init_per_cpu_tunables()
1946 spin_lock_init(&bcp->disable_lock); in init_per_cpu_tunables()
1961 struct bau_control *bcp; in get_cpu_topology() local
1966 bcp = &per_cpu(bau_control, cpu); in get_cpu_topology()
1968 memset(bcp, 0, sizeof(struct bau_control)); in get_cpu_topology()
1978 bcp->osnode = cpu_to_node(cpu); in get_cpu_topology()
1979 bcp->partition_base_pnode = base_pnode; in get_cpu_topology()
1991 socket = bcp->osnode & 1; in get_cpu_topology()
2041 struct bau_control *bcp; in scan_sock() local
2045 bcp = &per_cpu(bau_control, cpu); in scan_sock()
2046 bcp->cpu = cpu; in scan_sock()
2048 *smasterp = bcp; in scan_sock()
2050 *hmasterp = bcp; in scan_sock()
2052 bcp->cpus_in_uvhub = bdp->num_cpus; in scan_sock()
2053 bcp->cpus_in_socket = sdp->num_cpus; in scan_sock()
2054 bcp->socket_master = *smasterp; in scan_sock()
2055 bcp->uvhub = bdp->uvhub; in scan_sock()
2057 bcp->uvhub_version = UV_BAU_V1; in scan_sock()
2059 bcp->uvhub_version = UV_BAU_V2; in scan_sock()
2061 bcp->uvhub_version = UV_BAU_V3; in scan_sock()
2063 bcp->uvhub_version = UV_BAU_V4; in scan_sock()
2068 bcp->uvhub_master = *hmasterp; in scan_sock()
2070 bcp->uvhub_cpu = uvhub_cpu; in scan_sock()
2077 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_0; in scan_sock()
2078 bcp->status_index = uvhub_cpu * UV_ACT_STATUS_SIZE; in scan_sock()
2080 bcp->status_mmr = UVH_LB_BAU_SB_ACTIVATION_STATUS_1; in scan_sock()
2081 bcp->status_index = (uvhub_cpu - UV_CPUS_PER_AS) in scan_sock()
2085 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { in scan_sock()
2087 bcp->uvhub_cpu); in scan_sock()
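The tail of scan_sock() (lines 2077-2081 above) splits the hub's CPUs across two activation-status MMRs: descriptors for the first UV_CPUS_PER_AS CPUs are tracked in STATUS_0, later ones in STATUS_1, each with a bit offset computed from the CPU's position. A small worked version of that index math; the constant values mimic, but are not guaranteed to match, the kernel's UV_CPUS_PER_AS and UV_ACT_STATUS_SIZE.

#include <stdio.h>

#define UV_CPUS_PER_AS     32	/* CPUs covered per activation-status MMR */
#define UV_ACT_STATUS_SIZE 2	/* status bits per descriptor */

/* Pick the status MMR (0 or 1) and bit index for a hub-local CPU,
 * following the split at the end of scan_sock(). */
static void status_slot(int uvhub_cpu, int *mmr, int *index)
{
	if (uvhub_cpu < UV_CPUS_PER_AS) {
		*mmr = 0;		/* ..._STATUS_0 */
		*index = uvhub_cpu * UV_ACT_STATUS_SIZE;
	} else {
		*mmr = 1;		/* ..._STATUS_1 */
		*index = (uvhub_cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE;
	}
}

int main(void)
{
	int mmr, index;

	status_slot(5, &mmr, &index);
	printf("cpu 5:  STATUS_%d bit %d\n", mmr, index);	/* STATUS_0 bit 10 */
	status_slot(40, &mmr, &index);
	printf("cpu 40: STATUS_%d bit %d\n", mmr, index);	/* STATUS_1 bit 16 */
	return 0;
}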