Lines matching refs:smi_info
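All of the references below touch the per-interface bookkeeping structure defined at line 124. As a reading aid, here is a condensed reconstruction of the fields those lines use, inferred from the calls made on them (spin_lock_irqsave() implies spinlock_t, mod_timer() implies struct timer_list, and so on); field types, ordering, and any omitted members are approximate, not the verbatim definition.

/*
 * Approximate field map of struct smi_info, reconstructed from the
 * references listed below; see the real definition at line 124.
 */
struct smi_info {                           /* one instance per SI interface */
	int			si_num;
	struct ipmi_smi		*intf;      /* upper-layer interface handle */

	/* Low-level state machine (KCS/SMIC/BT) and its handler table. */
	struct si_sm_data	*si_sm;
	const struct si_sm_handlers *handlers;  /* start_transaction/event/get_result/cleanup */
	struct si_sm_io		io;         /* port/mem accessors, irq, si_type, dev, ... */

	/* Message currently in the state machine and the one queued behind it. */
	spinlock_t		si_lock;
	struct ipmi_smi_msg	*curr_msg;
	struct ipmi_smi_msg	*waiting_msg;
	enum si_intf_state	si_state;   /* SI_NORMAL, SI_GETTING_FLAGS, ... (enum name assumed) */

	/* Timer/thread machinery that keeps the state machine moving. */
	struct timer_list	si_timer;
	bool			timer_running, timer_can_start;
	unsigned long		last_timeout_jiffies;
	struct task_struct	*thread;
	atomic_t		req_events, need_watch;

	/* Flag handling and interrupt quirks. */
	unsigned char		msg_flags;
	bool			got_attn, interrupt_disabled;
	bool			cannot_disable_irq, irq_enable_broken;
	bool			supports_event_msg_buff, has_event_buffer;
	bool			run_to_completion;
	int			(*oem_data_avail_handler)(struct smi_info *smi_info);

	/* Registration / teardown bookkeeping. */
	struct ipmi_device_id	device_id;
	struct platform_device	*pdev;
	bool			pdev_registered, dev_group_added;
	struct list_head	link;
};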
124 struct smi_info { struct
145 int (*oem_data_avail_handler)(struct smi_info *smi_info); argument
263 static int try_smi_init(struct smi_info *smi); argument
264 static void cleanup_one_si(struct smi_info *smi_info);
285 static void deliver_recv_msg(struct smi_info *smi_info, in deliver_recv_msg() argument
289 ipmi_smi_msg_received(smi_info->intf, msg); in deliver_recv_msg()
292 static void return_hosed_msg(struct smi_info *smi_info, int cCode) in return_hosed_msg() argument
294 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg()
306 smi_info->curr_msg = NULL; in return_hosed_msg()
307 deliver_recv_msg(smi_info, msg); in return_hosed_msg()
310 static enum si_sm_result start_next_msg(struct smi_info *smi_info) in start_next_msg() argument
314 if (!smi_info->waiting_msg) { in start_next_msg()
315 smi_info->curr_msg = NULL; in start_next_msg()
320 smi_info->curr_msg = smi_info->waiting_msg; in start_next_msg()
321 smi_info->waiting_msg = NULL; in start_next_msg()
324 0, smi_info); in start_next_msg()
329 err = smi_info->handlers->start_transaction( in start_next_msg()
330 smi_info->si_sm, in start_next_msg()
331 smi_info->curr_msg->data, in start_next_msg()
332 smi_info->curr_msg->data_size); in start_next_msg()
334 return_hosed_msg(smi_info, err); in start_next_msg()
342 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) in smi_mod_timer() argument
344 if (!smi_info->timer_can_start) in smi_mod_timer()
346 smi_info->last_timeout_jiffies = jiffies; in smi_mod_timer()
347 mod_timer(&smi_info->si_timer, new_val); in smi_mod_timer()
348 smi_info->timer_running = true; in smi_mod_timer()
354 static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, in start_new_msg() argument
357 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in start_new_msg()
359 if (smi_info->thread) in start_new_msg()
360 wake_up_process(smi_info->thread); in start_new_msg()
362 smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); in start_new_msg()
365 static void start_check_enables(struct smi_info *smi_info) in start_check_enables() argument
372 start_new_msg(smi_info, msg, 2); in start_check_enables()
373 smi_info->si_state = SI_CHECKING_ENABLES; in start_check_enables()
376 static void start_clear_flags(struct smi_info *smi_info) in start_clear_flags() argument
385 start_new_msg(smi_info, msg, 3); in start_clear_flags()
386 smi_info->si_state = SI_CLEARING_FLAGS; in start_clear_flags()
389 static void start_getting_msg_queue(struct smi_info *smi_info) in start_getting_msg_queue() argument
391 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_msg_queue()
392 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD; in start_getting_msg_queue()
393 smi_info->curr_msg->data_size = 2; in start_getting_msg_queue()
395 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_msg_queue()
396 smi_info->curr_msg->data_size); in start_getting_msg_queue()
397 smi_info->si_state = SI_GETTING_MESSAGES; in start_getting_msg_queue()
400 static void start_getting_events(struct smi_info *smi_info) in start_getting_events() argument
402 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); in start_getting_events()
403 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; in start_getting_events()
404 smi_info->curr_msg->data_size = 2; in start_getting_events()
406 start_new_msg(smi_info, smi_info->curr_msg->data, in start_getting_events()
407 smi_info->curr_msg->data_size); in start_getting_events()
408 smi_info->si_state = SI_GETTING_EVENTS; in start_getting_events()
420 static inline bool disable_si_irq(struct smi_info *smi_info) in disable_si_irq() argument
422 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in disable_si_irq()
423 smi_info->interrupt_disabled = true; in disable_si_irq()
424 start_check_enables(smi_info); in disable_si_irq()
430 static inline bool enable_si_irq(struct smi_info *smi_info) in enable_si_irq() argument
432 if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { in enable_si_irq()
433 smi_info->interrupt_disabled = false; in enable_si_irq()
434 start_check_enables(smi_info); in enable_si_irq()
446 static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) in alloc_msg_handle_irq() argument
452 if (!disable_si_irq(smi_info)) in alloc_msg_handle_irq()
453 smi_info->si_state = SI_NORMAL; in alloc_msg_handle_irq()
454 } else if (enable_si_irq(smi_info)) { in alloc_msg_handle_irq()
461 static void handle_flags(struct smi_info *smi_info) in handle_flags() argument
464 if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { in handle_flags()
466 smi_inc_stat(smi_info, watchdog_pretimeouts); in handle_flags()
468 start_clear_flags(smi_info); in handle_flags()
469 smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; in handle_flags()
470 ipmi_smi_watchdog_pretimeout(smi_info->intf); in handle_flags()
471 } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { in handle_flags()
473 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
474 if (!smi_info->curr_msg) in handle_flags()
477 start_getting_msg_queue(smi_info); in handle_flags()
478 } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { in handle_flags()
480 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in handle_flags()
481 if (!smi_info->curr_msg) in handle_flags()
484 start_getting_events(smi_info); in handle_flags()
485 } else if (smi_info->msg_flags & OEM_DATA_AVAIL && in handle_flags()
486 smi_info->oem_data_avail_handler) { in handle_flags()
487 if (smi_info->oem_data_avail_handler(smi_info)) in handle_flags()
490 smi_info->si_state = SI_NORMAL; in handle_flags()
499 static u8 current_global_enables(struct smi_info *smi_info, u8 base, in current_global_enables() argument
504 if (smi_info->supports_event_msg_buff) in current_global_enables()
507 if (((smi_info->io.irq && !smi_info->interrupt_disabled) || in current_global_enables()
508 smi_info->cannot_disable_irq) && in current_global_enables()
509 !smi_info->irq_enable_broken) in current_global_enables()
512 if (smi_info->supports_event_msg_buff && in current_global_enables()
513 smi_info->io.irq && !smi_info->interrupt_disabled && in current_global_enables()
514 !smi_info->irq_enable_broken) in current_global_enables()
522 static void check_bt_irq(struct smi_info *smi_info, bool irq_on) in check_bt_irq() argument
524 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG); in check_bt_irq()
532 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in check_bt_irq()
535 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0); in check_bt_irq()
538 static void handle_transaction_done(struct smi_info *smi_info) in handle_transaction_done() argument
543 switch (smi_info->si_state) { in handle_transaction_done()
545 if (!smi_info->curr_msg) in handle_transaction_done()
548 smi_info->curr_msg->rsp_size in handle_transaction_done()
549 = smi_info->handlers->get_result( in handle_transaction_done()
550 smi_info->si_sm, in handle_transaction_done()
551 smi_info->curr_msg->rsp, in handle_transaction_done()
559 msg = smi_info->curr_msg; in handle_transaction_done()
560 smi_info->curr_msg = NULL; in handle_transaction_done()
561 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
570 len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
573 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
579 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
581 smi_info->msg_flags = msg[3]; in handle_transaction_done()
582 handle_flags(smi_info); in handle_transaction_done()
592 smi_info->handlers->get_result(smi_info->si_sm, msg, 3); in handle_transaction_done()
595 dev_warn(smi_info->io.dev, in handle_transaction_done()
598 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
604 smi_info->curr_msg->rsp_size in handle_transaction_done()
605 = smi_info->handlers->get_result( in handle_transaction_done()
606 smi_info->si_sm, in handle_transaction_done()
607 smi_info->curr_msg->rsp, in handle_transaction_done()
615 msg = smi_info->curr_msg; in handle_transaction_done()
616 smi_info->curr_msg = NULL; in handle_transaction_done()
622 smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; in handle_transaction_done()
623 handle_flags(smi_info); in handle_transaction_done()
625 smi_inc_stat(smi_info, events); in handle_transaction_done()
633 handle_flags(smi_info); in handle_transaction_done()
635 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
642 smi_info->curr_msg->rsp_size in handle_transaction_done()
643 = smi_info->handlers->get_result( in handle_transaction_done()
644 smi_info->si_sm, in handle_transaction_done()
645 smi_info->curr_msg->rsp, in handle_transaction_done()
653 msg = smi_info->curr_msg; in handle_transaction_done()
654 smi_info->curr_msg = NULL; in handle_transaction_done()
660 smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; in handle_transaction_done()
661 handle_flags(smi_info); in handle_transaction_done()
663 smi_inc_stat(smi_info, incoming_messages); in handle_transaction_done()
671 handle_flags(smi_info); in handle_transaction_done()
673 deliver_recv_msg(smi_info, msg); in handle_transaction_done()
685 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
687 dev_warn(smi_info->io.dev, in handle_transaction_done()
689 dev_warn(smi_info->io.dev, in handle_transaction_done()
691 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
694 enables = current_global_enables(smi_info, 0, &irq_on); in handle_transaction_done()
695 if (smi_info->io.si_type == SI_BT) in handle_transaction_done()
697 check_bt_irq(smi_info, irq_on); in handle_transaction_done()
703 smi_info->handlers->start_transaction( in handle_transaction_done()
704 smi_info->si_sm, msg, 3); in handle_transaction_done()
705 smi_info->si_state = SI_SETTING_ENABLES; in handle_transaction_done()
706 } else if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
707 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
708 if (!smi_info->curr_msg) { in handle_transaction_done()
709 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
712 start_getting_events(smi_info); in handle_transaction_done()
714 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
723 smi_info->handlers->get_result(smi_info->si_sm, msg, 4); in handle_transaction_done()
725 dev_warn(smi_info->io.dev, in handle_transaction_done()
729 if (smi_info->supports_event_msg_buff) { in handle_transaction_done()
730 smi_info->curr_msg = ipmi_alloc_smi_msg(); in handle_transaction_done()
731 if (!smi_info->curr_msg) { in handle_transaction_done()
732 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
735 start_getting_events(smi_info); in handle_transaction_done()
737 smi_info->si_state = SI_NORMAL; in handle_transaction_done()
749 static enum si_sm_result smi_event_handler(struct smi_info *smi_info, in smi_event_handler() argument
763 si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); in smi_event_handler()
766 si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); in smi_event_handler()
769 smi_inc_stat(smi_info, complete_transactions); in smi_event_handler()
771 handle_transaction_done(smi_info); in smi_event_handler()
774 smi_inc_stat(smi_info, hosed_count); in smi_event_handler()
780 smi_info->si_state = SI_NORMAL; in smi_event_handler()
781 if (smi_info->curr_msg != NULL) { in smi_event_handler()
787 return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); in smi_event_handler()
796 if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) { in smi_event_handler()
799 if (smi_info->si_state != SI_NORMAL) { in smi_event_handler()
804 smi_info->got_attn = true; in smi_event_handler()
806 smi_info->got_attn = false; in smi_event_handler()
807 smi_inc_stat(smi_info, attentions); in smi_event_handler()
819 start_new_msg(smi_info, msg, 2); in smi_event_handler()
820 smi_info->si_state = SI_GETTING_FLAGS; in smi_event_handler()
827 smi_inc_stat(smi_info, idles); in smi_event_handler()
829 si_sm_result = start_next_msg(smi_info); in smi_event_handler()
835 && (atomic_read(&smi_info->req_events))) { in smi_event_handler()
840 atomic_set(&smi_info->req_events, 0); in smi_event_handler()
848 if (smi_info->supports_event_msg_buff || smi_info->io.irq) { in smi_event_handler()
849 start_check_enables(smi_info); in smi_event_handler()
851 smi_info->curr_msg = alloc_msg_handle_irq(smi_info); in smi_event_handler()
852 if (!smi_info->curr_msg) in smi_event_handler()
855 start_getting_events(smi_info); in smi_event_handler()
860 if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) { in smi_event_handler()
862 if (del_timer(&smi_info->si_timer)) in smi_event_handler()
863 smi_info->timer_running = false; in smi_event_handler()
870 static void check_start_timer_thread(struct smi_info *smi_info) in check_start_timer_thread() argument
872 if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) { in check_start_timer_thread()
873 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in check_start_timer_thread()
875 if (smi_info->thread) in check_start_timer_thread()
876 wake_up_process(smi_info->thread); in check_start_timer_thread()
878 start_next_msg(smi_info); in check_start_timer_thread()
879 smi_event_handler(smi_info, 0); in check_start_timer_thread()
885 struct smi_info *smi_info = send_info; in flush_messages() local
892 result = smi_event_handler(smi_info, 0); in flush_messages()
895 result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC); in flush_messages()
902 struct smi_info *smi_info = send_info; in sender() local
907 if (smi_info->run_to_completion) { in sender()
912 smi_info->waiting_msg = msg; in sender()
916 spin_lock_irqsave(&smi_info->si_lock, flags); in sender()
924 BUG_ON(smi_info->waiting_msg); in sender()
925 smi_info->waiting_msg = msg; in sender()
926 check_start_timer_thread(smi_info); in sender()
927 spin_unlock_irqrestore(&smi_info->si_lock, flags); in sender()
932 struct smi_info *smi_info = send_info; in set_run_to_completion() local
934 smi_info->run_to_completion = i_run_to_completion; in set_run_to_completion()
936 flush_messages(smi_info); in set_run_to_completion()
954 const struct smi_info *smi_info, in ipmi_thread_busy_wait() argument
959 if (smi_info->si_num < num_max_busy_us) in ipmi_thread_busy_wait()
960 max_busy_us = kipmid_max_busy_us[smi_info->si_num]; in ipmi_thread_busy_wait()
990 struct smi_info *smi_info = data; in ipmi_thread() local
1000 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_thread()
1001 smi_result = smi_event_handler(smi_info, 0); in ipmi_thread()
1010 if (smi_result != SI_SM_IDLE && !smi_info->timer_running) in ipmi_thread()
1011 smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES); in ipmi_thread()
1013 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_thread()
1014 busy_wait = ipmi_thread_busy_wait(smi_result, smi_info, in ipmi_thread()
1021 if (atomic_read(&smi_info->need_watch)) { in ipmi_thread()
1037 struct smi_info *smi_info = send_info; in poll() local
1039 bool run_to_completion = smi_info->run_to_completion; in poll()
1047 spin_lock_irqsave(&smi_info->si_lock, flags); in poll()
1048 smi_event_handler(smi_info, 10); in poll()
1050 spin_unlock_irqrestore(&smi_info->si_lock, flags); in poll()
1055 struct smi_info *smi_info = send_info; in request_events() local
1057 if (!smi_info->has_event_buffer) in request_events()
1060 atomic_set(&smi_info->req_events, 1); in request_events()
1065 struct smi_info *smi_info = send_info; in set_need_watch() local
1068 atomic_set(&smi_info->need_watch, enable); in set_need_watch()
1069 spin_lock_irqsave(&smi_info->si_lock, flags); in set_need_watch()
1070 check_start_timer_thread(smi_info); in set_need_watch()
1071 spin_unlock_irqrestore(&smi_info->si_lock, flags); in set_need_watch()
1076 struct smi_info *smi_info = from_timer(smi_info, t, si_timer); in smi_timeout() local
1083 spin_lock_irqsave(&(smi_info->si_lock), flags); in smi_timeout()
1087 time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) in smi_timeout()
1089 smi_result = smi_event_handler(smi_info, time_diff); in smi_timeout()
1091 if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { in smi_timeout()
1094 smi_inc_stat(smi_info, long_timeouts); in smi_timeout()
1103 smi_inc_stat(smi_info, short_timeouts); in smi_timeout()
1106 smi_inc_stat(smi_info, long_timeouts); in smi_timeout()
1112 smi_mod_timer(smi_info, timeout); in smi_timeout()
1114 smi_info->timer_running = false; in smi_timeout()
1115 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in smi_timeout()
1120 struct smi_info *smi_info = data; in ipmi_si_irq_handler() local
1123 if (smi_info->io.si_type == SI_BT) in ipmi_si_irq_handler()
1125 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, in ipmi_si_irq_handler()
1129 spin_lock_irqsave(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1131 smi_inc_stat(smi_info, interrupts); in ipmi_si_irq_handler()
1135 smi_event_handler(smi_info, 0); in ipmi_si_irq_handler()
1136 spin_unlock_irqrestore(&(smi_info->si_lock), flags); in ipmi_si_irq_handler()
1143 struct smi_info *new_smi = send_info; in smi_start_processing()
1188 struct smi_info *smi = send_info; in get_smi_info()
1200 struct smi_info *smi_info = send_info; in set_maintenance_mode() local
1203 atomic_set(&smi_info->req_events, 0); in set_maintenance_mode()
1288 static int wait_for_msg_done(struct smi_info *smi_info) in wait_for_msg_done() argument
1292 smi_result = smi_info->handlers->event(smi_info->si_sm, 0); in wait_for_msg_done()
1297 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1298 smi_info->si_sm, jiffies_to_usecs(1)); in wait_for_msg_done()
1300 smi_result = smi_info->handlers->event( in wait_for_msg_done()
1301 smi_info->si_sm, 0); in wait_for_msg_done()
1315 static int try_get_dev_id(struct smi_info *smi_info) in try_get_dev_id() argument
1332 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_get_dev_id()
1334 rv = wait_for_msg_done(smi_info); in try_get_dev_id()
1338 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_get_dev_id()
1343 resp + 2, resp_len - 2, &smi_info->device_id); in try_get_dev_id()
1350 static int get_global_enables(struct smi_info *smi_info, u8 *enables) in get_global_enables() argument
1363 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in get_global_enables()
1365 rv = wait_for_msg_done(smi_info); in get_global_enables()
1367 dev_warn(smi_info->io.dev, in get_global_enables()
1373 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in get_global_enables()
1380 dev_warn(smi_info->io.dev, in get_global_enables()
1397 static int set_global_enables(struct smi_info *smi_info, u8 enables) in set_global_enables() argument
1411 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in set_global_enables()
1413 rv = wait_for_msg_done(smi_info); in set_global_enables()
1415 dev_warn(smi_info->io.dev, in set_global_enables()
1421 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in set_global_enables()
1427 dev_warn(smi_info->io.dev, in set_global_enables()
1447 static void check_clr_rcv_irq(struct smi_info *smi_info) in check_clr_rcv_irq() argument
1452 rv = get_global_enables(smi_info, &enables); in check_clr_rcv_irq()
1459 rv = set_global_enables(smi_info, enables); in check_clr_rcv_irq()
1463 dev_err(smi_info->io.dev, in check_clr_rcv_irq()
1473 dev_warn(smi_info->io.dev, in check_clr_rcv_irq()
1475 smi_info->cannot_disable_irq = true; in check_clr_rcv_irq()
1484 static void check_set_rcv_irq(struct smi_info *smi_info) in check_set_rcv_irq() argument
1489 if (!smi_info->io.irq) in check_set_rcv_irq()
1492 rv = get_global_enables(smi_info, &enables); in check_set_rcv_irq()
1495 rv = set_global_enables(smi_info, enables); in check_set_rcv_irq()
1499 dev_err(smi_info->io.dev, in check_set_rcv_irq()
1509 dev_warn(smi_info->io.dev, in check_set_rcv_irq()
1511 smi_info->cannot_disable_irq = true; in check_set_rcv_irq()
1512 smi_info->irq_enable_broken = true; in check_set_rcv_irq()
1516 static int try_enable_event_buffer(struct smi_info *smi_info) in try_enable_event_buffer() argument
1529 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); in try_enable_event_buffer()
1531 rv = wait_for_msg_done(smi_info); in try_enable_event_buffer()
1537 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1551 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
1558 smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); in try_enable_event_buffer()
1560 rv = wait_for_msg_done(smi_info); in try_enable_event_buffer()
1566 resp_len = smi_info->handlers->get_result(smi_info->si_sm, in try_enable_event_buffer()
1584 smi_info->supports_event_msg_buff = true; in try_enable_event_buffer()
1596 struct smi_info *smi_info = dev_get_drvdata(dev); \
1598 return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
1606 struct smi_info *smi_info = dev_get_drvdata(dev); in ipmi_type_show() local
1608 return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]); in ipmi_type_show()
1616 struct smi_info *smi_info = dev_get_drvdata(dev); in ipmi_interrupts_enabled_show() local
1617 int enabled = smi_info->io.irq && !smi_info->interrupt_disabled; in ipmi_interrupts_enabled_show()
1640 struct smi_info *smi_info = dev_get_drvdata(dev); in ipmi_params_show() local
1644 si_to_str[smi_info->io.si_type], in ipmi_params_show()
1645 addr_space_to_str[smi_info->io.addr_type], in ipmi_params_show()
1646 smi_info->io.addr_data, in ipmi_params_show()
1647 smi_info->io.regspacing, in ipmi_params_show()
1648 smi_info->io.regsize, in ipmi_params_show()
1649 smi_info->io.regshift, in ipmi_params_show()
1650 smi_info->io.irq, in ipmi_params_show()
1651 smi_info->io.slave_addr); in ipmi_params_show()
1684 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) in oem_data_avail_to_receive_msg_avail() argument
1686 smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | in oem_data_avail_to_receive_msg_avail()
1720 static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) in setup_dell_poweredge_oem_data_handler() argument
1722 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_oem_data_handler()
1727 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1732 smi_info->oem_data_avail_handler = in setup_dell_poweredge_oem_data_handler()
1739 static void return_hosed_msg_badsize(struct smi_info *smi_info) in return_hosed_msg_badsize() argument
1741 struct ipmi_smi_msg *msg = smi_info->curr_msg; in return_hosed_msg_badsize()
1748 smi_info->curr_msg = NULL; in return_hosed_msg_badsize()
1749 deliver_recv_msg(smi_info, msg); in return_hosed_msg_badsize()
1769 struct smi_info *smi_info = in; in dell_poweredge_bt_xaction_handler() local
1770 unsigned char *data = smi_info->curr_msg->data; in dell_poweredge_bt_xaction_handler()
1771 unsigned int size = smi_info->curr_msg->data_size; in dell_poweredge_bt_xaction_handler()
1776 return_hosed_msg_badsize(smi_info); in dell_poweredge_bt_xaction_handler()
1794 setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) in setup_dell_poweredge_bt_xaction_handler() argument
1796 struct ipmi_device_id *id = &smi_info->device_id; in setup_dell_poweredge_bt_xaction_handler()
1798 smi_info->io.si_type == SI_BT) in setup_dell_poweredge_bt_xaction_handler()
1810 static void setup_oem_data_handler(struct smi_info *smi_info) in setup_oem_data_handler() argument
1812 setup_dell_poweredge_oem_data_handler(smi_info); in setup_oem_data_handler()
1815 static void setup_xaction_handlers(struct smi_info *smi_info) in setup_xaction_handlers() argument
1817 setup_dell_poweredge_bt_xaction_handler(smi_info); in setup_xaction_handlers()
1820 static void check_for_broken_irqs(struct smi_info *smi_info) in check_for_broken_irqs() argument
1822 check_clr_rcv_irq(smi_info); in check_for_broken_irqs()
1823 check_set_rcv_irq(smi_info); in check_for_broken_irqs()
1826 static inline void stop_timer_and_thread(struct smi_info *smi_info) in stop_timer_and_thread() argument
1828 if (smi_info->thread != NULL) { in stop_timer_and_thread()
1829 kthread_stop(smi_info->thread); in stop_timer_and_thread()
1830 smi_info->thread = NULL; in stop_timer_and_thread()
1833 smi_info->timer_can_start = false; in stop_timer_and_thread()
1834 if (smi_info->timer_running) in stop_timer_and_thread()
1835 del_timer_sync(&smi_info->si_timer); in stop_timer_and_thread()
1838 static struct smi_info *find_dup_si(struct smi_info *info) in find_dup_si()
1840 struct smi_info *e; in find_dup_si()
1863 struct smi_info *new_smi, *dup; in ipmi_si_add_smi()
1921 static int try_smi_init(struct smi_info *new_smi) in try_smi_init()
2094 struct smi_info *e; in init_ipmi_si()
2165 struct smi_info *smi_info = send_info; in shutdown_smi() local
2167 if (smi_info->dev_group_added) { in shutdown_smi()
2168 device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group); in shutdown_smi()
2169 smi_info->dev_group_added = false; in shutdown_smi()
2171 if (smi_info->io.dev) in shutdown_smi()
2172 dev_set_drvdata(smi_info->io.dev, NULL); in shutdown_smi()
2178 smi_info->interrupt_disabled = true; in shutdown_smi()
2179 if (smi_info->io.irq_cleanup) { in shutdown_smi()
2180 smi_info->io.irq_cleanup(&smi_info->io); in shutdown_smi()
2181 smi_info->io.irq_cleanup = NULL; in shutdown_smi()
2183 stop_timer_and_thread(smi_info); in shutdown_smi()
2197 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { in shutdown_smi()
2198 poll(smi_info); in shutdown_smi()
2201 if (smi_info->handlers) in shutdown_smi()
2202 disable_si_irq(smi_info); in shutdown_smi()
2203 while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) { in shutdown_smi()
2204 poll(smi_info); in shutdown_smi()
2207 if (smi_info->handlers) in shutdown_smi()
2208 smi_info->handlers->cleanup(smi_info->si_sm); in shutdown_smi()
2210 if (smi_info->io.addr_source_cleanup) { in shutdown_smi()
2211 smi_info->io.addr_source_cleanup(&smi_info->io); in shutdown_smi()
2212 smi_info->io.addr_source_cleanup = NULL; in shutdown_smi()
2214 if (smi_info->io.io_cleanup) { in shutdown_smi()
2215 smi_info->io.io_cleanup(&smi_info->io); in shutdown_smi()
2216 smi_info->io.io_cleanup = NULL; in shutdown_smi()
2219 kfree(smi_info->si_sm); in shutdown_smi()
2220 smi_info->si_sm = NULL; in shutdown_smi()
2222 smi_info->intf = NULL; in shutdown_smi()
2229 static void cleanup_one_si(struct smi_info *smi_info) in cleanup_one_si() argument
2231 if (!smi_info) in cleanup_one_si()
2234 list_del(&smi_info->link); in cleanup_one_si()
2236 if (smi_info->intf) in cleanup_one_si()
2237 ipmi_unregister_smi(smi_info->intf); in cleanup_one_si()
2239 if (smi_info->pdev) { in cleanup_one_si()
2240 if (smi_info->pdev_registered) in cleanup_one_si()
2241 platform_device_unregister(smi_info->pdev); in cleanup_one_si()
2243 platform_device_put(smi_info->pdev); in cleanup_one_si()
2246 kfree(smi_info); in cleanup_one_si()
2251 struct smi_info *e; in ipmi_si_remove_by_dev()
2271 struct smi_info *e, *tmp_e; in ipmi_si_remove_by_data()
2287 struct smi_info *e, *tmp_e; in cleanup_ipmi_si()
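Most of the lines above fall into one calling pattern against smi_info->handlers: load a request with start_transaction(), crank the state machine with event() until it stops asking to be called back, then read the reply with get_result(); wait_for_msg_done() and try_get_dev_id() (lines 1288-1343) are the clearest examples. The sketch below restates that pattern on its own. It assumes the si_sm handler interface used above and IPMI_GET_DEVICE_ID_CMD as the example command; the name demo_get_device_id() is illustrative, not from the driver.

/*
 * Minimal sketch of the synchronous request pattern seen in
 * wait_for_msg_done()/try_get_dev_id() above.  Assumes it lives next to
 * the struct smi_info definition in the SI driver and that the usual
 * IPMI headers (<linux/ipmi_msgdefs.h>, the si_sm header) are in scope.
 */
static int demo_get_device_id(struct smi_info *smi_info,
			      unsigned char *resp, unsigned int resp_max)
{
	unsigned char msg[2];
	enum si_sm_result res;

	/* 1. Hand the raw request to the low-level state machine. */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	/* 2. Drive it until it no longer asks to be called again. */
	res = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (res == SI_SM_CALL_WITH_DELAY ||
		    res == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			res = smi_info->handlers->event(smi_info->si_sm,
							jiffies_to_usecs(1));
		} else if (res == SI_SM_CALL_WITHOUT_DELAY) {
			res = smi_info->handlers->event(smi_info->si_sm, 0);
		} else {
			break;
		}
	}
	if (res == SI_SM_HOSED)
		return -ENODEV;	/* nothing IPMI-like answered */

	/* 3. Pull the response (netfn/cmd/completion code + data) back out. */
	return smi_info->handlers->get_result(smi_info->si_sm, resp, resp_max);
}

In the driver itself the interrupt and timer paths reuse the same handlers through smi_event_handler() and handle_transaction_done() instead of busy-waiting, but the three-step shape is the same.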