/nrf_hw_models-latest/src/nrfx/hal/

nrf_timer.c
  in nrf_timer_task_trigger():
    30:  nrf_timer_task_t task)
    34:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    36:  if (task == NRF_TIMER_TASK_START) {
    38:  } else if (task == NRF_TIMER_TASK_STOP) {
    40:  } else if (task == NRF_TIMER_TASK_COUNT) {
    42:  } else if (task == NRF_TIMER_TASK_CLEAR) {
    45:  } else if (task == NRF_TIMER_TASK_SHUTDOWN) {
    48:  } else if (task >= NRF_TIMER_TASK_CAPTURE0) {
    49:  int task_nbr = (task - NRF_TIMER_TASK_CAPTURE0)/sizeof(uint32_t);
    53:  (int) task);
  [all …]

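All of the nrf_timer.c matches above come from the same place: the HW-model replacement of the nrfx task-trigger helper, which performs the register write the real HAL would do and then explicitly pokes the simulated peripheral, since nothing in the simulated environment watches the RAM-backed register file. A minimal, self-contained sketch of that shape follows; the FAKE_* peripheral, its task enum and the model_task_*() hooks are invented for illustration and are not names from the repository.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical two-task peripheral, laid out the nrfx way:
     * the task enum values are byte offsets of the TASKS_* registers. */
    typedef struct {
        volatile uint32_t TASKS_START;   /* offset 0x000 */
        volatile uint32_t TASKS_STOP;    /* offset 0x004 */
    } FAKE_Type;

    typedef enum {
        FAKE_TASK_START = offsetof(FAKE_Type, TASKS_START),
        FAKE_TASK_STOP  = offsetof(FAKE_Type, TASKS_STOP),
    } fake_task_t;

    /* Stand-ins for the HW model's side-effect handlers. */
    static void model_task_start(void) { puts("model: START triggered"); }
    static void model_task_stop(void)  { puts("model: STOP triggered");  }

    /* HAL replacement: do the same register write the real HAL does,
     * then hand control to the simulated peripheral, which on real
     * silicon would have reacted to the write on its own. */
    void fake_task_trigger(FAKE_Type *p_reg, fake_task_t task)
    {
        *(volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task) = 0x1UL;

        if (task == FAKE_TASK_START) {
            model_task_start();
        } else if (task == FAKE_TASK_STOP) {
            model_task_stop();
        }
    }

    int main(void)
    {
        static FAKE_Type fake_regs;   /* RAM stands in for the peripheral */
        fake_task_trigger(&fake_regs, FAKE_TASK_START);
        fake_task_trigger(&fake_regs, FAKE_TASK_STOP);
        return 0;
    }
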
nrf_ccm.c
  in nrf_ccm_task_trigger():
    13:  void nrf_ccm_task_trigger(NRF_CCM_Type * p_reg, nrf_ccm_task_t task)
    15:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    17:  if ( task == NRF_CCM_TASK_KSGEN ) {
    19:  } else if ( task == offsetof(NRF_CCM_Type, TASKS_CRYPT) ) {
    21:  } else if ( task == NRF_CCM_TASK_STOP ) {
  in nrf_ccm_subscribe_common():
    49:  nrf_ccm_task_t task)
    52:  if (task == NRF_CCM_TASK_KSGEN) {
    54:  } else if ( task == offsetof(NRF_CCM_Type, TASKS_CRYPT) ) {
    56:  } else if ( task == NRF_CCM_TASK_STOP ) {
    58:  } else if ( task == NRF_CCM_TASK_RATEOVERRIDE ) {
  [all …]

nrf_ecb.c
  in nrf_ecb_task_trigger():
    12:  void nrf_ecb_task_trigger(NRF_ECB_Type * p_reg, nrf_ecb_task_t task)
    14:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    16:  if ( task == NRF_ECB_TASK_STARTECB ) {
    18:  } else if ( task == NRF_ECB_TASK_STOPECB ) {
  in nrf_ecb_subscribe_common():
    46:  nrf_ecb_task_t task)
    49:  if (task == NRF_ECB_TASK_STARTECB) {
    51:  } else if ( task == NRF_ECB_TASK_STOPECB ) {
    55:  task);
  in nrf_ecb_subscribe_set():
    60:  nrf_ecb_task_t task,
    63:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
  [all …]

nrf_aar.c
  in nrf_aar_task_trigger():
    25:  void nrf_aar_task_trigger(NRF_AAR_Type * p_reg, nrf_aar_task_t task)
    27:  *(volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task) = 1;
    28:  if (task == NRF_AAR_TASK_START) {
    30:  } else if (task == NRF_AAR_TASK_STOP) {
  in nrf_aar_subscribe_common():
    46:  nrf_aar_task_t task)
    49:  if (task == NRF_AAR_TASK_START) {
    51:  } else if ( task == NRF_AAR_TASK_STOP ) {
    55:  task);
  in nrf_aar_subscribe_set():
    60:  nrf_aar_task_t task,
    63:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
  [all …]

nrf_rtc.c
  in nrf_rtc_task_trigger():
    46:  void nrf_rtc_task_trigger(NRF_RTC_Type * p_reg, nrf_rtc_task_t task)
    48:  *(uint32_t *)((uint32_t)p_reg + task) = 1;
    52:  if ( task == NRF_RTC_TASK_START ) {
    54:  } else if ( task == NRF_RTC_TASK_STOP ) {
    56:  } else if ( task == NRF_RTC_TASK_CLEAR ) {
    58:  } else if ( task == NRF_RTC_TASK_TRIGGER_OVERFLOW ) {
    61:  } else if ( task >= NRF_RTC_TASK_CAPTURE_0 ) {
    62:  int cc = (task - NRF_RTC_TASK_CAPTURE_0)/sizeof(uint32_t);
  in nrf_rtc_subscribe_common():
    95:  nrf_rtc_task_t task)
    99:  if (task == NRF_RTC_TASK_START) {
  [all …]

nrf_54_ecb.c
  in nrf_ecb_task_trigger():
    18:  void nrf_ecb_task_trigger(NRF_ECB_Type * p_reg, nrf_ecb_task_t task)
    22:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    24:  if (task == NRF_ECB_TASK_START) {
    26:  } else if (task == NRF_ECB_TASK_STOP) {
  in nrf_ecb_subscribe_common():
    55:  nrf_ecb_task_t task)
    59:  if (task == NRF_ECB_TASK_START) {
    61:  } else if (task == NRF_ECB_TASK_STOP) {
    65:  task);
  in nrf_ecb_subscribe_set():
    70:  nrf_ecb_task_t task,
    73:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
  [all …]

nrf_54_aar.c
  in nrf_aar_task_trigger():
    35:  void nrf_aar_task_trigger(NRF_AAR_Type * p_reg, nrf_aar_task_t task)
    38:  *(volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task) = 1;
    39:  if (task == NRF_AAR_TASK_START) {
    41:  } else if (task == NRF_AAR_TASK_STOP) {
    43:  } else if ( (int)task == (int)NRF_CCM_TASK_RATEOVERRIDE ) {
  in nrf_aar_subscribe_common():
    72:  nrf_aar_task_t task)
    75:  if (task == NRF_AAR_TASK_START) {
    77:  } else if ( task == NRF_AAR_TASK_STOP ) {
    79:  } else if ( (int)task == (int)NRF_CCM_TASK_RATEOVERRIDE ) {
    85:  task);
  [all …]

nrf_54_ccm.c
  in nrf_ccm_task_trigger():
    20:  void nrf_ccm_task_trigger(NRF_CCM_Type * p_reg, nrf_ccm_task_t task)
    23:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    25:  if (task == NRF_CCM_TASK_START) {
    27:  } else if (task == NRF_CCM_TASK_STOP) {
    29:  } else if (task == NRF_CCM_TASK_RATEOVERRIDE) {
  in nrf_ccm_subscribe_common():
    72:  nrf_ccm_task_t task)
    76:  if (task == NRF_CCM_TASK_START) {
    78:  } else if (task == NRF_CCM_TASK_STOP) {
    80:  } else if (task == NRF_CCM_TASK_RATEOVERRIDE) {
    84:  task);
  [all …]

nrf_temp.c
  in nrf_temp_subscribe_common():
    48:  nrf_temp_task_t task)
    51:  if (task == NRF_TEMP_TASK_START) {
    53:  } else if ( task == NRF_TEMP_TASK_STOP ) {
    57:  task);
  in nrf_temp_subscribe_set():
    62:  nrf_temp_task_t task,
    65:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    67:  nrf_temp_subscribe_common(p_reg, task);
  in nrf_temp_subscribe_clear():
    71:  nrf_temp_task_t task)
    73:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    74:  nrf_temp_subscribe_common(p_reg, task);

nrf_rng.c
  in nrf_rng_subscribe_common():
    50:  nrf_rng_task_t task)
    53:  if (task == NRF_RNG_TASK_START) {
    55:  } else if ( task == NRF_RNG_TASK_STOP ) {
    59:  task);
  in nrf_rng_subscribe_set():
    64:  nrf_rng_task_t task,
    67:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    69:  nrf_rng_subscribe_common(p_reg, task);
  in nrf_rng_subscribe_clear():
    73:  nrf_rng_task_t task)
    75:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    76:  nrf_rng_subscribe_common(p_reg, task);

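The nrf_temp.c and nrf_rng.c matches show the other recurring shape in these files: on most of these peripherals the SUBSCRIBE_* register sits 0x80 bytes above its TASKS_* counterpart (hence the "task + 0x80uL" offsets above), so subscribe_set()/subscribe_clear() write the subscription value (or zero) there and then call a common helper so the model can re-evaluate its DPPI subscriptions. A sketch under those assumptions follows; the FAKE_* layout, the SUBSCRIBE_EN bit and model_subscribe_changed() are placeholders, and the "channel | enable-bit" value is assumed from the nrfx HAL convention since the listing truncates the right-hand side of the write.

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical peripheral with TASKS_* at 0x000.. and SUBSCRIBE_*
     * exactly 0x80 bytes above the matching task register. */
    typedef struct {
        volatile uint32_t TASKS_START;       /* 0x000 */
        volatile uint32_t TASKS_STOP;        /* 0x004 */
        volatile uint32_t RESERVED[30];
        volatile uint32_t SUBSCRIBE_START;   /* 0x080 */
        volatile uint32_t SUBSCRIBE_STOP;    /* 0x084 */
    } FAKE_Type;

    typedef enum {
        FAKE_TASK_START = offsetof(FAKE_Type, TASKS_START),
        FAKE_TASK_STOP  = offsetof(FAKE_Type, TASKS_STOP),
    } fake_task_t;

    #define SUBSCRIBE_EN (1UL << 31)   /* assumed enable bit position */

    /* Stand-in for the model hook that re-evaluates DPPI subscriptions. */
    static void model_subscribe_changed(fake_task_t task)
    {
        printf("model: subscription for task at offset 0x%02x changed\n",
               (unsigned)task);
    }

    void fake_subscribe_set(FAKE_Type *p_reg, fake_task_t task, uint8_t channel)
    {
        /* Same write as the HAL: SUBSCRIBE register = channel | enable bit. */
        *(volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task + 0x80uL) =
            ((uint32_t)channel | SUBSCRIBE_EN);
        model_subscribe_changed(task);   /* let the simulated DPPI react */
    }

    void fake_subscribe_clear(FAKE_Type *p_reg, fake_task_t task)
    {
        *(volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task + 0x80uL) = 0;
        model_subscribe_changed(task);
    }

    int main(void)
    {
        static FAKE_Type regs;
        fake_subscribe_set(&regs, FAKE_TASK_START, 3);
        fake_subscribe_clear(&regs, FAKE_TASK_START);
        return 0;
    }
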
nrf_radio.c
  in nrf_radio_task_trigger():
    13:  void nrf_radio_task_trigger(NRF_RADIO_Type * p_reg, nrf_radio_task_t task)
    15:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    22:  switch (task) {
    43:  bs_trace_error_line_time("%s: Not supported task %i started\n", __func__, task);
  in nrf_radio_subscribe_common():
    92:  nrf_radio_task_t task)
    101:  switch (task) {
    122:  __func__, task);
  in nrf_radio_subscribe_set():
    129:  nrf_radio_task_t task,
    132:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + NRF_RADIO_DPPI_OFFSET)) =
    134:  nrf_radio_subscribe_common(p_reg, task);
  [all …]

nrf_clock.c
  in nrf_clock_task_trigger():
    41:  void nrf_clock_task_trigger(NRF_CLOCK_Type * p_reg, nrf_clock_task_t task)
    43:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    50:  switch (task) {
    81:  bs_trace_error_line_time("Not supported task started in nrf_clock, %d\n", task);
  in nrf_clock_subscribe_common():
    98:  nrf_clock_task_t task)
    105:  switch (task) {
    133:  task);
  in nrf_clock_subscribe_set():
    140:  nrf_clock_task_t task,
    143:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    145:  nrf_clock_subscribe_common(p_reg, task);
  [all …]

nrf_uart.c
  in nrf_uart_task_trigger():
    19:  void nrf_uart_task_trigger(NRF_UART_Type * p_reg, nrf_uart_task_t task)
    21:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    28:  switch (task) {
    35:  bs_trace_error_line_time("Not supported task started in nrf_clock, %d\n", task);
  in nrf_uart_subscribe_common():
    113:  nrf_uart_task_t task)
    120:  switch (task) {
    128:  task);
  in nrf_uart_subscribe_set():
    135:  nrf_uart_task_t task,
    138:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    140:  nrf_uart_subscribe_common(p_reg, task);
  [all …]

nrf_ppib.c
  in nrf_ppib_subscribe_set():
    21:  nrf_ppib_task_t task,
    25:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    29:  int task_nbr = (task - NRF_PPIB_TASK_SEND_0)/sizeof(uint32_t);
  in nrf_ppib_subscribe_clear():
    35:  nrf_ppib_task_t task)
    38:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    41:  int task_nbr = (task - NRF_PPIB_TASK_SEND_0)/sizeof(uint32_t);

nrf_uarte.c
  in nrf_uarte_task_trigger():
    19:  void nrf_uarte_task_trigger(NRF_UARTE_Type * p_reg, nrf_uarte_task_t task)
    21:  *((volatile uint32_t *)((uint8_t *)p_reg + (uint32_t)task)) = 0x1UL;
    28:  switch (task) {
    45:  bs_trace_error_line_time("Not supported task started in nrf_clock, %d\n", task);
  in nrf_uarte_subscribe_common():
    113:  nrf_uarte_task_t task)
    120:  switch (task) {
    138:  task);
  in nrf_uarte_subscribe_set():
    145:  nrf_uarte_task_t task,
    148:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    150:  nrf_uarte_subscribe_common(p_reg, task);
  [all …]

nrf_gpiote.c
  in nrf_gpiote_task_trigger():
    52:  void nrf_gpiote_task_trigger(NRF_GPIOTE_Type * p_reg, nrf_gpiote_task_t task)
    54:  uint32_t *reg = (uint32_t *)((uintptr_t)p_reg + task);
    69:  bs_trace_error_time_line("%s: Unknown GPIOTE tasks %i\n",task); /* LCOV_EXCL_LINE */
  in nrf_gpiote_subscribe_common():
    290:  nrf_gpiote_task_t task)
    295:  if ((task >= NRF_GPIOTE_TASK_OUT_0) && (task < NRF_GPIOTE_TASK_SET_0)) {
    296:  task_nbr = (task - NRF_GPIOTE_TASK_OUT_0)/sizeof(uint32_t);
    298:  } else if ((task >= NRF_GPIOTE_TASK_SET_0) && (task < NRF_GPIOTE_TASK_CLR_0)) {
    299:  task_nbr = (task - NRF_GPIOTE_TASK_SET_0)/sizeof(uint32_t);
    301:  } else if (task >= NRF_GPIOTE_TASK_CLR_0) {
    302:  task_nbr = (task - NRF_GPIOTE_TASK_CLR_0)/sizeof(uint32_t);
  [all …]

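nrf_gpiote.c (like the CAPTURE handling in nrf_timer.c and nrf_rtc.c, and the SEND/TRIGGER handling in nrf_ppib.c, nrf_ipc.c and nrf_egu.c) recovers a channel number from the task value by subtracting the first task of the group and dividing by the register size; this works because the task enums are byte offsets of consecutive 32-bit registers. A small standalone illustration of that arithmetic, with an invented FAKE_GPIOTE layout and channel count:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical GPIOTE-like layout: 8 TASKS_OUT[], 8 TASKS_SET[] and
     * 8 TASKS_CLR[] registers, each group contiguous. */
    typedef struct {
        volatile uint32_t TASKS_OUT[8];   /* 0x000..0x01C */
        volatile uint32_t TASKS_SET[8];   /* 0x020..0x03C */
        volatile uint32_t TASKS_CLR[8];   /* 0x040..0x05C */
    } FAKE_GPIOTE_Type;

    enum {
        TASK_OUT_0 = offsetof(FAKE_GPIOTE_Type, TASKS_OUT),
        TASK_SET_0 = offsetof(FAKE_GPIOTE_Type, TASKS_SET),
        TASK_CLR_0 = offsetof(FAKE_GPIOTE_Type, TASKS_CLR),
    };

    /* "task - first task of its group" / register size == channel number. */
    static void classify(uint32_t task)
    {
        int ch;
        if (task >= TASK_CLR_0) {
            ch = (int)((task - TASK_CLR_0) / sizeof(uint32_t));
            printf("CLR task, channel %d\n", ch);
        } else if (task >= TASK_SET_0) {
            ch = (int)((task - TASK_SET_0) / sizeof(uint32_t));
            printf("SET task, channel %d\n", ch);
        } else {
            ch = (int)((task - TASK_OUT_0) / sizeof(uint32_t));
            printf("OUT task, channel %d\n", ch);
        }
    }

    int main(void)
    {
        classify((uint32_t)(TASK_SET_0 + 3 * sizeof(uint32_t)));  /* SET task, channel 3 */
        classify((uint32_t)(TASK_OUT_0 + 5 * sizeof(uint32_t)));  /* OUT task, channel 5 */
        return 0;
    }
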
nrf_grtc.c
  in nrf_grtc_subscribe_set():
    80:  nrf_grtc_task_t task,
    84:  NRFX_ASSERT((task != NRF_GRTC_TASK_START) &&
    85:  (task != NRF_GRTC_TASK_CLEAR) &&
    86:  (task != NRF_GRTC_TASK_STOP));
    89:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    92:  int task_nbr = (task - NRF_GRTC_TASK_CAPTURE_0)/sizeof(uint32_t);
  in nrf_grtc_subscribe_clear():
    98:  nrf_grtc_task_t task)
    101:  NRFX_ASSERT((task != NRF_GRTC_TASK_START) &&
    102:  (task != NRF_GRTC_TASK_CLEAR) &&
    103:  (task != NRF_GRTC_TASK_STOP));
  [all …]

nrf_ipc.c
  in nrf_ipc_subscribe_set():
    52:  nrf_ipc_task_t task,
    56:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    60:  int task_nbr = (task - NRF_IPC_TASK_SEND_0)/sizeof(uint32_t);
  in nrf_ipc_subscribe_clear():
    66:  nrf_ipc_task_t task)
    69:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    72:  int task_nbr = (task - NRF_IPC_TASK_SEND_0)/sizeof(uint32_t);

nrf_egu.c
  in nrf_egu_subscribe_set():
    55:  nrf_egu_task_t task,
    59:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    63:  int task_nbr = (task - NRF_EGU_TASK_TRIGGER0)/sizeof(uint32_t);
  in nrf_egu_subscribe_clear():
    69:  nrf_egu_task_t task)
    72:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    75:  int task_nbr = (task - NRF_EGU_TASK_TRIGGER0)/sizeof(uint32_t);

nrf_dppi.c
  in nrf_dppi_subscribe_common():
    62:  nrf_dppi_task_t task)
    65:  uint task_off = task - NRF_DPPI_TASK_CHG0_EN;
  in nrf_dppi_subscribe_set():
    76:  nrf_dppi_task_t task,
    80:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) =
    83:  nrf_dppi_subscribe_common(p_reg, task);
  in nrf_dppi_subscribe_clear():
    86:  void nrf_dppi_subscribe_clear(NRF_DPPIC_Type * p_reg, nrf_dppi_task_t task)
    88:  *((volatile uint32_t *) ((uint8_t *) p_reg + (uint32_t) task + 0x80uL)) = 0;
    90:  nrf_dppi_subscribe_common(p_reg, task);

nrf_hack.c
  in nrf_hack_get_task_from_ptr():
    115:  int *task) {
    121:  *task = (intptr_t)task_reg - (intptr_t)*p_reg; \
    130:  *task = (intptr_t)task_reg - (intptr_t)*p_reg; \
  other uses:
    340:  int task;
    347:  nrf_hack_get_task_from_ptr((void *)task_reg_pr, &p_reg, &set_f, &clear_f, &task_trigger_f, &task);
    348:  if (nrf_hack_is_task(task)) {
    349:  set_f(p_reg, task, channel);
    367:  int task;
    374:  nrf_hack_get_task_from_ptr((void *)task_reg_pr, &p_reg, &set_f, &clear_f, &trigger_f, &task);
    375:  if (nrf_hack_is_task(task)) {
  [all …]

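nrf_hack.c goes in the opposite direction: given a pointer to a TASKS_* register (for example one stored in a PPI/DPPI configuration), it recovers the task value by subtracting the owning instance's base address, which works because nrfx task enums are register byte offsets. A toy version of that pointer-to-offset step, with an invented FAKE_Type layout and function names:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical peripheral register block; RAM stands in for the device. */
    typedef struct {
        volatile uint32_t TASKS_START;       /* offset 0x00 */
        volatile uint32_t TASKS_STOP;        /* offset 0x04 */
        volatile uint32_t TASKS_CAPTURE[4];  /* offsets 0x08..0x14 */
    } FAKE_Type;

    static FAKE_Type fake_inst;

    /* Recover the task "enum" value from a pointer into the register block,
     * by subtracting the instance base address. */
    static int task_from_ptr(const volatile void *task_reg, const FAKE_Type *base)
    {
        return (int)((intptr_t)task_reg - (intptr_t)base);
    }

    int main(void)
    {
        int task = task_from_ptr(&fake_inst.TASKS_CAPTURE[2], &fake_inst);
        printf("task offset 0x%02x, capture channel %d\n",
               task,
               (int)((task - offsetof(FAKE_Type, TASKS_CAPTURE)) / sizeof(uint32_t)));
        return 0;
    }
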
/nrf_hw_models-latest/src/HW_models/

NHW_templates.h
    22:  #define NHW_SIDEEFFECTS_TASKS_si(peri, task) \
    23:    void nhw_##peri##_regw_sideeffects_TASKS_##task(void) { \
    24:      if ( NRF_##peri##_regs.TASKS_##task ) { \
    25:        NRF_##peri##_regs.TASKS_##task = 0; \
    26:        nhw_##peri##_TASK_##task(); \
    30:  #define NHW_SIDEEFFECTS_TASKS(peri, peri_regs, task) \
    31:    void nhw_##peri##_regw_sideeffects_TASKS_##task(unsigned int inst) { \
    32:      if ( peri_regs TASKS_##task ) { \
    33:        peri_regs TASKS_##task = 0; \
    34:        nhw_##peri##_TASK_##task(inst); \
  [all …]

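NHW_templates.h generates the per-task register-write side-effect functions with token pasting: once embedded SW has written a 1 into TASKS_<task>, the generated nhw_<peri>_regw_sideeffects_TASKS_<task>() clears the register again (tasks read back as 0 on real hardware) and calls the model's task routine. A compilable sketch of how such a macro expands, using an invented FAKE peripheral and locally defined handlers rather than the repository's real ones:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical single-instance register file, named in the models' style
     * (NRF_<peri>_regs) but defined locally for this sketch. */
    struct fake_regs_t { uint32_t TASKS_START; uint32_t TASKS_STOP; };
    static struct fake_regs_t NRF_FAKE_regs;

    /* The model's task handlers that the generated functions call. */
    static void nhw_FAKE_TASK_START(void) { puts("FAKE START side effects"); }
    static void nhw_FAKE_TASK_STOP(void)  { puts("FAKE STOP side effects");  }

    /* Same shape as NHW_SIDEEFFECTS_TASKS_si(): if the task register was
     * written non-zero, clear it and run the model's task routine. */
    #define FAKE_SIDEEFFECTS_TASKS_si(peri, task)               \
      void nhw_##peri##_regw_sideeffects_TASKS_##task(void) {   \
        if (NRF_##peri##_regs.TASKS_##task) {                   \
          NRF_##peri##_regs.TASKS_##task = 0;                   \
          nhw_##peri##_TASK_##task();                           \
        }                                                       \
      }

    FAKE_SIDEEFFECTS_TASKS_si(FAKE, START)  /* nhw_FAKE_regw_sideeffects_TASKS_START() */
    FAKE_SIDEEFFECTS_TASKS_si(FAKE, STOP)   /* nhw_FAKE_regw_sideeffects_TASKS_STOP()  */

    int main(void)
    {
        NRF_FAKE_regs.TASKS_START = 1;            /* what a register write leaves behind */
        nhw_FAKE_regw_sideeffects_TASKS_START();  /* what the register-access hook calls */
        return 0;
    }
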
NHW_54L_CLOCK.c
    282:  #define NHW_CLOCK_SIDEEFFECTS_SUBSCRIBE(task) \
    283:    static void nhw_CLOCK_TASK_##task##_wrap(void *param) { \
    284:      nhw_CLOCK_TASK_##task((int) param); \
    286:    void nhw_CLOCK_regw_sideeffects_SUBSCRIBE_##task(unsigned int inst) { \
    287:      static struct nhw_subsc_mem task##_subscribed[NHW_CLKPWR_TOTAL_INST]; \
    289:        NRF_CLOCK_regs[0]->SUBSCRIBE_##task, \
    290:        &task##_subscribed[inst], \
    291:        nhw_CLOCK_TASK_##task##_wrap, \

NRF_PPI.c
  in nrf_ppi_enqueue_task():
    696:  static void nrf_ppi_enqueue_task(dest_f_t task) {
    699:  if (tasks_queue.q[i] == task){ //We ignore dups
    708:  tasks_queue.q[tasks_queue.used++] = task;

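NRF_PPI.c's nrf_ppi_enqueue_task() collects the task callbacks to fire for one PPI event and skips duplicates, so a task reached through several channels of the same event only runs once. A self-contained sketch of that kind of queue; the queue size, names and example tasks are made up for the illustration:

    #include <stdio.h>

    typedef void (*dest_f_t)(void);

    #define QUEUE_SIZE 16

    static struct {
        dest_f_t q[QUEUE_SIZE];
        int used;
    } tasks_queue;

    static void enqueue_task(dest_f_t task)
    {
        for (int i = 0; i < tasks_queue.used; i++) {
            if (tasks_queue.q[i] == task) {  /* already queued: ignore duplicates */
                return;
            }
        }
        if (tasks_queue.used < QUEUE_SIZE) {
            tasks_queue.q[tasks_queue.used++] = task;
        }
    }

    static void run_queue(void)
    {
        for (int i = 0; i < tasks_queue.used; i++) {
            tasks_queue.q[i]();
        }
        tasks_queue.used = 0;
    }

    static void task_a(void) { puts("task A"); }
    static void task_b(void) { puts("task B"); }

    int main(void)
    {
        enqueue_task(task_a);
        enqueue_task(task_b);
        enqueue_task(task_a);   /* duplicate, dropped */
        run_queue();            /* prints "task A" then "task B" */
        return 0;
    }
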
/nrf_hw_models-latest/docs/

README_HW_models.md
    60:  model tasked with continuing executing that task/process.
    72:  when needed, and call into the corresponding HW submodule "event|task runner"
    84:  When they do so, their "event|task runner" will be called right after in the