Lines matching full-word occurrences of the identifier "s"
72 * @s: the AMDTP stream to initialize
80 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, in amdtp_stream_init() argument
89 s->protocol = kzalloc(protocol_size, GFP_KERNEL); in amdtp_stream_init()
90 if (!s->protocol) in amdtp_stream_init()
93 s->unit = unit; in amdtp_stream_init()
94 s->direction = dir; in amdtp_stream_init()
95 s->flags = flags; in amdtp_stream_init()
96 s->context = ERR_PTR(-1); in amdtp_stream_init()
97 mutex_init(&s->mutex); in amdtp_stream_init()
98 tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s); in amdtp_stream_init()
99 s->packet_index = 0; in amdtp_stream_init()
101 init_waitqueue_head(&s->callback_wait); in amdtp_stream_init()
102 s->callbacked = false; in amdtp_stream_init()
104 s->fmt = fmt; in amdtp_stream_init()
105 s->process_ctx_payloads = process_ctx_payloads; in amdtp_stream_init()
108 s->ctx_data.rx.syt_override = -1; in amdtp_stream_init()
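For context, a minimal sketch of how a unit driver of this era might call amdtp_stream_init(); the protocol struct, the callback body, and the chosen flags are hypothetical, and the FMT code 0x10 (AM824) is an assumption about the device. The signature and callback type follow the lines matched above (dir, flags, fmt, process_ctx_payloads, protocol_size).

    /* Hypothetical caller; only amdtp_stream_init() is from this file. */
    struct my_proto {
            unsigned int pcm_channels;      /* driver-private protocol state */
    };

    static unsigned int my_process_ctx_payloads(struct amdtp_stream *s,
                                                const struct pkt_desc *descs,
                                                unsigned int packets,
                                                struct snd_pcm_substream *pcm)
    {
            /* Decode or fill the data blocks; return the PCM frames handled. */
            return 0;
    }

    static int my_stream_init(struct amdtp_stream *s, struct fw_unit *unit)
    {
            return amdtp_stream_init(s, unit, AMDTP_OUT_STREAM, CIP_BLOCKING,
                                     0x10 /* assumed AM824 FMT code */,
                                     my_process_ctx_payloads,
                                     sizeof(struct my_proto));
    }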
116 * @s: the AMDTP stream to destroy
118 void amdtp_stream_destroy(struct amdtp_stream *s) in amdtp_stream_destroy() argument
121 if (s->protocol == NULL) in amdtp_stream_destroy()
124 WARN_ON(amdtp_stream_running(s)); in amdtp_stream_destroy()
125 kfree(s->protocol); in amdtp_stream_destroy()
126 mutex_destroy(&s->mutex); in amdtp_stream_destroy()
155 struct snd_interval *s = hw_param_interval(params, rule->var); in apply_constraint_to_size() local
167 t.min = roundup(s->min, step); in apply_constraint_to_size()
168 t.max = rounddown(s->max, step); in apply_constraint_to_size()
171 return snd_interval_refine(s, &t); in apply_constraint_to_size()
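The refinement above clamps an ALSA interval to multiples of a step (the largest SYT_INTERVAL among the still-allowed rates) before handing it back to snd_interval_refine(). The same rounding in isolation, as a sketch assuming unsigned arithmetic and step > 0:

    static inline unsigned int roundup_u(unsigned int x, unsigned int step)
    {
            return (x + step - 1) / step * step;
    }

    static inline unsigned int rounddown_u(unsigned int x, unsigned int step)
    {
            return x / step * step;
    }

    /* New bounds for the interval passed to snd_interval_refine(). */
    static void constrain_to_step(unsigned int *min, unsigned int *max,
                                  unsigned int step)
    {
            *min = roundup_u(*min, step);
            *max = rounddown_u(*max, step);
    }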
176 * @s: the AMDTP stream, which must be initialized.
179 int amdtp_stream_add_pcm_hw_constraints(struct amdtp_stream *s, in amdtp_stream_add_pcm_hw_constraints() argument
220 if (!(s->flags & CIP_BLOCKING)) in amdtp_stream_add_pcm_hw_constraints()
226 * depending on its sampling rate. For accurate period interrupt, it's in amdtp_stream_add_pcm_hw_constraints()
248 * @s: the AMDTP stream to configure
255 int amdtp_stream_set_parameters(struct amdtp_stream *s, unsigned int rate, in amdtp_stream_set_parameters() argument
267 s->sfc = sfc; in amdtp_stream_set_parameters()
268 s->data_block_quadlets = data_block_quadlets; in amdtp_stream_set_parameters()
269 s->syt_interval = amdtp_syt_intervals[sfc]; in amdtp_stream_set_parameters()
272 if (s->direction == AMDTP_OUT_STREAM) { in amdtp_stream_set_parameters()
273 s->ctx_data.rx.transfer_delay = in amdtp_stream_set_parameters()
276 if (s->flags & CIP_BLOCKING) { in amdtp_stream_set_parameters()
279 s->ctx_data.rx.transfer_delay += in amdtp_stream_set_parameters()
280 TICKS_PER_SECOND * s->syt_interval / rate; in amdtp_stream_set_parameters()
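A worked example of the blocking-mode delay term above, assuming TICKS_PER_SECOND is 24576000 (8000 cycles of 3072 ticks each) and a SYT_INTERVAL of 8 for 48 kHz, as defined elsewhere in this file:

    #include <stdio.h>

    #define TICKS_PER_SECOND        24576000        /* 8000 * 3072 */

    int main(void)
    {
            unsigned int rate = 48000;
            unsigned int syt_interval = 8;  /* amdtp_syt_intervals[CIP_SFC_48000] */

            /* One SYT_INTERVAL worth of frames, in 24.576 MHz ticks. */
            unsigned int extra = TICKS_PER_SECOND * syt_interval / rate;

            printf("blocking-mode extra transfer delay: %u ticks\n", extra); /* 4096 */
            return 0;
    }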
289 * amdtp_stream_get_max_payload - get the stream's packet size
290 * @s: the AMDTP stream
295 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) in amdtp_stream_get_max_payload() argument
300 if (s->flags & CIP_JUMBO_PAYLOAD) in amdtp_stream_get_max_payload()
302 if (!(s->flags & CIP_NO_HEADER)) in amdtp_stream_get_max_payload()
306 s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier; in amdtp_stream_get_max_payload()
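The computation above, reduced to plain arithmetic as a sketch; the constants mirror this file's usage (an 8-byte two-quadlet CIP header when headers are in use, and a multiplier of 5 under CIP_JUMBO_PAYLOAD):

    static unsigned int max_payload(unsigned int syt_interval,
                                    unsigned int data_block_quadlets,
                                    int jumbo, int has_cip_header)
    {
            unsigned int multiplier = jumbo ? 5 : 1;
            unsigned int header = has_cip_header ? 8 : 0;   /* 2 * sizeof(__be32) */

            return header + syt_interval * data_block_quadlets * 4 * multiplier;
    }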
312 * @s: the AMDTP stream
314 * This function should be called from the PCM device's .prepare callback.
316 void amdtp_stream_pcm_prepare(struct amdtp_stream *s) in amdtp_stream_pcm_prepare() argument
318 tasklet_kill(&s->period_tasklet); in amdtp_stream_pcm_prepare()
319 s->pcm_buffer_pointer = 0; in amdtp_stream_pcm_prepare()
320 s->pcm_period_pointer = 0; in amdtp_stream_pcm_prepare()
324 static unsigned int calculate_data_blocks(struct amdtp_stream *s, in calculate_data_blocks() argument
330 if (s->flags & CIP_BLOCKING) { in calculate_data_blocks()
335 data_blocks = s->syt_interval; in calculate_data_blocks()
338 if (!cip_sfc_is_base_44100(s->sfc)) { in calculate_data_blocks()
340 data_blocks = s->ctx_data.rx.data_block_state; in calculate_data_blocks()
342 phase = s->ctx_data.rx.data_block_state; in calculate_data_blocks()
350 * device's buffer). in calculate_data_blocks()
352 if (s->sfc == CIP_SFC_44100) in calculate_data_blocks()
358 data_blocks = 11 * (s->sfc >> 1) + (phase == 0); in calculate_data_blocks()
359 if (++phase >= (80 >> (s->sfc >> 1))) in calculate_data_blocks()
361 s->ctx_data.rx.data_block_state = phase; in calculate_data_blocks()
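To see why the phase machinery above yields the right long-run rate, here is a self-contained check for the 88.2 kHz case (CIP_SFC_88200 is 3 in this file's enum): over one full phase period the data blocks average exactly rate / 8000 per cycle.

    #include <stdio.h>

    int main(void)
    {
            unsigned int sfc = 3;                   /* CIP_SFC_88200 */
            unsigned int period = 80 >> (sfc >> 1); /* 40 cycles */
            unsigned int phase, total = 0;

            for (phase = 0; phase < period; ++phase)
                    total += 11 * (sfc >> 1) + (phase == 0);  /* 12 11 11 ... */

            /* 441 blocks / 40 cycles * 8000 cycles/s = 88200 frames/s */
            printf("%u blocks over %u cycles -> %u Hz\n",
                   total, period, total * 8000 / period);
            return 0;
    }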
368 static unsigned int calculate_syt(struct amdtp_stream *s, in calculate_syt() argument
373 if (s->ctx_data.rx.last_syt_offset < TICKS_PER_CYCLE) { in calculate_syt()
374 if (!cip_sfc_is_base_44100(s->sfc)) in calculate_syt()
375 syt_offset = s->ctx_data.rx.last_syt_offset + in calculate_syt()
376 s->ctx_data.rx.syt_offset_state; in calculate_syt()
388 phase = s->ctx_data.rx.syt_offset_state; in calculate_syt()
390 syt_offset = s->ctx_data.rx.last_syt_offset; in calculate_syt()
395 s->ctx_data.rx.syt_offset_state = phase; in calculate_syt()
398 syt_offset = s->ctx_data.rx.last_syt_offset - TICKS_PER_CYCLE; in calculate_syt()
399 s->ctx_data.rx.last_syt_offset = syt_offset; in calculate_syt()
402 syt_offset += s->ctx_data.rx.transfer_delay; in calculate_syt()
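The tail of calculate_syt() composes the 16-bit SYT field from a cycle count plus the tick offset computed above. A sketch of that composition, assuming TICKS_PER_CYCLE is 3072 and CIP_SYT_MASK is 0x0000ffff as in this file:

    #define TICKS_PER_CYCLE 3072
    #define CIP_SYT_MASK    0x0000ffffu

    static unsigned int compose_syt(unsigned int cycle, unsigned int syt_offset)
    {
            /* Low 12 bits: ticks within a cycle; above them: low cycle bits. */
            unsigned int syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;

            syt += syt_offset % TICKS_PER_CYCLE;
            return syt & CIP_SYT_MASK;
    }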
412 static void update_pcm_pointers(struct amdtp_stream *s, in update_pcm_pointers() argument
418 ptr = s->pcm_buffer_pointer + frames; in update_pcm_pointers()
421 WRITE_ONCE(s->pcm_buffer_pointer, ptr); in update_pcm_pointers()
423 s->pcm_period_pointer += frames; in update_pcm_pointers()
424 if (s->pcm_period_pointer >= pcm->runtime->period_size) { in update_pcm_pointers()
425 s->pcm_period_pointer -= pcm->runtime->period_size; in update_pcm_pointers()
426 tasklet_hi_schedule(&s->period_tasklet); in update_pcm_pointers()
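The same pointer bookkeeping in isolation, with hypothetical names: the buffer pointer wraps at buffer_size, and a period elapse is flagged (standing in for the tasklet) once period_size frames accumulate.

    struct pointers {
            unsigned int buffer_pointer, period_pointer;
            unsigned int buffer_size, period_size;
            int period_elapsed;     /* stands in for tasklet_hi_schedule() */
    };

    static void advance(struct pointers *p, unsigned int frames)
    {
            p->buffer_pointer += frames;
            if (p->buffer_pointer >= p->buffer_size)
                    p->buffer_pointer -= p->buffer_size;

            p->period_pointer += frames;
            if (p->period_pointer >= p->period_size) {
                    p->period_pointer -= p->period_size;
                    p->period_elapsed = 1;
            }
    }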
432 struct amdtp_stream *s = (void *)data; in pcm_period_tasklet() local
433 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); in pcm_period_tasklet()
439 static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params) in queue_packet() argument
443 params->interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL); in queue_packet()
444 params->tag = s->tag; in queue_packet()
447 err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer, in queue_packet()
448 s->buffer.packets[s->packet_index].offset); in queue_packet()
450 dev_err(&s->unit->device, "queueing error: %d\n", err); in queue_packet()
454 if (++s->packet_index >= QUEUE_LENGTH) in queue_packet()
455 s->packet_index = 0; in queue_packet()
460 static inline int queue_out_packet(struct amdtp_stream *s, in queue_out_packet() argument
465 return queue_packet(s, params); in queue_out_packet()
468 static inline int queue_in_packet(struct amdtp_stream *s, in queue_in_packet() argument
472 params->header_length = s->ctx_data.tx.ctx_header_size; in queue_in_packet()
473 params->payload_length = s->ctx_data.tx.max_ctx_payload_length; in queue_in_packet()
475 return queue_packet(s, params); in queue_in_packet()
478 static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2], in generate_cip_header() argument
481 cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | in generate_cip_header()
482 (s->data_block_quadlets << CIP_DBS_SHIFT) | in generate_cip_header()
483 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | in generate_cip_header()
486 ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) | in generate_cip_header()
487 ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) | in generate_cip_header()
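A standalone encoder for the two CIP quadlets assembled above. The shift and mask values are copied from this file's #defines (treat them as assumptions if reading another version), and unlike the kernel code this sketch leaves the result in host byte order; the driver applies cpu_to_be32().

    #include <stdint.h>

    #define CIP_SID_SHIFT   24
    #define CIP_SID_MASK    0x3f000000u
    #define CIP_DBS_SHIFT   16
    #define CIP_SPH_SHIFT   10
    #define CIP_SPH_MASK    0x00000400u
    #define CIP_DBC_MASK    0x000000ffu
    #define CIP_EOH         0x80000000u
    #define CIP_FMT_SHIFT   24
    #define CIP_FMT_MASK    0x3f000000u
    #define CIP_FDF_SHIFT   16
    #define CIP_FDF_MASK    0x00ff0000u
    #define CIP_SYT_MASK    0x0000ffffu

    static void encode_cip(uint32_t cip[2], uint32_t sid, uint32_t dbs,
                           uint32_t sph, uint32_t dbc, uint32_t fmt,
                           uint32_t fdf, uint32_t syt)
    {
            cip[0] = ((sid << CIP_SID_SHIFT) & CIP_SID_MASK) |
                     (dbs << CIP_DBS_SHIFT) |
                     ((sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
                     (dbc & CIP_DBC_MASK);
            cip[1] = CIP_EOH |
                     ((fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
                     ((fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
                     (syt & CIP_SYT_MASK);
    }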
491 static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, in build_it_pkt_header() argument
500 payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets; in build_it_pkt_header()
503 if (!(s->flags & CIP_NO_HEADER)) { in build_it_pkt_header()
505 generate_cip_header(s, cip_header, data_block_counter, syt); in build_it_pkt_header()
512 trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks, in build_it_pkt_header()
516 static int check_cip_header(struct amdtp_stream *s, const __be32 *buf, in check_cip_header() argument
537 (!(s->flags & CIP_HEADER_WITHOUT_EOH))) { in check_cip_header()
538 dev_info_ratelimited(&s->unit->device, in check_cip_header()
547 if (sph != s->sph || fmt != s->fmt) { in check_cip_header()
548 dev_info_ratelimited(&s->unit->device, in check_cip_header()
564 dev_err(&s->unit->device, in check_cip_header()
569 if (s->flags & CIP_WRONG_DBS) in check_cip_header()
570 data_block_quadlets = s->data_block_quadlets; in check_cip_header()
578 if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) && in check_cip_header()
582 if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) || in check_cip_header()
585 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { in check_cip_header()
590 if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0) in check_cip_header()
591 dbc_interval = s->ctx_data.tx.dbc_interval; in check_cip_header()
599 dev_err(&s->unit->device, in check_cip_header()
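The continuity rule behind the dev_err() above, stated as a sketch: a packet's DBC must equal the previous value advanced by the previous packet's data blocks (or by a fixed dbc_interval on quirky devices), modulo 256.

    #include <stdbool.h>

    static bool dbc_is_continuous(unsigned int prev_dbc, unsigned int dbc,
                                  unsigned int increment)
    {
            return dbc == ((prev_dbc + increment) & 0xff);
    }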
612 static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle, in parse_ir_ctx_header() argument
623 if (*payload_length > s->ctx_data.tx.ctx_header_size + in parse_ir_ctx_header()
624 s->ctx_data.tx.max_ctx_payload_length) { in parse_ir_ctx_header()
625 dev_err(&s->unit->device, in parse_ir_ctx_header()
627 *payload_length, s->ctx_data.tx.max_ctx_payload_length); in parse_ir_ctx_header()
631 if (!(s->flags & CIP_NO_HEADER)) { in parse_ir_ctx_header()
633 err = check_cip_header(s, cip_header, *payload_length, in parse_ir_ctx_header()
641 s->data_block_quadlets; in parse_ir_ctx_header()
648 trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks, in parse_ir_ctx_header()
673 // skip isochronous cycle, therefore it's OK to just increment the cycle by
681 static int generate_device_pkt_descs(struct amdtp_stream *s, in generate_device_pkt_descs() argument
686 unsigned int dbc = s->data_block_counter; in generate_device_pkt_descs()
692 unsigned int index = (s->packet_index + i) % QUEUE_LENGTH; in generate_device_pkt_descs()
700 err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length, in generate_device_pkt_descs()
709 desc->ctx_payload = s->buffer.packets[index].buffer; in generate_device_pkt_descs()
711 if (!(s->flags & CIP_DBC_IS_END_EVENT)) in generate_device_pkt_descs()
715 s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header); in generate_device_pkt_descs()
718 s->data_block_counter = dbc; in generate_device_pkt_descs()
723 static void generate_ideal_pkt_descs(struct amdtp_stream *s, in generate_ideal_pkt_descs() argument
728 unsigned int dbc = s->data_block_counter; in generate_ideal_pkt_descs()
733 unsigned int index = (s->packet_index + i) % QUEUE_LENGTH; in generate_ideal_pkt_descs()
736 desc->syt = calculate_syt(s, desc->cycle); in generate_ideal_pkt_descs()
737 desc->data_blocks = calculate_data_blocks(s, desc->syt); in generate_ideal_pkt_descs()
739 if (s->flags & CIP_DBC_IS_END_EVENT) in generate_ideal_pkt_descs()
744 if (!(s->flags & CIP_DBC_IS_END_EVENT)) in generate_ideal_pkt_descs()
747 desc->ctx_payload = s->buffer.packets[index].buffer; in generate_ideal_pkt_descs()
752 s->data_block_counter = dbc; in generate_ideal_pkt_descs()
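The two DBC conventions handled above, isolated into one helper as a sketch: with CIP_DBC_IS_END_EVENT the counter carried in a packet names its last data block, otherwise its first, so the increment happens before or after the value is recorded.

    static unsigned int next_dbc(unsigned int dbc, unsigned int data_blocks,
                                 int dbc_is_end_event,
                                 unsigned int *packet_dbc)
    {
            if (dbc_is_end_event)
                    dbc = (dbc + data_blocks) & 0xff;
            *packet_dbc = dbc;      /* value written into this packet */
            if (!dbc_is_end_event)
                    dbc = (dbc + data_blocks) & 0xff;
            return dbc;
    }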
755 static inline void cancel_stream(struct amdtp_stream *s) in cancel_stream() argument
757 s->packet_index = -1; in cancel_stream()
759 amdtp_stream_pcm_abort(s); in cancel_stream()
760 WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); in cancel_stream()
763 static void process_ctx_payloads(struct amdtp_stream *s, in process_ctx_payloads() argument
770 pcm = READ_ONCE(s->pcm); in process_ctx_payloads()
771 pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm); in process_ctx_payloads()
773 update_pcm_pointers(s, pcm, pcm_frames); in process_ctx_payloads()
780 struct amdtp_stream *s = private_data; in out_stream_callback() local
785 if (s->packet_index < 0) in out_stream_callback()
788 generate_ideal_pkt_descs(s, s->pkt_descs, ctx_header, packets); in out_stream_callback()
790 process_ctx_payloads(s, s->pkt_descs, packets); in out_stream_callback()
793 const struct pkt_desc *desc = s->pkt_descs + i; in out_stream_callback()
800 if (s->ctx_data.rx.syt_override < 0) in out_stream_callback()
803 syt = s->ctx_data.rx.syt_override; in out_stream_callback()
805 build_it_pkt_header(s, desc->cycle, &template.params, in out_stream_callback()
809 if (queue_out_packet(s, &template.params) < 0) { in out_stream_callback()
810 cancel_stream(s); in out_stream_callback()
815 fw_iso_context_queue_flush(s->context); in out_stream_callback()
822 struct amdtp_stream *s = private_data; in in_stream_callback() local
828 if (s->packet_index < 0) in in_stream_callback()
832 packets = header_length / s->ctx_data.tx.ctx_header_size; in in_stream_callback()
834 err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets); in in_stream_callback()
837 cancel_stream(s); in in_stream_callback()
841 process_ctx_payloads(s, s->pkt_descs, packets); in in_stream_callback()
847 if (queue_in_packet(s, &params) < 0) { in in_stream_callback()
848 cancel_stream(s); in in_stream_callback()
853 fw_iso_context_queue_flush(s->context); in in_stream_callback()
861 struct amdtp_stream *s = private_data; in amdtp_stream_first_callback() local
869 s->callbacked = true; in amdtp_stream_first_callback()
870 wake_up(&s->callback_wait); in amdtp_stream_first_callback()
872 if (s->direction == AMDTP_IN_STREAM) { in amdtp_stream_first_callback()
882 s->start_cycle = cycle; in amdtp_stream_first_callback()
884 context->callback.sc(context, tstamp, header_length, header, s); in amdtp_stream_first_callback()
889 * @s: the AMDTP stream to start
897 static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed) in amdtp_stream_start() argument
916 mutex_lock(&s->mutex); in amdtp_stream_start()
918 if (WARN_ON(amdtp_stream_running(s) || in amdtp_stream_start()
919 (s->data_block_quadlets < 1))) { in amdtp_stream_start()
924 if (s->direction == AMDTP_IN_STREAM) { in amdtp_stream_start()
925 s->data_block_counter = UINT_MAX; in amdtp_stream_start()
927 entry = &initial_state[s->sfc]; in amdtp_stream_start()
929 s->data_block_counter = 0; in amdtp_stream_start()
930 s->ctx_data.rx.data_block_state = entry->data_block; in amdtp_stream_start()
931 s->ctx_data.rx.syt_offset_state = entry->syt_offset; in amdtp_stream_start()
932 s->ctx_data.rx.last_syt_offset = TICKS_PER_CYCLE; in amdtp_stream_start()
936 if (s->direction == AMDTP_IN_STREAM) { in amdtp_stream_start()
939 if (!(s->flags & CIP_NO_HEADER)) in amdtp_stream_start()
944 max_ctx_payload_size = amdtp_stream_get_max_payload(s) - in amdtp_stream_start()
951 max_ctx_payload_size = amdtp_stream_get_max_payload(s); in amdtp_stream_start()
952 if (!(s->flags & CIP_NO_HEADER)) in amdtp_stream_start()
956 err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH, in amdtp_stream_start()
961 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, in amdtp_stream_start()
963 amdtp_stream_first_callback, s); in amdtp_stream_start()
964 if (IS_ERR(s->context)) { in amdtp_stream_start()
965 err = PTR_ERR(s->context); in amdtp_stream_start()
967 dev_err(&s->unit->device, in amdtp_stream_start()
972 amdtp_stream_update(s); in amdtp_stream_start()
974 if (s->direction == AMDTP_IN_STREAM) { in amdtp_stream_start()
975 s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size; in amdtp_stream_start()
976 s->ctx_data.tx.ctx_header_size = ctx_header_size; in amdtp_stream_start()
979 if (s->flags & CIP_NO_HEADER) in amdtp_stream_start()
980 s->tag = TAG_NO_CIP_HEADER; in amdtp_stream_start()
982 s->tag = TAG_CIP; in amdtp_stream_start()
984 s->pkt_descs = kcalloc(INTERRUPT_INTERVAL, sizeof(*s->pkt_descs), in amdtp_stream_start()
986 if (!s->pkt_descs) { in amdtp_stream_start()
991 s->packet_index = 0; in amdtp_stream_start()
994 if (s->direction == AMDTP_IN_STREAM) { in amdtp_stream_start()
995 err = queue_in_packet(s, &params); in amdtp_stream_start()
999 err = queue_out_packet(s, &params); in amdtp_stream_start()
1003 } while (s->packet_index > 0); in amdtp_stream_start()
1007 if ((s->flags & CIP_EMPTY_WITH_TAG0) || (s->flags & CIP_NO_HEADER)) in amdtp_stream_start()
1010 s->callbacked = false; in amdtp_stream_start()
1011 err = fw_iso_context_start(s->context, -1, 0, tag); in amdtp_stream_start()
1015 mutex_unlock(&s->mutex); in amdtp_stream_start()
1019 kfree(s->pkt_descs); in amdtp_stream_start()
1021 fw_iso_context_destroy(s->context); in amdtp_stream_start()
1022 s->context = ERR_PTR(-1); in amdtp_stream_start()
1024 iso_packets_buffer_destroy(&s->buffer, s->unit); in amdtp_stream_start()
1026 mutex_unlock(&s->mutex); in amdtp_stream_start()
1033 * @s: the AMDTP stream that transports the PCM data
1037 unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) in amdtp_stream_pcm_pointer() argument
1055 if (!in_interrupt() && amdtp_stream_running(s)) in amdtp_stream_pcm_pointer()
1056 fw_iso_context_flush_completions(s->context); in amdtp_stream_pcm_pointer()
1058 return READ_ONCE(s->pcm_buffer_pointer); in amdtp_stream_pcm_pointer()
1064 * @s: the AMDTP stream that transfers the PCM frames
1068 int amdtp_stream_pcm_ack(struct amdtp_stream *s) in amdtp_stream_pcm_ack() argument
1074 if (amdtp_stream_running(s)) in amdtp_stream_pcm_ack()
1075 fw_iso_context_flush_completions(s->context); in amdtp_stream_pcm_ack()
1083 * @s: the AMDTP stream
1085 void amdtp_stream_update(struct amdtp_stream *s) in amdtp_stream_update() argument
1088 WRITE_ONCE(s->source_node_id_field, in amdtp_stream_update()
1089 (fw_parent_device(s->unit)->card->node_id << CIP_SID_SHIFT) & CIP_SID_MASK); in amdtp_stream_update()
1095 * @s: the AMDTP stream to stop
1100 static void amdtp_stream_stop(struct amdtp_stream *s) in amdtp_stream_stop() argument
1102 mutex_lock(&s->mutex); in amdtp_stream_stop()
1104 if (!amdtp_stream_running(s)) { in amdtp_stream_stop()
1105 mutex_unlock(&s->mutex); in amdtp_stream_stop()
1109 tasklet_kill(&s->period_tasklet); in amdtp_stream_stop()
1110 fw_iso_context_stop(s->context); in amdtp_stream_stop()
1111 fw_iso_context_destroy(s->context); in amdtp_stream_stop()
1112 s->context = ERR_PTR(-1); in amdtp_stream_stop()
1113 iso_packets_buffer_destroy(&s->buffer, s->unit); in amdtp_stream_stop()
1114 kfree(s->pkt_descs); in amdtp_stream_stop()
1116 s->callbacked = false; in amdtp_stream_stop()
1118 mutex_unlock(&s->mutex); in amdtp_stream_stop()
1123 * @s: the AMDTP stream about to be stopped
1128 void amdtp_stream_pcm_abort(struct amdtp_stream *s) in amdtp_stream_pcm_abort() argument
1132 pcm = READ_ONCE(s->pcm); in amdtp_stream_pcm_abort()
1164 * @s: the AMDTP stream.
1168 int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s, in amdtp_domain_add_stream() argument
1174 if (s == tmp) in amdtp_domain_add_stream()
1178 list_add(&s->list, &d->streams); in amdtp_domain_add_stream()
1180 s->channel = channel; in amdtp_domain_add_stream()
1181 s->speed = speed; in amdtp_domain_add_stream()
1193 struct amdtp_stream *s; in amdtp_domain_start() local
1196 list_for_each_entry(s, &d->streams, list) { in amdtp_domain_start()
1197 err = amdtp_stream_start(s, s->channel, s->speed); in amdtp_domain_start()
1203 list_for_each_entry(s, &d->streams, list) in amdtp_domain_start()
1204 amdtp_stream_stop(s); in amdtp_domain_start()
1217 struct amdtp_stream *s, *next; in amdtp_domain_stop() local
1219 list_for_each_entry_safe(s, next, &d->streams, list) { in amdtp_domain_stop()
1220 list_del(&s->list); in amdtp_domain_stop()
1222 amdtp_stream_stop(s); in amdtp_domain_stop()
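Since amdtp_stream_start() and amdtp_stream_stop() are static here, drivers go through the domain API above. A hypothetical duplex start for this era of the code (amdtp_domain_start() taking only the domain), with error handling abbreviated:

    static int my_start_duplex(struct amdtp_domain *d,
                               struct amdtp_stream *tx, struct amdtp_stream *rx,
                               int tx_ch, int rx_ch, int speed)
    {
            int err;

            err = amdtp_domain_add_stream(d, tx, tx_ch, speed);
            if (err < 0)
                    return err;
            err = amdtp_domain_add_stream(d, rx, rx_ch, speed);
            if (err < 0)
                    return err;

            /* Starts every registered stream; stops them all on failure. */
            return amdtp_domain_start(d);
    }

Teardown is symmetric: amdtp_domain_stop() walks the list, unlinking and stopping each stream, as the matched lines above show.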