Lines Matching refs:mstm

Each hit below shows the source line number, the matching code, the enclosing function, and, for definitions, the symbol's role (member, local, argument).

661 struct nv50_mstm *mstm; member
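
The member hit at 661 is the back-pointer each MST connector keeps to its per-SOR topology state. Assembling the fields that the references below actually touch gives roughly the following shape; this is a sketch reconstructed from the listing, not the driver's verbatim definitions, and the msto array size and any unlisted fields are assumptions.

struct nv50_mstm {
	struct nouveau_encoder *outp;		/* owning SOR encoder */
	struct drm_dp_mst_topology_mgr mgr;	/* DRM MST topology manager */
	struct nv50_msto *msto[4];		/* per-head stream encoders; count assumed */
	int links;				/* active MST streams on this SOR */
	bool modified;				/* needs prepare/cleanup this commit */
	bool disabled;				/* last stream gone; release the SOR */
};

struct nv50_mstc {
	struct nv50_mstm *mstm;			/* the member reference at line 661 */
	struct drm_dp_mst_port *port;		/* MST port backing this connector */
	struct drm_connector connector;
};
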
682 struct nv50_mstm *mstm = mstc->mstm; in nv50_msto_payload() local
685 WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock)); in nv50_msto_payload()
688 for (i = 0; i < mstm->mgr.max_payloads; i++) { in nv50_msto_payload()
689 struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; in nv50_msto_payload()
691 mstm->outp->base.base.name, i, payload->vcpi, in nv50_msto_payload()
695 for (i = 0; i < mstm->mgr.max_payloads; i++) { in nv50_msto_payload()
696 struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; in nv50_msto_payload()
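
From the fragments at 682-696, nv50_msto_payload() walks the manager's payload table under mgr.payload_lock: one pass logs each slot allocation (elided here), a second returns the entry whose VCPI matches this connector's. A minimal sketch; the VCPI lookup through mstc->port is an assumption.

static struct drm_dp_payload *
nv50_msto_payload(struct nv50_msto *msto)
{
	struct nv50_mstc *mstc = msto->mstc;
	struct nv50_mstm *mstm = mstc->mstm;
	int vcpi = mstc->port->vcpi.vcpi, i;

	/* the payload table may only be read with the manager's lock held */
	WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock));

	for (i = 0; i < mstm->mgr.max_payloads; i++) {
		struct drm_dp_payload *payload = &mstm->mgr.payloads[i];
		if (payload->vcpi == vcpi)
			return payload;
	}
	return NULL;
}
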
709 struct nv50_mstm *mstm = mstc->mstm; in nv50_msto_cleanup() local
716 drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); in nv50_msto_cleanup()
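
The cleanup hit at 716 hands the stream's VCPI back to the topology manager once its payload is gone. A sketch of that step; the exact guard conditions are assumptions.

	/* only deallocate if the port is still there and no payload
	 * remains programmed for this stream */
	if (mstc->port && !nv50_msto_payload(msto))
		drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port);
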
728 struct nv50_mstm *mstm = mstc->mstm; in nv50_msto_prepare() local
735 .base.hasht = mstm->outp->dcb->hasht, in nv50_msto_prepare()
736 .base.hashm = (0xf0ff & mstm->outp->dcb->hashm) | in nv50_msto_prepare()
740 mutex_lock(&mstm->mgr.payload_lock); in nv50_msto_prepare()
759 mutex_unlock(&mstm->mgr.payload_lock); in nv50_msto_prepare()
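
Lines 735-759 show nv50_msto_prepare() building a method argument block that routes the stream to its SOR via the DCB hash, bracketed by mgr.payload_lock. A sketch of the bracketing; the head-selection bits and the hardware method call itself are assumptions, only hasht/hashm come from the listing.

	mutex_lock(&mstm->mgr.payload_lock);

	args.base.hasht  = mstm->outp->dcb->hasht;
	args.base.hashm  = (0xf0ff & mstm->outp->dcb->hashm) |
			   (0x0100 << msto->head->base.index);	/* head bits, assumed */
	/* ... fill start slot / slot count from nv50_msto_payload(),
	 *     then push the method to the display core ... */

	mutex_unlock(&mstm->mgr.payload_lock);
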
770 struct nv50_mstm *mstm = mstc->mstm; in nv50_msto_atomic_check() local
788 slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, in nv50_msto_atomic_check()
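
The atomic_check reference at 788 is where the driver asks the MST atomic state for time slots. A plausible shape for that call, assuming the computed PBN already lives in the head's atomic state (asyh is assumed); drm_dp_atomic_find_vcpi_slots() is the pre-5.x helper and returns a slot count or a negative error.

	slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr,
					      mstc->port, asyh->dp.pbn);
	if (slots < 0)
		return slots;
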
808 struct nv50_mstm *mstm = NULL; in nv50_msto_enable() local
818 mstm = mstc->mstm; in nv50_msto_enable()
827 r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, armh->dp.pbn, in nv50_msto_enable()
832 if (!mstm->links++) in nv50_msto_enable()
833 nv50_outp_acquire(mstm->outp); in nv50_msto_enable()
835 if (mstm->outp->link & 1) in nv50_msto_enable()
847 mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth); in nv50_msto_enable()
851 mstm->modified = true; in nv50_msto_enable()
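
nv50_msto_enable() (808-851) allocates the VCPI, takes an SOR-level link reference on the first stream, and reprograms the head. Sketch of the core; the slot-count argument and the two protocol values are assumptions, everything else follows the fragments.

	r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port,
				     armh->dp.pbn, armh->dp.tu);	/* tu assumed */
	WARN_ON(!r);

	/* first active stream acquires the physical output */
	if (!mstm->links++)
		nv50_outp_acquire(mstm->outp);

	proto = (mstm->outp->link & 1) ? 0x8 : 0x9;	/* per-sublink proto, values assumed */
	mstm->outp->update(mstm->outp, head->base.index, armh, proto, depth);
	mstm->modified = true;
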
859 struct nv50_mstm *mstm = mstc->mstm; in nv50_msto_disable() local
861 drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); in nv50_msto_disable()
863 mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0); in nv50_msto_disable()
864 mstm->modified = true; in nv50_msto_disable()
865 if (!--mstm->links) in nv50_msto_disable()
866 mstm->disabled = true; in nv50_msto_disable()
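
The disable path mirrors it, assembled almost directly from the fragments at 861-866: reset the VCPI slots, blank the head, flag the manager for cleanup, and mark the SOR for release when the last stream drops.

	drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port);

	mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
	mstm->modified = true;
	if (!--mstm->links)
		mstm->disabled = true;
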
920 return &mstc->mstm->msto[head->base.index]->encoder; in nv50_mstc_atomic_best_encoder()
928 return &mstc->mstm->msto[0]->encoder; in nv50_mstc_best_encoder()
963 struct drm_dp_mst_topology_mgr *mgr = &mstc->mstm->mgr; in nv50_mstc_atomic_check()
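
Lines 920-963 are the connector-side hooks: per-head encoder lookup (the legacy hook at 928 falls back to msto[0]) and the connector's atomic check against the MST manager. A sketch of the atomic encoder lookup, with the nv50_mstc()/nv50_head() cast helpers assumed.

static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
			      struct drm_connector_state *connector_state)
{
	struct nv50_mstc *mstc = nv50_mstc(connector);
	struct nv50_head *head = nv50_head(connector_state->crtc);

	/* each head owns a dedicated MST encoder */
	return &mstc->mstm->msto[head->base.index]->encoder;
}
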
1044 nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, in nv50_mstc_new() argument
1047 struct drm_device *dev = mstm->outp->base.base.dev; in nv50_mstc_new()
1053 mstc->mstm = mstm; in nv50_mstc_new()
1069 for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++) in nv50_mstc_new()
1070 drm_connector_attach_encoder(&mstc->connector, &mstm->msto[i]->encoder); in nv50_mstc_new()
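
nv50_mstc_new() (1044-1070) creates the MST connector and attaches it to every MSTO the manager owns. Trimmed sketch; the connector init and property boilerplate between allocation and attachment is elided and assumed.

static int
nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port,
	      const char *path, struct nv50_mstc **pmstc)
{
	struct drm_device *dev = mstm->outp->base.base.dev;
	struct nv50_mstc *mstc;
	int i;

	if (!(mstc = *pmstc = kzalloc(sizeof(*mstc), GFP_KERNEL)))
		return -ENOMEM;
	mstc->mstm = mstm;
	mstc->port = port;

	/* ... drm_connector_init(), helper funcs, path property ... */

	for (i = 0; i < ARRAY_SIZE(mstm->msto) && mstm->msto[i]; i++)
		drm_connector_attach_encoder(&mstc->connector,
					     &mstm->msto[i]->encoder);
	return 0;
}
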
1080 nv50_mstm_cleanup(struct nv50_mstm *mstm) in nv50_mstm_cleanup() argument
1082 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); in nv50_mstm_cleanup()
1086 NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name); in nv50_mstm_cleanup()
1087 ret = drm_dp_check_act_status(&mstm->mgr); in nv50_mstm_cleanup()
1089 ret = drm_dp_update_payload_part2(&mstm->mgr); in nv50_mstm_cleanup()
1091 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { in nv50_mstm_cleanup()
1095 if (mstc && mstc->mstm == mstm) in nv50_mstm_cleanup()
1100 mstm->modified = false; in nv50_mstm_cleanup()
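
After the hardware commit, nv50_mstm_cleanup() (1080-1100) waits for the allocation change trigger, sends the part-2 payload messages, then runs per-stream cleanup on every MSTO belonging to this manager. Sketch; the encoder-type filter and nv50_msto() cast are assumptions.

static void
nv50_mstm_cleanup(struct nv50_mstm *mstm)
{
	struct drm_encoder *encoder;

	drm_dp_check_act_status(&mstm->mgr);	/* wait for the sink's ACT */
	drm_dp_update_payload_part2(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_cleanup(msto);
		}
	}

	mstm->modified = false;
}
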
1104 nv50_mstm_prepare(struct nv50_mstm *mstm) in nv50_mstm_prepare() argument
1106 struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); in nv50_mstm_prepare()
1110 NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); in nv50_mstm_prepare()
1111 ret = drm_dp_update_payload_part1(&mstm->mgr); in nv50_mstm_prepare()
1113 drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { in nv50_mstm_prepare()
1117 if (mstc && mstc->mstm == mstm) in nv50_mstm_prepare()
1122 if (mstm->disabled) { in nv50_mstm_prepare()
1123 if (!mstm->links) in nv50_mstm_prepare()
1124 nv50_outp_release(mstm->outp); in nv50_mstm_prepare()
1125 mstm->disabled = false; in nv50_mstm_prepare()
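
Its counterpart runs before the commit: part-1 payload programming, per-stream prepare, and, when the last stream has just gone away, release of the SOR (1104-1125). A sketch along the same lines, with the same assumed filter.

static void
nv50_mstm_prepare(struct nv50_mstm *mstm)
{
	struct drm_encoder *encoder;

	drm_dp_update_payload_part1(&mstm->mgr);

	drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
		if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
			struct nv50_msto *msto = nv50_msto(encoder);
			struct nv50_mstc *mstc = msto->mstc;
			if (mstc && mstc->mstm == mstm)
				nv50_msto_prepare(msto);
		}
	}

	if (mstm->disabled) {
		if (!mstm->links)
			nv50_outp_release(mstm->outp);
		mstm->disabled = false;
	}
}
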
1157 struct nv50_mstm *mstm = nv50_mstm(mgr); in nv50_mstm_add_connector() local
1161 ret = nv50_mstc_new(mstm, port, path, &mstc); in nv50_mstm_add_connector()
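
The topology manager calls back into the driver when a branch device reports a new port; line 1161 shows the hook delegating to nv50_mstc_new(). Sketch, assuming nv50_mstm() is a container_of()-style helper over mgr.

static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_dp_mst_port *port, const char *path)
{
	struct nv50_mstm *mstm = nv50_mstm(mgr);
	struct nv50_mstc *mstc;

	if (nv50_mstc_new(mstm, port, path, &mstc))
		return NULL;

	return &mstc->connector;
}
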
1176 nv50_mstm_service(struct nv50_mstm *mstm) in nv50_mstm_service() argument
1178 struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL; in nv50_mstm_service()
1189 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); in nv50_mstm_service()
1193 drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled); in nv50_mstm_service()
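
nv50_mstm_service() (1176-1193) is the HPD-IRQ bottom half: read the ESI block, hand it to the topology manager, and drop out of MST mode if the sink stops answering. A sketch of the loop; the ESI read and acknowledge details are assumptions consistent with the DP helpers of this era.

static void
nv50_mstm_service(struct nv50_mstm *mstm)
{
	struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL;
	bool handled = true;
	u8 esi[8] = {};

	if (!aux)
		return;

	while (handled) {
		if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8) != 8) {
			/* sink gone; tear the topology down */
			drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
			return;
		}

		drm_dp_mst_hpd_irq(&mstm->mgr, esi, &handled);
		if (!handled)
			break;

		/* acknowledge the serviced events */
		drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
	}
}
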
1202 nv50_mstm_remove(struct nv50_mstm *mstm) in nv50_mstm_remove() argument
1204 if (mstm) in nv50_mstm_remove()
1205 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); in nv50_mstm_remove()
1209 nv50_mstm_enable(struct nv50_mstm *mstm, u8 dpcd, int state) in nv50_mstm_enable() argument
1211 struct nouveau_encoder *outp = mstm->outp; in nv50_mstm_enable()
1231 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0); in nv50_mstm_enable()
1237 ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, in nv50_mstm_enable()
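
nv50_mstm_enable() (1209-1237) flips the sink's MSTM_CTRL register over AUX: zero to leave MST mode, or the enable bits to enter it. The exact bit set written on enable is an assumption based on the standard DPCD defines.

	if (state == 0)
		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL, 0);
	else
		ret = drm_dp_dpcd_writeb(mstm->mgr.aux, DP_MSTM_CTRL,
					 DP_MST_EN |
					 DP_UP_REQ_EN |
					 DP_UPSTREAM_IS_SRC);
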
1248 nv50_mstm_detect(struct nv50_mstm *mstm, u8 dpcd[8], int allow) in nv50_mstm_detect() argument
1255 if (!mstm) in nv50_mstm_detect()
1258 mutex_lock(&mstm->mgr.lock); in nv50_mstm_detect()
1260 old_state = mstm->mgr.mst_state; in nv50_mstm_detect()
1262 aux = mstm->mgr.aux; in nv50_mstm_detect()
1283 mutex_unlock(&mstm->mgr.lock); in nv50_mstm_detect()
1287 ret = nv50_mstm_enable(mstm, dpcd[0], new_state); in nv50_mstm_detect()
1291 mutex_unlock(&mstm->mgr.lock); in nv50_mstm_detect()
1293 ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state); in nv50_mstm_detect()
1295 return nv50_mstm_enable(mstm, dpcd[0], 0); in nv50_mstm_detect()
1300 mutex_unlock(&mstm->mgr.lock); in nv50_mstm_detect()
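
nv50_mstm_detect() (1248-1300) serializes state changes under mgr.lock: it compares the desired state against mgr.mst_state, writes MSTM_CTRL first, then brings the topology manager up or down, rolling the DPCD write back on failure. The control flow, minus the capability probing, looks roughly like the following sketch.

	mutex_lock(&mstm->mgr.lock);
	old_state = mstm->mgr.mst_state;
	/* ... probe the sink to decide new_state; early-out if unchanged ... */

	ret = nv50_mstm_enable(mstm, dpcd[0], new_state);
	if (ret) {
		mutex_unlock(&mstm->mgr.lock);
		return ret;
	}
	mutex_unlock(&mstm->mgr.lock);

	ret = drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, new_state);
	if (ret)
		return nv50_mstm_enable(mstm, dpcd[0], 0);	/* roll back DPCD */
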
1305 nv50_mstm_fini(struct nv50_mstm *mstm) in nv50_mstm_fini() argument
1307 if (mstm && mstm->mgr.mst_state) in nv50_mstm_fini()
1308 drm_dp_mst_topology_mgr_suspend(&mstm->mgr); in nv50_mstm_fini()
1312 nv50_mstm_init(struct nv50_mstm *mstm) in nv50_mstm_init() argument
1316 if (!mstm || !mstm->mgr.mst_state) in nv50_mstm_init()
1319 ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); in nv50_mstm_init()
1321 drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); in nv50_mstm_init()
1322 drm_kms_helper_hotplug_event(mstm->mgr.dev); in nv50_mstm_init()
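
Suspend and resume (1305-1322) map directly onto the topology-manager helpers; if the topology cannot be resumed, the driver drops MST and fires a hotplug event so userspace re-probes. Sketch assembled from the fragments (the -1 return convention matches the pre-5.x drm_dp_mst_topology_mgr_resume()).

static void
nv50_mstm_fini(struct nv50_mstm *mstm)
{
	if (mstm && mstm->mgr.mst_state)
		drm_dp_mst_topology_mgr_suspend(&mstm->mgr);
}

static void
nv50_mstm_init(struct nv50_mstm *mstm)
{
	if (!mstm || !mstm->mgr.mst_state)
		return;

	if (drm_dp_mst_topology_mgr_resume(&mstm->mgr) == -1) {
		drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false);
		drm_kms_helper_hotplug_event(mstm->mgr.dev);
	}
}
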
1329 struct nv50_mstm *mstm = *pmstm; in nv50_mstm_del() local
1330 if (mstm) { in nv50_mstm_del()
1331 drm_dp_mst_topology_mgr_destroy(&mstm->mgr); in nv50_mstm_del()
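
Teardown at 1329-1331 destroys the manager before freeing the wrapper; a short sketch, with the free-and-clear step assumed.

static void
nv50_mstm_del(struct nv50_mstm **pmstm)
{
	struct nv50_mstm *mstm = *pmstm;

	if (mstm) {
		drm_dp_mst_topology_mgr_destroy(&mstm->mgr);
		kfree(mstm);
		*pmstm = NULL;
	}
}
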
1343 struct nv50_mstm *mstm; in nv50_mstm_new() local
1357 if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL))) in nv50_mstm_new()
1359 mstm->outp = outp; in nv50_mstm_new()
1360 mstm->mgr.cbs = &nv50_mstm; in nv50_mstm_new()
1362 ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max, in nv50_mstm_new()
1369 i, &mstm->msto[i]); in nv50_mstm_new()
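
Construction (1343-1369) allocates the wrapper, registers the driver's callback table (the nv50_mstm ops struct assigned at 1360), initializes the topology manager, and creates one MSTO per possible payload. Sketch; max_payloads and the nv50_msto_new() signature are assumptions, since the listing only shows the last two arguments of that call.

	if (!(mstm = *pmstm = kzalloc(sizeof(*mstm), GFP_KERNEL)))
		return -ENOMEM;
	mstm->outp = outp;
	mstm->mgr.cbs = &nv50_mstm;	/* topology-manager callback table */

	ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
					   max_payloads, conn_base_id);
	if (ret)
		return ret;

	for (i = 0; i < max_payloads; i++) {
		ret = nv50_msto_new(dev, i, &mstm->msto[i]);	/* signature assumed */
		if (ret)
			return ret;
	}
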
1542 nv50_mstm_del(&nv_encoder->dp.mstm); in nv50_sor_destroy()
1611 &nv_encoder->dp.mstm); in nv50_sor_create()
1760 struct nv50_mstm *mstm; in nv50_disp_atomic_commit_core() local
1767 mstm = nouveau_encoder(encoder)->dp.mstm; in nv50_disp_atomic_commit_core()
1768 if (mstm && mstm->modified) in nv50_disp_atomic_commit_core()
1769 nv50_mstm_prepare(mstm); in nv50_disp_atomic_commit_core()
1781 mstm = nouveau_encoder(encoder)->dp.mstm; in nv50_disp_atomic_commit_core()
1782 if (mstm && mstm->modified) in nv50_disp_atomic_commit_core()
1783 nv50_mstm_cleanup(mstm); in nv50_disp_atomic_commit_core()
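
Finally, the atomic commit core (1760-1783) brackets the hardware update with the two payload phases for every modified manager, which is exactly the ordering nv50_mstm_prepare()/nv50_mstm_cleanup() implement: part 1 before the modeset, ACT wait plus part 2 after. Assembled from the fragments; the encoder-type filter is an assumption (dp.mstm lives on the physical SOR encoders, not the MST ones).

	drm_for_each_encoder(encoder, dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_prepare(mstm);
		}
	}

	/* ... push the interlocked core-channel update ... */

	drm_for_each_encoder(encoder, dev) {
		if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
			mstm = nouveau_encoder(encoder)->dp.mstm;
			if (mstm && mstm->modified)
				nv50_mstm_cleanup(mstm);
		}
	}
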
2248 nv50_mstm_fini(nv_encoder->dp.mstm); in nv50_display_fini()
2266 nv50_mstm_init(nv_encoder->dp.mstm); in nv50_display_init()