Lines matching "rpm-msg-ram" in drivers/soc/qcom/smem.c (Qualcomm shared memory driver)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. */
/*
 * Each partition contains two internal memory regions; the two regions are
 * cached and non-cached memory respectively. Each region holds a list of
 * allocation headers followed by their data.
 *
 * Items in the non-cached region are allocated from the start of the partition
 * while items in the cached region are allocated from the end. The free area
 * is hence the region between the cached and non-cached offsets. The header of
 * cached items comes after the data.
 *
 * To synchronize allocations in the shared memory heaps a remote spinlock must
 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
 * platforms.
 */
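/*
 * An illustrative layout sketch (not from the source; it only restates the
 * comment above):
 *
 *   +---------------------------+  <- start of partition
 *   | smem_partition_header     |
 *   +---------------------------+
 *   | uncached: hdr | data      |   allocated from the start
 *   | ...                       |
 *   +---------------------------+  <- offset_free_uncached
 *   |         free space        |
 *   +---------------------------+  <- offset_free_cached
 *   | ...                       |
 *   | cached: data | hdr        |   allocated from the end, header after data
 *   +---------------------------+  <- start of partition + size
 */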
/*
 * Data structures defined in the driver (kernel-doc summaries):
 *
 * struct smem_proc_comm        - proc_comm communication struct (legacy)
 * struct smem_global_entry     - entry to reference smem items on the heap
 * struct smem_header           - header found at the beginning of the primary smem region
 * struct smem_ptable_entry     - one entry in the @smem_ptable list
 * struct smem_ptable           - partition table for the private partitions
 * struct smem_partition_header - header of the partitions
 * struct smem_private_entry    - header of each item in the private partition
 * struct smem_info             - smem region info located after the table of contents
 * struct smem_region           - representation of a chunk of memory used for smem
 * struct qcom_smem             - device data for the smem device
 */
/* in phdr_to_last_uncached_entry() */
        return p + le32_to_cpu(phdr->offset_free_uncached);

/* in phdr_to_first_cached_entry() */
        return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline);

/* in phdr_to_last_cached_entry() */
        return p + le32_to_cpu(phdr->offset_free_cached);

/* in uncached_entry_next() */
        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
               le32_to_cpu(e->size);

/* in cached_entry_next() */
        return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline);

/* in uncached_entry_to_item() */
        return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);

/* in cached_entry_to_item() */
        return p - le32_to_cpu(e->size);
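/*
 * A minimal sketch (not in the driver) of how the accessors above chain
 * together to walk the uncached item list. phdr_to_first_uncached_entry() is
 * the driver's matching helper for the head of the list.
 */
static void walk_uncached_items(struct smem_partition_header *phdr)
{
        struct smem_private_entry *e = phdr_to_first_uncached_entry(phdr);
        struct smem_private_entry *end = phdr_to_last_uncached_entry(phdr);

        while (e < end) {
                /* each entry: header, optional header padding, then data */
                e = uncached_entry_next(e);
        }
}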
/* in qcom_smem_alloc_private(): scan the existing uncached entries */
                if (hdr->canary != SMEM_PRIVATE_CANARY)
                        goto bad_canary;
                if (le16_to_cpu(hdr->item) == item)
                        return -EEXIST;
        /* ... refuse to grow into the cached region */
                dev_err(smem->dev, "Out of memory\n");
                return -ENOSPC;
        /* ... write the new item's header at the free uncached offset */
        hdr->canary = SMEM_PRIVATE_CANARY;
        hdr->item = cpu_to_le16(item);
        hdr->size = cpu_to_le32(ALIGN(size, 8));
        hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
        hdr->padding_hdr = 0;
        /* publish the entry only after its header is written */
        wmb();
        le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
        /* ... */
bad_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
        return -EINVAL;
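/*
 * Worked example of the padding fields above (illustrative numbers): for a
 * requested size of 13 bytes, hdr->size becomes ALIGN(13, 8) = 16 and
 * hdr->padding_data = 16 - 13 = 3, so a reader can recover the requested
 * size as le32_to_cpu(e->size) - le16_to_cpu(e->padding_data) = 13.
 */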
/* in qcom_smem_alloc_global() */
        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (entry->allocated)
                return -EEXIST;

        size = ALIGN(size, 8);
        if (WARN_ON(size > le32_to_cpu(header->available)))
                return -ENOMEM;

        entry->offset = header->free_offset;
        entry->size = cpu_to_le32(size);
        /* keep the entry consistent before marking it allocated */
        wmb();
        entry->allocated = cpu_to_le32(1);

        le32_add_cpu(&header->free_offset, size);
        le32_add_cpu(&header->available, -size);
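/*
 * Sketch (not in the driver): how a global TOC entry resolves to an address,
 * mirroring what qcom_smem_get_global() below does for the matching region.
 */
static void *toc_entry_to_ptr(struct smem_region *region,
                              struct smem_global_entry *entry)
{
        return region->virt_base + le32_to_cpu(entry->offset);
}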
/**
 * qcom_smem_alloc() - allocate space for a smem item
 * @host:       remote processor id, or -1
 * @item:       smem item handle
 * @size:       number of bytes to be allocated
 */
/* in qcom_smem_alloc() */
        if (!__smem)
                return -EPROBE_DEFER;
        if (item < SMEM_ITEM_LAST_FIXED) {
                dev_err(__smem->dev,
                        "Rejecting allocation of static entry %d\n", item);
                return -EINVAL;
        }
        if (WARN_ON(item >= __smem->item_count))
                return -EINVAL;
        ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
                                          HWSPINLOCK_TIMEOUT, &flags);
        /* ... allocate in the host's partition, else the global partition */
        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
        }
        /* ... */
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
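/*
 * Usage sketch (hypothetical client): reserve a dynamic smem item in the
 * partition shared with remote host 1. The host id (1), item number (502)
 * and size (64) are made-up values; -EEXIST only means the item already
 * exists, which callers usually treat as success.
 */
#include <linux/errno.h>
#include <linux/soc/qcom/smem.h>

static int example_reserve_item(void)
{
        int ret = qcom_smem_alloc(1, 502, 64);

        return (ret < 0 && ret != -EEXIST) ? ret : 0;
}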
/* in qcom_smem_get_global() */
        header = smem->regions[0].virt_base;
        entry = &header->toc[item];
        if (!entry->allocated)
                return ERR_PTR(-ENXIO);

        aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;

        for (i = 0; i < smem->num_regions; i++) {
                region = &smem->regions[i];

                if (region->aux_base == aux_base || !aux_base) {
                        if (size != NULL)
                                *size = le32_to_cpu(entry->size);
                        return region->virt_base + le32_to_cpu(entry->offset);
                }
        }

        return ERR_PTR(-ENOENT);
/* in qcom_smem_get_private(): search the uncached list first */
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;
                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL)
                                *size = le32_to_cpu(e->size) -
                                        le16_to_cpu(e->padding_data);
                        return uncached_entry_to_item(e);
                }
        /* ... then the cached list */
                if (e->canary != SMEM_PRIVATE_CANARY)
                        goto invalid_canary;
                if (le16_to_cpu(e->item) == item) {
                        if (size != NULL)
                                *size = le32_to_cpu(e->size) -
                                        le16_to_cpu(e->padding_data);
                        return cached_entry_to_item(e);
                }
        /* ... */
        return ERR_PTR(-ENOENT);
invalid_canary:
        dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n",
                le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1));
        return ERR_PTR(-EINVAL);
/**
 * qcom_smem_get() - resolve the pointer and size of a smem item
 * @host:       the remote processor, or -1
 * @item:       smem item handle
 * @size:       pointer to be filled out with size of the item
 */
/* in qcom_smem_get() */
        void *ptr = ERR_PTR(-EPROBE_DEFER);

        if (!__smem)
                return ptr;
        if (WARN_ON(item >= __smem->item_count))
                return ERR_PTR(-EINVAL);

        ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
                                          HWSPINLOCK_TIMEOUT, &flags);
        /* ... look in the host partition, else the global partition */
        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
                cacheln = __smem->cacheline[host];
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
                cacheln = __smem->global_cacheline;
        }
        /* ... */
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
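/*
 * Usage sketch (hypothetical client, continuing the allocation example
 * above): resolve the item. The result must be checked with IS_ERR(); the
 * item may also have been allocated by a remote processor.
 */
#include <linux/err.h>
#include <linux/soc/qcom/smem.h>

static void *example_lookup_item(size_t *len)
{
        void *p = qcom_smem_get(1, 502, len);

        return IS_ERR(p) ? NULL : p;
}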
/**
 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
 * @host:       the remote processor identifying a partition, or -1
 */
/* in qcom_smem_get_free_space() */
        if (!__smem)
                return -EPROBE_DEFER;

        if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
                phdr = __smem->partitions[host];
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
        } else if (__smem->global_partition) {
                phdr = __smem->global_partition;
                ret = le32_to_cpu(phdr->offset_free_cached) -
                      le32_to_cpu(phdr->offset_free_uncached);
        } else {
                header = __smem->regions[0].virt_base;
                ret = le32_to_cpu(header->available);
        }
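/*
 * Usage sketch (hypothetical): check the headroom of the partition shared
 * with remote host 1 before attempting an allocation.
 */
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

static bool example_has_room(size_t need)
{
        int space = qcom_smem_get_free_space(1);

        return space >= 0 && (size_t)space >= need;
}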
/**
 * qcom_smem_virt_to_phys() - return the physical address associated
 * with an smem item pointer (previously returned by qcom_smem_get())
 */
/* in qcom_smem_virt_to_phys() */
        for (i = 0; i < __smem->num_regions; i++) {
                struct smem_region *region = &__smem->regions[i];

                if (p < region->virt_base)
                        continue;
                if (p < region->virt_base + region->size) {
                        u64 offset = p - region->virt_base;
                        return (phys_addr_t)region->aux_base + offset;
                }
        }
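/*
 * Usage sketch (hypothetical): resolve an item and derive the physical
 * address of its payload, e.g. for handing the buffer to firmware. Host and
 * item numbers continue the illustrative values used above.
 */
#include <linux/err.h>
#include <linux/types.h>
#include <linux/soc/qcom/smem.h>

static int example_item_phys(phys_addr_t *pa)
{
        size_t len;
        void *p = qcom_smem_get(1, 502, &len);

        if (IS_ERR(p))
                return PTR_ERR(p);

        *pa = qcom_smem_virt_to_phys(p);
        return 0;
}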
/* in qcom_smem_get_sbl_version() */
        header = smem->regions[0].virt_base;
        versions = header->version;
        return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
/* in qcom_smem_get_ptable() */
        ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
        if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
                return ERR_PTR(-ENOENT);

        version = le32_to_cpu(ptable->version);
        if (version != 1) {
                dev_err(smem->dev,
                        "Unsupported partition header version %d\n", version);
                return ERR_PTR(-EINVAL);
        }
/* in qcom_smem_get_item_count() */
        info = (struct smem_info *)&ptable->entry[ptable->num_entries];
        if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic)))
                return SMEM_ITEM_COUNT;

        return le16_to_cpu(info->num_items);
/* in qcom_smem_partition_header(): validate a partition against its ptable entry */
        header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);

        if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) {
                dev_err(smem->dev, "bad partition magic %4ph\n", header->magic);
                return NULL;
        }

        if (host0 != le16_to_cpu(header->host0)) {
                dev_err(smem->dev, "bad host0 (%hu != %hu)\n",
                        host0, le16_to_cpu(header->host0));
                return NULL;
        }
        if (host1 != le16_to_cpu(header->host1)) {
                dev_err(smem->dev, "bad host1 (%hu != %hu)\n",
                        host1, le16_to_cpu(header->host1));
                return NULL;
        }

        size = le32_to_cpu(header->size);
        if (size != le32_to_cpu(entry->size)) {
                dev_err(smem->dev, "bad partition size (%u != %u)\n",
                        size, le32_to_cpu(entry->size));
                return NULL;
        }

        if (le32_to_cpu(header->offset_free_uncached) > size) {
                dev_err(smem->dev, "bad partition free uncached (%u > %u)\n",
                        le32_to_cpu(header->offset_free_uncached), size);
                return NULL;
        }
/* in qcom_smem_set_global_partition() */
        if (smem->global_partition) {
                dev_err(smem->dev, "Already found the global partition\n");
                return -EINVAL;
        }
        /* find the ptable entry where both hosts are SMEM_GLOBAL_HOST */
        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;
                if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST)
                        continue;
                if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) {
                        found = true;
                        break;
                }
        }
        if (!found) {
                dev_err(smem->dev, "Missing entry for global partition\n");
                return -EINVAL;
        }
        /* ... validate the header, then install it */
        if (!header)
                return -EINVAL;
        smem->global_partition = header;
        smem->global_cacheline = le32_to_cpu(entry->cacheline);
/* in qcom_smem_enumerate_partitions() */
        for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
                entry = &ptable->entry[i];
                if (!le32_to_cpu(entry->offset))
                        continue;
                if (!le32_to_cpu(entry->size))
                        continue;

                host0 = le16_to_cpu(entry->host0);
                host1 = le16_to_cpu(entry->host1);
                /* ... remote_host is whichever of host0/host1 is not local */
                if (remote_host >= SMEM_HOST_COUNT) {
                        dev_err(smem->dev, "bad host %hu\n", remote_host);
                        return -EINVAL;
                }
                if (smem->partitions[remote_host]) {
                        dev_err(smem->dev, "duplicate host %hu\n", remote_host);
                        return -EINVAL;
                }
                /* ... validate the partition header, then record it */
                if (!header)
                        return -EINVAL;
                smem->partitions[remote_host] = header;
                smem->cacheline[remote_host] = le32_to_cpu(entry->cacheline);
        }
/* in qcom_smem_map_memory() */
        np = of_parse_phandle(dev->of_node, name, 0);
        if (!np) {
                dev_err(dev, "No %s specified\n", name);
                return -EINVAL;
        }
        /* ... resolve the region's address/size, then map it write-combined */
        smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
        if (!smem->regions[i].virt_base)
                return -ENOMEM;
        smem->regions[i].aux_base = (u32)r.start;
        smem->regions[i].size = size;
/* in qcom_smem_probe(): count and map the smem regions */
        num_regions = 1;
        if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
                num_regions++;

        smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
        if (!smem)
                return -ENOMEM;

        smem->dev = &pdev->dev;
        smem->num_regions = num_regions;

        ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
        if (ret)
                return ret;

        if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
                                        "qcom,rpm-msg-ram", 1)))
                return ret;

        header = smem->regions[0].virt_base;
        if (le32_to_cpu(header->initialized) != 1 ||
            le32_to_cpu(header->reserved)) {
                dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
                return -EINVAL;
        }
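/*
 * For reference, a sketch of the device tree node this probe path consumes
 * (property names follow the qcom,smem binding; the referenced nodes are
 * made up for illustration):
 *
 *      smem {
 *              compatible = "qcom,smem";
 *              memory-region = <&smem_region>;
 *              qcom,rpm-msg-ram = <&rpm_msg_ram>;
 *              hwlocks = <&tcsr_mutex 3>;
 *      };
 */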
        /* in qcom_smem_probe(): item count depends on switch (version >> 16) */
        case SMEM_GLOBAL_PART_VERSION:
                smem->item_count = qcom_smem_get_item_count(smem);
                break;
        case SMEM_GLOBAL_HEAP_VERSION:
                smem->item_count = SMEM_ITEM_COUNT;
                break;
        default:
                dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version);
                return -EINVAL;
        }

        ret = qcom_smem_enumerate_partitions(smem, SMEM_HOST_APPS);
        if (ret < 0 && ret != -ENOENT)
                return ret;

        hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
        if (hwlock_id < 0) {
                if (hwlock_id != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "failed to retrieve hwlock\n");
                return hwlock_id;
        }
        smem->hwlock = hwspin_lock_request_specific(hwlock_id);
        if (!smem->hwlock)
                return -ENXIO;
        /* in qcom_smem_probe(): socinfo is optional, failure is only logged */
        smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo",
                                                      PLATFORM_DEVID_NONE, NULL,
                                                      0);
        if (IS_ERR(smem->socinfo))
                dev_dbg(&pdev->dev, "failed to register socinfo device\n");
/* in qcom_smem_remove() */
        platform_device_unregister(__smem->socinfo);
        hwspin_lock_free(__smem->hwlock);
/* in the qcom_smem_driver platform_driver definition */
                .name = "qcom-smem",