diff --git a/src/audio/base_fw_intel.c b/src/audio/base_fw_intel.c index 38bb7bd7aa4c..ec3a35a9b601 100644 --- a/src/audio/base_fw_intel.c +++ b/src/audio/base_fw_intel.c @@ -109,7 +109,8 @@ static const struct device *uaol_devs[] = { DT_FOREACH_STATUS_OKAY(intel_adsp_uaol, DEV_AND_COMMA) }; -static void tlv_value_set_uaol_caps(struct sof_tlv *tuple, uint32_t type) +#if !CONFIG_SOF_OS_LINUX_COMPAT_PRIORITY +__cold static void tlv_value_set_uaol_caps(struct sof_tlv *tuple, uint32_t type) { const size_t dev_count = ARRAY_SIZE(uaol_devs); struct uaol_capabilities dev_cap; @@ -118,6 +119,8 @@ static void tlv_value_set_uaol_caps(struct sof_tlv *tuple, uint32_t type) size_t i; int ret; + assert_can_be_cold(); + memset(caps, 0, caps_size); caps->link_count = dev_count; @@ -135,12 +138,15 @@ static void tlv_value_set_uaol_caps(struct sof_tlv *tuple, uint32_t type) tlv_value_set(tuple, type, caps_size, caps); } +#endif /* CONFIG_SOF_OS_LINUX_COMPAT_PRIORITY */ -static int uaol_stream_id_to_hda_link_stream_id(int uaol_stream_id) +__cold static int uaol_stream_id_to_hda_link_stream_id(int uaol_stream_id) { size_t dev_count = ARRAY_SIZE(uaol_devs); size_t i; + assert_can_be_cold(); + for (i = 0; i < dev_count; i++) { int hda_link_stream_id = uaol_get_mapped_hda_link_stream_id(uaol_devs[i], uaol_stream_id); diff --git a/src/audio/buffers/comp_buffer.c b/src/audio/buffers/comp_buffer.c index d6562b8d821f..c93ba3df5323 100644 --- a/src/audio/buffers/comp_buffer.c +++ b/src/audio/buffers/comp_buffer.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -151,7 +152,7 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) .buffer = buffer, }; - buf_dbg(buffer, "buffer_free()"); + buf_dbg(buffer, "entry"); notifier_event(buffer, NOTIFIER_ID_BUFFER_FREE, NOTIFIER_TARGET_CORE_LOCAL, &cb_data, sizeof(cb_data)); @@ -159,15 +160,15 @@ static void comp_buffer_free(struct sof_audio_buffer *audio_buffer) /* In case some listeners didn't 
unregister from buffer's callbacks */ notifier_unregister_all(NULL, buffer); - struct k_heap *heap = buffer->audio_buffer.heap; + struct mod_alloc_ctx *alloc = buffer->audio_buffer.alloc; rfree(buffer->stream.addr); - sof_heap_free(heap, buffer); - if (heap) { - struct dp_heap_user *mod_heap_user = container_of(heap, struct dp_heap_user, heap); - - if (!--mod_heap_user->client_count) - rfree(mod_heap_user); + if (alloc && alloc->vreg) { + vregion_free(alloc->vreg, buffer); + if (!vregion_put(alloc->vreg)) + rfree(alloc); + } else { + sof_heap_free(alloc ? alloc->heap : NULL, buffer); } } @@ -198,19 +199,24 @@ static const struct audio_buffer_ops audio_buffer_ops = { .set_alignment_constants = comp_buffer_set_alignment_constants, }; -static struct comp_buffer *buffer_alloc_struct(struct k_heap *heap, +static struct comp_buffer *buffer_alloc_struct(struct mod_alloc_ctx *alloc, void *stream_addr, size_t size, uint32_t flags, bool is_shared) { struct comp_buffer *buffer; - tr_dbg(&buffer_tr, "buffer_alloc_struct()"); + tr_dbg(&buffer_tr, "entry"); /* allocate new buffer, but add coherent if shared with other cores */ if (is_shared) flags |= SOF_MEM_FLAG_COHERENT; - buffer = sof_heap_alloc(heap, flags, sizeof(*buffer), 0); + if (!alloc || !alloc->vreg) + buffer = sof_heap_alloc(alloc ? 
alloc->heap : NULL, flags, sizeof(*buffer), 0); + else if (is_shared) + buffer = vregion_alloc_coherent(alloc->vreg, VREGION_MEM_TYPE_INTERIM, sizeof(*buffer)); + else + buffer = vregion_alloc(alloc->vreg, VREGION_MEM_TYPE_INTERIM, sizeof(*buffer)); if (!buffer) { tr_err(&buffer_tr, "could not alloc structure"); return NULL; @@ -232,7 +238,7 @@ static struct comp_buffer *buffer_alloc_struct(struct k_heap *heap, audio_stream_set_underrun(&buffer->stream, !!(flags & SOF_BUF_UNDERRUN_PERMITTED)); audio_stream_set_overrun(&buffer->stream, !!(flags & SOF_BUF_OVERRUN_PERMITTED)); - buffer->audio_buffer.heap = heap; + buffer->audio_buffer.alloc = alloc; comp_buffer_reset_source_list(buffer); comp_buffer_reset_sink_list(buffer); @@ -240,13 +246,13 @@ static struct comp_buffer *buffer_alloc_struct(struct k_heap *heap, return buffer; } -struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flags, uint32_t align, - bool is_shared) +struct comp_buffer *buffer_alloc(struct mod_alloc_ctx *alloc, size_t size, uint32_t flags, + uint32_t align, bool is_shared) { struct comp_buffer *buffer; void *stream_addr; - tr_dbg(&buffer_tr, "buffer_alloc()"); + tr_dbg(&buffer_tr, "entry"); /* validate request */ if (size == 0) { @@ -261,7 +267,7 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag return NULL; } - buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); + buffer = buffer_alloc_struct(alloc, stream_addr, size, flags, is_shared); if (!buffer) { tr_err(&buffer_tr, "could not alloc buffer structure"); rfree(stream_addr); @@ -270,7 +276,7 @@ struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flag return buffer; } -struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_size, +struct comp_buffer *buffer_alloc_range(struct mod_alloc_ctx *alloc, size_t preferred_size, size_t minimum_size, uint32_t flags, uint32_t align, bool is_shared) { @@ -305,7 +311,7 @@ struct 
comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz return NULL; } - buffer = buffer_alloc_struct(heap, stream_addr, size, flags, is_shared); + buffer = buffer_alloc_struct(alloc, stream_addr, size, flags, is_shared); if (!buffer) { tr_err(&buffer_tr, "could not alloc buffer structure"); rfree(stream_addr); @@ -316,7 +322,7 @@ struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_siz void buffer_zero(struct comp_buffer *buffer) { - buf_dbg(buffer, "stream_zero()"); + buf_dbg(buffer, "entry"); CORE_CHECK_STRUCT(&buffer->audio_buffer); bzero(audio_stream_get_addr(&buffer->stream), audio_stream_get_size(&buffer->stream)); diff --git a/src/audio/buffers/ring_buffer.c b/src/audio/buffers/ring_buffer.c index 11ebd736dfe4..fe67027df8db 100644 --- a/src/audio/buffers/ring_buffer.c +++ b/src/audio/buffers/ring_buffer.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -96,9 +97,15 @@ static void ring_buffer_free(struct sof_audio_buffer *audio_buffer) struct ring_buffer *ring_buffer = container_of(audio_buffer, struct ring_buffer, audio_buffer); - - sof_heap_free(audio_buffer->heap, (__sparse_force void *)ring_buffer->_data_buffer); - sof_heap_free(audio_buffer->heap, ring_buffer); + struct mod_alloc_ctx *alloc = audio_buffer->alloc; + + if (alloc->vreg) { + vregion_free(alloc->vreg, (__sparse_force void *)ring_buffer->_data_buffer); + vregion_free(alloc->vreg, ring_buffer); + } else { + sof_heap_free(alloc->heap, (__sparse_force void *)ring_buffer->_data_buffer); + sof_heap_free(alloc->heap, ring_buffer); + } } static void ring_buffer_reset(struct sof_audio_buffer *audio_buffer) @@ -287,12 +294,19 @@ struct ring_buffer *ring_buffer_create(struct comp_dev *dev, size_t min_availabl uint32_t id) { struct ring_buffer *ring_buffer; - struct k_heap *heap = dev->mod->priv.resources.heap; + struct mod_alloc_ctx *alloc = dev->mod->priv.resources.alloc; + struct k_heap *heap = alloc->heap; + struct vregion *vreg = 
alloc->vreg; int memory_flags = (is_shared ? SOF_MEM_FLAG_COHERENT : 0) | user_get_buffer_memory_region(dev->drv); /* allocate ring_buffer structure */ - ring_buffer = sof_heap_alloc(heap, memory_flags, sizeof(*ring_buffer), 0); + if (!vreg) + ring_buffer = sof_heap_alloc(heap, memory_flags, sizeof(*ring_buffer), 0); + else if (is_shared) + ring_buffer = vregion_alloc_coherent(vreg, VREGION_MEM_TYPE_INTERIM, sizeof(*ring_buffer)); + else + ring_buffer = vregion_alloc(vreg, VREGION_MEM_TYPE_INTERIM, sizeof(*ring_buffer)); if (!ring_buffer) return NULL; @@ -307,7 +321,8 @@ struct ring_buffer *ring_buffer_create(struct comp_dev *dev, size_t min_availabl audio_buffer_init(&ring_buffer->audio_buffer, BUFFER_TYPE_RING_BUFFER, is_shared, &ring_buffer_source_ops, &ring_buffer_sink_ops, &audio_buffer_ops, NULL); - ring_buffer->audio_buffer.heap = heap; + ring_buffer->audio_buffer.alloc = alloc; + ring_buffer->audio_buffer.alloc->heap = heap; /* set obs/ibs in sink/source interfaces */ sink_set_min_free_space(audio_buffer_get_sink(&ring_buffer->audio_buffer), @@ -364,12 +379,21 @@ struct ring_buffer *ring_buffer_create(struct comp_dev *dev, size_t min_availabl /* allocate data buffer - always in cached memory alias */ ring_buffer->data_buffer_size = ALIGN_UP(ring_buffer->data_buffer_size, PLATFORM_DCACHE_ALIGN); - ring_buffer->_data_buffer = (__sparse_force __sparse_cache void *)sof_heap_alloc(heap, - user_get_buffer_memory_region(dev->drv), - ring_buffer->data_buffer_size, PLATFORM_DCACHE_ALIGN); - if (!ring_buffer->_data_buffer) + + void *data_buf; + + if (vreg) + data_buf = vregion_alloc_align(vreg, VREGION_MEM_TYPE_INTERIM, ring_buffer->data_buffer_size, + PLATFORM_DCACHE_ALIGN); + else + data_buf = sof_heap_alloc(heap, user_get_buffer_memory_region(dev->drv), + ring_buffer->data_buffer_size, PLATFORM_DCACHE_ALIGN); + + if (!data_buf) goto err; + ring_buffer->_data_buffer = (__sparse_force __sparse_cache void *)data_buf; + tr_info(&ring_buffer_tr, "Ring buffer created, 
id: %u shared: %u min_available: %u min_free_space %u, size %u", id, ring_buffer_is_shared(ring_buffer), min_available, min_free_space, ring_buffer->data_buffer_size); @@ -378,6 +402,9 @@ struct ring_buffer *ring_buffer_create(struct comp_dev *dev, size_t min_availabl return ring_buffer; err: tr_err(&ring_buffer_tr, "Ring buffer creation failure"); - sof_heap_free(heap, ring_buffer); + if (vreg) + vregion_free(vreg, ring_buffer); + else + sof_heap_free(heap, ring_buffer); return NULL; } diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index 6dda66899c26..80cba02eb8fc 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -869,7 +869,7 @@ static int dai_set_dma_config(struct dai_data *dd, struct comp_dev *dev) comp_dbg(dev, "entry"); - dma_cfg = rballoc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, + dma_cfg = rmalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT | SOF_MEM_FLAG_DMA, sizeof(struct dma_config)); if (!dma_cfg) { comp_err(dev, "dma_cfg allocation failed"); diff --git a/src/audio/module_adapter/module/generic.c b/src/audio/module_adapter/module/generic.c index 179d412b5a99..540308c5c400 100644 --- a/src/audio/module_adapter/module/generic.c +++ b/src/audio/module_adapter/module/generic.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #if CONFIG_IPC_MAJOR_4 #include @@ -82,13 +83,14 @@ int module_load_config(struct comp_dev *dev, const void *cfg, size_t size) void mod_resource_init(struct processing_module *mod) { - struct module_data *md = &mod->priv; + struct module_resources *res = &mod->priv.resources; /* Init memory list */ - list_init(&md->resources.objpool.list); - md->resources.objpool.heap = md->resources.heap; - md->resources.heap_usage = 0; - md->resources.heap_high_water_mark = 0; + list_init(&res->objpool.list); + res->objpool.heap = res->alloc->heap; + res->objpool.vreg = res->alloc->vreg; + res->heap_usage = 0; + res->heap_high_water_mark = 0; } int module_init(struct processing_module 
*mod) @@ -158,11 +160,15 @@ void mod_heap_info(struct processing_module *mod, size_t *size, uintptr_t *start { struct module_resources *res = &mod->priv.resources; - if (size) - *size = res->heap->heap.init_bytes; + if (res->alloc->vreg) { + vregion_mem_info(res->alloc->vreg, size, start); + } else if (res->alloc->heap) { + if (size) + *size = res->alloc->heap->heap.init_bytes; - if (start) - *start = (uintptr_t)container_of(res->heap, struct dp_heap_user, heap); + if (start) + *start = (uintptr_t)res->alloc->heap->heap.init_mem; + } } #endif @@ -195,7 +201,7 @@ void *mod_balloc_align(struct processing_module *mod, size_t size, size_t alignm } /* Allocate buffer memory for module */ - void *ptr = sof_heap_alloc(res->heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_LARGE_BUFFER, + void *ptr = sof_heap_alloc(res->alloc->heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_LARGE_BUFFER, size, alignment); if (!ptr) { @@ -246,7 +252,16 @@ void *z_impl_mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t } /* Allocate memory for module */ - void *ptr = sof_heap_alloc(res->heap, flags, size, alignment); + void *ptr; + + if (!res->alloc->vreg) + ptr = sof_heap_alloc(res->alloc->heap, flags, size, alignment); + else if (flags & SOF_MEM_FLAG_COHERENT) + ptr = vregion_alloc_coherent_align(res->alloc->vreg, VREGION_MEM_TYPE_INTERIM, + size, alignment); + else + ptr = vregion_alloc_align(res->alloc->vreg, VREGION_MEM_TYPE_INTERIM, + size, alignment); if (!ptr) { comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", @@ -323,7 +338,7 @@ const void *z_impl_mod_fast_get(struct processing_module *mod, const void * cons if (!container) return NULL; - ptr = fast_get(res->heap, dram_ptr, size); + ptr = fast_get(res->alloc->heap, dram_ptr, size); if (!ptr) { container_put(mod, container); return NULL; @@ -347,7 +362,10 @@ static int free_contents(struct processing_module *mod, struct module_resource * switch (container->type) { case MOD_RES_HEAP: - sof_heap_free(res->heap, 
container->ptr); + if (res->alloc->vreg) + vregion_free(res->alloc->vreg, container->ptr); + else + sof_heap_free(res->alloc->heap, container->ptr); res->heap_usage -= container->size; return 0; #if CONFIG_COMP_BLOB @@ -362,7 +380,7 @@ static int free_contents(struct processing_module *mod, struct module_resource * #else mdom = NULL; #endif - fast_put(res->heap, mdom, container->sram_ptr); + fast_put(res->alloc->heap, mdom, container->sram_ptr); return 0; #endif default: @@ -426,10 +444,13 @@ EXPORT_SYMBOL(z_impl_mod_free); const void *z_vrfy_mod_fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size) { - struct module_resources *res = &mod->priv.resources; + size_t h_size = 0; + uintptr_t h_start; K_OOPS(K_SYSCALL_MEMORY_WRITE(mod, sizeof(*mod))); - K_OOPS(K_SYSCALL_MEMORY_WRITE(res->heap, sizeof(*res->heap))); + mod_heap_info(mod, &h_size, &h_start); + if (h_size) + K_OOPS(K_SYSCALL_MEMORY_WRITE(h_start, h_size)); K_OOPS(K_SYSCALL_MEMORY_READ(dram_ptr, size)); return z_impl_mod_fast_get(mod, dram_ptr, size); @@ -440,10 +461,13 @@ const void *z_vrfy_mod_fast_get(struct processing_module *mod, const void * cons void *z_vrfy_mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t size, size_t alignment) { - struct module_resources *res = &mod->priv.resources; + size_t h_size = 0; + uintptr_t h_start; K_OOPS(K_SYSCALL_MEMORY_WRITE(mod, sizeof(*mod))); - K_OOPS(K_SYSCALL_MEMORY_WRITE(res->heap, sizeof(*res->heap))); + mod_heap_info(mod, &h_size, &h_start); + if (h_size) + K_OOPS(K_SYSCALL_MEMORY_WRITE(h_start, h_size)); return z_impl_mod_alloc_ext(mod, flags, size, alignment); } @@ -451,10 +475,13 @@ void *z_vrfy_mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t int z_vrfy_mod_free(struct processing_module *mod, const void *ptr) { - struct module_resources *res = &mod->priv.resources; + size_t h_size = 0; + uintptr_t h_start; K_OOPS(K_SYSCALL_MEMORY_WRITE(mod, sizeof(*mod))); - 
K_OOPS(K_SYSCALL_MEMORY_WRITE(res->heap, sizeof(*res->heap))); + mod_heap_info(mod, &h_size, &h_start); + if (h_size) + K_OOPS(K_SYSCALL_MEMORY_WRITE(h_start, h_size)); return z_impl_mod_free(mod, ptr); } diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c index 9218b0df33ce..c2ecc7b3c9ea 100644 --- a/src/audio/module_adapter/module_adapter.c +++ b/src/audio/module_adapter/module_adapter.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -57,39 +58,29 @@ struct comp_dev *module_adapter_new(const struct comp_driver *drv, #define PAGE_SZ HOST_PAGE_SIZE #endif -static struct dp_heap_user *module_adapter_dp_heap_new(const struct comp_ipc_config *config, - size_t *heap_size) +static struct vregion *module_adapter_dp_heap_new(const struct comp_ipc_config *config, + size_t *heap_size) { /* src-lite with 8 channels has been seen allocating 14k in one go */ /* FIXME: the size will be derived from configuration */ const size_t buf_size = 20 * 1024; - /* Keep uncached to match the default SOF heap! */ - uint8_t *mod_heap_mem = rballoc_align(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - buf_size, PAGE_SZ); - - if (!mod_heap_mem) - return NULL; - - struct dp_heap_user *mod_heap_user = (struct dp_heap_user *)mod_heap_mem; - struct k_heap *mod_heap = &mod_heap_user->heap; - const size_t heap_prefix_size = ALIGN_UP(sizeof(*mod_heap_user), 4); - void *mod_heap_buf = mod_heap_mem + heap_prefix_size; - - *heap_size = buf_size - heap_prefix_size; - k_heap_init(mod_heap, mod_heap_buf, *heap_size); -#ifdef __ZEPHYR__ - mod_heap->heap.init_mem = mod_heap_buf; - mod_heap->heap.init_bytes = *heap_size; -#endif - - return mod_heap_user; + /* + * A 1-to-1 replacement of the original heap implementation would be to + * have "lifetime size" equal to 0. But (1) this is invalid for + * vregion_create() and (2) we gradually move objects, that are simple + * to move to the lifetime buffer. 
Make it 4k for the beginning. + */ + return vregion_create(4096, buf_size - 4096); } static struct processing_module *module_adapter_mem_alloc(const struct comp_driver *drv, const struct comp_ipc_config *config) { struct k_heap *mod_heap; + struct vregion *mod_vreg; + struct processing_module *mod; + struct comp_dev *dev; /* * For DP shared modules the struct processing_module object must be * accessible from all cores. Unfortunately at this point there's no @@ -99,32 +90,43 @@ static struct processing_module *module_adapter_mem_alloc(const struct comp_driv */ uint32_t flags = config->proc_domain == COMP_PROCESSING_DOMAIN_DP ? SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT : SOF_MEM_FLAG_USER; - struct dp_heap_user *mod_heap_user; size_t heap_size; if (config->proc_domain == COMP_PROCESSING_DOMAIN_DP && IS_ENABLED(CONFIG_USERSPACE) && !IS_ENABLED(CONFIG_SOF_USERSPACE_USE_DRIVER_HEAP)) { - mod_heap_user = module_adapter_dp_heap_new(config, &heap_size); - if (!mod_heap_user) { - comp_cl_err(drv, "Failed to allocate DP module heap"); + mod_vreg = module_adapter_dp_heap_new(config, &heap_size); + if (!mod_vreg) { + comp_cl_err(drv, "Failed to allocate DP module heap / vregion"); return NULL; } - mod_heap = &mod_heap_user->heap; + mod_heap = NULL; } else { mod_heap = drv->user_heap; - mod_heap_user = NULL; heap_size = 0; + mod_vreg = NULL; } - struct processing_module *mod = sof_heap_alloc(mod_heap, flags, sizeof(*mod), 0); + if (!mod_vreg) + mod = sof_heap_alloc(mod_heap, flags, sizeof(*mod), 0); + else if (flags & SOF_MEM_FLAG_COHERENT) + mod = vregion_alloc_coherent(mod_vreg, VREGION_MEM_TYPE_LIFETIME, sizeof(*mod)); + else + mod = vregion_alloc(mod_vreg, VREGION_MEM_TYPE_LIFETIME, sizeof(*mod)); if (!mod) { comp_cl_err(drv, "failed to allocate memory for module"); goto emod; } + struct mod_alloc_ctx *alloc = rmalloc(0, sizeof(*alloc)); + + if (!alloc) + goto ealloc; + memset(mod, 0, sizeof(*mod)); - mod->priv.resources.heap = mod_heap; + alloc->heap = mod_heap; + 
alloc->vreg = mod_vreg; + mod->priv.resources.alloc = alloc; mod_resource_init(mod); /* @@ -133,11 +135,14 @@ static struct processing_module *module_adapter_mem_alloc(const struct comp_driv * then it can be cached. Effectively it can be only cached in * single-core configurations. */ - struct comp_dev *dev = sof_heap_alloc(mod_heap, SOF_MEM_FLAG_COHERENT, sizeof(*dev), 0); + if (mod_vreg) + dev = vregion_alloc_coherent(mod_vreg, VREGION_MEM_TYPE_LIFETIME, sizeof(*dev)); + else + dev = sof_heap_alloc(mod_heap, SOF_MEM_FLAG_COHERENT, sizeof(*dev), 0); if (!dev) { comp_cl_err(drv, "failed to allocate memory for comp_dev"); - goto err; + goto edev; } memset(dev, 0, sizeof(*dev)); @@ -146,23 +151,25 @@ static struct processing_module *module_adapter_mem_alloc(const struct comp_driv mod->dev = dev; dev->mod = mod; - if (mod_heap_user) - mod_heap_user->client_count++; - return mod; -err: - sof_heap_free(mod_heap, mod); +edev: + rfree(alloc); +ealloc: + if (mod_vreg) + vregion_free(mod_vreg, mod); + else + sof_heap_free(mod_heap, mod); emod: - rfree(mod_heap_user); + vregion_put(mod_vreg); return NULL; } static void module_adapter_mem_free(struct processing_module *mod) { - struct k_heap *mod_heap = mod->priv.resources.heap; - unsigned int domain = mod->dev->ipc_config.proc_domain; + struct mod_alloc_ctx *alloc = mod->priv.resources.alloc; + struct k_heap *mod_heap = alloc->heap; /* * In principle it shouldn't even be needed to free individual objects @@ -171,14 +178,17 @@ static void module_adapter_mem_free(struct processing_module *mod) #if CONFIG_IPC_MAJOR_4 sof_heap_free(mod_heap, mod->priv.cfg.input_pins); #endif - sof_heap_free(mod_heap, mod->dev); - sof_heap_free(mod_heap, mod); - if (domain == COMP_PROCESSING_DOMAIN_DP) { - struct dp_heap_user *mod_heap_user = container_of(mod_heap, struct dp_heap_user, - heap); - - if (mod_heap && !--mod_heap_user->client_count) - rfree(mod_heap_user); + if (alloc->vreg) { + struct vregion *mod_vreg = alloc->vreg; + + 
vregion_free(mod_vreg, mod->dev); + vregion_free(mod_vreg, mod); + if (!vregion_put(mod_vreg)) + rfree(alloc); + } else { + sof_heap_free(mod_heap, mod->dev); + sof_heap_free(mod_heap, mod); + rfree(alloc); } } @@ -611,8 +621,8 @@ int module_adapter_prepare(struct comp_dev *dev) if (list_is_empty(&mod->raw_data_buffers_list)) { for (i = 0; i < mod->num_of_sinks; i++) { /* allocate not shared buffer */ - struct comp_buffer *buffer = buffer_alloc(md->resources.heap, buff_size, - memory_flags, + struct comp_buffer *buffer = buffer_alloc(md->resources.alloc, + buff_size, memory_flags, PLATFORM_DCACHE_ALIGN, BUFFER_USAGE_NOT_SHARED); uint32_t flags; @@ -623,13 +633,7 @@ int module_adapter_prepare(struct comp_dev *dev) goto free; } - if (md->resources.heap && md->resources.heap != dev->drv->user_heap) { - struct dp_heap_user *dp_user = container_of(md->resources.heap, - struct dp_heap_user, - heap); - - dp_user->client_count++; - } + vregion_get(md->resources.alloc->vreg); irq_local_disable(flags); list_item_prepend(&buffer->buffers_list, &mod->raw_data_buffers_list); diff --git a/src/audio/module_adapter/module_adapter_ipc4.c b/src/audio/module_adapter/module_adapter_ipc4.c index cdf535b1661e..092f93314bac 100644 --- a/src/audio/module_adapter/module_adapter_ipc4.c +++ b/src/audio/module_adapter/module_adapter_ipc4.c @@ -147,7 +147,7 @@ int module_adapter_init_data(struct comp_dev *dev, if (cfgsz == (sizeof(*cfg) + pinsz)) { dst->nb_input_pins = n_in; dst->nb_output_pins = n_out; - dst->input_pins = sof_heap_alloc(dev->mod->priv.resources.heap, + dst->input_pins = sof_heap_alloc(dev->mod->priv.resources.alloc->heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, pinsz, 0); if (!dst->input_pins) diff --git a/src/include/sof/audio/audio_buffer.h b/src/include/sof/audio/audio_buffer.h index e627fc0494fa..2526a7696534 100644 --- a/src/include/sof/audio/audio_buffer.h +++ b/src/include/sof/audio/audio_buffer.h @@ -111,7 +111,7 @@ struct sof_audio_buffer { */ bool walking; /**< 
indicates if the buffer is being walked */ - struct k_heap *heap; + struct mod_alloc_ctx *alloc; }; #if CONFIG_PIPELINE_2_0 diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 91c09ef2e510..84ac231ac5a5 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -211,15 +211,15 @@ struct buffer_cb_free { buffer->cb_type = type; \ } while (0) -struct k_heap; +struct mod_alloc_ctx; /* pipeline buffer creation and destruction */ -struct comp_buffer *buffer_alloc(struct k_heap *heap, size_t size, uint32_t flags, uint32_t align, - bool is_shared); -struct comp_buffer *buffer_alloc_range(struct k_heap *heap, size_t preferred_size, +struct comp_buffer *buffer_alloc(struct mod_alloc_ctx *alloc, size_t size, uint32_t flags, + uint32_t align, bool is_shared); +struct comp_buffer *buffer_alloc_range(struct mod_alloc_ctx *alloc, size_t preferred_size, size_t minimum_size, uint32_t flags, uint32_t align, bool is_shared); -struct comp_buffer *buffer_new(struct k_heap *heap, const struct sof_ipc_buffer *desc, +struct comp_buffer *buffer_new(struct mod_alloc_ctx *alloc, const struct sof_ipc_buffer *desc, bool is_shared); int buffer_set_size(struct comp_buffer *buffer, uint32_t size, uint32_t alignment); diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index b6695b9cd312..c0e737a55b9b 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -578,6 +578,13 @@ struct comp_ops { uint64_t (*get_total_data_processed)(struct comp_dev *dev, uint32_t stream_no, bool input); }; +struct k_heap; +struct vregion; +struct mod_alloc_ctx { + struct k_heap *heap; + struct vregion *vreg; +}; + /** * Audio component base driver "class" * - used by all other component types. 
diff --git a/src/include/sof/audio/module_adapter/module/generic.h b/src/include/sof/audio/module_adapter/module/generic.h index 59f5f398bad5..91bdce96b1c7 100644 --- a/src/include/sof/audio/module_adapter/module/generic.h +++ b/src/include/sof/audio/module_adapter/module/generic.h @@ -132,7 +132,7 @@ struct module_resources { struct objpool_head objpool; size_t heap_usage; size_t heap_high_water_mark; - struct k_heap *heap; + struct mod_alloc_ctx *alloc; #if CONFIG_MODULE_MEMORY_API_DEBUG && defined(__ZEPHYR__) k_tid_t rsrc_mngr; #endif diff --git a/zephyr/include/sof/lib/vregion.h b/src/include/sof/lib/vregion.h similarity index 50% rename from zephyr/include/sof/lib/vregion.h rename to src/include/sof/lib/vregion.h index 135052c51280..612443f5bc48 100644 --- a/zephyr/include/sof/lib/vregion.h +++ b/src/include/sof/lib/vregion.h @@ -5,7 +5,6 @@ #ifndef __SOF_LIB_VREGION_H__ #define __SOF_LIB_VREGION_H__ -#include #include #ifdef __cplusplus @@ -14,6 +13,21 @@ extern "C" { struct vregion; +/** + * @brief Memory types for virtual region allocations. + * Used to specify the type of memory allocation within a virtual region. + * + * @note + * - interim: allocation that can be freed i.e. get/set large config, kcontrols. + * - lifetime: allocation that cannot be freed i.e. init data, pipeline data. + */ +enum vregion_mem_type { + VREGION_MEM_TYPE_INTERIM, /* interim allocation that can be freed */ + VREGION_MEM_TYPE_LIFETIME, /* lifetime allocation */ +}; + +#if CONFIG_SOF_VREGIONS + /** * @brief Create a new virtual region instance. * @@ -27,26 +41,26 @@ struct vregion; struct vregion *vregion_create(size_t lifetime_size, size_t interim_size); /** - * @brief Destroy a virtual region instance. + * @brief Increment virtual region's user count. * - * Free all associated resources and deallocate the virtual region instance. + * The creator of the virtual region is its first user, for any additional users + * increment the region's use-count. 
* - * @param[in] vr Pointer to the virtual region instance to destroy. + * @param[in] vr Pointer to the virtual region instance to release. + * @return struct vregion* Pointer to the virtual region instance. */ -void vregion_destroy(struct vregion *vr); +struct vregion *vregion_get(struct vregion *vr); /** - * @brief Memory types for virtual region allocations. - * Used to specify the type of memory allocation within a virtual region. + * @brief Decrement virtual region's user count or destroy it. * - * @note - * - interim: allocation that can be freed i.e. get/set large config, kcontrols. - * - lifetime: allocation that cannot be freed i.e. init data, pipeline data. + * Decrement virtual region's user count, when it reaches 0 free all associated + * resources. + * + * @param[in] vr Pointer to the virtual region instance to release. + * @return struct vregion* Pointer to the virtual region instance or NULL if it has been destroyed. */ -enum vregion_mem_type { - VREGION_MEM_TYPE_INTERIM, /* interim allocation that can be freed */ - VREGION_MEM_TYPE_LIFETIME, /* lifetime allocation */ -}; +struct vregion *vregion_put(struct vregion *vr); /** * @brief Allocate memory from the specified virtual region. @@ -58,6 +72,11 @@ enum vregion_mem_type { */ void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size); +/** + * @brief like vregion_alloc() but allocates coherent memory + */ +void *vregion_alloc_coherent(struct vregion *vr, enum vregion_mem_type type, size_t size); + /** * @brief Allocate aligned memory from the specified virtual region. 
* @@ -72,6 +91,12 @@ void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size) void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, size_t size, size_t alignment); +/** + * @brief like vregion_alloc_align() but allocates coherent memory + */ +void *vregion_alloc_coherent_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment); + /** * @brief Free memory allocated from the specified virtual region. * @@ -89,6 +114,73 @@ void vregion_free(struct vregion *vr, void *ptr); */ void vregion_info(struct vregion *vr); +/** + * @brief Get virtual region memory start and size. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] size Pointer to size + * @param[in] start Pointer to start + */ +void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t *start); + +#else /* CONFIG_SOF_VREGIONS */ + +#include + +struct vregion { + unsigned int use_count; +}; + +static inline struct vregion *vregion_create(size_t lifetime_size, size_t interim_size) +{ + struct vregion *vr = rmalloc(0, sizeof(*vr)); + + vr->use_count = 1; + return vr; +} +static inline struct vregion *vregion_get(struct vregion *vr) +{ + if (vr) + vr->use_count++; + return vr; +} +static inline struct vregion *vregion_put(struct vregion *vr) +{ + if (vr && !--vr->use_count) { + rfree(vr); + return NULL; + } + return vr; +} +static inline void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size) +{ + return NULL; +} +static inline void *vregion_alloc_coherent(struct vregion *vr, enum vregion_mem_type type, + size_t size) +{ + return NULL; +} +static inline void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment) +{ + return NULL; +} +static inline void *vregion_alloc_coherent_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment) +{ + return NULL; +} +static inline void vregion_free(struct vregion *vr, void *ptr) {} +static 
inline void vregion_info(struct vregion *vr) {} +static inline void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t *start) +{ + if (size) + *size = 0; +} + +#endif /* CONFIG_SOF_VREGIONS */ + #ifdef __cplusplus } #endif diff --git a/src/include/sof/objpool.h b/src/include/sof/objpool.h index 697a19cd2336..0821fec8786b 100644 --- a/src/include/sof/objpool.h +++ b/src/include/sof/objpool.h @@ -12,9 +12,11 @@ #include #include +struct vregion; struct k_heap; struct objpool_head { struct list_item list; + struct vregion *vreg; struct k_heap *heap; uint32_t flags; }; diff --git a/src/include/sof/schedule/dp_schedule.h b/src/include/sof/schedule/dp_schedule.h index 37b8f1fc3f2c..2267d676fb8a 100644 --- a/src/include/sof/schedule/dp_schedule.h +++ b/src/include/sof/schedule/dp_schedule.h @@ -119,12 +119,6 @@ union scheduler_dp_thread_ipc_param { } pipeline_state; }; -struct dp_heap_user { - struct k_heap heap; - /* So far relying on linear processing of serialized IPCs, but might need protection */ - unsigned int client_count; /* devices and buffers */ -}; - #if CONFIG_ZEPHYR_DP_SCHEDULER int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd, const union scheduler_dp_thread_ipc_param *param); diff --git a/src/ipc/ipc-helper.c b/src/ipc/ipc-helper.c index 2f685b551747..ad7b3771a16b 100644 --- a/src/ipc/ipc-helper.c +++ b/src/ipc/ipc-helper.c @@ -50,8 +50,8 @@ __cold static bool valid_ipc_buffer_desc(const struct sof_ipc_buffer *desc) } /* create a new component in the pipeline */ -__cold struct comp_buffer *buffer_new(struct k_heap *heap, const struct sof_ipc_buffer *desc, - bool is_shared) +__cold struct comp_buffer *buffer_new(struct mod_alloc_ctx *alloc, + const struct sof_ipc_buffer *desc, bool is_shared) { struct comp_buffer *buffer; uint32_t flags = desc->flags; @@ -79,7 +79,7 @@ __cold struct comp_buffer *buffer_new(struct k_heap *heap, const struct sof_ipc_ desc->caps, flags); /* allocate buffer */ - buffer = buffer_alloc(heap, 
desc->size, flags, PLATFORM_DCACHE_ALIGN, + buffer = buffer_alloc(alloc, desc->size, flags, PLATFORM_DCACHE_ALIGN, is_shared); if (buffer) { buffer->stream.runtime_stream_params.id = desc->comp.id; diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index 3404906b9771..a81a75312f13 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include #include @@ -527,8 +528,8 @@ __cold int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id) } __cold static struct comp_buffer *ipc4_create_buffer(struct comp_dev *src, bool is_shared, - uint32_t buf_size, uint32_t src_queue, - uint32_t dst_queue, struct k_heap *heap) + uint32_t buf_size, uint32_t src_queue, + uint32_t dst_queue, struct mod_alloc_ctx *alloc) { struct sof_ipc_buffer ipc_buf; @@ -539,7 +540,7 @@ __cold static struct comp_buffer *ipc4_create_buffer(struct comp_dev *src, bool ipc_buf.comp.id = IPC4_COMP_ID(src_queue, dst_queue); ipc_buf.comp.pipeline_id = src->ipc_config.pipeline_id; ipc_buf.comp.core = cpu_get_id(); - return buffer_new(heap, &ipc_buf, is_shared); + return buffer_new(alloc, &ipc_buf, is_shared); } #if CONFIG_CROSS_CORE_STREAM @@ -640,7 +641,7 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) return IPC4_INVALID_RESOURCE_ID; } - struct k_heap *dp_heap; + struct mod_alloc_ctx *alloc; #if CONFIG_ZEPHYR_DP_SCHEDULER if (source->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP && @@ -659,9 +660,9 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) else dp = NULL; - dp_heap = dp && dp->mod ? dp->mod->priv.resources.heap : NULL; + alloc = dp && dp->mod ? 
dp->mod->priv.resources.alloc : NULL; #else - dp_heap = NULL; + alloc = NULL; #endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ bool cross_core_bind = source->ipc_config.core != sink->ipc_config.core; @@ -731,18 +732,15 @@ __cold int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) buf_size = ibs * 2; buffer = ipc4_create_buffer(source, cross_core_bind, buf_size, bu->extension.r.src_queue, - bu->extension.r.dst_queue, dp_heap); + bu->extension.r.dst_queue, alloc); if (!buffer) { tr_err(&ipc_tr, "failed to allocate buffer to bind %#x to %#x", src_id, sink_id); return IPC4_OUT_OF_MEMORY; } #if CONFIG_ZEPHYR_DP_SCHEDULER - if (dp_heap) { - struct dp_heap_user *dp_user = container_of(dp_heap, struct dp_heap_user, heap); - - dp_user->client_count++; - } + if (alloc) + vregion_get(alloc->vreg); #endif /* diff --git a/src/lib/objpool.c b/src/lib/objpool.c index 3df78103e13c..6925e6f7070a 100644 --- a/src/lib/objpool.c +++ b/src/lib/objpool.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -37,8 +38,17 @@ static int objpool_add(struct objpool_head *head, unsigned int n, size_t size, u if (!head->heap) head->heap = sof_sys_heap_get(); - struct objpool *pobjpool = sof_heap_alloc(head->heap, flags, - aligned_size + sizeof(*pobjpool), 0); + struct objpool *pobjpool; + + if (!head->vreg) + pobjpool = sof_heap_alloc(head->heap, flags, + aligned_size + sizeof(*pobjpool), 0); + else if (flags & SOF_MEM_FLAG_COHERENT) + pobjpool = vregion_alloc_coherent(head->vreg, VREGION_MEM_TYPE_INTERIM, + aligned_size + sizeof(*pobjpool)); + else + pobjpool = vregion_alloc(head->vreg, VREGION_MEM_TYPE_INTERIM, + aligned_size + sizeof(*pobjpool)); if (!pobjpool) return -ENOMEM; @@ -150,8 +160,13 @@ void objpool_prune(struct objpool_head *head) struct list_item *next, *tmp; list_for_item_safe(next, tmp, &head->list) { + struct objpool *pool = container_of(next, struct objpool, list); + list_item_del(next); - sof_heap_free(head->heap, container_of(next, struct 
objpool, list)); + if (head->vreg) + vregion_free(head->vreg, pool); + else + sof_heap_free(head->heap, pool); } } diff --git a/src/schedule/zephyr_dp_schedule_application.c b/src/schedule/zephyr_dp_schedule_application.c index 35dd072040c4..099ffb7c9a5d 100644 --- a/src/schedule/zephyr_dp_schedule_application.c +++ b/src/schedule/zephyr_dp_schedule_application.c @@ -526,12 +526,12 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid, /* Module heap partition */ mod_heap_info(mod, &size, &start); pdata->mpart[SOF_DP_PART_HEAP] = (struct k_mem_partition){ - .start = start, + .start = (uintptr_t)sys_cache_uncached_ptr_get((void *)start), .size = size, .attr = K_MEM_PARTITION_P_RW_U_RW, }; pdata->mpart[SOF_DP_PART_HEAP_CACHE] = (struct k_mem_partition){ - .start = (uintptr_t)sys_cache_cached_ptr_get((void *)start), + .start = start, .size = size, .attr = K_MEM_PARTITION_P_RW_U_RW | XTENSA_MMU_CACHED_WB, }; diff --git a/zephyr/Kconfig b/zephyr/Kconfig index 852196abcb57..d198502f4f3a 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -133,6 +133,7 @@ config SOF_USERSPACE_PROXY_WORKER_STACK_SIZE config SOF_USERSPACE_APPLICATION bool default USERSPACE && !SOF_USERSPACE_PROXY + depends on SOF_VREGIONS help Not manually settable. 
This is effectively a shortcut to replace numerous checks for (CONFIG_USERSPACE && !CONFIG_SOF_USERSPACE_PROXY) diff --git a/zephyr/lib/vregion.c b/zephyr/lib/vregion.c index 7ef9b52fe97e..84af0d0645e6 100644 --- a/zephyr/lib/vregion.c +++ b/zephyr/lib/vregion.c @@ -84,6 +84,8 @@ struct vregion { uint8_t *base; /* base address of entire region */ size_t size; /* size of whole region in bytes */ unsigned int pages; /* size of whole region in pages */ + struct k_mutex lock; /* protect vregion heaps and use-count */ + unsigned int use_count; /* interim heap */ struct interim_heap interim; /* interim heap */ @@ -145,12 +147,17 @@ struct vregion *vregion_create(size_t lifetime_size, size_t interim_size) vr->lifetime.base = vr->base + interim_size; /* set alloc ptr addresses for lifetime linear partitions */ - vr->lifetime.ptr = vr->lifetime.base + sizeof(*vr); /* skip vregion struct */ - vr->lifetime.used = sizeof(*vr); + vr->lifetime.ptr = vr->lifetime.base + + ALIGN_UP(sizeof(*vr), CONFIG_DCACHE_LINE_SIZE); /* skip vregion struct */ + vr->lifetime.used = ALIGN_UP(sizeof(*vr), CONFIG_DCACHE_LINE_SIZE); /* init interim heaps */ k_heap_init(&vr->interim.heap, vr->interim.heap.heap.init_mem, interim_size); + k_mutex_init(&vr->lock); + /* The creator is the first user */ + vr->use_count = 1; + /* log the new vregion */ LOG_INF("new at base %p size %#zx pages %u struct embedded at %p", (void *)vr->base, total_size, pages, (void *)vr); @@ -160,20 +167,46 @@ struct vregion *vregion_create(size_t lifetime_size, size_t interim_size) return vr; } +struct vregion *vregion_get(struct vregion *vr) +{ + if (!vr) + return NULL; + + k_mutex_lock(&vr->lock, K_FOREVER); + vr->use_count++; + k_mutex_unlock(&vr->lock); + + return vr; +} + /** - * @brief Destroy a virtual region instance. + * @brief Decrement virtual region's user count or destroy it. * - * @param[in] vr Pointer to the virtual region instance to destroy. + * @param[in] vr Pointer to the virtual region instance to release. 
+ * @return struct vregion* Pointer to the virtual region instance or NULL if it has been destroyed. */ -void vregion_destroy(struct vregion *vr) +struct vregion *vregion_put(struct vregion *vr) { + unsigned int use_count; + if (!vr) - return; + return NULL; + + k_mutex_lock(&vr->lock, K_FOREVER); + use_count = --vr->use_count; + k_mutex_unlock(&vr->lock); + + if (use_count) + return vr; + + /* Last user: nobody else can access the instance. */ /* log the vregion being destroyed */ LOG_DBG("destroy %p size %#zx pages %u", (void *)vr->base, vr->size, vr->pages); LOG_DBG(" lifetime used %zu free count %d", vr->lifetime.used, vr->lifetime.free_count); vpage_free(vr->base); + + return NULL; } /** @@ -274,22 +307,24 @@ void vregion_free(struct vregion *vr, void *ptr) if (!vr || !ptr) return; - /* check if pointer is in interim heap */ + k_mutex_lock(&vr->lock, K_FOREVER); + + if (sys_cache_is_ptr_uncached(ptr)) + ptr = sys_cache_cached_ptr_get(ptr); + if (ptr >= (void *)vr->interim.heap.heap.init_mem && ptr < (void *)((uint8_t *)vr->interim.heap.heap.init_mem + - vr->interim.heap.heap.init_bytes)) { + vr->interim.heap.heap.init_bytes)) + /* pointer is in interim heap */ interim_free(&vr->interim, ptr); - return; - } - - /* check if pointer is in lifetime heap */ - if (ptr >= (void *)vr->lifetime.base && - ptr < (void *)(vr->lifetime.base + vr->lifetime.size)) { + else if (ptr >= (void *)vr->lifetime.base && + ptr < (void *)(vr->lifetime.base + vr->lifetime.size)) + /* pointer is in lifetime heap */ lifetime_free(&vr->lifetime, ptr); - return; - } + else + LOG_ERR("error: vregion free invalid pointer %p", ptr); - LOG_ERR("error: vregion free invalid pointer %p", ptr); + k_mutex_unlock(&vr->lock); } EXPORT_SYMBOL(vregion_free); @@ -306,21 +341,31 @@ EXPORT_SYMBOL(vregion_free); void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, size_t size, size_t alignment) { + void *p; + if (!vr || !size) return NULL; - if (!alignment) - alignment = 4; /* default 
align 4 bytes */ + if (alignment < PLATFORM_DCACHE_ALIGN) + alignment = PLATFORM_DCACHE_ALIGN; + + k_mutex_lock(&vr->lock, K_FOREVER); switch (type) { case VREGION_MEM_TYPE_INTERIM: - return interim_alloc(&vr->interim, size, alignment); + p = interim_alloc(&vr->interim, size, alignment); + break; case VREGION_MEM_TYPE_LIFETIME: - return lifetime_alloc(&vr->lifetime, size, alignment); + p = lifetime_alloc(&vr->lifetime, size, alignment); + break; default: LOG_ERR("error: invalid memory type %d", type); - return NULL; + p = NULL; } + + k_mutex_unlock(&vr->lock); + + return p; } EXPORT_SYMBOL(vregion_alloc_align); @@ -337,6 +382,37 @@ void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size) } EXPORT_SYMBOL(vregion_alloc); +void *vregion_alloc_coherent(struct vregion *vr, enum vregion_mem_type type, size_t size) +{ + size = ALIGN_UP(size, CONFIG_DCACHE_LINE_SIZE); + + void *p = vregion_alloc_align(vr, type, size, CONFIG_DCACHE_LINE_SIZE); + + if (!p) + return NULL; + + sys_cache_data_invd_range(p, size); + + return sys_cache_uncached_ptr_get(p); +} + +void *vregion_alloc_coherent_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment) +{ + if (alignment < CONFIG_DCACHE_LINE_SIZE) + alignment = CONFIG_DCACHE_LINE_SIZE; + size = ALIGN_UP(size, CONFIG_DCACHE_LINE_SIZE); + + void *p = vregion_alloc_align(vr, type, size, alignment); + + if (!p) + return NULL; + + sys_cache_data_invd_range(p, size); + + return sys_cache_uncached_ptr_get(p); +} + /** * @brief Log virtual region memory usage. 
* @@ -353,3 +429,12 @@ void vregion_info(struct vregion *vr) vr->lifetime.used, vr->lifetime.free_count); } EXPORT_SYMBOL(vregion_info); + +void vregion_mem_info(struct vregion *vr, size_t *size, uintptr_t *start) +{ + if (size) + *size = vr->size; + + if (start) + *start = (uintptr_t)vr->base; +} diff --git a/zephyr/test/vregion.c b/zephyr/test/vregion.c index 65586d690494..eb59f68a14d7 100644 --- a/zephyr/test/vregion.c +++ b/zephyr/test/vregion.c @@ -31,7 +31,7 @@ static void test_vreg_alloc_lifet(struct vregion *vreg) zassert_not_null(ptr); - void *ptr_align = vregion_alloc_align(vreg, VREGION_MEM_TYPE_LIFETIME, 2000, 16); + void *ptr_align = vregion_alloc_align(vreg, VREGION_MEM_TYPE_LIFETIME, 1600, 16); zassert_not_null(ptr_align); zassert_equal((uintptr_t)ptr_align & 15, 0); @@ -76,7 +76,7 @@ static void test_vreg_alloc_tmp(struct vregion *vreg) static void test_vreg_destroy(struct vregion *vreg) { vregion_info(vreg); - vregion_destroy(vreg); + vregion_put(vreg); } ZTEST(sof_boot, vregion)