diff --git a/tt_metal/impl/dispatch/kernels/cq_dispatch.cpp b/tt_metal/impl/dispatch/kernels/cq_dispatch.cpp
index 8d171caf2d2e..af9186340eea 100644
--- a/tt_metal/impl/dispatch/kernels/cq_dispatch.cpp
+++ b/tt_metal/impl/dispatch/kernels/cq_dispatch.cpp
@@ -89,7 +89,7 @@
 constexpr uint32_t l1_cache_elements_rounded =
     ((l1_cache_elements + l1_to_local_cache_copy_chunk - 1) / l1_to_local_cache_copy_chunk) *
     l1_to_local_cache_copy_chunk;
-static uint32_t l1_cache[l1_cache_elements];
+static uint32_t l1_cache[l1_cache_elements_rounded];
 
 FORCE_INLINE volatile uint32_t *get_cq_completion_read_ptr() {
     return reinterpret_cast<volatile uint32_t *>(CQ_COMPLETION_READ_PTR);
diff --git a/tt_metal/impl/dispatch/kernels/cq_prefetch.cpp b/tt_metal/impl/dispatch/kernels/cq_prefetch.cpp
index bc337d29c6aa..02218e317e85 100644
--- a/tt_metal/impl/dispatch/kernels/cq_prefetch.cpp
+++ b/tt_metal/impl/dispatch/kernels/cq_prefetch.cpp
@@ -83,7 +83,7 @@
 constexpr uint32_t l1_cache_elements_rounded =
     ((l1_cache_elements + l1_to_local_cache_copy_chunk - 1) / l1_to_local_cache_copy_chunk) *
     l1_to_local_cache_copy_chunk;
-static uint32_t l1_cache[l1_cache_elements];
+static uint32_t l1_cache[l1_cache_elements_rounded];
 
 static struct PrefetchExecBufState {
     uint32_t page_id;
@@ -737,7 +737,7 @@ void process_relay_paged_packed_sub_cmds(uint32_t total_length) {
     // Third step - write from DB
     scratch_write_addr = scratch_db_top[db_toggle];
     uint32_t amt_to_write = amt_read;
-    uint32_t npages = write_pages_to_dispatcher
+    uint32_t npages = write_pages_to_dispatcher<1, true>
        (downstream_data_ptr, scratch_write_addr, amt_to_write);
     downstream_data_ptr = round_up_pow2(downstream_data_ptr, downstream_cb_page_size);
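
Why the sizing change matters (a minimal standalone sketch, not the kernel code): the name l1_to_local_cache_copy_chunk suggests l1_cache is filled in whole chunks of that many elements, so the backing array needs the chunk-aligned l1_cache_elements_rounded size; with the old l1_cache_elements size, the final chunk of such a copy would write past the end of the array. The constant values and the fill_cache_in_chunks helper below are illustrative assumptions; only the round-up expression is taken verbatim from the diff.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative values (assumptions); the kernels derive these from compile-time args.
constexpr uint32_t l1_cache_elements = 70;
constexpr uint32_t l1_to_local_cache_copy_chunk = 16;

// Round-up expression from the diff: smallest multiple of the chunk size
// that is >= l1_cache_elements (70 -> 80 with the values above).
constexpr uint32_t l1_cache_elements_rounded =
    ((l1_cache_elements + l1_to_local_cache_copy_chunk - 1) / l1_to_local_cache_copy_chunk) *
    l1_to_local_cache_copy_chunk;

static_assert(l1_cache_elements_rounded % l1_to_local_cache_copy_chunk == 0,
              "rounded size is chunk-aligned");
static_assert(l1_cache_elements_rounded >= l1_cache_elements,
              "rounded size covers every live element");

// Backing store sized to the rounded count, as in the patched kernels.
static uint32_t l1_cache[l1_cache_elements_rounded];

// Hypothetical chunk-wise fill standing in for the kernels' copy loop.
// Each iteration copies a full chunk, so it touches indices up to
// l1_cache_elements_rounded - 1; an array sized l1_cache_elements would
// be overrun on the last iteration.
void fill_cache_in_chunks(const uint32_t* src) {
    for (uint32_t base = 0; base < l1_cache_elements_rounded; base += l1_to_local_cache_copy_chunk) {
        std::memcpy(&l1_cache[base], &src[base], l1_to_local_cache_copy_chunk * sizeof(uint32_t));
    }
}

int main() {
    // Source padded to the rounded size so the sketch never reads out of bounds either.
    static uint32_t src[l1_cache_elements_rounded] = {};
    fill_cache_in_chunks(src);
    std::printf("elements=%u rounded=%u\n",
                static_cast<unsigned>(l1_cache_elements),
                static_cast<unsigned>(l1_cache_elements_rounded));
    return 0;
}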