From c51aab59a75201c56ac0e19ff50b0975b5d03b0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20W=C4=99grzyn?= Date: Sun, 11 Aug 2024 14:17:49 +0200 Subject: [PATCH 1/4] Support loading initialized data memory from asm tests (#725) --- test/asm/exception_mem.asm | 2 ++ test/asm/fibonacci_mem.asm | 3 +++ test/asm/init_regs.s | 8 ++++++ test/asm/interrupt.asm | 10 ++++--- test/asm/link.ld | 12 ++++++--- test/asm/wfi_int.asm | 6 ++++- test/test_core.py | 55 ++++++++++++++++++++++---------------- 7 files changed, 66 insertions(+), 30 deletions(-) diff --git a/test/asm/exception_mem.asm b/test/asm/exception_mem.asm index c3556e795..4964f135d 100644 --- a/test/asm/exception_mem.asm +++ b/test/asm/exception_mem.asm @@ -9,3 +9,5 @@ sw x2, 4(x0) sw x1, 4(x0) /* TODO: actually check the side fx */ li x2, 9 +.section .bss +.skip 0x8 diff --git a/test/asm/fibonacci_mem.asm b/test/asm/fibonacci_mem.asm index 9986aca76..0db8b2130 100644 --- a/test/asm/fibonacci_mem.asm +++ b/test/asm/fibonacci_mem.asm @@ -24,3 +24,6 @@ loop: bne x3, x4, loop infloop: j infloop + +.section .bss +.skip 0xC diff --git a/test/asm/init_regs.s b/test/asm/init_regs.s index 5c27a365b..853a40a15 100644 --- a/test/asm/init_regs.s +++ b/test/asm/init_regs.s @@ -1,3 +1,4 @@ +.macro INIT_REGS_LOAD # load the initial states of registers # the value of a register `n` is assumed to be stored under address `0x100 + n * 4`. lw x1, 0x104(x0) @@ -31,3 +32,10 @@ lw x29,0x174(x0) lw x30,0x178(x0) lw x31,0x17c(x0) +.endm + +.macro INIT_REGS_ALLOCATION +.section .init_regs, "a", @nobits +.skip 0x80 +.previous +.endm diff --git a/test/asm/interrupt.asm b/test/asm/interrupt.asm index 02388127f..a3b59b30e 100644 --- a/test/asm/interrupt.asm +++ b/test/asm/interrupt.asm @@ -1,7 +1,9 @@ - _start: - .include "init_regs.s" +.include "init_regs.s" -# fibonacci spiced with interrupt handler (also with fibonacci) +_start: + INIT_REGS_LOAD + + # fibonacci spiced with interrupt handler (also with fibonacci) li x1, 0x200 csrw mtvec, x1 li x27, 0 # handler count @@ -98,3 +100,5 @@ fail: .org 0x200 j int_handler li x31, 0xae # should never happen + +INIT_REGS_ALLOCATION diff --git a/test/asm/link.ld b/test/asm/link.ld index 9ceab42eb..c77c5fef8 100644 --- a/test/asm/link.ld +++ b/test/asm/link.ld @@ -5,7 +5,13 @@ start = 0; SECTIONS { .text : { *(.text) } - . = 0x100000000; /* start from 2**32 - trick to emulate Harvard architecture (.bss addresses will start from 0) */ - .bss : { *(.bss) } - _end = .; + . = 0x100000000; /* start from 2**32 - trick to emulate Harvard architecture (memory addresses will start from 0) */ + .data : { + *(.data) + *(.bss) + + . = _end_init_regs > . ? 
0x1000 : .; /* skip .init_regs origin allocation if not used */ + *(.init_regs) + _end_init_regs = .; + } } diff --git a/test/asm/wfi_int.asm b/test/asm/wfi_int.asm index 3f50000c4..39ceda94a 100644 --- a/test/asm/wfi_int.asm +++ b/test/asm/wfi_int.asm @@ -1,5 +1,7 @@ +.include "init_regs.s" + _start: - .include "init_regs.s" + INIT_REGS_LOAD li x1, 0x100 # set handler vector csrw mtvec, x1 @@ -26,3 +28,5 @@ skip: .org 0x100 j handler + +INIT_REGS_ALLOCATION diff --git a/test/test_core.py b/test/test_core.py index d9cf4655d..e988b1437 100644 --- a/test/test_core.py +++ b/test/test_core.py @@ -12,7 +12,6 @@ from coreblocks.params.configurations import CoreConfiguration, basic_core_config, full_core_config from coreblocks.peripherals.wishbone import WishboneMemorySlave -from typing import Optional import random import subprocess import tempfile @@ -20,13 +19,10 @@ class CoreTestElaboratable(Elaboratable): - def __init__(self, gen_params: GenParams, instr_mem: list[int] = [0], data_mem: Optional[list[int]] = None): + def __init__(self, gen_params: GenParams, instr_mem: list[int] = [0], data_mem: list[int] = []): self.gen_params = gen_params self.instr_mem = instr_mem - if data_mem is None: - self.data_mem = [0] * (2**10) - else: - self.data_mem = data_mem + self.data_mem = data_mem def elaborate(self, platform): m = Module() @@ -71,12 +67,10 @@ def get_arch_reg_val(self, reg_id): class TestCoreAsmSourceBase(TestCoreBase): base_dir: str = "test/asm/" - def prepare_source(self, filename): - bin_src = [] + def prepare_source(self, filename, *, c_extension=False): with ( tempfile.NamedTemporaryFile() as asm_tmp, tempfile.NamedTemporaryFile() as ld_tmp, - tempfile.NamedTemporaryFile() as bin_tmp, ): subprocess.check_call( [ @@ -84,7 +78,7 @@ def prepare_source(self, filename): "-mabi=ilp32", # Specified manually, because toolchains from most distributions don't support new extensioins # and this test should be accessible locally. 
- "-march=rv32im_zicsr", + f"-march=rv32im{'c' if c_extension else ''}_zicsr", "-I", self.base_dir, "-o", @@ -104,16 +98,31 @@ def prepare_source(self, filename): ld_tmp.name, ] ) - subprocess.check_call( - ["riscv64-unknown-elf-objcopy", "-O", "binary", "-j", ".text", ld_tmp.name, bin_tmp.name] - ) - code = bin_tmp.read() - for word_idx in range(0, len(code), 4): - word = code[word_idx : word_idx + 4] - bin_instr = int.from_bytes(word, "little") - bin_src.append(bin_instr) - return bin_src + def load_section(section: str): + with tempfile.NamedTemporaryFile() as bin_tmp: + bin = [] + + subprocess.check_call( + [ + "riscv64-unknown-elf-objcopy", + "-O", + "binary", + "-j", + section, + ld_tmp.name, + bin_tmp.name, + ] + ) + + data = bin_tmp.read() + for word_idx in range(0, len(data), 4): + word = data[word_idx : word_idx + 4] + bin.append(int.from_bytes(word, "little")) + + return bin + + return {"text": load_section(".text"), "data": load_section(".data")} @parameterized_class( @@ -146,7 +155,8 @@ def test_asm_source(self): self.gen_params = GenParams(self.configuration) bin_src = self.prepare_source(self.source_file) - self.m = CoreTestElaboratable(self.gen_params, instr_mem=bin_src) + self.m = CoreTestElaboratable(self.gen_params, instr_mem=bin_src["text"], data_mem=bin_src["data"]) + with self.run_simulation(self.m) as sim: sim.add_sync_process(self.run_and_check) @@ -269,10 +279,9 @@ def do_interrupt(): def test_interrupted_prog(self): bin_src = self.prepare_source(self.source_file) - data_mem = [0] * (2**10) for reg_id, val in self.start_regvals.items(): - data_mem[self.reg_init_mem_offset // 4 + reg_id] = val - self.m = CoreTestElaboratable(self.gen_params, instr_mem=bin_src, data_mem=data_mem) + bin_src["data"][self.reg_init_mem_offset // 4 + reg_id] = val + self.m = CoreTestElaboratable(self.gen_params, instr_mem=bin_src["text"], data_mem=bin_src["data"]) with self.run_simulation(self.m) as sim: sim.add_sync_process(self.run_with_interrupt_process) sim.add_sync_process(self.clear_level_interrupt_procsess) From 26cc87019cc6e7d274d412f7cab35fc6d4312bf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20W=C4=99grzyn?= Date: Sun, 11 Aug 2024 14:40:30 +0200 Subject: [PATCH 2/4] Fix `mepc` alignment (#724) --- coreblocks/priv/csr/csr_instances.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/coreblocks/priv/csr/csr_instances.py b/coreblocks/priv/csr/csr_instances.py index 1bf3c3a09..6c385f568 100644 --- a/coreblocks/priv/csr/csr_instances.py +++ b/coreblocks/priv/csr/csr_instances.py @@ -2,6 +2,7 @@ from typing import Optional from coreblocks.arch import CSRAddress +from coreblocks.arch.isa import Extension from coreblocks.params.genparams import GenParams from coreblocks.priv.csr.csr_register import CSRRegister from coreblocks.priv.csr.aliased import AliasedCSR @@ -74,8 +75,8 @@ def __init__(self, gen_params: GenParams): # set `MODE` as fixed to 0 - Direct mode "All exceptions set pc to BASE" self.mtvec = CSRRegister(CSRAddress.MTVEC, gen_params, ro_bits=0b11) - # TODO: set low bits R/O based on gp align - self.mepc = CSRRegister(CSRAddress.MEPC, gen_params) + mepc_ro_bits = 0b1 if Extension.C in gen_params.isa.extensions else 0b11 # pc alignment (SPEC) + self.mepc = CSRRegister(CSRAddress.MEPC, gen_params, ro_bits=mepc_ro_bits) def elaborate(self, platform): m = Module() From 5b96b1a064760af38c1f8522c0dbc06ffb939736 Mon Sep 17 00:00:00 2001 From: Marek Materzok Date: Wed, 25 Sep 2024 10:35:40 +0200 Subject: [PATCH 3/4] Add `def_methods` (#731) Co-authored-by: 
lekcyjna123 <34948061+lekcyjna123@users.noreply.github.com> --- test/transactron/test_methods.py | 38 +++++++++++++ transactron/core/sugar.py | 94 +++++++++++++++++++++++++++++++- 2 files changed, 130 insertions(+), 2 deletions(-) diff --git a/test/transactron/test_methods.py b/test/transactron/test_methods.py index a34b325a0..40da46091 100644 --- a/test/transactron/test_methods.py +++ b/test/transactron/test_methods.py @@ -7,6 +7,7 @@ from transactron.testing import TestCaseWithSimulator, TestbenchIO, data_layout from transactron import * +from transactron.testing.infrastructure import SimpleTestCircuit from transactron.utils import MethodStruct from transactron.lib import * @@ -126,6 +127,43 @@ def definition(foo1): self.do_test_definition(definition) +class TestDefMethods(TestCaseWithSimulator): + class CircuitTestModule(Elaboratable): + def __init__(self, method_definition): + self.methods = [ + Method( + i=[("foo", 3)], + o=[("foo", 3)], + ) + for _ in range(4) + ] + + self.method_definition = method_definition + + def elaborate(self, platform): + m = TModule() + m._MustUse__silence = True # type: ignore + + def_methods(m, self.methods)(self.method_definition) + + return m + + def test_basic_methods(self): + def definition(idx: int, foo: Value): + return {"foo": foo + idx} + + circuit = SimpleTestCircuit(TestDefMethods.CircuitTestModule(definition)) + + def test_process(): + for k, method in enumerate(circuit.methods): + val = random.randrange(0, 2**3) + ret = yield from method.call(foo=val) + assert ret["foo"] == val + k % 2**3 + + with self.run_simulation(circuit) as sim: + sim.add_sync_process(test_process) + + class AdapterCircuit(Elaboratable): def __init__(self, module, methods): self.module = module diff --git a/transactron/core/sugar.py b/transactron/core/sugar.py index 49ca6e5cd..640cddbb5 100644 --- a/transactron/core/sugar.py +++ b/transactron/core/sugar.py @@ -1,13 +1,18 @@ +from collections.abc import Sequence, Callable from amaranth import * -from typing import TYPE_CHECKING, Optional, Callable +from typing import TYPE_CHECKING, Optional, Concatenate, ParamSpec from transactron.utils import * from transactron.utils.assign import AssignArg +from functools import partial if TYPE_CHECKING: from .tmodule import TModule from .method import Method -__all__ = ["def_method"] +__all__ = ["def_method", "def_methods"] + + +P = ParamSpec("P") def def_method( @@ -88,3 +93,88 @@ def decorator(func: Callable[..., Optional[AssignArg]]): m.d.top_comb += assign(out, ret_out, fields=AssignType.ALL) return decorator + + +def def_methods( + m: "TModule", + methods: Sequence["Method"], + ready: Callable[[int], ValueLike] = lambda _: C(1), + validate_arguments: Optional[Callable[..., ValueLike]] = None, +): + """Decorator for defining similar methods + + This decorator is a wrapper over `def_method`, which allows you to easily + define multiple similar methods in a loop. + + The function over which this decorator is applied, should always expect + at least one argument, as the index of the method will be passed as the + first argument to the function. + + This is a syntax sugar equivalent to: + + .. highlight:: python + .. code-block:: python + + for i in range(len(my_methods)): + @def_method(m, my_methods[i]) + def _(arg): + ... + + Parameters + ---------- + m: TModule + Module in which operations on signals should be executed. + methods: Sequence[Method] + The methods whose body is going to be defined. 
+ ready: Callable[[int], Value] + A `Callable` that takes the index in the form of an `int` of the currently defined method + and produces a `Value` describing whether the method is ready to be run. + When omitted, each defined method is always ready. Assigned combinationally to the `ready` attribute. + validate_arguments: Optional[Callable[Concatenate[int, ...], ValueLike]] + Function that takes input arguments used to call the method + and checks whether the method can be called with those arguments. + It instantiates a combinational circuit for each + method caller. By default, there is no function, so all arguments + are accepted. + + Examples + -------- + Define three methods with the same body: + + .. highlight:: python + .. code-block:: python + + m = TModule() + my_sum_methods = [Method(i=[("arg1",8),("arg2",8)], o=[("res",8)]) for _ in range(3)] + @def_methods(m, my_sum_methods) + def _(_, arg1, arg2): + return arg1 + arg2 + + Define three methods with different bodies parametrized with the index of the method: + + .. highlight:: python + .. code-block:: python + + m = TModule() + my_sum_methods = [Method(i=[("arg1",8),("arg2",8)], o=[("res",8)]) for _ in range(3)] + @def_methods(m, my_sum_methods) + def _(index : int, arg1, arg2): + return arg1 + arg2 + index + + Define three methods with different ready signals: + + .. highlight:: python + .. code-block:: python + + @def_methods(m, my_filter_read_methods, ready_list=lambda i: fifo.head == i) + def _(_): + return fifo.read(m) + """ + + def decorator(func: Callable[Concatenate[int, P], Optional[RecordDict]]): + for i in range(len(methods)): + partial_f = partial(func, i) + partial_vargs = partial(validate_arguments, i) if validate_arguments is not None else None + def_method(m, methods[i], ready(i), partial_vargs)(partial_f) + + return decorator From 123e69c4e67168ddf9312a2139ebd1137ab0a2ff Mon Sep 17 00:00:00 2001 From: Marek Materzok Date: Wed, 25 Sep 2024 10:52:44 +0200 Subject: [PATCH 4/4] Clean up `MemoryBank` (#730) --- test/transactron/lib/test_transaction_lib.py | 90 +++++--------------- transactron/lib/storage.py | 85 ++++++++---------- 2 files changed, 57 insertions(+), 118 deletions(-) diff --git a/test/transactron/lib/test_transaction_lib.py b/test/transactron/lib/test_transaction_lib.py index 912c5467d..2216e86c7 100644 --- a/test/transactron/lib/test_transaction_lib.py +++ b/test/transactron/lib/test_transaction_lib.py @@ -1,10 +1,9 @@ import pytest from itertools import product import random -import itertools from operator import and_ from functools import reduce -from amaranth.sim import Settle, Passive +from amaranth.sim import Settle from typing import Optional, TypeAlias from parameterized import parameterized from collections import deque @@ -139,22 +138,22 @@ def test_pipelining(self): class TestMemoryBank(TestCaseWithSimulator): - test_conf = [(9, 3, 3, 3, 14), (16, 1, 1, 3, 15), (16, 1, 1, 1, 16), (12, 3, 1, 1, 17)] + test_conf = [(9, 3, 3, 3, 14), (16, 1, 1, 3, 15), (16, 1, 1, 1, 16), (12, 3, 1, 1, 17), (9, 0, 0, 0, 18)] - parametrized_input = [tc + sf for tc, sf in itertools.product(test_conf, [(True,), (False,)])] - - @parameterized.expand(parametrized_input) - def test_mem(self, max_addr, writer_rand, reader_req_rand, reader_resp_rand, seed, safe_writes): + @pytest.mark.parametrize("max_addr, writer_rand, reader_req_rand, reader_resp_rand, seed", test_conf) + @pytest.mark.parametrize("transparent", [False, True]) + def test_mem( + self, max_addr: int, writer_rand: int, reader_req_rand: int, 
reader_resp_rand: int, seed: int, transparent: bool + ): test_count = 200 data_width = 6 m = SimpleTestCircuit( - MemoryBank(data_layout=[("data", data_width)], elem_count=max_addr, safe_writes=safe_writes) + MemoryBank(data_layout=[("data", data_width)], elem_count=max_addr, transparent=transparent) ) data: list[int] = list(0 for _ in range(max_addr)) read_req_queue = deque() - addr_queue = deque() random.seed(seed) @@ -163,10 +162,10 @@ def writer(): d = random.randrange(2**data_width) a = random.randrange(max_addr) yield from m.write.call(data=d, addr=a) - for _ in range(2): + for _ in range(2 if not transparent else 0): yield Settle() data[a] = d - yield from self.random_wait(writer_rand, min_cycle_cnt=1) + yield from self.random_wait(writer_rand) def reader_req(): for cycle in range(test_count): @@ -174,72 +173,29 @@ def reader_req(): yield from m.read_req.call(addr=a) for _ in range(1): yield Settle() - if safe_writes: - d = data[a] - read_req_queue.append(d) - else: - addr_queue.append((cycle, a)) - yield from self.random_wait(reader_req_rand, min_cycle_cnt=1) + d = data[a] + read_req_queue.append(d) + yield from self.random_wait(reader_req_rand) def reader_resp(): for cycle in range(test_count): + for _ in range(3): + yield Settle() while not read_req_queue: - yield from self.random_wait(reader_resp_rand, min_cycle_cnt=1) + yield from self.random_wait(reader_resp_rand or 1, min_cycle_cnt=1) + for _ in range(3): + yield Settle() d = read_req_queue.popleft() assert (yield from m.read_resp.call()) == {"data": d} - yield from self.random_wait(reader_resp_rand, min_cycle_cnt=1) - - def internal_reader_resp(): - assert m._dut._internal_read_resp_trans is not None - yield Passive() - while True: - if addr_queue: - instr, a = addr_queue[0] - else: - yield - continue - d = data[a] - # check when internal method has been run to capture - # memory state for tests purposes - if (yield m._dut._internal_read_resp_trans.grant): - addr_queue.popleft() - read_req_queue.append(d) - yield + yield from self.random_wait(reader_resp_rand) - with self.run_simulation(m) as sim: + pipeline_test = writer_rand == 0 and reader_req_rand == 0 and reader_resp_rand == 0 + max_cycles = test_count + 2 if pipeline_test else 100000 + + with self.run_simulation(m, max_cycles=max_cycles) as sim: sim.add_sync_process(reader_req) sim.add_sync_process(reader_resp) sim.add_sync_process(writer) - if not safe_writes: - sim.add_sync_process(internal_reader_resp) - - def test_pipelined(self): - data_width = 6 - max_addr = 9 - m = SimpleTestCircuit(MemoryBank(data_layout=[("data", data_width)], elem_count=max_addr, safe_writes=False)) - - random.seed(14) - - def process(): - a = 3 - d1 = random.randrange(2**data_width) - yield from m.write.call_init(data=d1, addr=a) - yield from m.read_req.call_init(addr=a) - yield - d2 = random.randrange(2**data_width) - yield from m.write.call_init(data=d2, addr=a) - yield from m.read_resp.call_init() - yield - yield from m.write.disable() - yield from m.read_req.disable() - ret_d1 = (yield from m.read_resp.call_result())["data"] - assert d1 == ret_d1 - yield - ret_d2 = (yield from m.read_resp.call_result())["data"] - assert d2 == ret_d2 - - with self.run_simulation(m) as sim: - sim.add_sync_process(process) class TestAsyncMemoryBank(TestCaseWithSimulator): diff --git a/transactron/lib/storage.py b/transactron/lib/storage.py index 835406b66..0deadbfcf 100644 --- a/transactron/lib/storage.py +++ b/transactron/lib/storage.py @@ -5,8 +5,7 @@ from ..core import * from ..utils import SrcLoc, 
get_src_loc, MultiPriorityEncoder from typing import Optional -from transactron.utils import assign, AssignType, LayoutList, MethodLayout -from .reqres import ArgumentsToResultsZipper +from transactron.utils import LayoutList, MethodLayout __all__ = ["MemoryBank", "ContentAddressableMemory", "AsyncMemoryBank"] @@ -36,7 +35,7 @@ def __init__( data_layout: LayoutList, elem_count: int, granularity: Optional[int] = None, - safe_writes: bool = True, + transparent: bool = False, src_loc: int | SrcLoc = 0, ): """ @@ -49,10 +48,10 @@ def __init__( granularity: Optional[int] Granularity of write, forwarded to Amaranth. If `None` the whole structure is always saved at once. If not, the width of `data_layout` is split into `granularity` parts, which can be saved independently. - safe_writes: bool - Set to `False` if an optimisation can be done to increase throughput of writes. This will cause that - writes will be reordered with respect to reads eg. in sequence "read A, write A X", read can return - "X" even when write was called later. By default `True`, which disable optimisation. + transparent: bool + Read port transparency, false by default. When a read port is transparent, if a given memory address + is read and written in the same clock cycle, the read returns the written value instead of the value + which was in the memory in that cycle. src_loc: int | SrcLoc How many stack frames deep the source location is taken from. Alternatively, the source location to use instead of the default. @@ -63,7 +62,7 @@ def __init__( self.granularity = granularity self.width = from_method_layout(self.data_layout).size self.addr_width = bits_for(self.elem_count - 1) - self.safe_writes = safe_writes + self.transparent = transparent self.read_req_layout: LayoutList = [("addr", self.addr_width)] write_layout = [("addr", self.addr_width), ("data", self.data_layout)] @@ -80,60 +79,44 @@ def elaborate(self, platform) -> TModule: m = TModule() mem = Memory(width=self.width, depth=self.elem_count) - m.submodules.read_port = read_port = mem.read_port() + m.submodules.read_port = read_port = mem.read_port(transparent=self.transparent) m.submodules.write_port = write_port = mem.write_port() read_output_valid = Signal() - prev_read_addr = Signal(self.addr_width) - write_pending = Signal() - write_req = Signal() - write_args = Signal(self.write_layout) - write_args_prev = Signal(self.write_layout) - m.d.comb += read_port.addr.eq(prev_read_addr) - - zipper = ArgumentsToResultsZipper([("valid", 1)], self.data_layout) - m.submodules.zipper = zipper - - self._internal_read_resp_trans = Transaction(src_loc=self.src_loc) - with self._internal_read_resp_trans.body(m, request=read_output_valid): - m.d.sync += read_output_valid.eq(0) - zipper.write_results(m, read_port.data) - - write_trans = Transaction(src_loc=self.src_loc) - with write_trans.body(m, request=write_req | (~read_output_valid & write_pending)): - if self.safe_writes: - with m.If(write_pending): - m.d.comb += assign(write_args, write_args_prev, fields=AssignType.ALL) - m.d.sync += write_pending.eq(0) - m.d.comb += write_port.addr.eq(write_args.addr) - m.d.comb += write_port.data.eq(write_args.data) - if self.granularity is None: - m.d.comb += write_port.en.eq(1) - else: - m.d.comb += write_port.en.eq(write_args.mask) + overflow_valid = Signal() + overflow_data = Signal(self.width) - @def_method(m, self.read_resp) + # The read request method can be called at most twice when not reading the response. 
+ # The first result is stored in the overflow buffer, the second - in the read value buffer of the memory. + # If the responses are always read as they arrive, overflow is never written and no stalls occur. + + with m.If(read_output_valid & ~overflow_valid & self.read_req.run & ~self.read_resp.run): + m.d.sync += overflow_valid.eq(1) + m.d.sync += overflow_data.eq(read_port.data) + + @def_method(m, self.read_resp, read_output_valid | overflow_valid) def _(): - output = zipper.read(m) - return output.results + with m.If(overflow_valid): + m.d.sync += overflow_valid.eq(0) + with m.Else(): + m.d.sync += read_output_valid.eq(0) + return Mux(overflow_valid, overflow_data, read_port.data) + + m.d.comb += read_port.en.eq(0) # because the init value is 1 - @def_method(m, self.read_req, ~write_pending) + @def_method(m, self.read_req, ~overflow_valid) def _(addr): m.d.sync += read_output_valid.eq(1) + m.d.comb += read_port.en.eq(1) m.d.comb += read_port.addr.eq(addr) - m.d.sync += prev_read_addr.eq(addr) - zipper.write_args(m, valid=1) - @def_method(m, self.write, ~write_pending) + @def_method(m, self.write) def _(arg): - if self.safe_writes: - with m.If((arg.addr == read_port.addr) & (read_output_valid | self.read_req.run)): - m.d.sync += write_pending.eq(1) - m.d.sync += assign(write_args_prev, arg, fields=AssignType.ALL) - with m.Else(): - m.d.comb += write_req.eq(1) + m.d.comb += write_port.addr.eq(arg.addr) + m.d.comb += write_port.data.eq(arg.data) + if self.granularity is None: + m.d.comb += write_port.en.eq(1) else: - m.d.comb += write_req.eq(1) - m.d.comb += assign(write_args, arg, fields=AssignType.ALL) + m.d.comb += write_port.en.eq(arg.mask) return m
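
Review note on PATCH 4/4: the comment in `elaborate` describes a one-entry skid buffer on the read path — the registered read-port output holds one pending result and the `overflow_valid`/`overflow_data` pair holds a second, so at most two `read_req` calls can be outstanding before a `read_resp` is consumed. Below is a minimal software sketch of that handshake for review purposes only; the `ReadPathModel` class and its attribute names are invented for this illustration and are not part of the patch or of the Transactron API.

# Minimal software model of the MemoryBank read path from PATCH 4/4 (illustration only).
# The names below (ReadPathModel, pending, overflow) are invented for this sketch; the
# hardware uses the read port's registered output plus overflow_valid/overflow_data.

class ReadPathModel:
    def __init__(self, mem: list[int]):
        self.mem = mem
        self.pending: int | None = None   # models the registered read-port output
        self.overflow: int | None = None  # models the one-entry overflow (skid) buffer

    def read_req(self, addr: int) -> None:
        # Not ready while the overflow buffer is occupied (mirrors `~overflow_valid`).
        assert self.overflow is None, "read_req not ready"
        if self.pending is not None:
            # A second outstanding request parks the first result in the overflow buffer.
            self.overflow = self.pending
        self.pending = self.mem[addr]

    def read_resp(self) -> int:
        # Ready when either buffer holds data (mirrors `read_output_valid | overflow_valid`);
        # results come back in request order, overflow buffer first.
        if self.overflow is not None:
            data, self.overflow = self.overflow, None
            return data
        assert self.pending is not None, "read_resp not ready"
        data, self.pending = self.pending, None
        return data

# Two requests may be issued back to back; a third would stall until a response is read.
model = ReadPathModel([10, 20, 30])
model.read_req(0)
model.read_req(2)
assert model.read_resp() == 10
assert model.read_resp() == 30

If the responses are always read as they arrive (the common case in the tests above), the overflow buffer is never written and the read path never stalls, which is what allows the fully pipelined test configuration `(9, 0, 0, 0, 18)` added in `test/transactron/lib/test_transaction_lib.py` to complete within `test_count + 2` cycles.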