From 291364e1b408d29236034854b7ed30f080e5b6c9 Mon Sep 17 00:00:00 2001
From: Johannes Stoelp
Date: Wed, 1 Nov 2023 18:41:44 +0100
Subject: lt_bus: add initial support for global bus locking

---
 src/models/lt_bus.h | 101 ++++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 95 insertions(+), 6 deletions(-)

diff --git a/src/models/lt_bus.h b/src/models/lt_bus.h
index cf09d99..86b181e 100644
--- a/src/models/lt_bus.h
+++ b/src/models/lt_bus.h
@@ -7,10 +7,42 @@
 #include "utils/tlm_target_socket_tagged.h"
 #include "utils/types.h"
 
+#include
+
 #include
 #include
 #include
 
+// TLM bus lock extension.
+//
+// This extension is used to implement the bus locking scheme.
+// The protocol is as follows. Once an initiator sends a locked transaction,
+// the bus will be locked by that initiator after all currently pending
+// transactions are finished. The bus is locked until the locking initiator
+// sends an unlocked transaction.
+struct [[nodiscard]] bus_lock : tlm::tlm_extension<bus_lock> {
+  bool is_lock{false};
+
+  constexpr explicit bus_lock() = default;
+  constexpr explicit bus_lock(const bus_lock&) = default;
+
+  virtual tlm_extension_base* clone() const override {
+    return new bus_lock(*this);
+  }
+
+  virtual void copy_from(const tlm_extension_base& ext) override {
+    if (&ext == this) {
+      // Copy from self, nop.
+      return;
+    }
+
+    assert(typeid(*this) == typeid(ext));
+    const bus_lock& other = static_cast<const bus_lock&>(ext);
+
+    is_lock = other.is_lock;
+  }
+};
+
 class lt_bus : public sc_core::sc_module {
   using target_socket = tlm_target_socket_tagged;
   using target_socket_ptr = std::unique_ptr<target_socket>;
@@ -116,14 +148,60 @@ class lt_bus : public sc_core::sc_module {
 
   // -- TLM_FW_TRANSPORT_IF ----------------------------------------------------
 
-  void b_transport(usize, tlm::tlm_generic_payload& tx, sc_core::sc_time& t) {
+  void b_transport(usize idx,
+                   tlm::tlm_generic_payload& tx,
+                   sc_core::sc_time& t) {
     if (const auto r = decode(tx)) {
-      const auto tx_start = tx.get_address();
-      assert(r.start <= tx_start);
+      const auto do_tx = [&]() {
+        const auto tx_start = tx.get_address();
+        assert(r.start <= tx_start);
 
-      tx.set_address(tx_start - r.start);
-      (*r.sock)->b_transport(tx, t);
-      tx.set_address(tx_start);
+        tx.set_address(tx_start - r.start);
+        (*r.sock)->b_transport(tx, t);
+        tx.set_address(tx_start);
+      };
+
+      while (m_bl.is_locked && (m_bl.idx != idx)) {
+        wait(m_bl.ev_no_pending_tx);
+      }
+
+      const auto* ext = tx.get_extension<bus_lock>();
+      if (ext && ext->is_lock) {
+        assert(!m_bl.is_locked);
+
+        m_bl.is_locked = true;
+        m_bl.idx = idx;
+
+        if (m_bl.pending_tx) {
+          wait(m_bl.ev_no_pending_tx);
+        }
+        assert(m_bl.pending_tx == 0);
+
+        // Invalidate DMI pointers for the whole address space. This forces all
+        // initiators onto the slow path and through the bus, which can then
+        // enforce exclusive access.
+        for (auto& sock : m_initiators) {
+          (*sock)->invalidate_direct_mem_ptr(0, -1ull);
+        }
+
+        do_tx();
+        assert(m_bl.pending_tx == 0);
+      } else {
+        assert(!m_bl.is_locked || (m_bl.is_locked && m_bl.idx == idx));
+
+        m_bl.pending_tx++;
+        do_tx();
+        m_bl.pending_tx--;
+
+        if (m_bl.is_locked && m_bl.idx == idx) {
+          assert(m_bl.pending_tx == 0);
+          m_bl.is_locked = false;
+        }
+
+        if (m_bl.pending_tx == 0) {
+          m_bl.ev_no_pending_tx.notify(sc_core::SC_ZERO_TIME);
+        }
+      }
     } else {
       tx.set_response_status(tlm::TLM_ADDRESS_ERROR_RESPONSE);
     }
@@ -132,6 +210,10 @@ class lt_bus : public sc_core::sc_module {
   bool get_direct_mem_ptr(usize,
                           tlm::tlm_generic_payload& tx,
                           tlm::tlm_dmi& dmi) {
+    if (m_bl.is_locked) {
+      return false;
+    }
+
     if (const auto r = decode(tx)) {
       const auto tx_start = tx.get_address();
       assert(r.start <= tx_start);
@@ -245,6 +327,13 @@ class lt_bus : public sc_core::sc_module {
   std::vector m_targets;
   // Address range mappings to BUS TARGETs (m_targets).
   std::vector m_mappings;
+
+  struct bus_lock_state {
+    bool is_locked{false};
+    usize idx{0};
+    usize pending_tx{0};
+    sc_core::sc_event ev_no_pending_tx;
+  } m_bl;
 };
 
 #endif
-- 
cgit v1.2.3
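Usage note (editor's sketch, not part of the patch): an initiator drives the protocol above by attaching the bus_lock extension to its payload; a locked transaction acquires the bus, and the next non-locked transaction from the same initiator releases it. The following minimal locked read-modify-write is illustrative only; the socket type, addresses, data, and the locked_rmw helper are assumptions, while the bus_lock extension and the lock/unlock behaviour come from the patch.

  // Hypothetical initiator-side code: acquire the bus with a locked read,
  // release it with the subsequent unlocked write from the same initiator.
  // Must run in an SC_THREAD, since the bus's b_transport may wait().
  void locked_rmw(tlm::tlm_initiator_socket<>& socket, sc_core::sc_time& t) {
    unsigned char data[4] = {};
    tlm::tlm_generic_payload tx;
    auto* lock = new bus_lock{};  // owned here; detached and freed below

    lock->is_lock = true;         // locked transaction -> bus lock is acquired
    tx.set_extension(lock);
    tx.set_command(tlm::TLM_READ_COMMAND);
    tx.set_address(0x1000);
    tx.set_data_ptr(data);
    tx.set_data_length(sizeof(data));
    tx.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);
    socket->b_transport(tx, t);   // bus drains pending txs and invalidates DMI

    data[0] |= 0x1;               // modify under the lock

    lock->is_lock = false;        // unlocked transaction -> bus lock is released
    tx.set_command(tlm::TLM_WRITE_COMMAND);
    tx.set_response_status(tlm::TLM_INCOMPLETE_RESPONSE);
    socket->b_transport(tx, t);

    tx.clear_extension(lock);
    delete lock;
  }

The bus identifies the lock owner by the tagged target-socket index (idx), not by the payload, so both transactions must be issued through the same socket.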