diff --git a/utils/fasm/src/fasm.cpp b/utils/fasm/src/fasm.cpp index 504e15dbfa8..2785c1149e8 100644 --- a/utils/fasm/src/fasm.cpp +++ b/utils/fasm/src/fasm.cpp @@ -343,9 +343,9 @@ static AtomNetId _find_atom_input_logical_net(const t_pb* atom, const t_pb_route static LogicVec lut_outputs(const t_pb* atom_pb, size_t num_inputs, const t_pb_routes &pb_route) { auto& atom_ctx = g_vpr_ctx.atom(); - AtomBlockId block_id = atom_ctx.lookup().pb_atom(atom_pb); + AtomBlockId block_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom_pb); const auto& truth_table = atom_ctx.netlist().block_truth_table(block_id); - auto ports = atom_ctx.netlist().block_input_ports(atom_ctx.lookup().pb_atom(atom_pb)); + auto ports = atom_ctx.netlist().block_input_ports(atom_ctx.lookup().atom_pb_bimap().pb_atom(atom_pb)); const t_pb_graph_node* gnode = atom_pb->pb_graph_node; @@ -537,7 +537,7 @@ static const t_pb_routes &find_pb_route(const t_pb* pb) { void FasmWriterVisitor::check_for_param(const t_pb *atom) { auto& atom_ctx = g_vpr_ctx.atom(); - auto atom_blk_id = atom_ctx.lookup().pb_atom(atom); + auto atom_blk_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); if (atom_blk_id == AtomBlockId::INVALID()) { return; } @@ -592,7 +592,7 @@ void FasmWriterVisitor::check_for_param(const t_pb *atom) { void FasmWriterVisitor::check_for_lut(const t_pb* atom) { auto& atom_ctx = g_vpr_ctx.atom(); - auto atom_blk_id = atom_ctx.lookup().pb_atom(atom); + auto atom_blk_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); if (atom_blk_id == AtomBlockId::INVALID()) { return; } diff --git a/vpr/src/base/atom_lookup.cpp b/vpr/src/base/atom_lookup.cpp index c0f1ef3cd10..c487e5e6549 100644 --- a/vpr/src/base/atom_lookup.cpp +++ b/vpr/src/base/atom_lookup.cpp @@ -4,51 +4,6 @@ #include "vtr_optional.h" #include "atom_lookup.h" -/* - * PB - */ -const t_pb* AtomLookup::atom_pb(const AtomBlockId blk_id) const { - auto iter = atom_to_pb_.find(blk_id); - if (iter == atom_to_pb_.end()) { - //Not found - return nullptr; 
- } - return iter->second; -} - -AtomBlockId AtomLookup::pb_atom(const t_pb* pb) const { - auto iter = atom_to_pb_.find(pb); - if (iter == atom_to_pb_.inverse_end()) { - //Not found - return AtomBlockId::INVALID(); - } - return iter->second; -} - -const t_pb_graph_node* AtomLookup::atom_pb_graph_node(const AtomBlockId blk_id) const { - const t_pb* pb = atom_pb(blk_id); - if (pb) { - //Found - return pb->pb_graph_node; - } - return nullptr; -} - -void AtomLookup::set_atom_pb(const AtomBlockId blk_id, const t_pb* pb) { - //If either of blk_id or pb are not valid, - //remove any mapping - - if (!blk_id && pb) { - //Remove - atom_to_pb_.erase(pb); - } else if (blk_id && !pb) { - //Remove - atom_to_pb_.erase(blk_id); - } else if (blk_id && pb) { - //If both are valid store the mapping - atom_to_pb_.update(blk_id, pb); - } -} /* * PB Pins diff --git a/vpr/src/base/atom_lookup.h b/vpr/src/base/atom_lookup.h index fdf17cddd46..8a218fae207 100644 --- a/vpr/src/base/atom_lookup.h +++ b/vpr/src/base/atom_lookup.h @@ -14,6 +14,7 @@ #include "tatum/TimingGraphFwd.hpp" #include "vtr_optional.h" +#include "atom_pb_bimap.h" /** * @brief The AtomLookup class describes the mapping between components in the AtomNetlist @@ -31,23 +32,45 @@ class AtomLookup { */ /** - * @brief Returns the leaf pb associated with the atom blk_id - * @note this is the lowest level pb which corresponds directly to the atom block + * @brief Sets the atom to pb bimap access lock to value. + * If set to true, access to the bimap is prohibited and will result in failing assertions. 
+ * + * @param value Value to set the lock to */ - const t_pb* atom_pb(const AtomBlockId blk_id) const; - - ///@brief Returns the atom block id associated with pb - AtomBlockId pb_atom(const t_pb* pb) const; - - ///@brief Conveneince wrapper around atom_pb to access the associated graph node - const t_pb_graph_node* atom_pb_graph_node(const AtomBlockId blk_id) const; + inline void set_atom_pb_bimap_lock(bool value) { + VTR_ASSERT_SAFE_MSG(lock_atom_pb_bimap_ != value, "Double locking or unlocking the atom pb bimap lock"); + lock_atom_pb_bimap_ = value; + } + + /// @brief Gets the current atom to pb bimap lock value. + inline bool atom_pb_bimap_islocked() const { return lock_atom_pb_bimap_; } + + // All accesses, mutable or immutable, to the atom to pb bimap + // will result in failing assertions if the lock is set to true. + // This is done to make sure there is only a single source of + // data in places that are supposed to use a local data structure + // instead of the global context. + + /// @brief Returns a mutable reference to the atom to pb bimap, provided that access to it is unlocked. It will result in a crash otherwise. + /// @return Mutable reference to the atom pb bimap. + inline AtomPBBimap& mutable_atom_pb_bimap() { + VTR_ASSERT(!lock_atom_pb_bimap_); + return atom_to_pb_bimap_; + } + + /// @brief Returns an immutable reference to the atom to pb bimap, provided that access to it is unlocked. It will result in a crash otherwise. + /// @return Immutable reference to the atom pb bimap. 
+ inline const AtomPBBimap& atom_pb_bimap() const { + VTR_ASSERT(!lock_atom_pb_bimap_); + return atom_to_pb_bimap_; + } /** - * @brief Sets the bidirectional mapping between an atom and pb - * - * If either blk_id or pb are not valid any, existing mapping is removed + * @brief Set atom to pb bimap + * + * @param atom_to_pb Reference to AtomPBBimap to be copied from */ - void set_atom_pb(const AtomBlockId blk_id, const t_pb* pb); + void set_atom_to_pb_bimap(const AtomPBBimap& atom_to_pb) { atom_to_pb_bimap_ = atom_to_pb; } /* * PB Pins @@ -112,7 +135,12 @@ class AtomLookup { private: //Types private: - vtr::bimap atom_to_pb_; + /** + * @brief Allows or disallows access to the AtomPBBimap data. + * Useful to make sure global context is not accessed in places you don't want it to. + */ + bool lock_atom_pb_bimap_ = false; + AtomPBBimap atom_to_pb_bimap_; vtr::vector_map atom_pin_to_pb_graph_pin_; diff --git a/vpr/src/base/clustered_netlist.cpp b/vpr/src/base/clustered_netlist.cpp index ce9e44dd422..2f2fce860a4 100644 --- a/vpr/src/base/clustered_netlist.cpp +++ b/vpr/src/base/clustered_netlist.cpp @@ -1,5 +1,5 @@ #include "clustered_netlist.h" - +#include "globals.h" #include "physical_types_util.h" #include "vtr_assert.h" @@ -171,7 +171,7 @@ ClusterNetId ClusteredNetlist::create_net(const std::string& name) { void ClusteredNetlist::remove_block_impl(const ClusterBlockId blk_id) { //Remove & invalidate pointers - free_pb(block_pbs_[blk_id]); + free_pb(block_pbs_[blk_id], g_vpr_ctx.mutable_atom().mutable_lookup().mutable_atom_pb_bimap()); delete block_pbs_[blk_id]; block_pbs_.insert(blk_id, NULL); block_types_.insert(blk_id, NULL); diff --git a/vpr/src/base/load_flat_place.cpp b/vpr/src/base/load_flat_place.cpp index 7d47bc31dcc..14de1c6da1f 100644 --- a/vpr/src/base/load_flat_place.cpp +++ b/vpr/src/base/load_flat_place.cpp @@ -71,7 +71,7 @@ static void print_flat_cluster(FILE* fp, // Print a line for each atom. 
for (AtomBlockId atom : atoms_lookup[blk_id]) { // Get the atom pb graph node. - t_pb_graph_node* atom_pbgn = atom_ctx.lookup().atom_pb(atom)->pb_graph_node; + t_pb_graph_node* atom_pbgn = atom_ctx.lookup().atom_pb_bimap().atom_pb(atom)->pb_graph_node; // Print the flat placement information for this atom. fprintf(fp, "%s %d %d %d %d %d #%zu %s\n", diff --git a/vpr/src/base/netlist_writer.cpp b/vpr/src/base/netlist_writer.cpp index 32206cbf4c8..2cc43788007 100644 --- a/vpr/src/base/netlist_writer.cpp +++ b/vpr/src/base/netlist_writer.cpp @@ -859,7 +859,7 @@ class NetlistWriterVisitor : public NetlistVisitor { void visit_atom_impl(const t_pb* atom) override { auto& atom_ctx = g_vpr_ctx.atom(); - auto atom_pb = atom_ctx.lookup().pb_atom(atom); + auto atom_pb = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); if (atom_pb == AtomBlockId::INVALID()) { return; } @@ -1787,7 +1787,7 @@ class NetlistWriterVisitor : public NetlistVisitor { } auto& atom_ctx = g_vpr_ctx.atom(); - AtomBlockId blk_id = atom_ctx.lookup().pb_atom(atom); + AtomBlockId blk_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); for (auto param : atom_ctx.netlist().block_params(blk_id)) { params[param.first] = param.second; } @@ -1809,7 +1809,7 @@ class NetlistWriterVisitor : public NetlistVisitor { tatum::NodeId find_tnode(const t_pb* atom, int cluster_pin_idx) { auto& atom_ctx = g_vpr_ctx.atom(); - AtomBlockId blk_id = atom_ctx.lookup().pb_atom(atom); + AtomBlockId blk_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); ClusterBlockId clb_index = atom_ctx.lookup().atom_clb(blk_id); auto key = std::make_pair(clb_index, cluster_pin_idx); @@ -1840,7 +1840,7 @@ class NetlistWriterVisitor : public NetlistVisitor { const t_pb* atom) { //LUT primitive auto& atom_ctx = g_vpr_ctx.atom(); - const t_model* model = atom_ctx.netlist().block_model(atom_ctx.lookup().pb_atom(atom)); + const t_model* model = atom_ctx.netlist().block_model(atom_ctx.lookup().atom_pb_bimap().pb_atom(atom)); VTR_ASSERT(model->name == 
std::string(MODEL_NAMES)); #ifdef DEBUG_LUT_MASK @@ -1851,7 +1851,7 @@ class NetlistWriterVisitor : public NetlistVisitor { std::vector permute = determine_lut_permutation(num_inputs, atom); //Retrieve the truth table - const auto& truth_table = atom_ctx.netlist().block_truth_table(atom_ctx.lookup().pb_atom(atom)); + const auto& truth_table = atom_ctx.netlist().block_truth_table(atom_ctx.lookup().atom_pb_bimap().pb_atom(atom)); //Apply the permutation auto permuted_truth_table = permute_truth_table(truth_table, num_inputs, permute); @@ -1896,7 +1896,7 @@ class NetlistWriterVisitor : public NetlistVisitor { // //We walk through the logical inputs to this atom (i.e. in the original truth table/netlist) //and find the corresponding input in the implementation atom (i.e. in the current netlist) - auto ports = atom_ctx.netlist().block_input_ports(atom_ctx.lookup().pb_atom(atom_pb)); + auto ports = atom_ctx.netlist().block_input_ports(atom_ctx.lookup().atom_pb_bimap().pb_atom(atom_pb)); if (ports.size() == 1) { const t_pb_graph_node* gnode = atom_pb->pb_graph_node; VTR_ASSERT(gnode->num_input_ports == 1); @@ -2144,7 +2144,7 @@ class MergedNetlistWriterVisitor : public NetlistWriterVisitor { void visit_atom_impl(const t_pb* atom) override { auto& atom_ctx = g_vpr_ctx.atom(); - auto atom_pb = atom_ctx.lookup().pb_atom(atom); + auto atom_pb = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom); if (atom_pb == AtomBlockId::INVALID()) { return; } diff --git a/vpr/src/base/read_netlist.cpp b/vpr/src/base/read_netlist.cpp index 9a1a00f4ddc..9c648607cae 100644 --- a/vpr/src/base/read_netlist.cpp +++ b/vpr/src/base/read_netlist.cpp @@ -177,7 +177,7 @@ ClusteredNetlist read_netlist(const char* net_file, //Reset atom/pb mapping (it is reloaded from the packed netlist file) for (auto blk_id : atom_ctx.netlist().blocks()) - atom_ctx.mutable_lookup().set_atom_pb(blk_id, nullptr); + atom_ctx.mutable_lookup().mutable_atom_pb_bimap().set_atom_pb(blk_id, nullptr); //Count the number of blocks 
for allocation bcount = pugiutil::count_children(top, "block", loc_data, pugiutil::ReqOpt::OPTIONAL); @@ -197,7 +197,7 @@ ClusteredNetlist read_netlist(const char* net_file, /* Error check */ for (auto blk_id : atom_ctx.netlist().blocks()) { - if (atom_ctx.lookup().atom_pb(blk_id) == nullptr) { + if (atom_ctx.lookup().atom_pb_bimap().atom_pb(blk_id) == nullptr) { VPR_FATAL_ERROR(VPR_ERROR_NET_F, ".blif file and .net file do not match, .net file missing atom %s.\n", atom_ctx.netlist().block_name(blk_id).c_str()); @@ -319,7 +319,7 @@ static void processComplexBlock(pugi::xml_node clb_block, } //Parse all pbs and CB internal nets - atom_ctx.mutable_lookup().set_atom_pb(AtomBlockId::INVALID(), clb_nlist->block_pb(index)); + atom_ctx.mutable_lookup().mutable_atom_pb_bimap().set_atom_pb(AtomBlockId::INVALID(), clb_nlist->block_pb(index)); clb_nlist->block_pb(index)->pb_graph_node = clb_nlist->block_type(index)->pb_graph_head; clb_nlist->block_pb(index)->pb_route = alloc_pb_route(clb_nlist->block_pb(index)->pb_graph_node); @@ -474,7 +474,7 @@ static void processPb(pugi::xml_node Parent, const ClusterBlockId index, t_pb* p //Update atom netlist mapping VTR_ASSERT(blk_id); - atom_ctx.mutable_lookup().set_atom_pb(blk_id, pb); + atom_ctx.mutable_lookup().mutable_atom_pb_bimap().set_atom_pb(blk_id, pb); atom_ctx.mutable_lookup().set_atom_clb(blk_id, index); auto atom_attrs = atom_ctx.netlist().block_attrs(blk_id); @@ -542,7 +542,7 @@ static void processPb(pugi::xml_node Parent, const ClusterBlockId index, t_pb* p pb->child_pbs[i][pb_index].name = vtr::strdup(name.value()); /* Parse all pbs and CB internal nets*/ - atom_ctx.mutable_lookup().set_atom_pb(AtomBlockId::INVALID(), &pb->child_pbs[i][pb_index]); + atom_ctx.mutable_lookup().mutable_atom_pb_bimap().set_atom_pb(AtomBlockId::INVALID(), &pb->child_pbs[i][pb_index]); auto mode = child.attribute("mode"); pb->child_pbs[i][pb_index].mode = 0; @@ -564,7 +564,7 @@ static void processPb(pugi::xml_node Parent, const ClusterBlockId 
index, t_pb* p } else { /* physical block has no used primitives but it may have used routing */ pb->child_pbs[i][pb_index].name = nullptr; - atom_ctx.mutable_lookup().set_atom_pb(AtomBlockId::INVALID(), &pb->child_pbs[i][pb_index]); + atom_ctx.mutable_lookup().mutable_atom_pb_bimap().set_atom_pb(AtomBlockId::INVALID(), &pb->child_pbs[i][pb_index]); auto lookahead1 = pugiutil::get_first_child(child, "outputs", loc_data, pugiutil::OPTIONAL); if (lookahead1) { @@ -1180,7 +1180,7 @@ static void load_atom_pin_mapping(const ClusteredNetlist& clb_nlist) { auto& atom_ctx = g_vpr_ctx.atom(); for (const AtomBlockId blk : atom_ctx.netlist().blocks()) { - const t_pb* pb = atom_ctx.lookup().atom_pb(blk); + const t_pb* pb = atom_ctx.lookup().atom_pb_bimap().atom_pb(blk); VTR_ASSERT_MSG(pb, "Atom block must have a matching PB"); const t_pb_graph_node* gnode = pb->pb_graph_node; @@ -1250,7 +1250,7 @@ void set_atom_pin_mapping(const ClusteredNetlist& clb_nlist, const AtomBlockId a return; } - const t_pb* atom_pb = atom_ctx.lookup().atom_pb(atom_blk); + const t_pb* atom_pb = atom_ctx.lookup().atom_pb_bimap().atom_pb(atom_blk); //This finds the index within the atom port to which the current gpin //is mapped. 
Note that this accounts for any applied pin rotations diff --git a/vpr/src/draw/draw.cpp b/vpr/src/draw/draw.cpp index cd0f293b067..5dc0c09523e 100644 --- a/vpr/src/draw/draw.cpp +++ b/vpr/src/draw/draw.cpp @@ -797,7 +797,7 @@ ezgl::point2d atom_pin_draw_coord(AtomPinId pin) { AtomBlockId blk = atom_ctx.netlist().pin_block(pin); ClusterBlockId clb_index = atom_ctx.lookup().atom_clb(blk); - const t_pb_graph_node* pg_gnode = atom_ctx.lookup().atom_pb_graph_node(blk); + const t_pb_graph_node* pg_gnode = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(blk); t_draw_coords* draw_coords = get_draw_coords_vars(); ezgl::rectangle pb_bbox = draw_coords->get_absolute_pb_bbox(clb_index, diff --git a/vpr/src/draw/draw_floorplanning.cpp b/vpr/src/draw/draw_floorplanning.cpp index 088cc191a10..e22c6446f7a 100644 --- a/vpr/src/draw/draw_floorplanning.cpp +++ b/vpr/src/draw/draw_floorplanning.cpp @@ -152,8 +152,8 @@ void draw_constrained_atoms(ezgl::renderer* g) { auto atoms = constraints.get_part_atoms((PartitionId)partitionID); for (const AtomBlockId atom_id : atoms) { - if (atom_ctx.lookup().atom_pb(atom_id) != nullptr) { - const t_pb* pb = atom_ctx.lookup().atom_pb(atom_id); + if (atom_ctx.lookup().atom_pb_bimap().atom_pb(atom_id) != nullptr) { + const t_pb* pb = atom_ctx.lookup().atom_pb_bimap().atom_pb(atom_id); auto color = kelly_max_contrast_colors_no_black[partitionID % (kelly_max_contrast_colors_no_black.size())]; ClusterBlockId clb_index = atom_ctx.lookup().atom_clb(atom_id); auto type = cluster_ctx.clb_nlist.block_type(clb_index); @@ -310,7 +310,7 @@ static GtkTreeModel* create_and_fill_model() { -1); for (AtomBlockId const_atom : atoms) { - std::string atom_name = (atom_ctx.lookup().atom_pb(const_atom))->name; + std::string atom_name = (atom_ctx.lookup().atom_pb_bimap().atom_pb(const_atom))->name; gtk_tree_store_append(store, &child_iter, &iter); gtk_tree_store_set(store, &child_iter, COL_NAME, atom_name.c_str(), diff --git a/vpr/src/draw/intra_logic_block.cpp 
b/vpr/src/draw/intra_logic_block.cpp index 6c6a686228d..375eee6f151 100644 --- a/vpr/src/draw/intra_logic_block.cpp +++ b/vpr/src/draw/intra_logic_block.cpp @@ -530,13 +530,13 @@ void collect_pb_atoms_recurr(const t_pb* pb, std::vector& atoms) { if (pb->is_primitive()) { //Base case - AtomBlockId blk = atom_ctx.lookup().pb_atom(pb); + AtomBlockId blk = atom_ctx.lookup().atom_pb_bimap().pb_atom(pb); if (blk) { atoms.push_back(blk); } } else { //Recurse - VTR_ASSERT_DEBUG(atom_ctx.lookup().pb_atom(pb) == AtomBlockId::INVALID()); + VTR_ASSERT_DEBUG(atom_ctx.lookup().atom_pb_bimap().pb_atom(pb) == AtomBlockId::INVALID()); for (int itype = 0; itype < pb->get_num_child_types(); ++itype) { for (int ichild = 0; ichild < pb->get_num_children_of_type(itype); ++ichild) { @@ -575,14 +575,14 @@ void draw_logical_connections(ezgl::renderer* g) { continue; /* Don't Draw */ } - const t_pb_graph_node* src_pb_gnode = atom_ctx.lookup().atom_pb_graph_node(src_blk_id); + const t_pb_graph_node* src_pb_gnode = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(src_blk_id); bool src_is_selected = sel_subblk_info.is_in_selected_subtree(src_pb_gnode, src_clb); bool src_is_src_of_selected = sel_subblk_info.is_source_of_selected(src_pb_gnode, src_clb); // iterate over the sinks for (auto sink_pin_id : atom_ctx.netlist().net_sinks(net_id)) { AtomBlockId sink_blk_id = atom_ctx.netlist().pin_block(sink_pin_id); - const t_pb_graph_node* sink_pb_gnode = atom_ctx.lookup().atom_pb_graph_node(sink_blk_id); + const t_pb_graph_node* sink_pb_gnode = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(sink_blk_id); ClusterBlockId sink_clb = atom_ctx.lookup().atom_clb(sink_blk_id); int sink_layer_num = block_locs[sink_clb].loc.layer; @@ -807,7 +807,7 @@ void t_selected_sub_block_info::set(t_pb* new_selected_sub_block, const ClusterB for (auto blk_id : atom_ctx.netlist().blocks()) { const ClusterBlockId clb = atom_ctx.lookup().atom_clb(blk_id); - const t_pb_graph_node* pb_graph_node = 
atom_ctx.lookup().atom_pb_graph_node(blk_id); + const t_pb_graph_node* pb_graph_node = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(blk_id); // find the atom block that corrisponds to this pb. if (is_in_selected_subtree(pb_graph_node, clb)) { //Collect the sources of all nets driving this node @@ -818,7 +818,7 @@ void t_selected_sub_block_info::set(t_pb* new_selected_sub_block, const ClusterB AtomBlockId src_blk = atom_ctx.netlist().pin_block(driver_pin_id); const ClusterBlockId src_clb = atom_ctx.lookup().atom_clb(src_blk); - const t_pb_graph_node* src_pb_graph_node = atom_ctx.lookup().atom_pb_graph_node(src_blk); + const t_pb_graph_node* src_pb_graph_node = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(src_blk); sources.insert(gnode_clb_pair(src_pb_graph_node, src_clb)); } @@ -830,7 +830,7 @@ void t_selected_sub_block_info::set(t_pb* new_selected_sub_block, const ClusterB AtomBlockId sink_blk = atom_ctx.netlist().pin_block(sink_pin_id); const ClusterBlockId sink_clb = atom_ctx.lookup().atom_clb(sink_blk); - const t_pb_graph_node* sink_pb_graph_node = atom_ctx.lookup().atom_pb_graph_node(sink_blk); + const t_pb_graph_node* sink_pb_graph_node = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(sink_blk); sinks.insert(gnode_clb_pair(sink_pb_graph_node, sink_clb)); } @@ -882,7 +882,7 @@ t_selected_sub_block_info::clb_pin_tuple::clb_pin_tuple(ClusterBlockId clb_index t_selected_sub_block_info::clb_pin_tuple::clb_pin_tuple(const AtomPinId atom_pin) { auto& atom_ctx = g_vpr_ctx.atom(); clb_index = atom_ctx.lookup().atom_clb(atom_ctx.netlist().pin_block(atom_pin)); - pb_gnode = atom_ctx.lookup().atom_pb_graph_node(atom_ctx.netlist().pin_block(atom_pin)); + pb_gnode = atom_ctx.lookup().atom_pb_bimap().atom_pb_graph_node(atom_ctx.netlist().pin_block(atom_pin)); } bool t_selected_sub_block_info::clb_pin_tuple::operator==(const clb_pin_tuple& rhs) const { diff --git a/vpr/src/pack/atom_pb_bimap.cpp b/vpr/src/pack/atom_pb_bimap.cpp new file mode 100644 index 
00000000000..df837cfea6b --- /dev/null +++ b/vpr/src/pack/atom_pb_bimap.cpp @@ -0,0 +1,65 @@ +/** + * @file + * @author Amir Poolad + * @date March 2025 + * @brief The code for the AtomPBBimap class. + * + * This file implements the various functions of the AtomPBBimap class. + */ + +#include "atom_pb_bimap.h" +#include "atom_netlist.h" + +AtomPBBimap::AtomPBBimap(const vtr::bimap& atom_to_pb) { + atom_to_pb_ = atom_to_pb; +} + +const t_pb* AtomPBBimap::atom_pb(const AtomBlockId blk_id) const { + auto iter = atom_to_pb_.find(blk_id); + if (iter == atom_to_pb_.end()) { + //Not found + return nullptr; + } + return iter->second; +} + +AtomBlockId AtomPBBimap::pb_atom(const t_pb* pb) const { + auto iter = atom_to_pb_.find(pb); + if (iter == atom_to_pb_.inverse_end()) { + //Not found + return AtomBlockId::INVALID(); + } + return iter->second; +} + +const t_pb_graph_node* AtomPBBimap::atom_pb_graph_node(const AtomBlockId blk_id) const { + const t_pb* pb = atom_pb(blk_id); + if (pb) { + //Found + return pb->pb_graph_node; + } + return nullptr; +} + +void AtomPBBimap::set_atom_pb(const AtomBlockId blk_id, const t_pb* pb) { + //If either of blk_id or pb are not valid, + //remove any mapping + if (!blk_id && pb) { + //Remove + atom_to_pb_.erase(pb); + } else if (blk_id && !pb) { + //Remove + atom_to_pb_.erase(blk_id); + } else if (blk_id && pb) { + //If both are valid store the mapping + atom_to_pb_.update(blk_id, pb); + } +} + +void AtomPBBimap::reset_bimap() { + atom_to_pb_.clear(); +} + +bool AtomPBBimap::is_empty() const { + return atom_to_pb_.empty(); +} diff --git a/vpr/src/pack/atom_pb_bimap.h b/vpr/src/pack/atom_pb_bimap.h new file mode 100644 index 00000000000..d8623543910 --- /dev/null +++ b/vpr/src/pack/atom_pb_bimap.h @@ -0,0 +1,58 @@ +/** + * @file + * @author Amir Poolad + * @date March 2025 + * @brief The declaration of the AtomPBBimap class. 
+ * + * This file declares a class called AtomPBBimap that + * contains a two way mapping between AtomBlockIds and pb types. + */ + +#pragma once + +#include "vpr_types.h" + +// Forward declaration +class t_pb_graph_node; + +/** + * @brief Class that holds a bimap between atoms and pb types. + * This means that you can get a pb from an atom and the + * other way around. + * + * Used in the global AtomLookup context and in ClusterLegalizer + */ +class AtomPBBimap { + public: + AtomPBBimap() = default; + AtomPBBimap(const vtr::bimap& atom_to_pb); + + /** + * @brief Returns the leaf pb associated with the atom blk_id + * @note this is the lowest level pb which corresponds directly to the atom block + */ + const t_pb* atom_pb(const AtomBlockId blk_id) const; + + ///@brief Returns the atom block id associated with pb + AtomBlockId pb_atom(const t_pb* pb) const; + + ///@brief Convenience wrapper around atom_pb to access the associated graph node + const t_pb_graph_node* atom_pb_graph_node(const AtomBlockId blk_id) const; + + /** + * @brief Sets the bidirectional mapping between an atom and pb + * + * If either blk_id or pb is not valid, any existing mapping is removed + */ + void set_atom_pb(const AtomBlockId blk_id, const t_pb* pb); + + /// @brief Removes all atom to pb mappings (clears the bimap). + void reset_bimap(); + + /// @brief Returns if the bimap is empty + bool is_empty() const; + + private: + /// @brief Two way map between AtomBlockIds and t_pb + vtr::bimap atom_to_pb_; +}; diff --git a/vpr/src/pack/cluster_legalizer.cpp b/vpr/src/pack/cluster_legalizer.cpp index 43c3ec0182a..d1a8a72076a 100644 --- a/vpr/src/pack/cluster_legalizer.cpp +++ b/vpr/src/pack/cluster_legalizer.cpp @@ -61,13 +61,13 @@ static void alloc_and_load_pb_stats(t_pb* pb) { * @brief Check the atom blocks of a cluster pb. Used in the verify method. 
*/ /* TODO: May want to check that all atom blocks are actually reached */ -static void check_cluster_atom_blocks(t_pb* pb, std::unordered_set& blocks_checked) { +static void check_cluster_atom_blocks(t_pb* pb, std::unordered_set& blocks_checked, const AtomPBBimap& atom_pb_lookup) { const AtomContext& atom_ctx = g_vpr_ctx.atom(); const t_pb_type* pb_type = pb->pb_graph_node->pb_type; if (pb_type->num_modes == 0) { /* primitive */ - AtomBlockId blk_id = atom_ctx.lookup().pb_atom(pb); + AtomBlockId blk_id = atom_pb_lookup.pb_atom(pb); if (blk_id) { if (blocks_checked.count(blk_id)) { VPR_FATAL_ERROR(VPR_ERROR_PACK, @@ -75,7 +75,7 @@ static void check_cluster_atom_blocks(t_pb* pb, std::unordered_set& pb->name, atom_ctx.netlist().block_name(blk_id).c_str()); } blocks_checked.insert(blk_id); - if (pb != atom_ctx.lookup().atom_pb(blk_id)) { + if (pb != atom_pb_lookup.atom_pb(blk_id)) { VPR_FATAL_ERROR(VPR_ERROR_PACK, "pb %s contains atom block %s but atom block does not link to pb.\n", pb->name, atom_ctx.netlist().block_name(blk_id).c_str()); @@ -89,7 +89,7 @@ static void check_cluster_atom_blocks(t_pb* pb, std::unordered_set& if (pb->child_pbs[i] != nullptr) { if (pb->child_pbs[i][j].name != nullptr) { has_child = true; - check_cluster_atom_blocks(&pb->child_pbs[i][j], blocks_checked); + check_cluster_atom_blocks(&pb->child_pbs[i][j], blocks_checked, atom_pb_lookup); } } } @@ -258,7 +258,7 @@ static enum e_block_pack_status check_chain_root_placement_feasibility(const t_p const t_clustering_chain_info& clustering_chain_info, t_pack_patterns* mol_pack_patterns, const AtomBlockId blk_id) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); + const AtomNetlist& atom_netlist = g_vpr_ctx.atom().netlist(); enum e_block_pack_status block_pack_status = e_block_pack_status::BLK_PASSED; @@ -268,10 +268,10 @@ static enum e_block_pack_status check_chain_root_placement_feasibility(const t_p t_model_ports* root_port = chain_root_pins[0][0]->port->model_port; AtomNetId chain_net_id; - 
auto port_id = atom_ctx.netlist().find_atom_port(blk_id, root_port); + auto port_id = atom_netlist.find_atom_port(blk_id, root_port); if (port_id) { - chain_net_id = atom_ctx.netlist().port_net(port_id, chain_root_pins[0][0]->pin_number); + chain_net_id = atom_netlist.port_net(port_id, chain_root_pins[0][0]->pin_number); } // if this block is part of a long chain or it is driven by a cluster @@ -392,14 +392,12 @@ static bool primitive_memory_sibling_feasible(const AtomBlockId blk_id, const t_ /* * @brief Check if the given atom is feasible in the given pb. */ -static bool primitive_feasible(const AtomBlockId blk_id, t_pb* cur_pb) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); - +static bool primitive_feasible(const AtomBlockId blk_id, t_pb* cur_pb, const AtomPBBimap& atom_to_pb) { const t_pb_type* cur_pb_type = cur_pb->pb_graph_node->pb_type; VTR_ASSERT(cur_pb_type->num_modes == 0); /* primitive */ - AtomBlockId cur_pb_blk_id = atom_ctx.lookup().pb_atom(cur_pb); + AtomBlockId cur_pb_blk_id = atom_to_pb.pb_atom(cur_pb); if (cur_pb_blk_id && cur_pb_blk_id != blk_id) { /* This pb already has a different logical block */ return false; @@ -410,7 +408,8 @@ static bool primitive_feasible(const AtomBlockId blk_id, t_pb* cur_pb) { * - all siblings must share all nets, including open nets, with the exception of data nets */ /* find sibling if one exists */ - AtomBlockId sibling_memory_blk_id = find_memory_sibling(cur_pb); + const t_pb* sibling_memory_pb = find_memory_sibling(cur_pb); + AtomBlockId sibling_memory_blk_id = atom_to_pb.pb_atom(sibling_memory_pb); if (sibling_memory_blk_id) { //There is a sibling, see if the current block is feasible with it @@ -439,9 +438,9 @@ try_place_atom_block_rec(const t_pb_graph_node* pb_graph_node, t_lb_router_data* router_data, int verbosity, const Prepacker& prepacker, - const vtr::vector_map& clustering_chain_info) { + const vtr::vector_map& clustering_chain_info, + AtomPBBimap& atom_to_pb) { const AtomContext& atom_ctx = 
g_vpr_ctx.atom(); - AtomContext& mutable_atom_ctx = g_vpr_ctx.mutable_atom(); VTR_ASSERT_SAFE(cb != nullptr); e_block_pack_status block_pack_status = e_block_pack_status::BLK_PASSED; @@ -455,7 +454,7 @@ try_place_atom_block_rec(const t_pb_graph_node* pb_graph_node, atom_cluster, molecule_id, router_data, verbosity, - prepacker, clustering_chain_info); + prepacker, clustering_chain_info, atom_to_pb); parent_pb = my_parent; } else { parent_pb = cb; @@ -513,8 +512,8 @@ try_place_atom_block_rec(const t_pb_graph_node* pb_graph_node, bool is_primitive = (pb_type->num_modes == 0); if (is_primitive) { - VTR_ASSERT(!atom_ctx.lookup().pb_atom(pb) - && atom_ctx.lookup().atom_pb(blk_id) == nullptr + VTR_ASSERT(!atom_to_pb.pb_atom(pb) + && atom_to_pb.atom_pb(blk_id) == nullptr && atom_cluster[blk_id] == LegalizationClusterId::INVALID()); /* try pack to location */ VTR_ASSERT(pb->name == nullptr); @@ -527,10 +526,10 @@ try_place_atom_block_rec(const t_pb_graph_node* pb_graph_node, // TODO: It would be a good idea to remove the use of this global // variables to prevent external users from modifying this by // mistake. - mutable_atom_ctx.mutable_lookup().set_atom_pb(blk_id, pb); + atom_to_pb.set_atom_pb(blk_id, pb); - add_atom_as_target(router_data, blk_id); - if (!primitive_feasible(blk_id, pb)) { + add_atom_as_target(router_data, blk_id, atom_to_pb); + if (!primitive_feasible(blk_id, pb, atom_to_pb)) { /* failed location feasibility check, revert pack */ block_pack_status = e_block_pack_status::BLK_FAILED_FEASIBLE; } @@ -600,13 +599,13 @@ static void reset_lookahead_pins_used(t_pb* cur_pb) { * @brief Checks if the sinks of the given net are reachable from the driver * pb gpin. 
*/ -static int net_sinks_reachable_in_cluster(const t_pb_graph_pin* driver_pb_gpin, const int depth, const AtomNetId net_id) { +static int net_sinks_reachable_in_cluster(const t_pb_graph_pin* driver_pb_gpin, const int depth, const AtomNetId net_id, const AtomPBBimap& atom_to_pb) { const AtomContext& atom_ctx = g_vpr_ctx.atom(); //Record the sink pb graph pins we are looking for std::unordered_set sink_pb_gpins; for (const AtomPinId pin_id : atom_ctx.netlist().net_sinks(net_id)) { - const t_pb_graph_pin* sink_pb_gpin = find_pb_graph_pin(atom_ctx.netlist(), atom_ctx.lookup(), pin_id); + const t_pb_graph_pin* sink_pb_gpin = find_pb_graph_pin(atom_ctx.netlist(), atom_to_pb, pin_id); VTR_ASSERT(sink_pb_gpin); sink_pb_gpins.insert(sink_pb_gpin); @@ -632,20 +631,20 @@ static int net_sinks_reachable_in_cluster(const t_pb_graph_pin* driver_pb_gpin, * @brief Returns the pb_graph_pin of the atom pin defined by the driver_pin_id in the driver_pb */ static t_pb_graph_pin* get_driver_pb_graph_pin(const t_pb* driver_pb, const AtomPinId driver_pin_id) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); + const AtomNetlist& atom_netlist = g_vpr_ctx.atom().netlist(); const auto driver_pb_type = driver_pb->pb_graph_node->pb_type; int output_port = 0; // find the port of the pin driving the net as well as the port model - auto driver_port_id = atom_ctx.netlist().pin_port(driver_pin_id); - auto driver_model_port = atom_ctx.netlist().port_model(driver_port_id); + auto driver_port_id = atom_netlist.pin_port(driver_pin_id); + auto driver_model_port = atom_netlist.port_model(driver_port_id); // find the port id of the port containing the driving pin in the driver_pb_type for (int i = 0; i < driver_pb_type->num_ports; i++) { auto& prim_port = driver_pb_type->ports[i]; if (prim_port.type == OUT_PORT) { if (prim_port.model_port == driver_model_port) { // get the output pb_graph_pin driving this input net - return 
&(driver_pb->pb_graph_node->output_pins[output_port][atom_ctx.netlist().pin_port_bit(driver_pin_id)]); + return &(driver_pb->pb_graph_node->output_pins[output_port][atom_netlist.pin_port_bit(driver_pin_id)]); } output_port++; } @@ -666,7 +665,8 @@ static t_pb_graph_pin* get_driver_pb_graph_pin(const t_pb* driver_pb, const Atom static void compute_and_mark_lookahead_pins_used_for_pin(const t_pb_graph_pin* pb_graph_pin, const t_pb* primitive_pb, const AtomNetId net_id, - const vtr::vector_map& atom_cluster) { + const vtr::vector_map& atom_cluster, + const AtomPBBimap& atom_to_pb) { const AtomContext& atom_ctx = g_vpr_ctx.atom(); // starting from the parent pb of the input primitive go up in the hierarchy till the root block @@ -683,9 +683,9 @@ static void compute_and_mark_lookahead_pins_used_for_pin(const t_pb_graph_pin* p // find the driver of the input net connected to the pin being studied const auto driver_pin_id = atom_ctx.netlist().net_driver(net_id); // find the id of the atom occupying the input primitive_pb - const auto prim_blk_id = atom_ctx.lookup().pb_atom(primitive_pb); + const auto prim_blk_id = atom_to_pb.pb_atom(primitive_pb); // find the pb block occupied by the driving atom - const auto driver_pb = atom_ctx.lookup().atom_pb(driver_blk_id); + const auto driver_pb = atom_to_pb.atom_pb(driver_blk_id); // pb_graph_pin driving net_id in the driver pb block t_pb_graph_pin* output_pb_graph_pin = nullptr; // if the driver block is in the same clb as the input primitive block @@ -773,7 +773,7 @@ static void compute_and_mark_lookahead_pins_used_for_pin(const t_pb_graph_pin* p //the net does not exit the cluster /* TODO: I should cache the absorbed outputs, once net is absorbed, * net is forever absorbed, no point in rechecking every time */ - if (net_sinks_reachable_in_cluster(pb_graph_pin, depth, net_id)) { + if (net_sinks_reachable_in_cluster(pb_graph_pin, depth, net_id, atom_to_pb)) { //All the sinks are reachable inside the cluster net_exits_cluster = 
false; } @@ -792,18 +792,19 @@ static void compute_and_mark_lookahead_pins_used_for_pin(const t_pb_graph_pin* p * @brief Determine if pins of speculatively packed pb are legal */ static void compute_and_mark_lookahead_pins_used(const AtomBlockId blk_id, - const vtr::vector_map& atom_cluster) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); + const vtr::vector_map& atom_cluster, + const AtomPBBimap& atom_to_pb) { + const AtomNetlist& atom_netlist = g_vpr_ctx.atom().netlist(); - const t_pb* cur_pb = atom_ctx.lookup().atom_pb(blk_id); + const t_pb* cur_pb = atom_to_pb.atom_pb(blk_id); VTR_ASSERT(cur_pb != nullptr); /* Walk through inputs, outputs, and clocks marking pins off of the same class */ - for (auto pin_id : atom_ctx.netlist().block_pins(blk_id)) { - auto net_id = atom_ctx.netlist().pin_net(pin_id); + for (auto pin_id : atom_netlist.block_pins(blk_id)) { + auto net_id = atom_netlist.pin_net(pin_id); - const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_ctx.lookup(), pin_id); - compute_and_mark_lookahead_pins_used_for_pin(pb_graph_pin, cur_pb, net_id, atom_cluster); + const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_netlist, atom_to_pb, pin_id); + compute_and_mark_lookahead_pins_used_for_pin(pb_graph_pin, cur_pb, net_id, atom_cluster, atom_to_pb); } } @@ -814,9 +815,8 @@ static void compute_and_mark_lookahead_pins_used(const AtomBlockId blk_id, * number of pb_graph pins. Can use hash tables or make incremental if becomes an issue. 
*/ static void try_update_lookahead_pins_used(t_pb* cur_pb, - const vtr::vector_map& atom_cluster) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); - + const vtr::vector_map& atom_cluster, + const AtomPBBimap& atom_to_pb) { // run recursively till a leaf (primitive) pb block is reached const t_pb_type* pb_type = cur_pb->pb_graph_node->pb_type; if (pb_type->num_modes > 0 && cur_pb->name != nullptr) { @@ -824,7 +824,7 @@ static void try_update_lookahead_pins_used(t_pb* cur_pb, for (int i = 0; i < pb_type->modes[cur_pb->mode].num_pb_type_children; i++) { if (cur_pb->child_pbs[i] != nullptr) { for (int j = 0; j < pb_type->modes[cur_pb->mode].pb_type_children[i].num_pb; j++) { - try_update_lookahead_pins_used(&cur_pb->child_pbs[i][j], atom_cluster); + try_update_lookahead_pins_used(&cur_pb->child_pbs[i][j], atom_cluster, atom_to_pb); } } } @@ -832,9 +832,9 @@ static void try_update_lookahead_pins_used(t_pb* cur_pb, } else { // find if this child (primitive) pb block has an atom mapped to it, // if yes compute and mark lookahead pins used for that pb block - AtomBlockId blk_id = atom_ctx.lookup().pb_atom(cur_pb); + AtomBlockId blk_id = atom_to_pb.pb_atom(cur_pb); if (pb_type->blif_model != nullptr && blk_id) { - compute_and_mark_lookahead_pins_used(blk_id, atom_cluster); + compute_and_mark_lookahead_pins_used(blk_id, atom_cluster, atom_to_pb); } } } @@ -964,16 +964,14 @@ void ClusterLegalizer::reset_molecule_info(PackMoleculeId mol_id) { */ static void revert_place_atom_block(const AtomBlockId blk_id, t_lb_router_data* router_data, - vtr::vector_map& atom_cluster) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); - AtomContext& mutable_atom_ctx = g_vpr_ctx.mutable_atom(); - + vtr::vector_map& atom_cluster, + AtomPBBimap& atom_to_pb) { //We cast away const here since we may free the pb, and it is //being removed from the active mapping. 
// //In general most code works fine accessing cosnt t_pb*, //which is why we store them as such in atom_ctx.lookup() - t_pb* pb = const_cast(atom_ctx.lookup().atom_pb(blk_id)); + t_pb* pb = const_cast(atom_to_pb.atom_pb(blk_id)); if (pb != nullptr) { /* When freeing molecules, the current block might already have been freed by a prior revert @@ -981,7 +979,7 @@ static void revert_place_atom_block(const AtomBlockId blk_id, */ t_pb* next = pb->parent_pb; - free_pb(pb); + free_pb(pb, atom_to_pb); pb = next; while (pb != nullptr) { @@ -997,7 +995,7 @@ static void revert_place_atom_block(const AtomBlockId blk_id, /* If the code gets here, then that means that placing the initial seed molecule * failed, don't free the actual complex block itself as the seed needs to find * another placement */ - free_pb(pb); + free_pb(pb, atom_to_pb); } } pb = next; @@ -1006,7 +1004,7 @@ static void revert_place_atom_block(const AtomBlockId blk_id, //Update the atom netlist mapping atom_cluster[blk_id] = LegalizationClusterId::INVALID(); - mutable_atom_ctx.mutable_lookup().set_atom_pb(blk_id, nullptr); + atom_to_pb.set_atom_pb(blk_id, nullptr); } /* @@ -1235,13 +1233,14 @@ e_block_pack_status ClusterLegalizer::try_pack_molecule(PackMoleculeId molecule_ cluster.router_data, log_verbosity_, prepacker_, - clustering_chain_info_); + clustering_chain_info_, + mutable_atom_pb_lookup()); } if (enable_pin_feasibility_filter_ && block_pack_status == e_block_pack_status::BLK_PASSED) { // Check if pin usage is feasible for the current packing assignment reset_lookahead_pins_used(cluster.pb); - try_update_lookahead_pins_used(cluster.pb, atom_cluster_); + try_update_lookahead_pins_used(cluster.pb, atom_cluster_, atom_pb_lookup()); if (!check_lookahead_pins_used(cluster.pb, max_external_pin_util)) { VTR_LOGV(log_verbosity_ > 4, "\t\t\tFAILED Pin Feasibility Filter\n"); block_pack_status = e_block_pack_status::BLK_FAILED_FEASIBLE; @@ -1303,7 +1302,7 @@ e_block_pack_status 
ClusterLegalizer::try_pack_molecule(PackMoleculeId molecule_ /* Chained molecules often take up lots of area and are important, * if a chain is packed in, want to rename logic block to match chain name */ AtomBlockId chain_root_blk_id = molecule.atom_block_ids[molecule.pack_pattern->root_block->block_id]; - t_pb* cur_pb = atom_ctx.lookup().atom_pb(chain_root_blk_id)->parent_pb; + t_pb* cur_pb = atom_pb_lookup().atom_pb(chain_root_blk_id)->parent_pb; while (cur_pb != nullptr) { free(cur_pb->name); cur_pb->name = vtr::strdup(atom_ctx.netlist().block_name(chain_root_blk_id).c_str()); @@ -1348,7 +1347,7 @@ e_block_pack_status ClusterLegalizer::try_pack_molecule(PackMoleculeId molecule_ atom_cluster_[atom_blk_id] = cluster_id; // Update the num child blocks in pb - const t_pb* atom_pb = atom_ctx.lookup().atom_pb(atom_blk_id); + const t_pb* atom_pb = atom_pb_lookup().atom_pb(atom_blk_id); VTR_ASSERT_SAFE(atom_pb != nullptr); t_pb* cur_pb = atom_pb->parent_pb; while (cur_pb != nullptr) { @@ -1367,13 +1366,13 @@ e_block_pack_status ClusterLegalizer::try_pack_molecule(PackMoleculeId molecule_ for (size_t i = 0; i < failed_location; i++) { AtomBlockId atom_blk_id = molecule.atom_block_ids[i]; if (atom_blk_id) { - remove_atom_from_target(cluster.router_data, atom_blk_id); + remove_atom_from_target(cluster.router_data, atom_blk_id, atom_pb_lookup()); } } for (size_t i = 0; i < failed_location; i++) { AtomBlockId atom_blk_id = molecule.atom_block_ids[i]; if (atom_blk_id) { - revert_place_atom_block(atom_blk_id, cluster.router_data, atom_cluster_); + revert_place_atom_block(atom_blk_id, cluster.router_data, atom_cluster_, mutable_atom_pb_lookup()); } } reset_molecule_info(molecule_id); @@ -1458,7 +1457,7 @@ ClusterLegalizer::start_new_cluster(PackMoleculeId molecule_id, molecule_cluster_[molecule_id] = new_cluster_id; } else { // Delete the new_cluster. 
- free_pb(new_cluster.pb); + free_pb(new_cluster.pb, mutable_atom_pb_lookup()); delete new_cluster.pb; free_router_data(new_cluster.router_data); free_cluster_placement_stats(new_cluster.placement_stats); @@ -1511,7 +1510,7 @@ void ClusterLegalizer::destroy_cluster(LegalizationClusterId cluster_id) { const t_pack_molecule& mol = prepacker_.get_molecule(mol_id); for (AtomBlockId atom_blk_id : mol.atom_block_ids) { if (atom_blk_id) { - revert_place_atom_block(atom_blk_id, cluster.router_data, atom_cluster_); + revert_place_atom_block(atom_blk_id, cluster.router_data, atom_cluster_, mutable_atom_pb_lookup()); } } reset_molecule_info(mol_id); @@ -1520,7 +1519,7 @@ void ClusterLegalizer::destroy_cluster(LegalizationClusterId cluster_id) { cluster.molecules.clear(); // Free the rest of the cluster data. // Casting things to nullptr for safety just in case someone is trying to use it. - free_pb(cluster.pb); + free_pb(cluster.pb, mutable_atom_pb_lookup()); delete cluster.pb; cluster.pb = nullptr; free_router_data(cluster.router_data); @@ -1631,6 +1630,8 @@ ClusterLegalizer::ClusterLegalizer(const AtomNetlist& atom_netlist, cluster_legalization_strategy_ = cluster_legalization_strategy; enable_pin_feasibility_filter_ = enable_pin_feasibility_filter; log_verbosity_ = log_verbosity; + VTR_ASSERT(g_vpr_ctx.atom().lookup().atom_pb_bimap().is_empty()); + atom_pb_lookup_ = AtomPBBimap(); } void ClusterLegalizer::reset() { @@ -1640,6 +1641,7 @@ void ClusterLegalizer::reset() { continue; destroy_cluster(cluster_id); } + mutable_atom_pb_lookup().reset_bimap(); compress(); } @@ -1656,7 +1658,7 @@ void ClusterLegalizer::verify() { */ for (auto blk_id : atom_ctx.netlist().blocks()) { //Each atom should be part of a pb - const t_pb* atom_pb = atom_ctx.lookup().atom_pb(blk_id); + const t_pb* atom_pb = atom_pb_lookup().atom_pb(blk_id); if (!atom_pb) { VPR_FATAL_ERROR(VPR_ERROR_PACK, "Atom block %s is not mapped to a pb\n", @@ -1664,7 +1666,7 @@ void ClusterLegalizer::verify() { } //Check 
the reverse mapping is consistent - if (atom_ctx.lookup().pb_atom(atom_pb) != blk_id) { + if (atom_pb_lookup().pb_atom(atom_pb) != blk_id) { VPR_FATAL_ERROR(VPR_ERROR_PACK, "pb %s does not contain atom block %s but atom block %s maps to pb.\n", atom_pb->name, @@ -1698,8 +1700,7 @@ void ClusterLegalizer::verify() { for (LegalizationClusterId cluster_id : clusters()) { if (!cluster_id.is_valid()) continue; - check_cluster_atom_blocks(get_cluster_pb(cluster_id), - atoms_checked); + check_cluster_atom_blocks(get_cluster_pb(cluster_id), atoms_checked, atom_pb_lookup()); } for (auto blk_id : atom_ctx.netlist().blocks()) { diff --git a/vpr/src/pack/cluster_legalizer.h b/vpr/src/pack/cluster_legalizer.h index e6d304c7481..67bc00a95ec 100644 --- a/vpr/src/pack/cluster_legalizer.h +++ b/vpr/src/pack/cluster_legalizer.h @@ -22,6 +22,7 @@ #include "vtr_strong_id.h" #include "vtr_vector.h" #include "vtr_vector_map.h" +#include "atom_pb_bimap.h" // Forward declarations class Prepacker; @@ -520,6 +521,9 @@ class ClusterLegalizer { log_verbosity_ = verbosity; } + inline const AtomPBBimap& atom_pb_lookup() const { return atom_pb_lookup_; } + inline AtomPBBimap& mutable_atom_pb_lookup() { return atom_pb_lookup_; } + /// @brief Destructor of the class. Frees allocated data. ~ClusterLegalizer(); @@ -587,4 +591,8 @@ class ClusterLegalizer { /// @brief The prepacker object that stores the molecules which will be /// legalized into clusters. const Prepacker& prepacker_; + + /// @brief A two way map between AtomBlockIds and pb types. 
This is a copy + /// of the AtomPBBimap in the global context's AtomLookup + AtomPBBimap atom_pb_lookup_; }; diff --git a/vpr/src/pack/cluster_router.cpp b/vpr/src/pack/cluster_router.cpp index 6bd4095736b..92d4b183dc5 100644 --- a/vpr/src/pack/cluster_router.cpp +++ b/vpr/src/pack/cluster_router.cpp @@ -34,6 +34,7 @@ #include "pb_type_graph.h" #include "lb_type_rr_graph.h" #include "cluster_router.h" +#include "atom_pb_bimap.h" /* #define PRINT_INTRA_LB_ROUTE */ @@ -74,10 +75,10 @@ class reservable_pq : public std::priority_queue { ******************************************************************************************/ static void free_lb_net_rt(t_lb_trace* lb_trace); static void free_lb_trace(t_lb_trace* lb_trace); -static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id); -static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id); +static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id, const AtomPBBimap& atom_to_pb); +static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id, const AtomPBBimap& atom_to_pb); -static void fix_duplicate_equivalent_pins(t_lb_router_data* router_data); +static void fix_duplicate_equivalent_pins(t_lb_router_data* router_data, const AtomPBBimap& atom_to_pb); static void commit_remove_rt(t_lb_trace* rt, t_lb_router_data* router_data, e_commit_remove op, std::unordered_map* mode_map, t_mode_selection_status* mode_status); static bool is_skip_route_net(t_lb_trace* rt, t_lb_router_data* router_data); @@ -248,7 +249,7 @@ static bool check_edge_for_route_conflicts(std::unordered_map& atoms_added = *router_data->atoms_added; - const t_pb* pb = atom_ctx.lookup().atom_pb(blk_id); + const t_pb* pb = atom_to_pb.atom_pb(blk_id); if (atoms_added.count(blk_id) == 0) { return; @@ -288,7 +289,7 @@ void remove_atom_from_target(t_lb_router_data* router_data, const AtomBlockId bl 
set_reset_pb_modes(router_data, pb, false); for (auto pin_id : atom_ctx.netlist().block_pins(blk_id)) { - remove_pin_from_rt_terminals(router_data, pin_id); + remove_pin_from_rt_terminals(router_data, pin_id, atom_to_pb); } atoms_added.erase(blk_id); @@ -625,7 +626,7 @@ static void free_lb_trace(t_lb_trace* lb_trace) { /* Given a pin of a net, assign route tree terminals for it * Assumes that pin is not already assigned */ -static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id) { +static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id, const AtomPBBimap& atom_to_pb) { std::vector& lb_nets = *router_data->intra_lb_nets; std::vector& lb_type_graph = *router_data->lb_type_graph; t_logical_block_type_ptr lb_type = router_data->lb_type; @@ -633,7 +634,7 @@ static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPin unsigned int ipos; auto& atom_ctx = g_vpr_ctx.atom(); - const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_ctx.lookup(), pin_id); + const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_to_pb, pin_id); VTR_ASSERT(pb_graph_pin); AtomPortId port_id = atom_ctx.netlist().pin_port(pin_id); @@ -792,7 +793,7 @@ static void add_pin_to_rt_terminals(t_lb_router_data* router_data, const AtomPin /* Given a pin of a net, remove route tree terminals from it */ -static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id) { +static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const AtomPinId pin_id, const AtomPBBimap& atom_to_pb) { std::vector& lb_nets = *router_data->intra_lb_nets; std::vector& lb_type_graph = *router_data->lb_type_graph; t_logical_block_type_ptr lb_type = router_data->lb_type; @@ -800,7 +801,7 @@ static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const At unsigned int ipos; auto& atom_ctx = g_vpr_ctx.atom(); - const t_pb_graph_pin* 
pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_ctx.lookup(), pin_id); + const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_to_pb, pin_id); AtomPortId port_id = atom_ctx.netlist().pin_port(pin_id); AtomNetId net_id = atom_ctx.netlist().pin_net(pin_id); @@ -917,7 +918,7 @@ static void remove_pin_from_rt_terminals(t_lb_router_data* router_data, const At //To work around this, we fix all but one of these duplicate connections to route to specific pins, //(instead of the common sink). This ensures a legal routing is produced and that the duplicate pins //are not 'missing' in the clustered netlist. -static void fix_duplicate_equivalent_pins(t_lb_router_data* router_data) { +static void fix_duplicate_equivalent_pins(t_lb_router_data* router_data, const AtomPBBimap& atom_to_pb) { auto& atom_ctx = g_vpr_ctx.atom(); std::vector& lb_type_graph = *router_data->lb_type_graph; @@ -943,7 +944,7 @@ static void fix_duplicate_equivalent_pins(t_lb_router_data* router_data) { AtomPinId atom_pin = lb_nets[ilb_net].atom_pins[iterm]; VTR_ASSERT(atom_pin); - const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_ctx.lookup(), atom_pin); + const t_pb_graph_pin* pb_graph_pin = find_pb_graph_pin(atom_ctx.netlist(), atom_to_pb, atom_pin); VTR_ASSERT(pb_graph_pin); if (pb_graph_pin->port->equivalent == PortEquivalence::NONE) continue; //Only need to remap equivalent ports diff --git a/vpr/src/pack/cluster_router.h b/vpr/src/pack/cluster_router.h index 4f88f1c0b6a..0b40f84c627 100644 --- a/vpr/src/pack/cluster_router.h +++ b/vpr/src/pack/cluster_router.h @@ -16,8 +16,8 @@ void free_router_data(t_lb_router_data* router_data); void free_intra_lb_nets(std::vector* intra_lb_nets); /* Routing Functions */ -void add_atom_as_target(t_lb_router_data* router_data, const AtomBlockId blk_id); -void remove_atom_from_target(t_lb_router_data* router_data, const AtomBlockId blk_id); +void add_atom_as_target(t_lb_router_data* router_data, const 
AtomBlockId blk_id, const AtomPBBimap& atom_to_pb); +void remove_atom_from_target(t_lb_router_data* router_data, const AtomBlockId blk_id, const AtomPBBimap& atom_to_pb); void set_reset_pb_modes(t_lb_router_data* router_data, const t_pb* pb, const bool set); bool try_intra_lb_route(t_lb_router_data* router_data, int verbosity, t_mode_selection_status* mode_status); void reset_intra_lb_route(t_lb_router_data* router_data); diff --git a/vpr/src/pack/greedy_candidate_selector.cpp b/vpr/src/pack/greedy_candidate_selector.cpp index bd1cfa2bc88..db4e575da29 100644 --- a/vpr/src/pack/greedy_candidate_selector.cpp +++ b/vpr/src/pack/greedy_candidate_selector.cpp @@ -375,17 +375,13 @@ void GreedyCandidateSelector::mark_and_update_partial_gain( cluster_gain_stats.num_pins_of_net_in_pb[net_id]++; } -/* - * @brief Determine if atom block is in pb. - * - * TODO: This would make more sense in the cluster legalizer class. +/** + * @brief Determine if pb is a child of cluster_pb. */ -static bool is_atom_blk_in_pb(const AtomBlockId blk_id, const t_pb* pb) { - const AtomContext& atom_ctx = g_vpr_ctx.atom(); - - const t_pb* cur_pb = atom_ctx.lookup().atom_pb(blk_id); +static bool is_pb_in_cluster_pb(const t_pb* pb, const t_pb* cluster_pb) { + const t_pb* cur_pb = pb; while (cur_pb) { - if (cur_pb == pb) { + if (cur_pb == cluster_pb) { return true; } cur_pb = cur_pb->parent_pb; @@ -403,11 +399,6 @@ void GreedyCandidateSelector::update_connection_gain_values( /*This function is called when the connection_gain values on the net net_id *require updating. */ - // Atom Context used to lookup the atom pb. - // TODO: Should investigate this. Using the atom pb in this class is very - // strange. 
- const AtomContext& atom_ctx = g_vpr_ctx.atom(); - int num_internal_connections, num_open_connections, num_stuck_connections; num_internal_connections = num_open_connections = num_stuck_connections = 0; @@ -416,8 +407,12 @@ void GreedyCandidateSelector::update_connection_gain_values( /* may wish to speed things up by ignoring clock nets since they are high fanout */ for (AtomPinId pin_id : atom_netlist_.net_pins(net_id)) { AtomBlockId blk_id = atom_netlist_.pin_block(pin_id); - if (cluster_legalizer.get_atom_cluster(blk_id) == legalization_cluster_id - && is_atom_blk_in_pb(blk_id, atom_ctx.lookup().atom_pb(clustered_blk_id))) { + // TODO: Should investigate this. Using the atom pb bimap through is_atom_blk_in_cluster_block + // in this class is very strange + const t_pb* pin_block_pb = cluster_legalizer.atom_pb_lookup().atom_pb(blk_id); + const t_pb* cluster_pb = cluster_legalizer.atom_pb_lookup().atom_pb(clustered_blk_id); + + if (cluster_legalizer.get_atom_cluster(blk_id) == legalization_cluster_id && is_pb_in_cluster_pb(pin_block_pb, cluster_pb)) { num_internal_connections++; } else if (!cluster_legalizer.is_atom_clustered(blk_id)) { num_open_connections++; diff --git a/vpr/src/pack/pack.cpp b/vpr/src/pack/pack.cpp index 43db1f97ab5..867bfe6446a 100644 --- a/vpr/src/pack/pack.cpp +++ b/vpr/src/pack/pack.cpp @@ -44,11 +44,7 @@ static void get_intercluster_switch_fanin_estimates(const t_arch& arch, int* wire_switch_fanin, int* ipin_switch_fanin); -static float get_arch_switch_info(short switch_index, - int switch_fanin, - float& Tdel_switch, - float& R_switch, - float& Cout_switch); +static float get_arch_switch_info(short switch_index, int switch_fanin, float& Tdel_switch, float& R_switch, float& Cout_switch); static float approximate_inter_cluster_delay(const t_arch& arch, const t_det_routing_arch& routing_arch, @@ -157,7 +153,6 @@ bool try_pack(t_packer_opts* packer_opts, } int pack_iteration = 1; - // Initialize the cluster legalizer. 
ClusterLegalizer cluster_legalizer(atom_ctx.netlist(), prepacker, @@ -167,7 +162,6 @@ bool try_pack(t_packer_opts* packer_opts, ClusterLegalizationStrategy::SKIP_INTRA_LB_ROUTE, packer_opts->enable_pin_feasibility_filter, packer_opts->pack_verbosity); - VTR_LOG("Packing with pin utilization targets: %s\n", cluster_legalizer.get_target_external_pin_util().to_string().c_str()); VTR_LOG("Packing with high fanout thresholds: %s\n", high_fanout_thresholds.to_string().c_str()); @@ -184,6 +178,8 @@ bool try_pack(t_packer_opts* packer_opts, is_global, appack_ctx); + g_vpr_ctx.mutable_atom().mutable_lookup().set_atom_pb_bimap_lock(true); + while (true) { //Cluster the netlist // num_used_type_instances: A map used to save the number of used @@ -295,10 +291,6 @@ bool try_pack(t_packer_opts* packer_opts, } //Reset clustering for re-packing - for (auto blk : g_vpr_ctx.atom().netlist().blocks()) { - g_vpr_ctx.mutable_atom().mutable_lookup().set_atom_clb(blk, ClusterBlockId::INVALID()); - g_vpr_ctx.mutable_atom().mutable_lookup().set_atom_pb(blk, nullptr); - } for (auto net : g_vpr_ctx.atom().netlist().nets()) { g_vpr_ctx.mutable_atom().mutable_lookup().remove_atom_net(net); } @@ -307,7 +299,6 @@ bool try_pack(t_packer_opts* packer_opts, // Reset the cluster legalizer for re-clustering. 
cluster_legalizer.reset(); - ++pack_iteration; } @@ -325,7 +316,8 @@ bool try_pack(t_packer_opts* packer_opts, * } */ /******************** End **************************/ - + g_vpr_ctx.mutable_atom().mutable_lookup().set_atom_pb_bimap_lock(false); + g_vpr_ctx.mutable_atom().mutable_lookup().set_atom_to_pb_bimap(cluster_legalizer.atom_pb_lookup()); //check clustering and output it check_and_output_clustering(cluster_legalizer, *packer_opts, is_clock, &arch); @@ -336,11 +328,7 @@ bool try_pack(t_packer_opts* packer_opts, return true; } -static float get_arch_switch_info(short switch_index, - int switch_fanin, - float& Tdel_switch, - float& R_switch, - float& Cout_switch) { +static float get_arch_switch_info(short switch_index, int switch_fanin, float& Tdel_switch, float& R_switch, float& Cout_switch) { /* Fetches delay, resistance and output capacitance of the architecture switch at switch_index. * Returns the total delay through the switch. Used to calculate inter-cluster net delay. */ diff --git a/vpr/src/pack/post_routing_pb_pin_fixup.cpp b/vpr/src/pack/post_routing_pb_pin_fixup.cpp index 427d03e092b..d217f07a83d 100644 --- a/vpr/src/pack/post_routing_pb_pin_fixup.cpp +++ b/vpr/src/pack/post_routing_pb_pin_fixup.cpp @@ -450,7 +450,7 @@ static AtomPinId find_mapped_atom_pin(const AtomContext& atom_ctx, const t_pb_graph_pin* sink_pb_pin = intra_lb_pb_pin_lookup.pb_gpin(logical_type->index, sink_pb_route_id); const t_pb* leaf_pb = pb->find_pb(sink_pb_pin->parent_node); - const AtomPortId& atom_port = atom_ctx.netlist().find_atom_port(atom_ctx.lookup().pb_atom(leaf_pb), sink_pb_pin->port->model_port); + const AtomPortId& atom_port = atom_ctx.netlist().find_atom_port(atom_ctx.lookup().atom_pb_bimap().pb_atom(leaf_pb), sink_pb_pin->port->model_port); BitIndex atom_pin_bit_index = leaf_pb->atom_pin_bit_index(sink_pb_pin); AtomPinId mapped_atom_pin = atom_ctx.netlist().port_pin(atom_port, atom_pin_bit_index); diff --git a/vpr/src/pack/sync_netlists_to_routing_flat.cpp 
b/vpr/src/pack/sync_netlists_to_routing_flat.cpp index 312eeb0d5a2..0e6be438300 100644 --- a/vpr/src/pack/sync_netlists_to_routing_flat.cpp +++ b/vpr/src/pack/sync_netlists_to_routing_flat.cpp @@ -413,7 +413,7 @@ static void fixup_atom_pb_graph_pin_mapping(void) { const t_pb_graph_pin* atom_pbg_pin = pb_route.pb_graph_pin; t_pb* atom_pb = clb_pb->find_mutable_pb(atom_pbg_pin->parent_node); - AtomBlockId atb = atom_ctx.lookup().pb_atom(atom_pb); + AtomBlockId atb = atom_ctx.lookup().atom_pb_bimap().pb_atom(atom_pb); if (!atb) continue; diff --git a/vpr/src/pack/verify_clustering.cpp b/vpr/src/pack/verify_clustering.cpp index a514c505ed4..ec08e10a40b 100644 --- a/vpr/src/pack/verify_clustering.cpp +++ b/vpr/src/pack/verify_clustering.cpp @@ -107,7 +107,7 @@ static bool is_atom_pb_in_cluster_pb(AtomBlockId atom_blk_id, const AtomLookup& atom_lookup, const ClusteredNetlist& clb_nlist) { // Get the pbs - const t_pb* atom_pb = atom_lookup.atom_pb(atom_blk_id); + const t_pb* atom_pb = atom_lookup.atom_pb_bimap().atom_pb(atom_blk_id); const t_pb* cluster_pb = clb_nlist.block_pb(clb_blk_id); // For the atom pb to be a part of the cluster pb, the atom pb must be a // descendent of the cluster pb (the cluster pb is the ancestor to all atom @@ -179,7 +179,7 @@ static unsigned check_clustering_pb_consistency(const ClusteredNetlist& clb_nlis ClusterBlockId atom_clb_blk_id = atom_lookup.atom_clb(atom_blk_id); if (!atom_clb_blk_id.is_valid()) continue; - const t_pb* atom_pb = atom_lookup.atom_pb(atom_blk_id); + const t_pb* atom_pb = atom_lookup.atom_pb_bimap().atom_pb(atom_blk_id); // Make sure the atom's pb exists if (atom_pb == nullptr) { VTR_LOG_ERROR( @@ -188,7 +188,7 @@ static unsigned check_clustering_pb_consistency(const ClusteredNetlist& clb_nlis num_errors++; } else { // Sanity check: atom_pb == pb_atom - if (atom_lookup.pb_atom(atom_pb) != atom_blk_id) { + if (atom_lookup.atom_pb_bimap().pb_atom(atom_pb) != atom_blk_id) { VTR_LOG_ERROR( "Atom block %zu in cluster block 
%zu has a pb which " "belongs to another atom.\n", diff --git a/vpr/src/power/power.cpp b/vpr/src/power/power.cpp index a162cf6380c..8e59103cef9 100644 --- a/vpr/src/power/power.cpp +++ b/vpr/src/power/power.cpp @@ -162,7 +162,7 @@ static void power_usage_primitive(t_power_usage* power_usage, t_pb* pb, t_pb_gra } if (pb) { - AtomBlockId blk_id = atom_ctx.lookup().pb_atom(pb); + AtomBlockId blk_id = atom_ctx.lookup().atom_pb_bimap().pb_atom(pb); SRAM_values = alloc_SRAM_values_from_truth_table(LUT_size, atom_ctx.netlist().block_truth_table(blk_id)); } else { diff --git a/vpr/src/route/overuse_report.cpp b/vpr/src/route/overuse_report.cpp index 993f5667d99..92c421eccbd 100644 --- a/vpr/src/route/overuse_report.cpp +++ b/vpr/src/route/overuse_report.cpp @@ -335,7 +335,7 @@ static void report_congested_nets(const Netlist<>& net_list, os << "Net name = " << net_list.net_name(net_id) << ", "; if (is_flat) { AtomBlockId atom_blk_id = convert_to_atom_block_id(block_id); - os << "Driving block name = " << atom_lookup.atom_pb(atom_blk_id)->name << ", "; + os << "Driving block name = " << atom_lookup.atom_pb_bimap().atom_pb(atom_blk_id)->name << ", "; os << "Driving block type = " << g_vpr_ctx.clustering().clb_nlist.block_type(atom_lookup.atom_clb(atom_blk_id))->name << '\n'; } else { ClusterBlockId clb_blk_id = convert_to_cluster_block_id(block_id); @@ -363,7 +363,7 @@ static void report_congested_nets(const Netlist<>& net_list, << "\n"; if (is_flat) { auto pb_pin = atom_lookup.atom_pin_pb_graph_pin(convert_to_atom_pin_id(sink_id)); - auto pb_net_list = atom_lookup.atom_pb(convert_to_atom_block_id(net_list.pin_block(sink_id))); + auto pb_net_list = atom_lookup.atom_pb_bimap().atom_pb(convert_to_atom_block_id(net_list.pin_block(sink_id))); os << " " << "Pin Logical Num: " << pb_pin->pin_count_in_cluster << " PB Type: " << pb_pin->parent_node->pb_type->name << " Netlist PB: " << pb_net_list->name << " Parent PB Type: " << pb_net_list->parent_pb->pb_graph_node->pb_type->name << 
"Parent Netlist PB : " << pb_net_list->parent_pb->name << "\n"; os << " " diff --git a/vpr/src/timing/VprTimingGraphResolver.cpp b/vpr/src/timing/VprTimingGraphResolver.cpp index 8ace192035e..791615e5585 100644 --- a/vpr/src/timing/VprTimingGraphResolver.cpp +++ b/vpr/src/timing/VprTimingGraphResolver.cpp @@ -218,7 +218,7 @@ std::vector VprTimingGraphResolver::interconnect_delay_br //driver_component.inst_name = cluster_ctx.clb_nlist.block_name(src_blk); driver_component.type_name = "intra '"; if (is_flat_) { - const t_pb* atom_pb = atom_ctx.lookup().atom_pb((AtomBlockId&)src_blk); + const t_pb* atom_pb = atom_ctx.lookup().atom_pb_bimap().atom_pb((AtomBlockId&)src_blk); driver_component.type_name += (std::string(atom_pb->name) + "(" + atom_pb->hierarchical_type_name() + ")"); } else { driver_component.type_name += cluster_ctx.clb_nlist.block_type((ClusterBlockId&)src_blk)->name; @@ -263,7 +263,7 @@ std::vector VprTimingGraphResolver::interconnect_delay_br //sink_component.inst_name = cluster_ctx.clb_nlist.block_name(sink_blk); sink_component.type_name = "intra '"; if (is_flat_) { - sink_component.type_name += atom_ctx.lookup().atom_pb((AtomBlockId&)sink_blk)->name; + sink_component.type_name += atom_ctx.lookup().atom_pb_bimap().atom_pb((AtomBlockId&)sink_blk)->name; } else { sink_component.type_name += cluster_ctx.clb_nlist.block_type((ClusterBlockId&)sink_blk)->name; } diff --git a/vpr/src/util/vpr_utils.cpp b/vpr/src/util/vpr_utils.cpp index 828c024f9a8..853725c5bb9 100644 --- a/vpr/src/util/vpr_utils.cpp +++ b/vpr/src/util/vpr_utils.cpp @@ -300,7 +300,7 @@ static AtomPinId find_atom_pin_for_pb_route_id(ClusterBlockId clb, int pb_route_ //It is a leaf, and hence should map to an atom //Find the associated atom - AtomBlockId atom_block = atom_ctx.lookup().pb_atom(child_pb); + AtomBlockId atom_block = atom_ctx.lookup().atom_pb_bimap().pb_atom(child_pb); VTR_ASSERT(atom_block); //Now find the matching pin by seeing which pin maps to the gpin @@ -517,7 +517,7 @@ 
t_class_range get_class_range_for_block(const AtomBlockId atom_blk) { ClusterBlockId cluster_blk = atom_look_up.atom_clb(atom_blk); auto [physical_tile, sub_tile, sub_tile_cap, logical_block] = get_cluster_blk_physical_spec(cluster_blk); - const t_pb_graph_node* pb_graph_node = atom_look_up.atom_pb_graph_node(atom_blk); + const t_pb_graph_node* pb_graph_node = atom_look_up.atom_pb_bimap().atom_pb_graph_node(atom_blk); VTR_ASSERT(pb_graph_node != nullptr); return get_pb_graph_node_class_physical_range(physical_tile, sub_tile, @@ -889,8 +889,7 @@ bool primitive_type_feasible(const AtomBlockId blk_id, const t_pb_type* cur_pb_t //Returns the sibling atom of a memory slice pb // Note that the pb must be part of a MEMORY_CLASS -AtomBlockId find_memory_sibling(const t_pb* pb) { - auto& atom_ctx = g_vpr_ctx.atom(); +const t_pb* find_memory_sibling(const t_pb* pb) { const t_pb_type* pb_type = pb->pb_graph_node->pb_type; @@ -902,10 +901,10 @@ AtomBlockId find_memory_sibling(const t_pb* pb) { const t_pb* sibling_pb = &memory_class_pb->child_pbs[pb->mode][isibling]; if (sibling_pb->name != nullptr) { - return atom_ctx.lookup().pb_atom(sibling_pb); + return sibling_pb; } } - return AtomBlockId::INVALID(); + return nullptr; } /** @@ -978,15 +977,13 @@ AtomPinId find_atom_pin(ClusterBlockId blk_id, const t_pb_graph_pin* pb_gpin) { return atom_pin; } -//Retrieves the pb_graph_pin associated with an AtomPinId -// Currently this function just wraps get_pb_graph_node_pin_from_model_port_pin() -// in a more convenient interface. 
-const t_pb_graph_pin* find_pb_graph_pin(const AtomNetlist& netlist, const AtomLookup& netlist_lookup, const AtomPinId pin_id) { +// Retrieves the pb_graph_pin associated with an AtomPinId +const t_pb_graph_pin* find_pb_graph_pin(const AtomNetlist& netlist, const AtomPBBimap& atom_pb_lookup, const AtomPinId pin_id) { VTR_ASSERT(pin_id); //Get the graph node AtomBlockId blk_id = netlist.pin_block(pin_id); - const t_pb_graph_node* pb_gnode = netlist_lookup.atom_pb_graph_node(blk_id); + const t_pb_graph_node* pb_gnode = atom_pb_lookup.atom_pb_graph_node(blk_id); VTR_ASSERT(pb_gnode); //The graph node and pin/block should agree on the model they represent @@ -1254,7 +1251,7 @@ std::vector get_cluster_internal_class_pairs(const AtomLookup& atom_lookup, const auto& cluster_atoms = cluster_ctx.atoms_lookup[cluster_block_id]; for (AtomBlockId atom_blk_id : cluster_atoms) { - auto atom_pb_graph_node = atom_lookup.atom_pb_graph_node(atom_blk_id); + auto atom_pb_graph_node = atom_lookup.atom_pb_bimap().atom_pb_graph_node(atom_blk_id); auto class_range = get_pb_graph_node_class_physical_range(physical_tile, sub_tile, logical_block, @@ -1367,7 +1364,17 @@ int num_ext_inputs_atom_block(AtomBlockId blk_id) { return (ext_inps); } -void free_pb(t_pb* pb) { +/** + * @brief Free pb and remove its lookup data. 
+ * CLB lookup data is removed from the global context + * and PB to Atom bimap data is removed from atom_pb_bimap + * + * @param pb + * Pointer to t_pb to be freed + * @param atom_pb_bimap + * Reference to the atom to pb bimap to free the data from + */ +void free_pb(t_pb* pb, AtomPBBimap& atom_pb_bimap) { if (pb == nullptr) { return; } @@ -1387,7 +1394,7 @@ void free_pb(t_pb* pb) { for (i = 0; i < pb_type->modes[mode].num_pb_type_children && pb->child_pbs != nullptr; i++) { for (j = 0; j < pb_type->modes[mode].pb_type_children[i].num_pb && pb->child_pbs[i] != nullptr; j++) { if (pb->child_pbs[i][j].name != nullptr || pb->child_pbs[i][j].child_pbs != nullptr) { - free_pb(&pb->child_pbs[i][j]); + free_pb(&pb->child_pbs[i][j], atom_pb_bimap); } } if (pb->child_pbs[i]) { @@ -1405,13 +1412,13 @@ void free_pb(t_pb* pb) { } else { /* Primitive */ auto& atom_ctx = g_vpr_ctx.mutable_atom(); - auto blk_id = atom_ctx.lookup().pb_atom(pb); + auto blk_id = atom_pb_bimap.pb_atom(pb); if (blk_id) { //Update atom netlist mapping atom_ctx.mutable_lookup().set_atom_clb(blk_id, ClusterBlockId::INVALID()); - atom_ctx.mutable_lookup().set_atom_pb(blk_id, nullptr); + atom_pb_bimap.set_atom_pb(blk_id, nullptr); } - atom_ctx.mutable_lookup().set_atom_pb(AtomBlockId::INVALID(), pb); + atom_pb_bimap.set_atom_pb(AtomBlockId::INVALID(), pb); } free_pb_stats(pb); } diff --git a/vpr/src/util/vpr_utils.h b/vpr/src/util/vpr_utils.h index 23d69eae471..f2b62cfac1c 100644 --- a/vpr/src/util/vpr_utils.h +++ b/vpr/src/util/vpr_utils.h @@ -7,6 +7,7 @@ #include "rr_graph_utils.h" #include "vpr_types.h" #include "vtr_vector.h" +#include "atom_pb_bimap.h" #include #include @@ -166,6 +167,8 @@ const t_port* find_pb_graph_port(const t_pb_graph_node* pb_gnode, const std::str //Returns the graph pin matching name at pin index const t_pb_graph_pin* find_pb_graph_pin(const t_pb_graph_node* pb_gnode, const std::string& port_name, int index); +const t_pb_graph_pin* find_pb_graph_pin(const AtomNetlist& netlist, 
const AtomPBBimap& atom_pb_lookup, const AtomPinId pin_id); + AtomPinId find_atom_pin(ClusterBlockId blk_id, const t_pb_graph_pin* pb_gpin); //Returns the logical block type which is most common in the device grid @@ -186,7 +189,6 @@ int get_max_depth_of_pb_type(t_pb_type* pb_type); int get_max_nets_in_pb_type(const t_pb_type* pb_type); bool primitive_type_feasible(AtomBlockId blk_id, const t_pb_type* cur_pb_type); t_pb_graph_pin* get_pb_graph_node_pin_from_model_port_pin(const t_model_ports* model_port, const int model_pin, const t_pb_graph_node* pb_graph_node); -const t_pb_graph_pin* find_pb_graph_pin(const AtomNetlist& netlist, const AtomLookup& netlist_lookup, const AtomPinId pin_id); /// @brief Gets the pb_graph_node pin at the given pin index for the given /// pb_graph_node. t_pb_graph_pin* get_pb_graph_node_pin_from_pb_graph_node(t_pb_graph_node* pb_graph_node, int ipin); @@ -218,12 +220,12 @@ int num_ext_inputs_atom_block(AtomBlockId blk_id); std::tuple parse_direct_pin_name(std::string_view src_string, int line); void free_pb_stats(t_pb* pb); -void free_pb(t_pb* pb); +void free_pb(t_pb* pb, AtomPBBimap& atom_pb_bimap); void print_switch_usage(); void print_usage_by_wire_length(); -AtomBlockId find_memory_sibling(const t_pb* pb); +const t_pb* find_memory_sibling(const t_pb* pb); int get_atom_pin_class_num(const AtomPinId atom_pin_id);