[reconfigurator] Remove DatasetIdsBackfillFromDb (#7350)
Fixes #6645.

I believe this is safe to land now that R12 is out the door: any IDs in
`dataset` should have been assigned. #7229 is related, but in that case
the `dataset` table itself is also missing entries (so there's no ID to
backfill).
jgallagher authored Jan 17, 2025
1 parent 3a994af commit 017830d
Showing 12 changed files with 49 additions and 468 deletions.
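The core of the change is mechanical: the `SledEditor`/`ActiveSledEditor` constructors no longer take a `DatasetIdsBackfillFromDb`, so dataset IDs now come only from the parent blueprint rather than being backfilled from the database. The sketch below illustrates the call-shape change; the types here are hypothetical stand-in stubs, not the real definitions from `nexus/reconfigurator/planning`.

```rust
// Sketch only: `SledEditor`, `ActiveSledEditor`, and `DatasetIdsBackfillFromDb`
// are stand-in stubs, not the real omicron types.

struct ActiveSledEditor;

impl ActiveSledEditor {
    // After this commit: no backfill-map parameter.
    fn new_empty() -> Self {
        ActiveSledEditor
    }
}

struct SledEditor(ActiveSledEditor);

impl SledEditor {
    // Before: `pub fn for_new_active(preexisting_dataset_ids: DatasetIdsBackfillFromDb) -> Self`
    // After:
    fn for_new_active() -> Self {
        SledEditor(ActiveSledEditor::new_empty())
    }
}

fn main() {
    // Callers construct the editor directly; there is no longer a database
    // lookup to build a map of preexisting dataset IDs.
    let _editor = SledEditor::for_new_active();
}
```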
5 changes: 1 addition & 4 deletions dev-tools/reconfigurator-cli/src/main.rs
@@ -665,12 +665,9 @@ fn cmd_sled_show(
     swriteln!(s, "sled {}", sled_id);
     swriteln!(s, "subnet {}", sled_resources.subnet.net());
     swriteln!(s, "zpools ({}):", sled_resources.zpools.len());
-    for (zpool, (disk, datasets)) in &sled_resources.zpools {
+    for (zpool, disk) in &sled_resources.zpools {
         swriteln!(s, " {:?}", zpool);
         swriteln!(s, " {:?}", disk);
-        for dataset in datasets {
-            swriteln!(s, " ↳ {:?}", dataset);
-        }
     }
     Ok(Some(s))
 }
22 changes: 9 additions & 13 deletions nexus/db-queries/src/db/datastore/deployment.rs
@@ -2118,20 +2118,16 @@ mod tests {
             .map(|i| {
                 (
                     ZpoolUuid::new_v4(),
-                    (
-                        SledDisk {
-                            disk_identity: DiskIdentity {
-                                vendor: String::from("v"),
-                                serial: format!("s-{i}"),
-                                model: String::from("m"),
-                            },
-                            disk_id: PhysicalDiskUuid::new_v4(),
-                            policy: PhysicalDiskPolicy::InService,
-                            state: PhysicalDiskState::Active,
+                    SledDisk {
+                        disk_identity: DiskIdentity {
+                            vendor: String::from("v"),
+                            serial: format!("s-{i}"),
+                            model: String::from("m"),
                         },
-                        // Datasets
-                        vec![],
-                    ),
+                        disk_id: PhysicalDiskUuid::new_v4(),
+                        policy: PhysicalDiskPolicy::InService,
+                        state: PhysicalDiskState::Active,
+                    },
                 )
             })
             .collect();
3 changes: 0 additions & 3 deletions nexus/reconfigurator/execution/src/dns.rs
@@ -1328,8 +1328,6 @@ mod test {
             .unwrap();
         let zpool_rows =
             datastore.zpool_list_all_external_batched(&opctx).await.unwrap();
-        let dataset_rows =
-            datastore.dataset_list_all_batched(&opctx, None).await.unwrap();
         let ip_pool_range_rows = {
             let (authz_service_ip_pool, _) =
                 datastore.ip_pools_service_lookup(&opctx).await.unwrap();
@@ -1342,7 +1340,6 @@
         let mut builder = PlanningInputFromDb {
             sled_rows: &sled_rows,
             zpool_rows: &zpool_rows,
-            dataset_rows: &dataset_rows,
             ip_pool_range_rows: &ip_pool_range_rows,
             internal_dns_version: dns_initial_internal.generation.into(),
             external_dns_version: dns_latest_external.generation.into(),
133 changes: 1 addition & 132 deletions nexus/reconfigurator/planning/src/blueprint_builder/builder.rs
@@ -4,7 +4,6 @@

 //! Low-level facility for generating Blueprints

-use crate::blueprint_editor::DatasetIdsBackfillFromDb;
 use crate::blueprint_editor::EditedSled;
 use crate::blueprint_editor::SledEditError;
 use crate::blueprint_editor::SledEditor;
@@ -41,7 +40,6 @@ use nexus_types::deployment::OmicronZoneExternalSnatIp;
 use nexus_types::deployment::PlanningInput;
 use nexus_types::deployment::SledDetails;
 use nexus_types::deployment::SledFilter;
-use nexus_types::deployment::SledLookupErrorKind;
 use nexus_types::deployment::SledResources;
 use nexus_types::deployment::ZpoolFilter;
 use nexus_types::deployment::ZpoolName;
@@ -478,31 +476,6 @@ impl<'a> BlueprintBuilder<'a> {
             "parent_id" => parent_blueprint.id.to_string(),
         ));

-        // Helper to build a `PreexistingDatasetIds` for a given sled. This will
-        // go away with https://github.com/oxidecomputer/omicron/issues/6645.
-        let build_preexisting_dataset_ids =
-            |sled_id| -> anyhow::Result<DatasetIdsBackfillFromDb> {
-                match input.sled_lookup(SledFilter::All, sled_id) {
-                    Ok(details) => {
-                        DatasetIdsBackfillFromDb::build(&details.resources)
-                            .with_context(|| {
-                                format!(
-                                    "failed building map of preexisting \
-                                     dataset IDs for sled {sled_id}"
-                                )
-                            })
-                    }
-                    Err(err) => match err.kind() {
-                        SledLookupErrorKind::Missing => {
-                            Ok(DatasetIdsBackfillFromDb::empty())
-                        }
-                        SledLookupErrorKind::Filtered { .. } => unreachable!(
-                            "SledFilter::All should not filter anything out"
-                        ),
-                    },
-                }
-            };
-
         // Squish the disparate maps in our parent blueprint into one map of
         // `SledEditor`s.
         let mut sled_editors = BTreeMap::new();
@@ -556,7 +529,6 @@ impl<'a> BlueprintBuilder<'a> {
                 zones.clone(),
                 disks,
                 datasets.clone(),
-                build_preexisting_dataset_ids(*sled_id)?,
             )
             .with_context(|| {
                 format!("failed to construct SledEditor for sled {sled_id}")
@@ -568,9 +540,7 @@
         // that weren't in the parent blueprint. (These are newly-added sleds.)
         for sled_id in input.all_sled_ids(SledFilter::Commissioned) {
             if let Entry::Vacant(slot) = sled_editors.entry(sled_id) {
-                slot.insert(SledEditor::for_new_active(
-                    build_preexisting_dataset_ids(sled_id)?,
-                ));
+                slot.insert(SledEditor::for_new_active());
             }
         }

@@ -2443,19 +2413,6 @@ pub mod test {
         // the blueprint too.
         assert_eq!(expunged_datasets.len(), 2);

-        // Remove these two datasets from the input.
-        let mut input_builder = input.into_builder();
-        let zpools = &mut input_builder
-            .sleds_mut()
-            .get_mut(&sled_id)
-            .unwrap()
-            .resources
-            .zpools;
-        for (_, (_, datasets)) in zpools {
-            datasets.retain(|dataset| !expunged_datasets.contains(&dataset.id));
-        }
-        let input = input_builder.build();
-
         let mut builder = BlueprintBuilder::new_based_on(
             &logctx.log,
             &blueprint,
@@ -2761,92 +2718,4 @@

         logctx.cleanup_successful();
     }
-
-    // This test can go away with
-    // https://github.com/oxidecomputer/omicron/issues/6645; for now, it
-    // confirms we maintain the compatibility layer it needs.
-    #[test]
-    fn test_backcompat_reuse_existing_database_dataset_ids() {
-        static TEST_NAME: &str =
-            "backcompat_reuse_existing_database_dataset_ids";
-        let logctx = test_setup_log(TEST_NAME);
-
-        // Start with the standard example blueprint.
-        let (collection, input, mut parent) = example(&logctx.log, TEST_NAME);
-
-        // `parent` was not created prior to the addition of disks and datasets,
-        // so it should have datasets for all the disks and zones, and the
-        // dataset IDs should match the input.
-        let mut input_dataset_ids = BTreeMap::new();
-        let mut input_ndatasets = 0;
-        for (_, resources) in input.all_sled_resources(SledFilter::All) {
-            for (zpool_id, dataset_configs) in
-                resources.all_datasets(ZpoolFilter::All)
-            {
-                for dataset in dataset_configs {
-                    let id = dataset.id;
-                    let kind = dataset.name.kind();
-                    let by_kind: &mut BTreeMap<_, _> =
-                        input_dataset_ids.entry(*zpool_id).or_default();
-                    let prev = by_kind.insert(kind.clone(), id);
-                    input_ndatasets += 1;
-                    assert!(prev.is_none());
-                }
-            }
-        }
-        // We should have 3 datasets per disk (debug + zone root + crucible),
-        // plus some number of datasets for discretionary zones. We'll just
-        // check that we have more than 3 per disk.
-        assert!(
-            input_ndatasets
-                > 3 * usize::from(SledBuilder::DEFAULT_NPOOLS)
-                    * ExampleSystemBuilder::DEFAULT_N_SLEDS,
-            "too few datasets: {input_ndatasets}"
-        );
-
-        // Now _remove_ the blueprint datasets entirely, to emulate a
-        // pre-dataset-addition blueprint.
-        parent.blueprint_datasets = BTreeMap::new();
-
-        // Build a new blueprint.
-        let mut builder = BlueprintBuilder::new_based_on(
-            &logctx.log,
-            &parent,
-            &input,
-            &collection,
-            TEST_NAME,
-        )
-        .expect("failed to create builder");
-
-        // Ensure disks and datasets. This should repopulate the datasets.
-        for (sled_id, resources) in input.all_sled_resources(SledFilter::All) {
-            builder
-                .sled_ensure_disks(sled_id, resources)
-                .expect("ensured disks");
-            builder
-                .sled_ensure_zone_datasets(sled_id)
-                .expect("ensured zone datasets");
-        }
-        let output = builder.build();
-
-        // Repeat the logic above on our new blueprint; it should have the same
-        // number of datasets, and they should all have identical IDs.
-        let mut output_dataset_ids = BTreeMap::new();
-        let mut output_ndatasets = 0;
-        for datasets in output.blueprint_datasets.values() {
-            for dataset in &datasets.datasets {
-                let zpool_id = dataset.pool.id();
-                let kind = dataset.kind.clone();
-                let by_kind: &mut BTreeMap<_, _> =
-                    output_dataset_ids.entry(zpool_id).or_default();
-                let prev = by_kind.insert(kind, dataset.id);
-                output_ndatasets += 1;
-                assert!(prev.is_none());
-            }
-        }
-        assert_eq!(input_ndatasets, output_ndatasets);
-        assert_eq!(input_dataset_ids, output_dataset_ids);
-
-        logctx.cleanup_successful();
-    }
 }
1 change: 0 additions & 1 deletion nexus/reconfigurator/planning/src/blueprint_editor.rs
@@ -8,7 +8,6 @@
 mod sled_editor;

-pub(crate) use sled_editor::DatasetIdsBackfillFromDb;
 pub(crate) use sled_editor::EditedSled;
 pub(crate) use sled_editor::SledEditError;
 pub(crate) use sled_editor::SledEditor;
31 changes: 7 additions & 24 deletions nexus/reconfigurator/planning/src/blueprint_editor/sled_editor.rs
@@ -31,8 +31,6 @@ mod datasets;
 mod disks;
 mod zones;

-pub(crate) use self::datasets::DatasetIdsBackfillFromDb;
-
 pub use self::datasets::DatasetsEditError;
 pub use self::datasets::MultipleDatasetsOfKind;
 pub use self::disks::DisksEditError;
@@ -125,16 +123,10 @@ impl SledEditor {
         zones: BlueprintZonesConfig,
         disks: BlueprintPhysicalDisksConfig,
         datasets: BlueprintDatasetsConfig,
-        preexisting_dataset_ids: DatasetIdsBackfillFromDb,
     ) -> Result<Self, SledInputError> {
         match state {
             SledState::Active => {
-                let inner = ActiveSledEditor::new(
-                    zones,
-                    disks,
-                    datasets,
-                    preexisting_dataset_ids,
-                )?;
+                let inner = ActiveSledEditor::new(zones, disks, datasets)?;
                 Ok(Self(InnerSledEditor::Active(inner)))
             }
             SledState::Decommissioned => {
@@ -150,12 +142,8 @@
         }
     }

-    pub fn for_new_active(
-        preexisting_dataset_ids: DatasetIdsBackfillFromDb,
-    ) -> Self {
-        Self(InnerSledEditor::Active(ActiveSledEditor::new_empty(
-            preexisting_dataset_ids,
-        )))
+    pub fn for_new_active() -> Self {
+        Self(InnerSledEditor::Active(ActiveSledEditor::new_empty()))
     }

     pub fn finalize(self) -> EditedSled {
@@ -191,9 +179,7 @@ impl SledEditor {
                 // below, we'll be left in the active state with an empty sled
                 // editor), but omicron in general is not panic safe and aborts
                 // on panic. Plus `finalize()` should never panic.
-                let mut stolen = ActiveSledEditor::new_empty(
-                    DatasetIdsBackfillFromDb::empty(),
-                );
+                let mut stolen = ActiveSledEditor::new_empty();
                 mem::swap(editor, &mut stolen);

                 let mut finalized = stolen.finalize();
@@ -316,22 +302,19 @@ impl ActiveSledEditor {
         zones: BlueprintZonesConfig,
         disks: BlueprintPhysicalDisksConfig,
         datasets: BlueprintDatasetsConfig,
-        preexisting_dataset_ids: DatasetIdsBackfillFromDb,
     ) -> Result<Self, SledInputError> {
         Ok(Self {
             zones: zones.into(),
             disks: disks.try_into()?,
-            datasets: DatasetsEditor::new(datasets, preexisting_dataset_ids)?,
+            datasets: DatasetsEditor::new(datasets)?,
         })
     }

-    pub fn new_empty(
-        preexisting_dataset_ids: DatasetIdsBackfillFromDb,
-    ) -> Self {
+    pub fn new_empty() -> Self {
         Self {
             zones: ZonesEditor::empty(),
             disks: DisksEditor::empty(),
-            datasets: DatasetsEditor::empty(preexisting_dataset_ids),
+            datasets: DatasetsEditor::empty(),
         }
     }
