feat(backups/_cleanVm): check VHD aliases (#6043)
fbeauchamp authored Jan 13, 2022
1 parent 6cf5e10 commit 249f638
Showing 4 changed files with 122 additions and 25 deletions.
60 changes: 45 additions & 15 deletions @xen-orchestra/backups/_cleanVm.integ.spec.js
@@ -9,6 +9,8 @@ const crypto = require('crypto')
const { RemoteAdapter } = require('./RemoteAdapter')
const { VHDFOOTER, VHDHEADER } = require('./tests.fixtures.js')
const { VhdFile, Constants, VhdDirectory, VhdAbstract } = require('vhd-lib')
const { checkAliases } = require('./_cleanVm')
const { dirname, basename } = require('path')

let tempDir, adapter, handler, jobId, vdiId, basePath

@@ -35,7 +37,11 @@ const uniqueId = () => crypto.randomBytes(16).toString('hex')
async function generateVhd(path, opts = {}) {
let vhd

const dataPath = opts.useAlias ? path + '.data' : path
let dataPath = path
if (opts.useAlias) {
await handler.mkdir(dirname(path) + '/data/')
dataPath = dirname(path) + '/data/' + basename(path)
}
if (opts.mode === 'directory') {
await handler.mkdir(dataPath)
vhd = new VhdDirectory(handler, dataPath)
@@ -162,7 +168,7 @@ test('it remove backup meta data referencing a missing vhd in delta backup', asy
`${basePath}/deleted.vhd`, // in metadata but not in vhds
`${basePath}/orphan.vhd`,
`${basePath}/child.vhd`,
// abandonned.json is not here
// abandonned.vhd is not here anymore
],
}),
{ flags: 'w' }
@@ -235,12 +241,8 @@ test('it finish unterminated merge ', async () => {
`metadata.json`,
JSON.stringify({
mode: 'delta',
size: undefined,
vhds: [
`${basePath}/orphan.vhd`, // grand child should not be merged
`${basePath}/child.vhd`,
// orphan is not here, he should be merged in child
],
size: 209920,
vhds: [`${basePath}/orphan.vhd`, `${basePath}/child.vhd`],
})
)

@@ -266,7 +268,6 @@ test('it finish unterminated merge ', async () => {
})
)

// a unfinished merging
await adapter.cleanVm('/', { remove: true, merge: true })
// merging is already tested in vhd-lib, don't retest it here (and these vhd are as empty as my stomach at 12h12)

@@ -279,12 +280,17 @@ test('it finish unterminated merge ', async () => {
// each of the vhds can be a file, a directory, an alias to a file or an alias to a directory
// the message and resulting files should be identical to the output with vhd files, which is tested independently

describe('tests mulitple combination ', () => {
describe('tests multiple combination ', () => {
for (const useAlias of [true, false]) {
for (const vhdMode of ['file', 'directory']) {
test(`alias : ${useAlias}, mode: ${vhdMode}`, async () => {
// a broken VHD
const brokenVhdDataPath = basePath + useAlias ? 'broken.data' : 'broken.vhd'
if (useAlias) {
await handler.mkdir(basePath + '/data')
}

const brokenVhdDataPath = basePath + (useAlias ? '/data/broken.vhd' : '/broken.vhd')

if (vhdMode === 'directory') {
await handler.mkdir(brokenVhdDataPath)
} else {
@@ -305,6 +311,7 @@ describe('tests mulitple combination ', () => {
parentUid: crypto.randomBytes(16),
},
})

// an ancestor of a vhd present in metadata
const ancestor = await generateVhd(`${basePath}/ancestor.vhd`, {
useAlias,
@@ -367,6 +374,7 @@ describe('tests mulitple combination ', () => {
],
})
)

await adapter.cleanVm('/', { remove: true, merge: true })

const metadata = JSON.parse(await handler.readFile(`metadata.json`))
@@ -379,14 +387,16 @@ describe('tests mulitple combination ', () => {
const survivors = await handler.list(basePath)
// console.log(survivors)
if (useAlias) {
const dataSurvivors = await handler.list(basePath + '/data')
// the goal of the alias : do not move a full folder
expect(survivors).toContain('ancestor.vhd.data')
expect(survivors).toContain('grandchild.vhd.data')
expect(survivors).toContain('cleanAncestor.vhd.data')
expect(dataSurvivors).toContain('ancestor.vhd')
expect(dataSurvivors).toContain('grandchild.vhd')
expect(dataSurvivors).toContain('cleanAncestor.vhd')
expect(survivors).toContain('clean.vhd.alias.vhd')
expect(survivors).toContain('child.vhd.alias.vhd')
expect(survivors).toContain('grandchild.vhd.alias.vhd')
expect(survivors.length).toEqual(6)
expect(survivors.length).toEqual(4) // the 3 ok + data
expect(dataSurvivors.length).toEqual(3) // the 3 ok vhd
} else {
expect(survivors).toContain('clean.vhd')
expect(survivors).toContain('child.vhd')
@@ -405,3 +415,23 @@ test('it cleans orphan merge states ', async () => {

expect(await handler.list(basePath)).toEqual([])
})

test('check Aliases should work alone', async () => {
await handler.mkdir('vhds')
await handler.mkdir('vhds/data')
await generateVhd(`vhds/data/ok.vhd`)
await VhdAbstract.createAlias(handler, 'vhds/ok.alias.vhd', 'vhds/data/ok.vhd')

await VhdAbstract.createAlias(handler, 'vhds/missingData.alias.vhd', 'vhds/data/nonexistent.vhd')

await generateVhd(`vhds/data/missingalias.vhd`)

await checkAliases(['vhds/missingData.alias.vhd', 'vhds/ok.alias.vhd'], 'vhds/data', { remove: true, handler })

// only ok has survived
const alias = (await handler.list('vhds')).filter(f => f.endsWith('.vhd'))
expect(alias.length).toEqual(1)

const data = await handler.list('vhds/data')
expect(data.length).toEqual(1)
})
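
For reference, the alias layout exercised by these tests is an ok.alias.vhd file that records the path of the real VHD stored in a sibling data/ directory. Below is a minimal sketch using the same vhd-lib helpers as the tests; the handler is assumed to be the remote handler created in the test setup, and the paths are illustrative:

const { VhdAbstract, resolveAlias } = require('vhd-lib')

// create vhds/ok.alias.vhd pointing at vhds/data/ok.vhd, then resolve it back
async function aliasRoundTrip(handler) {
  await VhdAbstract.createAlias(handler, 'vhds/ok.alias.vhd', 'vhds/data/ok.vhd')
  const target = await resolveAlias(handler, 'vhds/ok.alias.vhd')
  return target // expected to point at vhds/data/ok.vhd
}
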
70 changes: 64 additions & 6 deletions @xen-orchestra/backups/_cleanVm.js
@@ -1,7 +1,7 @@
const assert = require('assert')
const sum = require('lodash/sum')
const { asyncMap } = require('@xen-orchestra/async-map')
const { Constants, mergeVhd, openVhd, VhdAbstract, VhdFile } = require('vhd-lib')
const { Constants, isVhdAlias, mergeVhd, openVhd, VhdAbstract, VhdFile, resolveAlias } = require('vhd-lib')
const { dirname, resolve } = require('path')
const { DISK_TYPES } = Constants
const { isMetadataFile, isVhdFile, isXvaFile, isXvaSumFile } = require('./_backupType.js')
@@ -82,7 +82,6 @@ async function mergeVhdChain(chain, { handler, onLog, remove, merge }) {
)

clearInterval(handle)

await Promise.all([
VhdAbstract.rename(handler, parent, child),
asyncMap(children.slice(0, -1), child => {
@@ -103,6 +102,7 @@ const noop = Function.prototype
const INTERRUPTED_VHDS_REG = /^\.(.+)\.merge.json$/
const listVhds = async (handler, vmDir) => {
const vhds = new Set()
const aliases = {}
const interruptedVhds = new Map()

await asyncMap(
@@ -119,7 +119,7 @@ const listVhds = async (handler, vmDir) => {
const list = await handler.list(vdiDir, {
filter: file => isVhdFile(file) || INTERRUPTED_VHDS_REG.test(file),
})

aliases[vdiDir] = list.filter(vhd => isVhdAlias(vhd))
list.forEach(file => {
const res = INTERRUPTED_VHDS_REG.exec(file)
if (res === null) {
@@ -132,9 +132,63 @@ const listVhds = async (handler, vmDir) => {
)
)

return { vhds, interruptedVhds }
return { vhds, interruptedVhds, aliases }
}

async function checkAliases(aliasPaths, targetDataRepository, { handler, onLog = noop, remove = false }) {
const aliasFound = []
for (const path of aliasPaths) {
const target = await resolveAlias(handler, path)

if (!isVhdFile(target)) {
onLog(`Alias ${path} references a non vhd target: ${target}`)
if (remove) {
await handler.unlink(target)
await handler.unlink(path)
}
continue
}

try {
const { dispose } = await openVhd(handler, target)
try {
await dispose()
} catch (e) {
// error during dispose should not trigger a deletion
}
} catch (error) {
onLog(`target ${target} of alias ${path} is missing or broken`, { error })
if (remove) {
try {
await VhdAbstract.unlink(handler, path)
} catch (e) {
if (e.code !== 'ENOENT') {
onLog(`Error while deleting target ${target} of alias ${path}`, { error: e })
}
}
}
continue
}

aliasFound.push(resolve('/', target))
}

const entries = await handler.list(targetDataRepository, {
ignoreMissing: true,
prependDir: true,
})

entries.forEach(async entry => {
if (!aliasFound.includes(entry)) {
onLog(`the Vhd ${entry} is not referenced by an alias`)
if (remove) {
await VhdAbstract.unlink(handler, entry)
}
}
})
}
exports.checkAliases = checkAliases

const defaultMergeLimiter = limitConcurrency(1)

exports.cleanVm = async function cleanVm(
@@ -149,7 +203,7 @@ exports.cleanVm = async function cleanVm(
const vhdParents = { __proto__: null }
const vhdChildren = { __proto__: null }

const { vhds, interruptedVhds } = await listVhds(handler, vmDir)
const { vhds, interruptedVhds, aliases } = await listVhds(handler, vmDir)

// remove broken VHDs
await asyncMap(vhds, async path => {
@@ -195,7 +249,11 @@ exports.cleanVm = async function cleanVm(
}
}

// @todo : add check for data folder of alias not referenced in a valid alias
// check if aliases are correct
// check if all VHDs in the data subfolder have a corresponding alias
await asyncMap(Object.keys(aliases), async dir => {
await checkAliases(aliases[dir], `${dir}/data`, { handler, onLog, remove })
})

// remove VHDs with missing ancestors
{
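
A minimal usage sketch of the exported helper, with the same signature as above; the handler is assumed to be a remote handler exposing list and unlink as in the integration tests, and the paths are illustrative:

const { checkAliases } = require('./_cleanVm')

async function pruneAliases(handler) {
  // removes aliases whose target is missing or broken, then deletes data VHDs
  // that are no longer referenced by any surviving alias
  await checkAliases(
    ['vhds/ok.alias.vhd', 'vhds/missingData.alias.vhd'], // aliases found in the VDI directory
    'vhds/data', // the data repository holding the real VHD files/directories
    { handler, remove: true, onLog: console.warn }
  )
}

With remove left at its default of false, the helper only logs the problems it finds, which allows a dry run before actually cleaning.
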
1 change: 1 addition & 0 deletions CHANGELOG.unreleased.md
@@ -11,6 +11,7 @@
Can be changed in `xo-server`'s configuration file: `xapiOptions.vmMigrationConcurrency`
- [Proxy] Now ships a reverse proxy [PR#6072](https://github.com/vatesfr/xen-orchestra/pull/6072)
- [Delta Backup] When using S3 remote, retry uploading VHD parts on Internal Error to support [Backblaze](https://www.backblaze.com/b2/docs/calling.html#error_handling) (PR [#6086](https://github.com/vatesfr/xen-orchestra/issues/6086)) (Forum [5397](https://xcp-ng.org/forum/topic/5397/delta-backups-failing-aws-s3-uploadpartcopy-cpu-too-busy/5))
- [Backup] Add sanity check of aliases on S3 remotes (PR [6043](https://github.com/vatesfr/xen-orchestra/pull/6043))

### Bug fixes

16 changes: 12 additions & 4 deletions packages/vhd-lib/Vhd/VhdDirectory.js
@@ -173,10 +173,18 @@ exports.VhdDirectory = class VhdDirectory extends VhdAbstract {
}

async readHeaderAndFooter() {
// we need to know if there is compression before reading headers
await this.#readChunkFilters()
const { buffer: bufHeader } = await this._readChunk('header')
const { buffer: bufFooter } = await this._readChunk('footer')
let bufHeader, bufFooter
try {
bufHeader = (await this._readChunk('header')).buffer
bufFooter = (await this._readChunk('footer')).buffer
} catch (error) {
// emit an AssertionError if the VHD is broken to stay as close as possible to the VhdFile API
if (error.code === 'ENOENT') {
assert(false, 'Header and footer should exist')
} else {
throw error
}
}
const footer = unpackFooter(bufFooter)
const header = unpackHeader(bufHeader, footer)

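
With this change, a broken VhdDirectory (missing header or footer chunk) surfaces an AssertionError from readHeaderAndFooter, staying close to the VhdFile API as the in-diff comment notes. A hedged sketch of a caller treating both storage modes uniformly, assuming openVhd propagates that error; isReadableVhd is an illustrative name, not part of vhd-lib:

const assert = require('assert')
const { openVhd } = require('vhd-lib')

// returns false when the header/footer is missing or invalid, whether the VHD
// is stored as a single file or as a directory of chunks
async function isReadableVhd(handler, path) {
  try {
    const { dispose } = await openVhd(handler, path)
    await dispose()
    return true
  } catch (error) {
    if (error instanceof assert.AssertionError || error.code === 'ENOENT') {
      return false
    }
    throw error
  }
}
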
