Diffstat (limited to 'lib/activate/dev_manager.c')
-rw-r--r--  lib/activate/dev_manager.c  |  845
1 file changed, 669 insertions(+), 176 deletions(-)
diff --git a/lib/activate/dev_manager.c b/lib/activate/dev_manager.c
index a11d918..31c1c27 100644
--- a/lib/activate/dev_manager.c
+++ b/lib/activate/dev_manager.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2002-2004 Sistina Software, Inc. All rights reserved.
- * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2012 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@@ -14,7 +14,6 @@
*/
#include "lib.h"
-#include "str_list.h"
#include "dev_manager.h"
#include "lvm-string.h"
#include "fs.h"
@@ -26,6 +25,7 @@
#include "config.h"
#include "filter.h"
#include "activate.h"
+#include "lvm-exec.h"
#include <limits.h>
#include <dirent.h>
@@ -49,6 +49,7 @@ struct dev_manager {
void *target_state;
uint32_t pvmove_mirror_count;
int flush_required;
+ unsigned track_pvmove_deps;
char *vg_name;
};
@@ -58,9 +59,11 @@ struct lv_layer {
const char *old_name;
};
-static int _read_only_lv(struct logical_volume *lv)
+static const char _thin_layer[] = "tpool";
+
+int read_only_lv(struct logical_volume *lv, struct lv_activate_opts *laopts)
{
- return (!(lv->vg->status & LVM_WRITE) || !(lv->status & LVM_WRITE));
+ return (laopts->read_only || !(lv->vg->status & LVM_WRITE) || !(lv->status & LVM_WRITE));
}
/*
@@ -75,19 +78,25 @@ static struct dm_task *_setup_task(const char *name, const char *uuid,
if (!(dmt = dm_task_create(task)))
return_NULL;
- if (name)
- dm_task_set_name(dmt, name);
+ if (name && !dm_task_set_name(dmt, name))
+ goto_out;
- if (uuid && *uuid)
- dm_task_set_uuid(dmt, uuid);
+ if (uuid && *uuid && !dm_task_set_uuid(dmt, uuid))
+ goto_out;
- if (event_nr)
- dm_task_set_event_nr(dmt, *event_nr);
+ if (event_nr && !dm_task_set_event_nr(dmt, *event_nr))
+ goto_out;
- if (major)
- dm_task_set_major_minor(dmt, major, minor, 1);
+ if (major && !dm_task_set_major_minor(dmt, major, minor, 1))
+ goto_out;
+ if (activation_checks() && !dm_task_enable_checks(dmt))
+ goto_out;
+
return dmt;
+ out:
+ dm_task_destroy(dmt);
+ return NULL;
}
static int _info_run(const char *name, const char *dlid, struct dm_info *info,
@@ -138,14 +147,15 @@ int device_is_usable(struct device *dev)
int only_error_target = 1;
int r = 0;
- if (!(dmt = dm_task_create(DM_DEVICE_STATUS))) {
- log_error("Failed to create dm_task struct to check dev status");
- return 0;
- }
+ if (!(dmt = dm_task_create(DM_DEVICE_STATUS)))
+ return_0;
if (!dm_task_set_major_minor(dmt, MAJOR(dev->dev), MINOR(dev->dev), 1))
goto_out;
+ if (activation_checks() && !dm_task_enable_checks(dmt))
+ goto_out;
+
if (!dm_task_run(dmt)) {
log_error("Failed to get state of mapped device");
goto out;
@@ -261,7 +271,7 @@ int dev_manager_info(struct dm_pool *mem, const struct logical_volume *lv,
char *dlid, *name;
int r;
- if (!(name = build_dm_name(mem, lv->vg->name, lv->name, layer))) {
+ if (!(name = dm_build_dm_name(mem, lv->vg->name, lv->name, layer))) {
log_error("name build failed for %s", lv->name);
return 0;
}
@@ -306,6 +316,7 @@ static const struct dm_info *_cached_info(struct dm_pool *mem,
return dinfo;
}
+#if 0
/* FIXME Interface must cope with multiple targets */
static int _status_run(const char *name, const char *uuid,
unsigned long long *s, unsigned long long *l,
@@ -383,6 +394,7 @@ static int _status(const char *name, const char *uuid,
return 0;
}
+#endif
int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
const char *layer, const char *target_type)
@@ -401,7 +413,7 @@ int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
if (!(dmt = _setup_task(NULL, dlid, 0,
DM_DEVICE_STATUS, 0, 0)))
- return_0;
+ goto_bad;
if (!dm_task_no_open_count(dmt))
log_error("Failed to disable open_count");
@@ -423,14 +435,56 @@ int lv_has_target_type(struct dm_pool *mem, struct logical_volume *lv,
}
} while (next);
- out:
+out:
dm_task_destroy(dmt);
+bad:
+ dm_pool_free(mem, dlid);
+
return r;
}
+int add_linear_area_to_dtree(struct dm_tree_node *node, uint64_t size,
+                             uint32_t extent_size, int use_linear_target,
+                             const char *vgname, const char *lvname)
+{
+ uint32_t page_size;
+
+ /*
+ * Use striped or linear target?
+ */
+ if (!use_linear_target) {
+ page_size = lvm_getpagesize() >> SECTOR_SHIFT;
+
+ /*
+ * We'll use the extent size as the stripe size.
+ * Extent size and page size are always powers of 2.
+ * The striped target requires that the stripe size is
+ * divisible by the page size.
+ */
+ if (extent_size >= page_size) {
+ /* Use striped target */
+ if (!dm_tree_node_add_striped_target(node, size, extent_size))
+ return_0;
+ return 1;
+ } else
+ /* Some exotic cases are unsupported by striped. */
+ log_warn("WARNING: Using linear target for %s/%s: Striped requires extent size (%" PRIu32 " sectors) >= page size (%" PRIu32 ").",
+ vgname, lvname, extent_size, page_size);
+ }
+
+ /*
+ * Use linear target.
+ */
+ if (!dm_tree_node_add_linear_target(node, size))
+ return_0;
+
+ return 1;
+}
+
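The page-size comparison above is the whole divisibility argument: both values are powers of two, so extent_size >= page_size guarantees the stripe size is a multiple of the page size. A minimal standalone sketch of that decision, assuming 512-byte sectors (SECTOR_SHIFT == 9) and a hypothetical extent size:

/* Sketch of the striped-vs-linear decision above; the extent size is
 * a made-up example, not taken from any real VG. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint32_t page_size = (uint32_t) sysconf(_SC_PAGESIZE) >> 9; /* in sectors */
        uint32_t extent_size = 8192;    /* hypothetical 4 MiB extent, in sectors */

        /* Both are powers of 2, so ">=" implies divisibility. */
        printf("%s target\n", extent_size >= page_size ? "striped" : "linear");

        return 0;
}

On 4 KiB-page x86 the page size is 8 sectors, so any extent of 4 KiB or more takes the striped path; a 64 KiB-page machine would hit the linear fallback for anything smaller than 64 KiB.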
static percent_range_t _combine_percent(percent_t a, percent_t b,
uint32_t numerator, uint32_t denominator)
{
+ if (a == PERCENT_MERGE_FAILED || b == PERCENT_MERGE_FAILED)
+ return PERCENT_MERGE_FAILED;
+
if (a == PERCENT_INVALID || b == PERCENT_INVALID)
return PERCENT_INVALID;
@@ -440,7 +494,7 @@ static percent_range_t _combine_percent(percent_t a, percent_t b,
if (a == PERCENT_0 && b == PERCENT_0)
return PERCENT_0;
- return make_percent(numerator, denominator);
+ return (percent_range_t) make_percent(numerator, denominator);
}
static int _percent_run(struct dev_manager *dm, const char *name,
@@ -456,15 +510,15 @@ static int _percent_run(struct dev_manager *dm, const char *name,
uint64_t start, length;
char *type = NULL;
char *params = NULL;
- const struct dm_list *segh = &lv->segments;
+ const struct dm_list *segh = lv ? &lv->segments : NULL;
struct lv_segment *seg = NULL;
struct segment_type *segtype;
int first_time = 1;
- percent_t percent;
+ percent_t percent = PERCENT_INVALID;
uint64_t total_numerator = 0, total_denominator = 0;
- *overall_percent = PERCENT_INVALID;
+ *overall_percent = percent;
if (!(dmt = _setup_task(name, dlid, event_nr,
wait ? DM_DEVICE_WAITEVENT : DM_DEVICE_STATUS, 0, 0)))
@@ -614,6 +668,11 @@ int dev_manager_transient(struct dev_manager *dm, struct logical_volume *lv)
if (!type || !params)
continue;
+ if (!seg) {
+ log_error(INTERNAL_ERROR "Segment is not selected.");
+ goto out;
+ }
+
if (seg->segtype->ops->check_transient_status &&
!seg->segtype->ops->check_transient_status(seg, params))
goto_out;
@@ -637,7 +696,8 @@ int dev_manager_transient(struct dev_manager *dm, struct logical_volume *lv)
* dev_manager implementation.
*/
struct dev_manager *dev_manager_create(struct cmd_context *cmd,
- const char *vg_name)
+ const char *vg_name,
+ unsigned track_pvmove_deps)
{
struct dm_pool *mem;
struct dev_manager *dm;
@@ -654,6 +714,12 @@ struct dev_manager *dev_manager_create(struct cmd_context *cmd,
if (!(dm->vg_name = dm_pool_strdup(dm->mem, vg_name)))
goto_bad;
+ /*
+ * When we manipulate (normally suspend/resume) the PVMOVE
+ * device directly, there's no need to touch the LVs above.
+ */
+ dm->track_pvmove_deps = track_pvmove_deps;
+
dm->target_state = NULL;
dm_udev_set_sync_support(cmd->current_settings.udev_sync);
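A hedged sketch of what the new track_pvmove_deps parameter means at call sites (the callers are outside this diff; cmd and vg_name are stand-ins):

/* Hypothetical call sites, not part of this commit. Ordinary activation
 * wants LVs stacked on a pvmove LV pulled into the tree; direct
 * suspend/resume of the pvmove device itself does not. */
struct dev_manager *dm_act = dev_manager_create(cmd, vg_name, 1);
struct dev_manager *dm_mov = dev_manager_create(cmd, vg_name, 0);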
@@ -684,6 +750,7 @@ int dev_manager_snapshot_percent(struct dev_manager *dm,
const struct logical_volume *lv,
percent_t *percent)
{
+ const struct logical_volume *snap_lv;
char *name;
const char *dlid;
int fail_if_percent_unsupported = 0;
@@ -703,13 +770,19 @@ int dev_manager_snapshot_percent(struct dev_manager *dm,
fail_if_percent_unsupported = 1;
}
+ if (lv_is_merging_cow(lv)) {
+ /* must check percent of origin for a merging snapshot */
+ snap_lv = origin_from_cow(lv);
+ } else
+ snap_lv = lv;
+
/*
* Build a name for the top layer.
*/
- if (!(name = build_dm_name(dm->mem, lv->vg->name, lv->name, NULL)))
+ if (!(name = dm_build_dm_name(dm->mem, snap_lv->vg->name, snap_lv->name, NULL)))
return_0;
- if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, NULL)))
+ if (!(dlid = build_dm_uuid(dm->mem, snap_lv->lvid.s, NULL)))
return_0;
/*
@@ -720,8 +793,6 @@ int dev_manager_snapshot_percent(struct dev_manager *dm,
NULL, fail_if_percent_unsupported)))
return_0;
- /* FIXME dm_pool_free ? */
-
/* If the snapshot isn't available, percent will be -1 */
return 1;
}
@@ -734,23 +805,23 @@ int dev_manager_mirror_percent(struct dev_manager *dm,
{
char *name;
const char *dlid;
+ const char *target_type = first_seg(lv)->segtype->name;
const char *layer = (lv_is_origin(lv)) ? "real" : NULL;
/*
* Build a name for the top layer.
*/
- if (!(name = build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
+ if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
- /* FIXME dm_pool_free ? */
-
if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer))) {
log_error("dlid build failed for %s", lv->name);
return 0;
}
- log_debug("Getting device mirror status percentage for %s", name);
- if (!(_percent(dm, name, dlid, "mirror", wait, lv, percent,
+ log_debug("Getting device %s status percentage for %s",
+ target_type, name);
+ if (!(_percent(dm, name, dlid, target_type, wait, lv, percent,
event_nr, 0)))
return_0;
@@ -781,7 +852,7 @@ int dev_manager_mirror_percent(struct dev_manager *dm,
/* Rename? */
if ((suffix = strrchr(dl->dlid + sizeof(UUID_PREFIX) - 1, '-')))
suffix++;
- new_name = build_dm_name(dm->mem, dm->vg_name, dl->lv->name,
+ new_name = dm_build_dm_name(dm->mem, dm->vg_name, dl->lv->name,
suffix);
static int _belong_to_vg(const char *vgname, const char *name)
@@ -816,6 +887,94 @@ static int _belong_to_vg(const char *vgname, const char *name)
#endif
+int dev_manager_thin_pool_status(struct dev_manager *dm,
+ const struct logical_volume *lv,
+ struct dm_status_thin_pool **status)
+{
+ const char *dlid;
+ struct dm_task *dmt;
+ struct dm_info info;
+ uint64_t start, length;
+ char *type = NULL;
+ char *params = NULL;
+ int r = 0;
+
+ /* Build dlid for the thin pool layer */
+ if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
+ return_0;
+
+ log_debug("Getting thin pool device status for %s.", lv->name);
+
+ if (!(dmt = _setup_task(NULL, dlid, 0, DM_DEVICE_STATUS, 0, 0)))
+ return_0;
+
+ if (!dm_task_no_open_count(dmt))
+ log_error("Failed to disable open_count.");
+
+ if (!dm_task_run(dmt))
+ goto_out;
+
+ if (!dm_task_get_info(dmt, &info) || !info.exists)
+ goto_out;
+
+ dm_get_next_target(dmt, NULL, &start, &length, &type, &params);
+
+ if (!dm_get_status_thin_pool(dm->mem, params, status))
+ goto_out;
+
+ r = 1;
+out:
+ dm_task_destroy(dmt);
+
+ return r;
+}
+
+int dev_manager_thin_pool_percent(struct dev_manager *dm,
+ const struct logical_volume *lv,
+ int metadata, percent_t *percent)
+{
+ char *name;
+ const char *dlid;
+
+ /* Build a name for the top layer */
+ if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name,
+ _thin_layer)))
+ return_0;
+
+ if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
+ return_0;
+
+ log_debug("Getting device status percentage for %s", name);
+ if (!(_percent(dm, name, dlid, "thin-pool", 0,
+ (metadata) ? lv : NULL, percent, NULL, 1)))
+ return_0;
+
+ return 1;
+}
+
+int dev_manager_thin_percent(struct dev_manager *dm,
+ const struct logical_volume *lv,
+ int mapped, percent_t *percent)
+{
+ char *name;
+ const char *dlid;
+ const char *layer = lv_is_origin(lv) ? "real" : NULL;
+
+ /* Build a name for the top layer */
+ if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
+ return_0;
+
+ if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
+ return_0;
+
+ log_debug("Getting device status percentage for %s", name);
+ if (!(_percent(dm, name, dlid, "thin", 0,
+ (mapped) ? NULL : lv, percent, NULL, 1)))
+ return_0;
+
+ return 1;
+}
+
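A hedged caller sketch for the two pool percent queries (not part of this commit; it assumes only the signatures added above, with the 'metadata' flag selecting which part of the thin-pool status line is accumulated):

/* Query both data and metadata usage of a thin pool. */
static int _report_pool_usage(struct dev_manager *dm,
                              const struct logical_volume *pool_lv)
{
        percent_t data = PERCENT_INVALID, meta = PERCENT_INVALID;

        if (!dev_manager_thin_pool_percent(dm, pool_lv, 0, &data) ||
            !dev_manager_thin_pool_percent(dm, pool_lv, 1, &meta))
                return_0;

        log_debug("Thin pool %s: data %d, metadata %d (percent_t units).",
                  pool_lv->name, (int) data, (int) meta);

        return 1;
}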
/*************************/
/* NEW CODE STARTS HERE */
/*************************/
@@ -824,7 +983,7 @@ static int _dev_manager_lv_mknodes(const struct logical_volume *lv)
{
char *name;
- if (!(name = build_dm_name(lv->vg->cmd->mem, lv->vg->name,
+ if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name,
lv->name, NULL)))
return_0;
@@ -842,7 +1001,7 @@ int dev_manager_mknodes(const struct logical_volume *lv)
char *name;
int r = 0;
- if (!(name = build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
+ if (!(name = dm_build_dm_name(lv->vg->cmd->mem, lv->vg->name, lv->name, NULL)))
return_0;
if ((r = _info_run(name, NULL, &dminfo, NULL, 1, 0, 0, 0, 0))) {
@@ -863,10 +1022,18 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, struct logical_volume *l
uint16_t udev_flags = 0;
/*
+ * Also instruct libdevmapper to disable the udev
+ * fallback in accordance with LVM2 settings.
+ */
+ if (!dm->cmd->current_settings.udev_fallback)
+ udev_flags |= DM_UDEV_DISABLE_LIBRARY_FALLBACK;
+
+ /*
* Is this a top-level, visible device?
* If not, create just the /dev/mapper content.
*/
- if (layer || !lv_is_visible(lv))
+ /* FIXME: add target's method for this */
+ if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
DM_UDEV_DISABLE_DISK_RULES_FLAG |
DM_UDEV_DISABLE_OTHER_RULES_FLAG;
@@ -907,7 +1074,7 @@ static int _add_dev_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
char *dlid, *name;
struct dm_info info, info2;
- if (!(name = build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
+ if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
@@ -1031,12 +1198,132 @@ static int _add_partial_replicator_to_dtree(struct dev_manager *dm,
return 1;
}
+struct thin_cb_data {
+ const struct logical_volume *pool_lv;
+ struct dev_manager *dm;
+};
+
+static int _thin_pool_callback(struct dm_tree_node *node,
+ dm_node_callback_t type, void *cb_data)
+{
+ int ret, status;
+ const struct thin_cb_data *data = cb_data;
+ const char *dmdir = dm_dir();
+ const struct dm_config_node *cn;
+ const struct dm_config_value *cv;
+ const char *thin_check =
+ find_config_tree_str_allow_empty(data->pool_lv->vg->cmd,
+ "global/thin_check_executable",
+ THIN_CHECK_CMD);
+ const struct logical_volume *mlv = first_seg(data->pool_lv)->metadata_lv;
+ size_t len = strlen(dmdir) + 2 * (strlen(mlv->vg->name) + strlen(mlv->name)) + 3;
+ char meta_path[len];
+ int args = 0;
+ const char *argv[19]; /* Max supported 15 args */
+ char *split, *dm_name;
+
+ if (!thin_check[0])
+ return 1; /* Checking disabled */
+
+ if (!(dm_name = dm_build_dm_name(data->dm->mem, mlv->vg->name,
+ mlv->name, NULL)) ||
+ (dm_snprintf(meta_path, len, "%s/%s", dmdir, dm_name) < 0)) {
+ log_error("Failed to build thin metadata path.");
+ return 0;
+ }
+
+ if ((cn = find_config_tree_node(mlv->vg->cmd, "global/thin_check_options"))) {
+ for (cv = cn->v; cv && args < 16; cv = cv->next) {
+ if (cv->type != DM_CFG_STRING) {
+ log_error("Invalid string in config file: "
+ "global/thin_check_options");
+ return 0;
+ }
+ argv[++args] = cv->v.str;
+ }
+ } else {
+ /* Use default options (no support for options with spaces) */
+ if (!(split = dm_pool_strdup(data->dm->mem, DEFAULT_THIN_CHECK_OPTIONS))) {
+ log_error("Failed to duplicate thin check string.");
+ return 0;
+ }
+ args = dm_split_words(split, 16, 0, (char**) argv + 1);
+ }
+
+ if (args == 16) {
+ log_error("Too many options for thin check command.");
+ return 0;
+ }
+
+ argv[0] = thin_check;
+ argv[++args] = meta_path;
+ argv[++args] = NULL;
+
+ if (!(ret = exec_cmd(data->pool_lv->vg->cmd, (const char * const *)argv,
+ &status, 0))) {
+ switch (type) {
+ case DM_NODE_CALLBACK_PRELOADED:
+ log_err_once("Check of thin pool %s/%s failed (status:%d). "
+ "Manual repair required (thin_dump --repair %s)!",
+ data->pool_lv->vg->name, data->pool_lv->name,
+ status, meta_path);
+ break;
+ default:
+ log_warn("WARNING: Integrity check of metadata for thin pool "
+ "%s/%s failed.",
+ data->pool_lv->vg->name, data->pool_lv->name);
+ }
+ /*
+ * FIXME: What should we do here??
+ *
+ * Maybe mark the node, so it's not activating
+ * as thin_pool but as error/linear and let the
+ * dm tree resolve the issue.
+ */
+ }
+
+ dm_pool_free(data->dm->mem, dm_name);
+
+ return ret;
+}
+
+static int _thin_pool_register_callback(struct dev_manager *dm,
+ struct dm_tree_node *node,
+ const struct logical_volume *lv)
+{
+ struct thin_cb_data *data;
+
+ /* Skip metadata testing for unused pool. */
+ if (!first_seg(lv)->transaction_id)
+ return 1;
+
+ if (!(data = dm_pool_alloc(dm->mem, sizeof(*data)))) {
+ log_error("Failed to allocated path for callback.");
+ return 0;
+ }
+
+ data->dm = dm;
+ data->pool_lv = lv;
+
+ dm_tree_node_set_callback(node, _thin_pool_callback, data);
+
+ return 1;
+}
+
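The callback is driven by the two lvm.conf keys looked up in _thin_pool_callback() above; a configuration sketch with illustrative values (the compiled-in defaults come from THIN_CHECK_CMD and DEFAULT_THIN_CHECK_OPTIONS, and an empty executable string disables the check, as the early return shows):

global {
        # Illustrative values only.
        thin_check_executable = "/usr/sbin/thin_check"
        thin_check_options = [ "-q" ]
        # thin_check_executable = ""   # disables metadata checking
}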
/*
* Add LV and any known dependencies
*/
-static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, struct logical_volume *lv, unsigned origin_only)
+static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
+ struct logical_volume *lv, int origin_only)
{
- if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, NULL))
+ uint32_t s;
+ struct seg_list *sl;
+ struct lv_segment *seg = first_seg(lv);
+ struct dm_tree_node *thin_node;
+ const char *uuid;
+
+ if ((!origin_only || lv_is_thin_volume(lv)) &&
+ !_add_dev_to_dtree(dm, dtree, lv, NULL))
return_0;
/* FIXME Can we avoid doing this every time? */
@@ -1046,36 +1333,79 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree, struc
if (!origin_only && !_add_dev_to_dtree(dm, dtree, lv, "cow"))
return_0;
- if ((lv->status & MIRRORED) && first_seg(lv)->log_lv &&
- !_add_dev_to_dtree(dm, dtree, first_seg(lv)->log_lv, NULL))
+ if ((lv->status & MIRRORED) && seg->log_lv &&
+ !_add_dev_to_dtree(dm, dtree, seg->log_lv, NULL))
return_0;
+ if (lv->status & RAID)
+ for (s = 0; s < seg->area_count; s++)
+ if (!_add_dev_to_dtree(dm, dtree,
+ seg_metalv(seg, s), NULL))
+ return_0;
+
+ /* Add any LVs referencing a PVMOVE LV unless told not to. */
+ if (dm->track_pvmove_deps && lv->status & PVMOVE)
+ dm_list_iterate_items(sl, &lv->segs_using_this_lv)
+ if (!_add_lv_to_dtree(dm, dtree, sl->seg->lv, origin_only))
+ return_0;
+
/* Adding LV head of replicator adds all other related devs */
if (lv_is_replicator_dev(lv) &&
!_add_partial_replicator_to_dtree(dm, dtree, lv))
return_0;
+ if (lv_is_thin_volume(lv)) {
+#if 0
+ /* FIXME Implement dm_tree_node_skip_children optimisation */
+ if (origin_only) {
+ if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, NULL)))
+ return_0;
+ if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)))
+ dm_tree_node_skip_children(thin_node, 1);
+ }
+#endif
+ /* Add thin pool LV layer */
+ lv = seg->pool_lv;
+ seg = first_seg(lv);
+ }
+
+ if (!origin_only && lv_is_thin_pool(lv)) {
+ if (!_add_lv_to_dtree(dm, dtree, seg->metadata_lv, 0))
+ return_0;
+ /* FIXME code from _create_partial_dtree() should be moved here */
+ if (!_add_lv_to_dtree(dm, dtree, seg_lv(seg, 0), 0))
+ return_0;
+ if (!_add_dev_to_dtree(dm, dtree, lv, _thin_layer))
+ return_0;
+ /* If the partial tree is used for deactivation, setup callback */
+ if (!(uuid = build_dm_uuid(dm->mem, lv->lvid.s, _thin_layer)))
+ return_0;
+ if ((thin_node = dm_tree_find_node_by_uuid(dtree, uuid)) &&
+ !_thin_pool_register_callback(dm, thin_node, lv))
+ return_0;
+ }
+
return 1;
}
-static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, struct logical_volume *lv, unsigned origin_only)
+static struct dm_tree *_create_partial_dtree(struct dev_manager *dm, struct logical_volume *lv, int origin_only)
{
struct dm_tree *dtree;
- struct dm_list *snh, *snht;
+ struct dm_list *snh;
struct lv_segment *seg;
uint32_t s;
if (!(dtree = dm_tree_create())) {
- log_error("Partial dtree creation failed for %s.", lv->name);
+ log_debug("Partial dtree creation failed for %s.", lv->name);
return NULL;
}
- if (!_add_lv_to_dtree(dm, dtree, lv, origin_only))
+ if (!_add_lv_to_dtree(dm, dtree, lv, (lv_is_origin(lv) || lv_is_thin_volume(lv)) ? origin_only : 0))
goto_bad;
/* Add any snapshots of this LV */
- if (!origin_only)
- dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
+ if (!origin_only && lv_is_origin(lv))
+ dm_list_iterate(snh, &lv->snapshot_segs)
if (!_add_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, 0))
goto_bad;
@@ -1097,12 +1427,13 @@ bad:
static char *_add_error_device(struct dev_manager *dm, struct dm_tree *dtree,
struct lv_segment *seg, int s)
{
- char *id, *name;
+ char *dlid, *name;
char errid[32];
struct dm_tree_node *node;
struct lv_segment *seg_i;
- int segno = -1, i = 0;;
- uint64_t size = seg->len * seg->lv->vg->extent_size;
+ struct dm_info info;
+ int segno = -1, i = 0;
+ uint64_t size = (uint64_t) seg->len * seg->lv->vg->extent_size;
dm_list_iterate_items(seg_i, &seg->lv->segments) {
if (seg == seg_i)
@@ -1112,23 +1443,40 @@ static char *_add_error_device(struct dev_manager *dm, struct dm_tree *dtree,
if (segno < 0) {
log_error("_add_error_device called with bad segment");
- return_NULL;
+ return NULL;
}
sprintf(errid, "missing_%d_%d", segno, s);
- if (!(id = build_dm_uuid(dm->mem, seg->lv->lvid.s, errid)))
+ if (!(dlid = build_dm_uuid(dm->mem, seg->lv->lvid.s, errid)))
return_NULL;
- if (!(name = build_dm_name(dm->mem, seg->lv->vg->name,
+ if (!(name = dm_build_dm_name(dm->mem, seg->lv->vg->name,
seg->lv->name, errid)))
return_NULL;
- if (!(node = dm_tree_add_new_dev(dtree, name, id, 0, 0, 0, 0, 0)))
- return_NULL;
- if (!dm_tree_node_add_error_target(node, size))
- return_NULL;
- return id;
+ log_debug("Getting device info for %s [%s]", name, dlid);
+ if (!_info(dlid, 1, 0, &info, NULL)) {
+ log_error("Failed to get info for %s [%s].", name, dlid);
+ return 0;
+ }
+
+ if (!info.exists) {
+ /* Create new node */
+ if (!(node = dm_tree_add_new_dev(dtree, name, dlid, 0, 0, 0, 0, 0)))
+ return_NULL;
+ if (!dm_tree_node_add_error_target(node, size))
+ return_NULL;
+ } else {
+ /* Already exists */
+ if (!dm_tree_add_dev(dtree, info.major, info.minor)) {
+ log_error("Failed to add device (%" PRIu32 ":%" PRIu32") to dtree",
+ info.major, info.minor);
+ return_NULL;
+ }
+ }
+
+ return dlid;
}
static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
@@ -1143,15 +1491,13 @@ static int _add_error_area(struct dev_manager *dm, struct dm_tree_node *node,
* we don't have the struct definition available.
*/
struct dm_tree **tree = (struct dm_tree **) node;
- dlid = _add_error_device(dm, *tree, seg, s);
- if (!dlid)
+ if (!(dlid = _add_error_device(dm, *tree, seg, s)))
+ return_0;
+ if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
return_0;
- dm_tree_node_add_target_area(node, NULL, dlid,
- extent_size * seg_le(seg, s));
} else
- dm_tree_node_add_target_area(node,
- dm->cmd->stripe_filler,
- NULL, UINT64_C(0));
+ if (!dm_tree_node_add_target_area(node, dm->cmd->stripe_filler, NULL, UINT64_C(0)))
+ return_0;
return 1;
}
@@ -1163,28 +1509,68 @@ int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
uint64_t extent_size = seg->lv->vg->extent_size;
uint32_t s;
char *dlid;
+ struct stat info;
+ const char *name;
+ unsigned num_error_areas = 0;
+ unsigned num_existing_areas = 0;
+ /* FIXME Avoid repeating identical stat in dm_tree_node_add_target_area */
for (s = start_area; s < areas; s++) {
if ((seg_type(seg, s) == AREA_PV &&
- (!seg_pvseg(seg, s) ||
- !seg_pv(seg, s) ||
- !seg_dev(seg, s))) ||
+ (!seg_pvseg(seg, s) || !seg_pv(seg, s) || !seg_dev(seg, s) ||
+ !(name = dev_name(seg_dev(seg, s))) || !*name ||
+ stat(name, &info) < 0 || !S_ISBLK(info.st_mode))) ||
(seg_type(seg, s) == AREA_LV && !seg_lv(seg, s))) {
+ if (!seg->lv->vg->cmd->partial_activation) {
+ log_error("Aborting. LV %s is now incomplete "
+ "and --partial was not specified.", seg->lv->name);
+ return 0;
+ }
if (!_add_error_area(dm, node, seg, s))
return_0;
- } else if (seg_type(seg, s) == AREA_PV)
- dm_tree_node_add_target_area(node,
- dev_name(seg_dev(seg, s)),
- NULL,
- (seg_pv(seg, s)->pe_start +
- (extent_size * seg_pe(seg, s))));
- else if (seg_type(seg, s) == AREA_LV) {
- if (!(dlid = build_dm_uuid(dm->mem,
- seg_lv(seg, s)->lvid.s,
- NULL)))
+ num_error_areas++;
+ } else if (seg_type(seg, s) == AREA_PV) {
+ if (!dm_tree_node_add_target_area(node, dev_name(seg_dev(seg, s)), NULL,
+ (seg_pv(seg, s)->pe_start + (extent_size * seg_pe(seg, s)))))
+ return_0;
+ num_existing_areas++;
+ } else if (seg_is_raid(seg)) {
+ /*
+ * RAID can handle unassigned areas. It simply puts
+ * '- -' in for the metadata/data device pair. This
+ * is a valid way to indicate to the RAID target that
+ * the device is missing.
+ *
+ * If an image is marked as VISIBLE_LV and !LVM_WRITE,
+ * it means the device has temporarily been extracted
+ * from the array. It may come back at a future date,
+ * so the bitmap must track differences. Again, '- -'
+ * is used in the CTR table.
+ */
+ if ((seg_type(seg, s) == AREA_UNASSIGNED) ||
+ ((seg_lv(seg, s)->status & VISIBLE_LV) &&
+ !(seg_lv(seg, s)->status & LVM_WRITE))) {
+ /* One each for metadata area and data area */
+ if (!dm_tree_node_add_null_area(node, 0) ||
+ !dm_tree_node_add_null_area(node, 0))
+ return_0;
+ continue;
+ }
+ if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s)->lvid.s, NULL)))
+ return_0;
+ if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
+ return_0;
+
+ if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
+ return_0;
+ if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
+ return_0;
+ } else if (seg_type(seg, s) == AREA_LV) {
+
+ if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s)->lvid.s, NULL)))
+ return_0;
+ if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_le(seg, s)))
return_0;
- dm_tree_node_add_target_area(node, NULL, dlid,
- extent_size * seg_le(seg, s));
} else {
log_error(INTERNAL_ERROR "Unassigned area found in LV %s.",
seg->lv->name);
@@ -1192,6 +1578,15 @@ int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
}
}
+ if (num_error_areas) {
+ /* Thins currently do not support partial activation */
+ if (lv_is_thin_type(seg->lv)) {
+ log_error("Cannot activate %s%s: pool incomplete.",
+ seg->lv->vg->name, seg->lv->name);
+ return 0;
+ }
+ }
+
return 1;
}
@@ -1235,8 +1630,9 @@ static int _add_snapshot_merge_target_to_dtree(struct dev_manager *dm,
}
static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
- struct dm_tree_node *dnode,
- struct logical_volume *lv)
+ struct dm_tree_node *dnode,
+ struct logical_volume *lv,
+ struct lv_activate_opts *laopts)
{
const char *origin_dlid;
const char *cow_dlid;
@@ -1256,7 +1652,7 @@ static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
size = (uint64_t) snap_seg->len * snap_seg->origin->vg->extent_size;
- if (lv_is_merging_cow(lv)) {
+ if (!laopts->no_merging && lv_is_merging_cow(lv)) {
/* cow is to be merged so load the error target */
if (!dm_tree_node_add_error_target(dnode, size))
return_0;
@@ -1269,8 +1665,9 @@ static int _add_snapshot_target_to_dtree(struct dev_manager *dm,
}
static int _add_target_to_dtree(struct dev_manager *dm,
- struct dm_tree_node *dnode,
- struct lv_segment *seg)
+ struct dm_tree_node *dnode,
+ struct lv_segment *seg,
+ struct lv_activate_opts *laopts)
{
uint64_t extent_size = seg->lv->vg->extent_size;
@@ -1282,47 +1679,52 @@ static int _add_target_to_dtree(struct dev_manager *dm,
return seg->segtype->ops->add_target_line(dm, dm->mem, dm->cmd,
&dm->target_state, seg,
- dnode,
+ laopts, dnode,
extent_size * seg->len,
&dm-> pvmove_mirror_count);
}
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
- struct logical_volume *lv, const char *layer);
+ struct logical_volume *lv,
+ struct lv_activate_opts *laopts,
+ const char *layer);
/* Add all replicators' LVs */
static int _add_replicator_dev_target_to_dtree(struct dev_manager *dm,
struct dm_tree *dtree,
- struct lv_segment *seg)
+ struct lv_segment *seg,
+ struct lv_activate_opts *laopts)
{
struct replicator_device *rdev;
struct replicator_site *rsite;
/* For inactive replicator add linear mapping */
if (!lv_is_active_replicator_dev(seg->lv)) {
- if (!_add_new_lv_to_dtree(dm, dtree, seg->lv->rdevice->lv, NULL))
+ if (!_add_new_lv_to_dtree(dm, dtree, seg->lv->rdevice->lv, laopts, NULL))
return_0;
return 1;
}
/* Add rlog and replicator nodes */
if (!seg->replicator ||
- !first_seg(seg->replicator)->rlog_lv ||
+ !first_seg(seg->replicator)->rlog_lv ||
!_add_new_lv_to_dtree(dm, dtree,
- first_seg(seg->replicator)->rlog_lv, NULL) ||
- !_add_new_lv_to_dtree(dm, dtree, seg->replicator, NULL))
+ first_seg(seg->replicator)->rlog_lv,
+ laopts, NULL) ||
+ !_add_new_lv_to_dtree(dm, dtree, seg->replicator, laopts, NULL))
return_0;
/* Activation of one replicator_dev node activates all other nodes */
dm_list_iterate_items(rsite, &seg->replicator->rsites) {
dm_list_iterate_items(rdev, &rsite->rdevices) {
if (rdev->lv &&
- !_add_new_lv_to_dtree(dm, dtree, rdev->lv, NULL))
+ !_add_new_lv_to_dtree(dm, dtree, rdev->lv,
+ laopts, NULL))
return_0;
if (rdev->slog &&
- !_add_new_lv_to_dtree(dm, dtree,
- rdev->slog, NULL))
+ !_add_new_lv_to_dtree(dm, dtree, rdev->slog,
+ laopts, NULL))
return_0;
}
}
@@ -1337,7 +1739,7 @@ static int _add_replicator_dev_target_to_dtree(struct dev_manager *dm,
if (!rdev->replicator_dev->lv ||
!_add_new_lv_to_dtree(dm, dtree,
rdev->replicator_dev->lv,
- NULL))
+ laopts, NULL))
return_0;
}
}
@@ -1346,20 +1748,22 @@ static int _add_replicator_dev_target_to_dtree(struct dev_manager *dm,
}
static int _add_segment_to_dtree(struct dev_manager *dm,
- struct dm_tree *dtree,
- struct dm_tree_node *dnode,
- struct lv_segment *seg,
- const char *layer)
+ struct dm_tree *dtree,
+ struct dm_tree_node *dnode,
+ struct lv_segment *seg,
+ struct lv_activate_opts *laopts,
+ const char *layer)
{
uint32_t s;
struct dm_list *snh;
struct lv_segment *seg_present;
const char *target_name;
+ struct lv_activate_opts lva;
/* Ensure required device-mapper targets are loaded */
seg_present = find_cow(seg->lv) ? : seg;
target_name = (seg_present->segtype->ops->target_name ?
- seg_present->segtype->ops->target_name(seg_present) :
+ seg_present->segtype->ops->target_name(seg_present, laopts) :
seg_present->segtype->name);
log_debug("Checking kernel supports %s segment type for %s%s%s",
@@ -1376,45 +1780,56 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
/* Add mirror log */
if (seg->log_lv &&
- !_add_new_lv_to_dtree(dm, dtree, seg->log_lv, NULL))
+ !_add_new_lv_to_dtree(dm, dtree, seg->log_lv, laopts, NULL))
return_0;
if (seg_is_replicator_dev(seg)) {
- if (!_add_replicator_dev_target_to_dtree(dm, dtree, seg))
+ if (!_add_replicator_dev_target_to_dtree(dm, dtree, seg, laopts))
return_0;
/* If this is a snapshot origin, add real LV */
/* If this is a snapshot origin + merging snapshot, add cow + real LV */
} else if (lv_is_origin(seg->lv) && !layer) {
- if (vg_is_clustered(seg->lv->vg)) {
- log_error("Clustered snapshots are not yet supported");
- return 0;
- }
- if (lv_is_merging_origin(seg->lv)) {
+ if (!laopts->no_merging && lv_is_merging_origin(seg->lv)) {
if (!_add_new_lv_to_dtree(dm, dtree,
- find_merging_cow(seg->lv)->cow, "cow"))
+ find_merging_cow(seg->lv)->cow, laopts, "cow"))
return_0;
/*
* Must also add "real" LV for use when
* snapshot-merge target is added
*/
}
- if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, "real"))
+ if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "real"))
return_0;
} else if (lv_is_cow(seg->lv) && !layer) {
- if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, "cow"))
+ if (!_add_new_lv_to_dtree(dm, dtree, seg->lv, laopts, "cow"))
+ return_0;
+ } else if ((layer != _thin_layer) && seg_is_thin(seg)) {
+ lva = *laopts;
+ lva.real_pool = 1;
+ if (!_add_new_lv_to_dtree(dm, dtree, seg_is_thin_pool(seg) ?
+ seg->lv : seg->pool_lv, &lva, _thin_layer))
return_0;
} else {
+ if (seg_is_thin_pool(seg) &&
+ !_add_new_lv_to_dtree(dm, dtree, seg->metadata_lv, laopts, NULL))
+ return_0;
+
/* Add any LVs used by this segment */
- for (s = 0; s < seg->area_count; s++)
+ for (s = 0; s < seg->area_count; s++) {
if ((seg_type(seg, s) == AREA_LV) &&
(!_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
- NULL)))
+ laopts, NULL)))
return_0;
+ if (seg_is_raid(seg) &&
+ !_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
+ laopts, NULL))
+ return_0;
+ }
}
/* Now we've added its dependencies, we can add the target itself */
if (lv_is_origin(seg->lv) && !layer) {
- if (!lv_is_merging_origin(seg->lv)) {
+ if (laopts->no_merging || !lv_is_merging_origin(seg->lv)) {
if (!_add_origin_target_to_dtree(dm, dnode, seg->lv))
return_0;
} else {
@@ -1422,25 +1837,79 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
return_0;
}
} else if (lv_is_cow(seg->lv) && !layer) {
- if (!_add_snapshot_target_to_dtree(dm, dnode, seg->lv))
+ if (!_add_snapshot_target_to_dtree(dm, dnode, seg->lv, laopts))
return_0;
- } else if (!_add_target_to_dtree(dm, dnode, seg))
+ } else if (!_add_target_to_dtree(dm, dnode, seg, laopts))
return_0;
if (lv_is_origin(seg->lv) && !layer)
/* Add any snapshots of this LV */
dm_list_iterate(snh, &seg->lv->snapshot_segs)
- if (!_add_new_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow, NULL))
+ if (!_add_new_lv_to_dtree(dm, dtree, dm_list_struct_base(snh, struct lv_segment, origin_list)->cow,
+ laopts, NULL))
return_0;
return 1;
}
+static int _set_udev_flags_for_children(struct dev_manager *dm,
+ struct volume_group *vg,
+ struct dm_tree_node *dnode)
+{
+ char *p;
+ const char *uuid;
+ void *handle = NULL;
+ struct dm_tree_node *child;
+ const struct dm_info *info;
+ struct lv_list *lvl;
+
+ while ((child = dm_tree_next_child(&handle, dnode, 0))) {
+ /* Ignore root node */
+ if (!(info = dm_tree_node_get_info(child)) || !info->exists)
+ continue;
+
+ if (!(uuid = dm_tree_node_get_uuid(child))) {
+ log_error(INTERNAL_ERROR
+ "Failed to get uuid for %" PRIu32 ":%" PRIu32,
+ info->major, info->minor);
+ continue;
+ }
+
+ /* Ignore non-LVM devices */
+ if (!(p = strstr(uuid, UUID_PREFIX)))
+ continue;
+ p += strlen(UUID_PREFIX);
+
+ /* Ignore LVs that belong to different VGs (due to stacking) */
+ if (strncmp(p, (char *)vg->id.uuid, ID_LEN))
+ continue;
+
+ /* Ignore LVM devices with 'layer' suffixes */
+ if (strrchr(p, '-'))
+ continue;
+
+ if (!(lvl = find_lv_in_vg_by_lvid(vg, (const union lvid *)p))) {
+ log_error(INTERNAL_ERROR
+ "%s (%" PRIu32 ":%" PRIu32 ") not found in VG",
+ dm_tree_node_get_name(child),
+ info->major, info->minor);
+ return 0;
+ }
+
+ dm_tree_node_set_udev_flags(child,
+ _get_udev_flags(dm, lvl->lv, NULL));
+ }
+
+ return 1;
+}
+
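The loop above leans on the layout of LVM dm UUIDs: UUID_PREFIX ("LVM-"), ID_LEN characters of VG id, ID_LEN characters of LV id, then an optional "-<layer>" suffix. A standalone sketch of that parsing, using a made-up placeholder UUID:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* Placeholder following the assumed LVM-<vgid><lvid>[-layer] layout. */
        const char *uuid = "LVM-"
                "vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv"   /* VG id (ID_LEN = 32) */
                "llllllllllllllllllllllllllllllll"   /* LV id */
                "-cow";                              /* layer suffix */
        const char *p, *layer;

        if (!(p = strstr(uuid, "LVM-")))
                return 1;                       /* not an LVM device */
        p += strlen("LVM-");

        printf("VG id: %.32s\n", p);            /* compared against vg->id.uuid */
        layer = strrchr(p, '-');
        printf("layer: %s\n", layer ? layer + 1 : "(none)");

        return 0;
}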
static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
- struct logical_volume *lv, const char *layer)
+ struct logical_volume *lv, struct lv_activate_opts *laopts,
+ const char *layer)
{
struct lv_segment *seg;
struct lv_layer *lvlayer;
+ struct seg_list *sl;
struct dm_tree_node *dnode;
const struct dm_info *dinfo;
char *name, *dlid;
@@ -1464,11 +1933,11 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
dtree)) && dinfo->open_count)) {
/* FIXME Is there anything simpler to check for instead? */
if (!lv_has_target_type(dm->mem, lv, NULL, "snapshot-merge"))
- clear_snapshot_merge(lv);
+ laopts->no_merging = 1;
}
}
- if (!(name = build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
+ if (!(name = dm_build_dm_name(dm->mem, lv->vg->name, lv->name, layer)))
return_0;
if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, layer)))
@@ -1493,11 +1962,14 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
* existing inactive table left behind.
* Major/minor settings only apply to the visible layer.
*/
+ /* FIXME Move the clear from here until later, so we can leave
+ * identical inactive tables untouched. (For pvmove.)
+ */
if (!(dnode = dm_tree_add_new_dev_with_udev_flags(dtree, name, dlid,
layer ? UINT32_C(0) : (uint32_t) lv->major,
layer ? UINT32_C(0) : (uint32_t) lv->minor,
- _read_only_lv(lv),
- (lv->vg->status & PRECOMMITTED) ? 1 : 0,
+ read_only_lv(lv, laopts),
+ ((lv->vg->status & PRECOMMITTED) | laopts->revert) ? 1 : 0,
lvlayer,
_get_udev_flags(dm, lv, layer))))
return_0;
@@ -1508,12 +1980,12 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
/* Create table */
dm->pvmove_mirror_count = 0u;
dm_list_iterate_items(seg, &lv->segments) {
- if (!_add_segment_to_dtree(dm, dtree, dnode, seg, layer))
+ if (!_add_segment_to_dtree(dm, dtree, dnode, seg, laopts, layer))
return_0;
/* These aren't real segments in the LVM2 metadata */
if (lv_is_origin(lv) && !layer)
break;
- if (lv_is_cow(lv) && !layer)
+ if (!laopts->no_merging && lv_is_cow(lv) && !layer)
break;
if (max_stripe_size < seg->stripe_size * seg->area_count)
max_stripe_size = seg->stripe_size * seg->area_count;
@@ -1529,6 +2001,20 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
dm_tree_node_set_read_ahead(dnode, read_ahead, read_ahead_flags);
+ /* Setup thin pool callback */
+ if (layer && lv_is_thin_pool(lv) &&
+ !_thin_pool_register_callback(dm, dnode, lv))
+ return_0;
+
+ /* Add any LVs referencing a PVMOVE LV unless told not to */
+ if (dm->track_pvmove_deps && (lv->status & PVMOVE))
+ dm_list_iterate_items(sl, &lv->segs_using_this_lv)
+ if (!_add_new_lv_to_dtree(dm, dtree, sl->seg->lv, laopts, NULL))
+ return_0;
+
+ if (!_set_udev_flags_for_children(dm, lv->vg, dnode))
+ return_0;
+
return 1;
}
@@ -1549,6 +2035,12 @@ static int _create_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root
const char *name;
int r = 1;
+ /* Nothing to do if udev fallback is disabled. */
+ if (!dm->cmd->current_settings.udev_fallback) {
+ fs_set_create();
+ return 1;
+ }
+
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!(lvlayer = dm_tree_node_get_context(child)))
continue;
@@ -1591,6 +2083,10 @@ static int _remove_lv_symlinks(struct dev_manager *dm, struct dm_tree_node *root
char *vgname, *lvname, *layer;
int r = 1;
+ /* Nothing to do if udev fallback is disabled. */
+ if (!dm->cmd->current_settings.udev_fallback)
+ return 1;
+
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!dm_split_lvm_name(dm->mem, dm_tree_node_get_name(child), &vgname, &lvname, &layer)) {
r = 0;
@@ -1617,7 +2113,6 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, char *
struct dm_tree_node *child;
char *vgname, *lvname, *layer;
const char *name, *uuid;
- int r;
while ((child = dm_tree_next_child(&handle, root, 0))) {
if (!(name = dm_tree_node_get_name(child)))
@@ -1639,12 +2134,7 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, char *
if (non_toplevel_tree_dlid && !strcmp(non_toplevel_tree_dlid, uuid))
continue;
- dm_tree_set_cookie(root, 0);
- r = dm_tree_deactivate_children(root, uuid, strlen(uuid));
- if (!dm_udev_wait(dm_tree_get_cookie(root)))
- stack;
-
- if (!r)
+ if (!dm_tree_deactivate_children(root, uuid, strlen(uuid)))
return_0;
}
@@ -1652,78 +2142,74 @@ static int _clean_tree(struct dev_manager *dm, struct dm_tree_node *root, char *
}
static int _tree_action(struct dev_manager *dm, struct logical_volume *lv,
- unsigned origin_only, action_t action)
+ struct lv_activate_opts *laopts, action_t action)
{
+ const size_t DLID_SIZE = ID_LEN + sizeof(UUID_PREFIX) - 1;
struct dm_tree *dtree;
struct dm_tree_node *root;
char *dlid;
int r = 0;
- if (!(dtree = _create_partial_dtree(dm, lv, origin_only)))
+ laopts->is_activate = (action == ACTIVATE);
+
+ if (!(dtree = _create_partial_dtree(dm, lv, laopts->origin_only)))
return_0;
if (!(root = dm_tree_find_node(dtree, 0, 0))) {
log_error("Lost dependency tree root node");
- goto out;
+ goto out_no_root;
}
- if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, origin_only ? "real" : NULL)))
+ /* Restore fs cookie */
+ dm_tree_set_cookie(root, fs_get_cookie());
+
+ if (!(dlid = build_dm_uuid(dm->mem, lv->lvid.s, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL)))
goto_out;
/* Only process nodes with uuid of "LVM-" plus VG id. */
switch(action) {
case CLEAN:
/* Deactivate any unused non-toplevel nodes */
- if (!_clean_tree(dm, root, origin_only ? dlid : NULL))
+ if (!_clean_tree(dm, root, laopts->origin_only ? dlid : NULL))
goto_out;
break;
case DEACTIVATE:
- /* Deactivate LV and all devices it references that nothing else has open. */
- dm_tree_set_cookie(root, 0);
- r = dm_tree_deactivate_children(root, dlid, ID_LEN + sizeof(UUID_PREFIX) - 1);
- if (!dm_udev_wait(dm_tree_get_cookie(root)))
- stack;
- if (!r)
+ if (retry_deactivation())
+ dm_tree_retry_remove(root);
+ /* Deactivate LV and all devices it references that nothing else has open. */
+ if (!dm_tree_deactivate_children(root, dlid, DLID_SIZE))
goto_out;
if (!_remove_lv_symlinks(dm, root))
- log_error("Failed to remove all device symlinks associated with %s.", lv->name);
+ log_warn("Failed to remove all device symlinks associated with %s.", lv->name);
break;
case SUSPEND:
dm_tree_skip_lockfs(root);
- if (!dm->flush_required && (lv->status & MIRRORED) && !(lv->status & PVMOVE))
+ if (!dm->flush_required && !seg_is_raid(first_seg(lv)) &&
+ (lv->status & MIRRORED) && !(lv->status & PVMOVE))
dm_tree_use_no_flush_suspend(root);
+ /* Fall through */
case SUSPEND_WITH_LOCKFS:
- if (!dm_tree_suspend_children(root, dlid, ID_LEN + sizeof(UUID_PREFIX) - 1))
+ if (!dm_tree_suspend_children(root, dlid, DLID_SIZE))
goto_out;
break;
case PRELOAD:
case ACTIVATE:
/* Add all required new devices to tree */
- if (!_add_new_lv_to_dtree(dm, dtree, lv, origin_only ? "real" : NULL))
+ if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, (lv_is_origin(lv) && laopts->origin_only) ? "real" : NULL))
goto_out;
/* Preload any devices required before any suspensions */
- dm_tree_set_cookie(root, 0);
- r = dm_tree_preload_children(root, dlid, ID_LEN + sizeof(UUID_PREFIX) - 1);
- if (!dm_udev_wait(dm_tree_get_cookie(root)))
- stack;
- if (!r)
+ if (!dm_tree_preload_children(root, dlid, DLID_SIZE))
goto_out;
if (dm_tree_node_size_changed(root))
dm->flush_required = 1;
if (action == ACTIVATE) {
- dm_tree_set_cookie(root, 0);
- r = dm_tree_activate_children(root, dlid, ID_LEN + sizeof(UUID_PREFIX) - 1);
- if (!dm_udev_wait(dm_tree_get_cookie(root)))
- stack;
- if (!r)
+ if (!dm_tree_activate_children(root, dlid, DLID_SIZE))
goto_out;
- if (!_create_lv_symlinks(dm, root)) {
- log_error("Failed to create symlinks for %s.", lv->name);
- goto out;
- }
+ if (!_create_lv_symlinks(dm, root))
+ log_warn("Failed to create symlinks for %s.", lv->name);
}
break;
@@ -1735,30 +2221,33 @@ static int _tree_action(struct dev_manager *dm, struct logical_volume *lv,
r = 1;
out:
+ /* Save fs cookie for udev settle, do not wait here */
+ fs_set_cookie(dm_tree_get_cookie(root));
+out_no_root:
dm_tree_free(dtree);
return r;
}
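The new DLID_SIZE constant folds the repeated ID_LEN + sizeof(UUID_PREFIX) - 1 expressions into one place; a small sketch of the arithmetic, assuming UUID_PREFIX "LVM-" and ID_LEN 32 as elsewhere in LVM2:

#include <stddef.h>
#include <stdio.h>

#define UUID_PREFIX "LVM-"
#define ID_LEN 32

int main(void)
{
        /* sizeof(UUID_PREFIX) counts the trailing NUL, hence the -1. */
        size_t dlid_size = ID_LEN + sizeof(UUID_PREFIX) - 1;

        printf("%zu\n", dlid_size);     /* 36: "LVM-" plus the 32-char VG id */
        return 0;
}

Only those first 36 bytes of each child's UUID are compared by the dm_tree_*_children() calls, i.e. the "LVM-" prefix plus the VG id, so the operations cover every node belonging to this VG.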
/* origin_only may only be set if we are resuming (not activating) an origin LV */
-int dev_manager_activate(struct dev_manager *dm, struct logical_volume *lv, unsigned origin_only)
+int dev_manager_activate(struct dev_manager *dm, struct logical_volume *lv,
+ struct lv_activate_opts *laopts)
{
- if (!_tree_action(dm, lv, origin_only, ACTIVATE))
+ if (!_tree_action(dm, lv, laopts, ACTIVATE))
return_0;
- return _tree_action(dm, lv, origin_only, CLEAN);
+ if (!_tree_action(dm, lv, laopts, CLEAN))
+ return_0;
+
+ return 1;
}
/* origin_only may only be set if we are resuming (not activating) an origin LV */
int dev_manager_preload(struct dev_manager *dm, struct logical_volume *lv,
- unsigned origin_only, int *flush_required)
+ struct lv_activate_opts *laopts, int *flush_required)
{
- /* FIXME Update the pvmove implementation! */
- if ((lv->status & PVMOVE) || (lv->status & LOCKED))
- return 1;
-
- if (!_tree_action(dm, lv, origin_only, PRELOAD))
- return 0;
+ if (!_tree_action(dm, lv, laopts, PRELOAD))
+ return_0;
*flush_required = dm->flush_required;
@@ -1767,19 +2256,23 @@ int dev_manager_preload(struct dev_manager *dm, struct logical_volume *lv,
int dev_manager_deactivate(struct dev_manager *dm, struct logical_volume *lv)
{
- int r;
+ struct lv_activate_opts laopts = { 0 };
- r = _tree_action(dm, lv, 0, DEACTIVATE);
+ if (!_tree_action(dm, lv, &laopts, DEACTIVATE))
+ return_0;
- return r;
+ return 1;
}
int dev_manager_suspend(struct dev_manager *dm, struct logical_volume *lv,
- unsigned origin_only, int lockfs, int flush_required)
+ struct lv_activate_opts *laopts, int lockfs, int flush_required)
{
dm->flush_required = flush_required;
- return _tree_action(dm, lv, origin_only, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND);
+ if (!_tree_action(dm, lv, laopts, lockfs ? SUSPEND_WITH_LOCKFS : SUSPEND))
+ return_0;
+
+ return 1;
}
/*