Discussion:
[pve-devel] [PATCH v3 qemu-server 3/7] migrate : phase1 : skip sync_disk for external migration
Alexandre Derumier
2018-11-27 15:38:05 UTC
We don't need sync_disks() here, as all disks are mirrored through qemu for external migrations.
---
PVE/QemuMigrate.pm | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 922d76c..1dea286 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -568,7 +568,7 @@ sub phase1 {
$conf->{lock} = 'migrate';
PVE::QemuConfig->write_config($vmid, $conf);

- sync_disks($self, $vmid);
+ sync_disks($self, $vmid) if !$self->{opts}->{migration_external};

};
--
2.11.0
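
For illustration only, a minimal self-contained sketch of the guard this one-liner adds; the option name matches the patch, the VMID and the sync_disks() stand-in are made up:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Stand-in for the real sync_disks(); it only prints here.
    sub sync_disks { my ($self, $vmid) = @_; print "offline-syncing local disks of VM $vmid\n"; }

    my $self = { opts => { migration_external => 1 } };   # hypothetical options hash
    my $vmid = 100;                                       # hypothetical VMID

    # Same guard as the patch: skip the offline sync for external migrations,
    # because every local disk is mirrored through qemu (NBD) in phase2 instead.
    sync_disks($self, $vmid) if !$self->{opts}->{migration_external};

    print "sync_disks skipped, disks will be mirrored in phase2\n"
        if $self->{opts}->{migration_external};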
Alexandre Derumier
2018-11-27 15:38:06 UTC
---
PVE/API2/Qemu.pm | 18 +++++++++++++++---
PVE/QemuMigrate.pm | 21 ++++++++++++++-------
PVE/QemuServer.pm | 20 ++++++++++++++++----
3 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index b23db56..b85fd6d 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -1932,7 +1932,7 @@ __PACKAGE__->register_method({
migratedfrom => get_standard_option('pve-node',{ optional => 1 }),
migration_type => {
type => 'string',
- enum => ['secure', 'insecure'],
+ enum => ['secure', 'insecure', 'external'],
description => "Migration traffic is encrypted using an SSH " .
"tunnel by default. On secure, completely private networks " .
"this can be disabled to increase performance.",
@@ -1948,7 +1948,12 @@ __PACKAGE__->register_method({
description => "Target storage for the migration. (Can be '1' to use the same storage id as on the source node.)",
type => 'string',
optional => 1
- }
+ },
+ external_migration => {
+ description => "Enable external migration.",
+ type => 'boolean',
+ optional => 1,
+ },
},
},
returns => {
@@ -1994,6 +1999,13 @@ __PACKAGE__->register_method({
raise_param_exc({ targetstorage => "targetstorage can only by used with migratedfrom." })
if $targetstorage && !$migratedfrom;

+ my $external_migration = extract_param($param, 'external_migration');
+ raise_param_exc({ external_migration => "Only root may use this option." })
+ if $external_migration && $authuser ne '***@pam';
+
+ raise_param_exc({ external_migration => "targetstorage can't be used with external_migration." })
+ if ($targetstorage && $external_migration);
+
# read spice ticket from STDIN
my $spice_ticket;
if ($stateuri && ($stateuri eq 'tcp') && $migratedfrom && ($rpcenv->{type} eq 'cli')) {
@@ -2034,7 +2046,7 @@ __PACKAGE__->register_method({
syslog('info', "start VM $vmid: $upid\n");

PVE::QemuServer::vm_start($storecfg, $vmid, $stateuri, $skiplock, $migratedfrom, undef,
- $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage);
+ $machine, $spice_ticket, $migration_network, $migration_type, $targetstorage, $external_migration);

return;
};
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 1dea286..b4dc8f7 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -597,7 +597,9 @@ sub phase2 {

my $conf = $self->{vmconf};

- $self->log('info', "starting VM $vmid on remote node '$self->{node}'");
+ my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} : $vmid;
+
+ $self->log('info', "starting VM $targetvmid on remote node '$self->{node}'");

my $raddr;
my $rport;
@@ -613,10 +615,14 @@ sub phase2 {
$spice_ticket = $res->{ticket};
}

- push @$cmd , 'qm', 'start', $vmid, '--skiplock', '--migratedfrom', $nodename;
-
my $migration_type = $self->{opts}->{migration_type};

+ push @$cmd , 'qm', 'start', $targetvmid, '--skiplock';
+
+ push @$cmd, '--migratedfrom', $nodename if !$self->{opts}->{migration_external};
+
+ push @$cmd, '--external_migration' if $self->{opts}->{migration_external};
+
push @$cmd, '--migration_type', $migration_type;

push @$cmd, '--migration_network', $self->{opts}->{migration_network}
@@ -633,7 +639,7 @@ sub phase2 {
}

if ($self->{opts}->{targetstorage}) {
- push @$cmd, '--targetstorage', $self->{opts}->{targetstorage};
+ push @$cmd, '--targetstorage', $self->{opts}->{targetstorage} if !$self->{opts}->{migration_external};
}

my $spice_port;
@@ -650,7 +656,7 @@ sub phase2 {
}
elsif ($line =~ m!^migration listens on unix:(/run/qemu-server/(\d+)\.migrate)$!) {
$raddr = $1;
- die "Destination UNIX sockets VMID does not match source VMID" if $vmid ne $2;
+ die "Destination UNIX sockets VMID does not match source VMID" if $targetvmid ne $2;
$ruri = "unix:$raddr";
}
elsif ($line =~ m/^migration listens on port (\d+)$/) {
@@ -720,13 +726,14 @@ sub phase2 {

my $start = time();

- if ($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) {
+ if (($self->{opts}->{targetstorage} && defined($self->{online_local_volumes})) || $self->{opts}->{migration_external}) {
$self->{storage_migration} = 1;
$self->{storage_migration_jobs} = {};
$self->log('info', "starting storage migration");

die "The number of local disks does not match between the source and the destination.\n"
- if (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+ if !$self->{opts}->{migration_external} && (scalar(keys %{$self->{target_drive}}) != scalar @{$self->{online_local_volumes}});
+
foreach my $drive (keys %{$self->{target_drive}}){
my $nbd_uri = $self->{target_drive}->{$drive}->{nbd_uri};
$self->log('info', "$drive: start migration to $nbd_uri");
diff --git a/PVE/QemuServer.pm b/PVE/QemuServer.pm
index 8023150..0b6c857 100644
--- a/PVE/QemuServer.pm
+++ b/PVE/QemuServer.pm
@@ -5006,7 +5006,7 @@ sub vmconfig_update_disk {

sub vm_start {
my ($storecfg, $vmid, $statefile, $skiplock, $migratedfrom, $paused,
- $forcemachine, $spice_ticket, $migration_network, $migration_type, $targetstorage) = @_;
+ $forcemachine, $spice_ticket, $migration_network, $migration_type, $targetstorage, $external_migration) = @_;

PVE::QemuConfig->lock_config($vmid, sub {
my $conf = PVE::QemuConfig->load_config($vmid, $migratedfrom);
@@ -5031,7 +5031,19 @@ sub vm_start {

my $local_volumes = {};

- if ($targetstorage) {
+ if ($external_migration) {
+ foreach_drive($conf, sub {
+ my ($ds, $drive) = @_;
+
+ return if drive_is_cdrom($drive);
+
+ my $volid = $drive->{file};
+
+ return if !$volid;
+
+ $local_volumes->{$ds} = $volid;
+ });
+ } elsif ($targetstorage) {
foreach_drive($conf, sub {
my ($ds, $drive) = @_;

@@ -5219,7 +5231,7 @@ sub vm_start {
}

#start nbd server for storage migration
- if ($targetstorage) {
+ if ($targetstorage || $external_migration) {
my $nodename = PVE::INotify::nodename();
my $migrate_network_addr = PVE::Cluster::get_local_migration_ip($migration_network);
my $localip = $migrate_network_addr ? $migrate_network_addr : PVE::Cluster::remote_node_ip($nodename, 1);
@@ -5238,7 +5250,7 @@ sub vm_start {
}
}

- if ($migratedfrom) {
+ if ($migratedfrom || $external_migration) {
eval {
set_migration_caps($vmid);
};
--
2.11.0
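
For illustration, a self-contained sketch of the disk-collection logic this patch adds to vm_start. The real code uses foreach_drive()/drive_is_cdrom() from PVE::QemuServer and the parsed drive format is richer; the sample config and the crude parser below are made up:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Hypothetical, simplified VM config: drive key => drive string.
    my $conf = {
        scsi0 => 'local-lvm:vm-100-disk-0,size=32G',
        scsi1 => 'local-lvm:vm-100-disk-1,size=8G',
        ide2  => 'local:iso/debian.iso,media=cdrom',
    };

    # Simplified stand-in for PVE::QemuServer::drive_is_cdrom().
    sub drive_is_cdrom { return $_[0]->{media} && $_[0]->{media} eq 'cdrom' }

    my $local_volumes = {};
    foreach my $ds (keys %$conf) {
        # Crude parse: first comma-separated field is the volid, the rest are options.
        my ($volid, @opts) = split /,/, $conf->{$ds};
        my %drive = (file => $volid);
        for my $o (@opts) {
            my ($k, $v) = split /=/, $o, 2;
            $drive{$k} = $v;
        }

        next if drive_is_cdrom(\%drive);   # cdroms are not mirrored
        next if !$drive{file};

        # For external migration every remaining disk is collected, so an NBD
        # export can be started for it later in vm_start.
        $local_volumes->{$ds} = $drive{file};
    }

    print "$_ => $local_volumes->{$_}\n" for sort keys %$local_volumes;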
Alexandre Derumier
2018-11-27 15:38:04 UTC
Create the VM on the target cluster with the same options.
Disks are created with the same size and options as on the source.
---
PVE/QemuMigrate.pm | 88 ++++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 83 insertions(+), 5 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index e9e9075..922d76c 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -227,6 +227,89 @@ sub prepare {
}
}

+ # test ssh connection
+ my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
+ eval { $self->cmd_quiet($cmd); };
+ die "Can't connect to destination address using public key\n" if $@;
+
+ if($self->{opts}->{migration_external}) {
+
+ #get remote nextvmid
+ eval {
+ my $cmd = [@{$self->{rem_ssh}}, 'pvesh', 'get', '/cluster/nextid'];
+ PVE::Tools::run_command($cmd, outfunc => sub {
+ my $line = shift;
+ if ($line =~ m/^(\d+)/) {
+ $self->{opts}->{targetvmid} = $line;
+ }
+ });
+ };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ die $err;
+ }
+
+ die "can't find the next free vmid on remote cluster\n" if !$self->{opts}->{targetvmid};
+
+ #create vm
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'create', $self->{opts}->{targetvmid}];
+
+ foreach my $opt (keys %{$conf}) {
+ next if $opt =~ m/^(pending|snapshots|digest|parent)/;
+ next if $opt =~ m/^(ide|scsi|virtio)(\d+)/;
+
+ if ($opt =~ m/^(net)(\d+)/ && $self->{opts}->{targetbridge}) {
+ my $netid = "net$2";
+ my $d = PVE::QemuServer::parse_net($conf->{$netid});
+ $d->{bridge} = $self->{opts}->{targetbridge};
+ $conf->{$opt} = PVE::QemuServer::print_net($d);
+ }
+
+ die "can't migrate unused disk. please remove it before migrate\n" if $opt =~ m/^(unused)(\d+)/;
+ push @$cmd , "-$opt", PVE::Tools::shellquote($conf->{$opt});
+ }
+
+ PVE::QemuServer::foreach_drive($conf, sub {
+ my ($ds, $drive) = @_;
+
+ if (PVE::QemuServer::drive_is_cdrom($drive, 1)) {
+ push @$cmd , "-$ds", PVE::Tools::shellquote($conf->{$ds});
+ return;
+ }
+
+ my $volid = $drive->{file};
+ return if !$volid;
+
+ my ($sid, $volname) = PVE::Storage::parse_volume_id($volid, 1);
+ return if !$sid;
+ my $size = PVE::Storage::volume_size_info($self->{storecfg}, $volid, 5);
+ die "can't get size\n" if !$size;
+ $size = $size/1024/1024/1024;
+ my $targetsid = $self->{opts}->{targetstorage} ? $self->{opts}->{targetstorage} : $sid;
+
+ my $data = { %$drive };
+ delete $data->{$_} for qw(index interface file size);
+ my $drive_conf = "$targetsid:$size";
+ foreach my $drive_opt (keys %{$data}) {
+ $drive_conf .= ",$drive_opt=$data->{$drive_opt}";
+ }
+
+ push @$cmd , "-$ds", PVE::Tools::shellquote($drive_conf);
+ });
+
+ push @$cmd , '-lock', 'migrate';
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ die $err;
+ }
+
+ return 1;
+ }
+
my $vollist = PVE::QemuServer::get_vm_volumes($conf);

my $need_activate = [];
@@ -253,11 +336,6 @@ sub prepare {
# activate volumes
PVE::Storage::activate_volumes($self->{storecfg}, $need_activate);

- # test ssh connection
- my $cmd = [ @{$self->{rem_ssh}}, '/bin/true' ];
- eval { $self->cmd_quiet($cmd); };
- die "Can't connect to destination address using public key\n" if $@;
-
return $running;
}
--
2.11.0
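
To make the drive handling above concrete, here is a standalone sketch of how a "-<drive>" argument for the remote `qm create` is rebuilt from a parsed drive. The values are hypothetical and the size is hard-coded instead of coming from PVE::Storage::volume_size_info() as in the patch:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Hypothetical parsed drive, roughly the shape returned by parse_drive().
    my $drive = {
        file      => 'local-lvm:vm-100-disk-0',
        size      => '32G',
        cache     => 'none',
        discard   => 'on',
        interface => 'scsi',
        index     => 0,
    };

    my $size_bytes = 32 * 1024 * 1024 * 1024;     # stand-in for volume_size_info()
    my $size_gb    = $size_bytes / 1024 / 1024 / 1024;

    my $targetsid  = 'other-storage';             # --targetstorage, or the source storage id

    # Keep only the per-drive options; drop what the target allocates itself,
    # mirroring the delete() in the patch.
    my %data = %$drive;
    delete @data{qw(index interface file size)};

    # "<storage>:<size-in-GiB>" asks `qm create` to allocate a fresh volume of that size.
    my $drive_conf = "$targetsid:$size_gb";
    $drive_conf .= ",$_=$data{$_}" for sort keys %data;

    print "$drive_conf\n";   # e.g. other-storage:32,cache=none,discard=on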
Alexandre Derumier
2018-11-27 15:38:03 UTC
qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge]
---
PVE/API2/Qemu.pm | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PVE/CLI/qm.pm | 2 ++
2 files changed, 81 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c82e2fa..b23db56 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3162,6 +3162,85 @@ __PACKAGE__->register_method({
}});

__PACKAGE__->register_method({
+ name => 'migrate_vm_external',
+ path => '{vmid}/migrate_external',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Migrate virtual machine to an external cluster. Creates a new migration task.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ }),
+ targetstorage => get_standard_option('pve-storage-id', {
+ description => "Target remote storage.",
+ optional => 1,
+ }),
+ targetbridge => {
+ type => 'string',
+ description => "Target remote bridge.",
+ format_description => 'bridge',
+ optional => 1,
+ },
+ },
+ },
+ returns => {
+ type => 'string',
+ description => "the task ID.",
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ die "Only root can do external migration." if $authuser ne '***@pam';
+
+ my $target = extract_param($param, 'target');
+
+ my $vmid = extract_param($param, 'vmid');
+
+ PVE::Cluster::check_cfs_quorum();
+
+ raise_param_exc({ target => "target is member of local cluster."}) if PVE::Cluster::check_node_exists($target, 1);
+
+ die "HA must be disable for external migration." if PVE::HA::Config::vm_is_ha_managed($vmid);
+
+ my $targetip = PVE::Network::get_ip_from_hostname($target, 1);
+
+ # test if VM exists
+ my $conf = PVE::QemuConfig->load_config($vmid);
+
+ # try to detect errors early
+
+ PVE::QemuConfig->check_lock($conf);
+
+ die "VM need to be online for external migration" if !PVE::QemuServer::check_running($vmid);
+
+ $param->{online} = 1;
+ $param->{migration_external} = 1;
+
+ my $realcmd = sub {
+ PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
+ };
+
+ my $worker = sub {
+ return PVE::GuestHelpers::guest_migration_lock($vmid, 10, $realcmd);
+ };
+
+ return $rpcenv->fork_worker('qmigrate', $vmid, $authuser, $worker);
+
+ }});
+
+__PACKAGE__->register_method({
name => 'monitor',
path => '{vmid}/monitor',
method => 'POST',
diff --git a/PVE/CLI/qm.pm b/PVE/CLI/qm.pm
index eceb9b3..5aa1d48 100755
--- a/PVE/CLI/qm.pm
+++ b/PVE/CLI/qm.pm
@@ -850,6 +850,8 @@ our $cmddef = {

migrate => [ "PVE::API2::Qemu", 'migrate_vm', ['vmid', 'target'], { node => $nodename }, $upid_exit ],

+ migrate_external => [ "PVE::API2::Qemu", 'migrate_vm_external', ['vmid', 'target'], { node => $nodename }, $upid_exit ],
+
set => [ "PVE::API2::Qemu", 'update_vm', ['vmid'], { node => $nodename } ],

resize => [ "PVE::API2::Qemu", 'resize_vm', ['vmid', 'disk', 'size'], { node => $nodename } ],
--
2.11.0
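
For reference, an invocation matching the synopsis above; the VMID, node, storage and bridge names are placeholders:

    qm migrate_external 100 targetnode1 --targetstorage local-lvm --targetbridge vmbr1

As with the existing migrate command, the API returns the task ID (UPID) of the forked worker.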
David Limbeck
2018-12-07 14:49:52 UTC
Sorry for the delay, firewall conntrack logging took longer than expected.

Still need some time to go through your patches in detail, but one thing stood out:
Post by Alexandre Derumier
qm migrate_external <vmid> <targetremotenode> [--targetstorage otherstorage] [--targetbridge otherbridge]
---
PVE/API2/Qemu.pm | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
PVE/CLI/qm.pm | 2 ++
2 files changed, 81 insertions(+)
diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index c82e2fa..b23db56 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3162,6 +3162,85 @@ __PACKAGE__->register_method({
}});
__PACKAGE__->register_method({
+ name => 'migrate_vm_external',
+ path => '{vmid}/migrate_external',
+ method => 'POST',
+ protected => 1,
+ proxyto => 'node',
+ description => "Migrate virtual machine to an external cluster. Creates a new migration task.",
+ permissions => {
+ check => ['perm', '/vms/{vmid}', [ 'VM.Migrate' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ vmid => get_standard_option('pve-vmid', { completion => \&PVE::QemuServer::complete_vmid }),
+ target => get_standard_option('pve-node', {
+ description => "Target node.",
+ }),
'pve-node' does not support fully qualified domain names, only
[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])? (copied from
pve-common/src/PVE/JSONSchema.pm).
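
To make the review comment concrete, a quick standalone check of the 'pve-node' pattern quoted above against a short hostname and an FQDN (both names made up):

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Pattern used for 'pve-node' (quoted above from pve-common/src/PVE/JSONSchema.pm).
    my $pve_node_re = qr/^[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?$/;

    for my $target ('node2', 'node2.example.com') {
        printf "%-20s %s\n", $target,
            $target =~ $pve_node_re ? 'accepted by pve-node' : 'rejected (dots not allowed)';
    }

So an external target given as an FQDN would be rejected by the current 'target' parameter definition.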
Alexandre DERUMIER
2018-12-10 08:15:44 UTC
Post by David Limbeck
Sorry for the delay, firewall conntrack logging took longer than expected.
no problem, thanks for working on it!
Post by David Limbeck
'pve-node' does not support fully qualified domain names, only
[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])? (copied from
pve-common/src/PVE/JSONSchema.pm).
Ok I'll look at this.



Alexandre Derumier
2018-11-27 15:38:09 UTC
The ssh private key needs to be in:

/etc/pve/priv/migrate_external/id_rsa_mytargetnode
---
PVE/API2/Qemu.pm | 5 +++++
PVE/QemuMigrate.pm | 2 ++
2 files changed, 7 insertions(+)

diff --git a/PVE/API2/Qemu.pm b/PVE/API2/Qemu.pm
index b85fd6d..d31fd96 100644
--- a/PVE/API2/Qemu.pm
+++ b/PVE/API2/Qemu.pm
@@ -3226,6 +3226,10 @@ __PACKAGE__->register_method({

die "HA must be disable for external migration." if PVE::HA::Config::vm_is_ha_managed($vmid);

+ my $migration_external_sshkey = "/etc/pve/priv/migrate_external/id_rsa_$target";
+
+ die "ssh privatekey is missing for $target" if !-e $migration_external_sshkey;
+
my $targetip = PVE::Network::get_ip_from_hostname($target, 1);

# test if VM exists
@@ -3239,6 +3243,7 @@ __PACKAGE__->register_method({

$param->{online} = 1;
$param->{migration_external} = 1;
+ $param->{migration_external_sshkey} = $migration_external_sshkey;

my $realcmd = sub {
PVE::QemuMigrate->migrate($target, $targetip, $vmid, $param);
diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index ed8df71..9942757 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -234,6 +234,8 @@ sub prepare {

if($self->{opts}->{migration_external}) {

+ push @{$self->{rem_ssh}}, '-i', $self->{opts}->{migration_external_sshkey};
+
#get remote nextvmid
eval {
my $cmd = [@{$self->{rem_ssh}}, 'pvesh', 'get', '/cluster/nextid'];
--
2.11.0
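
A self-contained sketch of what the added '-i' amounts to once the remote command is assembled. The key path follows the patch; the target name is a placeholder, and the ssh options shown are illustrative, not the exact ones qemu-server uses for {rem_ssh}:

    #!/usr/bin/perl
    use strict;
    use warnings;

    my $target = 'mytargetnode';   # placeholder name of the external target node
    my $migration_external_sshkey = "/etc/pve/priv/migrate_external/id_rsa_$target";

    # The API handler above dies early when this file is missing.
    print "key present: ", (-e $migration_external_sshkey ? 'yes' : 'no'), "\n";

    # Illustrative ssh command line; the relevant part is the extra '-i <key>'.
    my @rem_ssh = ('/usr/bin/ssh', '-o', 'BatchMode=yes',
                   '-i', $migration_external_sshkey, "root\@$target");

    my @cmd = (@rem_ssh, 'pvesh', 'get', '/cluster/nextid');   # as used in prepare()
    print join(' ', @cmd), "\n";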
Alexandre Derumier
2018-11-27 15:38:07 UTC
We destroy the target VM if phase2 fails.
---
PVE/QemuMigrate.pm | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index b4dc8f7..9e963d3 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -915,6 +915,9 @@ sub phase2_cleanup {
my ($self, $vmid, $err) = @_;

return if !$self->{errors};
+
+ my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} : $vmid;
+
$self->{phase2errors} = 1;

$self->log('info', "aborting phase 2 - cleanup resources");
@@ -948,13 +951,26 @@ sub phase2_cleanup {

my $nodename = PVE::INotify::nodename();

- my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $vmid, '--skiplock', '--migratedfrom', $nodename];
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'stop', $targetvmid, '--skiplock'];
+ push @$cmd, '--migratedfrom', $nodename if !$self->{opts}->{migration_external};
+
eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
if (my $err = $@) {
$self->log('err', $err);
$self->{errors} = 1;
}

+ if ($self->{opts}->{migration_external}) {
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'destroy', $targetvmid, '--skiplock'];
+
+ eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
+ if (my $err = $@) {
+ $self->log('err', $err);
+ $self->{errors} = 1;
+ }
+
+ }
+
if ($self->{tunnel}) {
eval { finish_tunnel($self, $self->{tunnel}); };
if (my $err = $@) {
--
2.11.0
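
A minimal sketch of the cleanup order this patch ends up with on the remote side; the VMIDs, node name and the command runner are stand-ins for the real run_command() over {rem_ssh}:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Stand-in for PVE::Tools::run_command() over ssh; here it only prints.
    sub run_remote { print "ssh target: @{$_[0]}\n" }

    my $opts = { migration_external => 1, targetvmid => 4711 };   # hypothetical values
    my $vmid = 100;

    # Same fallback as the patch: external migrations act on the freshly created
    # target VMID, in-cluster migrations on the original one.
    my $targetvmid = $opts->{targetvmid} ? $opts->{targetvmid} : $vmid;

    # 1) stop the half-started target VM (no --migratedfrom for external targets)
    my $stop = ['qm', 'stop', $targetvmid, '--skiplock'];
    push @$stop, '--migratedfrom', 'sourcenode' if !$opts->{migration_external};
    run_remote($stop);

    # 2) for external migration, also remove the VM that was created in prepare()
    run_remote(['qm', 'destroy', $targetvmid, '--skiplock']) if $opts->{migration_external};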
Alexandre Derumier
2018-11-27 15:38:08 UTC
We don't move the original config for external migrations.
---
PVE/QemuMigrate.pm | 33 +++++++++++++++++++--------------
1 file changed, 19 insertions(+), 14 deletions(-)

diff --git a/PVE/QemuMigrate.pm b/PVE/QemuMigrate.pm
index 9e963d3..ed8df71 100644
--- a/PVE/QemuMigrate.pm
+++ b/PVE/QemuMigrate.pm
@@ -1000,6 +1000,8 @@ sub phase3 {
sub phase3_cleanup {
my ($self, $vmid, $err) = @_;

+ my $targetvmid = $self->{opts}->{targetvmid} ? $self->{opts}->{targetvmid} : $vmid;
+
my $conf = $self->{vmconf};
return if $self->{phase2errors};

@@ -1013,7 +1015,7 @@ sub phase3_cleanup {
eval { PVE::QemuServer::qemu_blockjobs_cancel($vmid, $self->{storage_migration_jobs}) };
eval { PVE::QemuMigrate::cleanup_remotedisks($self) };
die "Failed to completed storage migration\n";
- } else {
+ } elsif (!$self->{opts}->{migration_external}) {
foreach my $target_drive (keys %{$self->{target_drive}}) {
my $drive = PVE::QemuServer::parse_drive($target_drive, $self->{target_drive}->{$target_drive}->{volid});
$conf->{$target_drive} = PVE::QemuServer::print_drive($vmid, $drive);
@@ -1022,22 +1024,25 @@ sub phase3_cleanup {
}
}

- # transfer replication state before move config
- $self->transfer_replication_state() if $self->{replicated_volumes};
+ if (!$self->{opts}->{migration_external}) {

- # move config to remote node
- my $conffile = PVE::QemuConfig->config_file($vmid);
- my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});
+ # transfer replication state before move config
+ $self->transfer_replication_state() if $self->{replicated_volumes};

- die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
- if !rename($conffile, $newconffile);
+ # move config to remote node
+ my $conffile = PVE::QemuConfig->config_file($vmid);
+ my $newconffile = PVE::QemuConfig->config_file($vmid, $self->{node});

- $self->switch_replication_job_target() if $self->{replicated_volumes};
+ die "Failed to move config to node '$self->{node}' - rename failed: $!\n"
+ if !rename($conffile, $newconffile);
+
+ $self->switch_replication_job_target() if $self->{replicated_volumes};
+ }

if ($self->{livemigration}) {
if ($self->{storage_migration}) {
# stop nbd server on remote vm - requirement for resume since 2.9
- my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $vmid];
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'nbdstop', $targetvmid];

eval{ PVE::Tools::run_command($cmd, outfunc => sub {}, errfunc => sub {}) };
if (my $err = $@) {
@@ -1049,14 +1054,14 @@ sub phase3_cleanup {
# config moved and nbd server stopped - now we can resume vm on target
if ($tunnel && $tunnel->{version} && $tunnel->{version} >= 1) {
eval {
- $self->write_tunnel($tunnel, 30, "resume $vmid");
+ $self->write_tunnel($tunnel, 30, "resume $targetvmid");
};
if (my $err = $@) {
$self->log('err', $err);
$self->{errors} = 1;
}
} else {
- my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $vmid, '--skiplock', '--nocheck'];
+ my $cmd = [@{$self->{rem_ssh}}, 'qm', 'resume', $targetvmid, '--skiplock', '--nocheck'];
my $logf = sub {
my $line = shift;
$self->log('err', $line);
@@ -1114,7 +1119,7 @@ sub phase3_cleanup {
$self->{errors} = 1;
}

- if($self->{storage_migration}) {
+ if($self->{storage_migration} && !$self->{opts}->{migration_external}) {
# destroy local copies
my $volids = $self->{online_local_volumes};

@@ -1130,7 +1135,7 @@ sub phase3_cleanup {
}

# clear migrate lock
- my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $vmid ];
+ my $cmd = [ @{$self->{rem_ssh}}, 'qm', 'unlock', $targetvmid ];
$self->cmd_logerr($cmd, errmsg => "failed to clear migrate lock");
}
--
2.11.0