Discussion:
[pve-devel] [PATCH manager v2 1/5] ceph: move create/destroy pool to CephTools
Thomas Lamprecht
2018-11-22 19:34:18 UTC
We will reuse this in the future, e.g., when creating a data and
metadata pool for CephFS.

Allow to pass a $rados object (to reuse it, as initializing is not
that cheap) but also create it if it's undefined, for convenience.
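
A rough usage sketch (the pool names and parameters here are only
illustrative): one $rados connection can be shared across calls, or
omitted so the helpers create their own:

    my $rados = PVE::RADOS->new();
    # reuse the same connection for several monitor commands
    PVE::CephTools::create_pool('somepool', { pg_num => 64, size => 3 }, $rados);
    PVE::CephTools::destroy_pool('somepool', $rados);

    # without a $rados argument the helpers connect themselves
    PVE::CephTools::create_pool('otherpool', {});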

Signed-off-by: Thomas Lamprecht <***@proxmox.com>
---

no changes

PVE/API2/Ceph.pm | 50 ++--------------------------------
PVE/CephTools.pm | 71 ++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 73 insertions(+), 48 deletions(-)

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index 0a70db05..a6eec24a 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -1624,45 +1624,7 @@ __PACKAGE__->register_method ({

my $worker = sub {

- my $rados = PVE::RADOS->new();
- $rados->mon_command({
- prefix => "osd pool create",
- pool => $pool,
- pg_num => int($pg_num),
- format => 'plain',
- });
-
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $pool,
- var => 'min_size',
- val => $min_size,
- format => 'plain',
- });
-
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $pool,
- var => 'size',
- val => $size,
- format => 'plain',
- });
-
- if (defined($param->{crush_rule})) {
- $rados->mon_command({
- prefix => "osd pool set",
- pool => $pool,
- var => 'crush_rule',
- val => $param->{crush_rule},
- format => 'plain',
- });
- }
-
- $rados->mon_command({
- prefix => "osd pool application enable",
- pool => $pool,
- app => $application,
- });
+ PVE::CephTools::create_pool($pool, $param);

if ($param->{add_storages}) {
my $err;
@@ -1862,15 +1824,7 @@ __PACKAGE__->register_method ({
}
}

- my $rados = PVE::RADOS->new();
- # fixme: '--yes-i-really-really-mean-it'
- $rados->mon_command({
- prefix => "osd pool delete",
- pool => $pool,
- pool2 => $pool,
- sure => '--yes-i-really-really-mean-it',
- format => 'plain',
- });
+ PVE::CephTools::destroy_pool($pool);

if ($param->{remove_storages}) {
my $err;
diff --git a/PVE/CephTools.pm b/PVE/CephTools.pm
index 600243ca..8a9afa84 100644
--- a/PVE/CephTools.pm
+++ b/PVE/CephTools.pm
@@ -7,6 +7,7 @@ use File::Path;
use IO::File;

use PVE::Tools qw(run_command dir_glob_foreach);
+use PVE::RADOS;

my $ccname = 'ceph'; # ceph cluster name
my $ceph_cfgdir = "/etc/ceph";
@@ -183,6 +184,76 @@ sub write_ceph_config {
PVE::Tools::file_set_contents($pve_ceph_cfgpath, $out);
}

+sub create_pool {
+ my ($pool, $param, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $pg_num = $param->{pg_num} || 64;
+ my $size = $param->{size} || 3;
+ my $min_size = $param->{min_size} || 2;
+ my $application = $param->{application} // 'rbd';
+
+ $rados->mon_command({
+ prefix => "osd pool create",
+ pool => $pool,
+ pg_num => int($pg_num),
+ format => 'plain',
+ });
+
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $pool,
+ var => 'min_size',
+ val => $min_size,
+ format => 'plain',
+ });
+
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $pool,
+ var => 'size',
+ val => $size,
+ format => 'plain',
+ });
+
+ if (defined($param->{crush_rule})) {
+ $rados->mon_command({
+ prefix => "osd pool set",
+ pool => $pool,
+ var => 'crush_rule',
+ val => $param->{crush_rule},
+ format => 'plain',
+ });
+ }
+
+ $rados->mon_command({
+ prefix => "osd pool application enable",
+ pool => $pool,
+ app => $application,
+ });
+
+}
+
+sub destroy_pool {
+ my ($pool, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ # fixme: '--yes-i-really-really-mean-it'
+ $rados->mon_command({
+ prefix => "osd pool delete",
+ pool => $pool,
+ pool2 => $pool,
+ sure => '--yes-i-really-really-mean-it',
+ format => 'plain',
+ });
+}
+
sub setup_pve_symlinks {
# fail if we find a real file instead of a link
if (-f $ceph_cfgpath) {
--
2.19.1
Thomas Lamprecht
2018-11-22 19:34:21 UTC
Add aliases for the existing commands; ignore the ones for MDS and
CephFS, as those never hit any repo.
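
For illustration (the pool name is made up), both forms keep working
after this change:

    # new nested form
    pveceph pool create mypool
    pveceph pool ls
    # old flat commands remain available as aliases
    pveceph createpool mypool
    pveceph lspools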

Signed-off-by: Thomas Lamprecht <***@proxmox.com>
---

no changes

PVE/CLI/pveceph.pm | 63 ++++++++++++++++++++++++++++++----------------
1 file changed, 42 insertions(+), 21 deletions(-)

diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index 57097b13..6f92f95e 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -158,28 +158,49 @@ __PACKAGE__->register_method ({

our $cmddef = {
init => [ 'PVE::API2::Ceph', 'init', [], { node => $nodename } ],
- lspools => [ 'PVE::API2::Ceph', 'lspools', [], { node => $nodename }, sub {
- my $res = shift;
+ pool => {
+ ls => [ 'PVE::API2::Ceph', 'lspools', [], { node => $nodename }, sub {
+ my $res = shift;

- printf("%-20s %10s %10s %10s %10s %20s\n", "Name", "size", "min_size",
- "pg_num", "%-used", "used");
- foreach my $p (sort {$a->{pool_name} cmp $b->{pool_name}} @$res) {
- printf("%-20s %10d %10d %10d %10.2f %20d\n", $p->{pool_name},
- $p->{size}, $p->{min_size}, $p->{pg_num},
- $p->{percent_used}, $p->{bytes_used});
- }
- }],
- createpool => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }],
- destroypool => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ],
- createfs => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
- createosd => [ 'PVE::API2::CephOSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
- destroyosd => [ 'PVE::API2::CephOSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
- createmon => [ 'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit],
- destroymon => [ 'PVE::API2::Ceph', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
- createmgr => [ 'PVE::API2::Ceph', 'createmgr', [], { node => $nodename }, $upid_exit],
- destroymgr => [ 'PVE::API2::Ceph', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
- createmds => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
- destroymds => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['id'], { node => $nodename }, $upid_exit],
+ printf("%-20s %10s %10s %10s %10s %20s\n", "Name", "size", "min_size",
+ "pg_num", "%-used", "used");
+ foreach my $p (sort {$a->{pool_name} cmp $b->{pool_name}} @$res) {
+ printf("%-20s %10d %10d %10d %10.2f %20d\n", $p->{pool_name},
+ $p->{size}, $p->{min_size}, $p->{pg_num},
+ $p->{percent_used}, $p->{bytes_used});
+ }
+ }],
+ create => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }],
+ destroy => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ],
+ },
+ lspools => { alias => 'pool ls' },
+ createpool => { alias => 'pool create' },
+ destroypool => { alias => 'pool destroy' },
+ fs => {
+ create => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
+ },
+ osd => {
+ create => [ 'PVE::API2::CephOSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
+ destroy => [ 'PVE::API2::CephOSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
+ },
+ createosd => { alias => 'osd create' },
+ destroyosd => { alias => 'osd destroy' },
+ mon => {
+ create => [ 'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit],
+ destroy => [ 'PVE::API2::Ceph', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
+ },
+ createmon => { alias => 'mon create' },
+ destroymon => { alias => 'mon destroy' },
+ mgr => {
+ create => [ 'PVE::API2::Ceph', 'createmgr', [], { node => $nodename }, $upid_exit],
+ destroy => [ 'PVE::API2::Ceph', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
+ },
+ createmgr => { alias => 'mgr create' },
+ destroymgr => { alias => 'mgr destroy' },
+ mds => {
+ create => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
+ destroy => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['id'], { node => $nodename }, $upid_exit],
+ },
start => [ 'PVE::API2::Ceph', 'start', ['service'], { node => $nodename }, $upid_exit],
stop => [ 'PVE::API2::Ceph', 'stop', ['service'], { node => $nodename }, $upid_exit],
install => [ __PACKAGE__, 'install', [] ],
--
2.19.1
Thomas Lamprecht
2018-11-22 19:34:19 UTC
Allow to create, list and destroy a Ceph Metadata Server (MDS) over
the API and the CLI `pveceph` tool.

Besides setting up the local systemd service template and the MDS
data directory, we also add a reference to the MDS in the ceph.conf.
We note the backing host (node) for the respective MDS and set up
'mds standby for name' = 'pve' so that the PVE-created ones form a
single group. If we decide to add integration for rank- or
path-specific MDS (possibly useful for CephFS under quite a bit of
load), this may help as a starting point.
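
As a sketch, the resulting ceph.conf entries look roughly like this
(the node name 'nodeA' is only an example; the 'mds standby replay'
line is only added when the hotstandby option is set):

    [mds]
         keyring = /var/lib/ceph/mds/ceph-$id/keyring

    [mds.nodeA]
         host = nodeA
         mds standby for name = pve
         mds standby replay = true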

On create, check early if a reference already exists in ceph.conf and
abort in that case. If we only see existing data directories later
on, we do not remove them; they could well be from an older manual
create, where it is possibly dangerous to just remove them. Let the
user handle it themselves in that case.

Signed-off-by: Thomas Lamprecht <***@proxmox.com>
Co-authored-by: Alwin Antreich <***@proxmox.com>
---

changes v1 -> v2:
* fix directory index, return more info
* added get_cluster_mds_state


PVE/API2/Ceph.pm | 7 ++
PVE/API2/Ceph/MDS.pm | 247 +++++++++++++++++++++++++++++++++++++++++
PVE/API2/Ceph/Makefile | 15 +++
PVE/API2/Makefile | 2 +-
PVE/CLI/pveceph.pm | 3 +
PVE/CephTools.pm | 140 ++++++++++++++++++++++-
6 files changed, 412 insertions(+), 2 deletions(-)
create mode 100644 PVE/API2/Ceph/MDS.pm
create mode 100644 PVE/API2/Ceph/Makefile

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index a6eec24a..d3e8d665 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -548,6 +548,7 @@ use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command file_get_contents file_set_contents);

+use PVE::API2::Ceph::MDS;
use PVE::API2::Storage::Config;

use base qw(PVE::RESTHandler);
@@ -559,6 +560,11 @@ __PACKAGE__->register_method ({
path => 'osd',
});

+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Ceph::MDS",
+ path => 'mds',
+});
+
__PACKAGE__->register_method ({
name => 'index',
path => '',
@@ -590,6 +596,7 @@ __PACKAGE__->register_method ({
{ name => 'mon' },
{ name => 'osd' },
{ name => 'pools' },
+ { name => 'mds' },
{ name => 'stop' },
{ name => 'start' },
{ name => 'status' },
diff --git a/PVE/API2/Ceph/MDS.pm b/PVE/API2/Ceph/MDS.pm
new file mode 100644
index 00000000..30d03285
--- /dev/null
+++ b/PVE/API2/Ceph/MDS.pm
@@ -0,0 +1,247 @@
+package PVE::API2::Ceph::MDS;
+
+use strict;
+use warnings;
+
+use PVE::CephTools;
+use PVE::INotify;
+use PVE::JSONSchema qw(get_standard_option);
+use PVE::RADOS;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+
+use base qw(PVE::RESTHandler);
+
+__PACKAGE__->register_method ({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "MDS directory index.",
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
+ proxyto => 'node',
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {
+ name => {
+ description => "The name (ID) for the MDS",
+ },
+ addr => {
+ type => 'string',
+ optional => 1,
+ },
+ host => {
+ type => 'string',
+ optional => 1,
+ },
+ state => {
+ type => 'string',
+ description => 'State of the MDS',
+ },
+ standby_replay => {
+ type => 'boolean',
+ optional => 1,
+ description => 'If true, the standby MDS is polling the active MDS for faster recovery (hot standby).',
+ },
+ rank => {
+ type => 'integer',
+ optional => 1,
+ },
+ },
+ },
+ links => [ { rel => 'child', href => "{name}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $res = [];
+
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ my $mds_hash = {};
+
+ foreach my $section (keys %$cfg) {
+ my $d = $cfg->{$section};
+
+ if ($section =~ m/^mds\.(\S+)$/) {
+ my $mds_id = $1;
+ if (defined($d->{host})) {
+ $mds_hash->{$mds_id} = {
+ name => $mds_id,
+ state => 'unknown',
+ addr => $d->{host},
+ host => $d->{host},
+ };
+ }
+ }
+ }
+
+ if (scalar(keys %$mds_hash) > 0) {
+ # does not include configured but stopped ones
+ my $mds_state = PVE::CephTools::get_cluster_mds_state();
+
+ foreach my $name (keys %$mds_state) {
+ my $d = $mds_state->{$name};
+ # just overwrite, this always provides more info
+ map { $mds_hash->{$name}->{$_} = $d->{$_} } keys %$d;
+ }
+ }
+
+ return PVE::RESTHandler::hash_to_array($mds_hash, 'name');
+ }
+});
+
+__PACKAGE__->register_method ({
+ name => 'createmds',
+ path => '{name}',
+ method => 'POST',
+ description => "Create Ceph Metadata Server (MDS)",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ name => {
+ type => 'string',
+ optional => 1,
+ default => 'nodename',
+ pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
+ description => "The ID for the mds, when omitted the same as the nodename",
+ },
+ hotstandby => {
+ type => 'boolean',
+ optional => 1,
+ default => '0',
+ description => "Determines whether a ceph-mds daemon should poll and replay the log of an active MDS. ".
+ "Faster switch on MDS failure, but needs more idle resources.",
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_installed('ceph_mds');
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $authuser = $rpcenv->get_user();
+
+ my $nodename = $param->{node};
+ $nodename = INotify::nodename() if $nodename eq 'localhost';
+
+ my $mds_id = $param->{name} // $nodename;
+
+ my $worker = sub {
+ my $timeout = PVE::CephTools::get_config('long_rados_timeout');
+ my $rados = PVE::RADOS->new(timeout => $timeout);
+
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ my $section = "mds.$mds_id";
+
+ if (defined($cfg->{$section})) {
+ die "MDS '$mds_id' already referenced in ceph config, abort!\n"
+ }
+
+ if (!defined($cfg->{mds}->{keyring})) {
+ # $id isn't a perl variable but a ceph metavariable
+ my $keyring = '/var/lib/ceph/mds/ceph-$id/keyring';
+
+ $cfg->{mds}->{keyring} = $keyring;
+ }
+
+ $cfg->{$section}->{host} = $nodename;
+ $cfg->{$section}->{"mds standby for name"} = 'pve';
+
+ if ($param->{hotstandby}) {
+ $cfg->{$section}->{"mds standby replay"} = 'true';
+ }
+
+ PVE::CephTools::write_ceph_config($cfg);
+
+ eval { PVE::CephTools::create_mds($mds_id, $rados) };
+ if (my $err = $@) {
+ # we abort early if the section is defined, so we know that we
+ # wrote it at this point. Do not auto remove the service, could
+ # do real harm for previously manual setup MDS
+ warn "Encountered error, remove '$section' from ceph.conf\n";
+ $cfg = PVE::CephTools::parse_ceph_config();
+ delete $cfg->{$section};
+ PVE::CephTools::write_ceph_config($cfg);
+
+ die "$err\n";
+ }
+ };
+
+ return $rpcenv->fork_worker('cephcreatemds', "mds.$mds_id", $authuser, $worker);
+ }
+});
+
+__PACKAGE__->register_method ({
+ name => 'destroymds',
+ path => '{name}',
+ method => 'DELETE',
+ description => "Destroy Ceph Metadata Server",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ name => {
+ description => 'The name (ID) of the mds',
+ type => 'string',
+ pattern => '[a-zA-Z0-9]([a-zA-Z0-9\-]*[a-zA-Z0-9])?',
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+
+ my $authuser = $rpcenv->get_user();
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $mds_id = $param->{name};
+
+ my $worker = sub {
+ my $timeout = PVE::CephTools::get_config('long_rados_timeout');
+ my $rados = PVE::RADOS->new(timeout => $timeout);
+
+ my $cfg = PVE::CephTools::parse_ceph_config();
+
+ if (defined($cfg->{"mds.$mds_id"})) {
+ delete $cfg->{"mds.$mds_id"};
+ PVE::CephTools::write_ceph_config($cfg);
+ }
+
+ PVE::CephTools::destroy_mds($mds_id, $rados);
+ };
+
+ return $rpcenv->fork_worker('cephdestroymds', "mds.$mds_id", $authuser, $worker);
+ }
+});
+
+1;
diff --git a/PVE/API2/Ceph/Makefile b/PVE/API2/Ceph/Makefile
new file mode 100644
index 00000000..be4d740c
--- /dev/null
+++ b/PVE/API2/Ceph/Makefile
@@ -0,0 +1,15 @@
+include ../../../defines.mk
+
+PERLSOURCE= \
+ MDS.pm
+
+all:
+
+.PHONY: clean
+clean:
+ rm -rf *~
+
+.PHONY: install
+install: ${PERLSOURCE}
+ install -d ${PERLLIBDIR}/PVE/API2/Ceph
+ install -m 0644 ${PERLSOURCE} ${PERLLIBDIR}/PVE/API2/Ceph
diff --git a/PVE/API2/Makefile b/PVE/API2/Makefile
index a62bf909..c5868d7f 100644
--- a/PVE/API2/Makefile
+++ b/PVE/API2/Makefile
@@ -1,6 +1,6 @@
include ../../defines.mk

-SUBDIRS=Hardware
+SUBDIRS=Hardware Ceph

PERLSOURCE = \
Replication.pm \
diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index a5a04949..90878d9e 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -19,6 +19,7 @@ use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::CephTools;
use PVE::API2::Ceph;
+use PVE::API2::Ceph::MDS;

use PVE::CLIHandler;

@@ -175,6 +176,8 @@ our $cmddef = {
destroymon => [ 'PVE::API2::Ceph', 'destroymon', ['monid'], { node => $nodename }, $upid_exit],
createmgr => [ 'PVE::API2::Ceph', 'createmgr', [], { node => $nodename }, $upid_exit],
destroymgr => [ 'PVE::API2::Ceph', 'destroymgr', ['id'], { node => $nodename }, $upid_exit],
+ createmds => [ 'PVE::API2::Ceph::MDS', 'createmds', [], { node => $nodename }, $upid_exit],
+ destroymds => [ 'PVE::API2::Ceph::MDS', 'destroymds', ['id'], { node => $nodename }, $upid_exit],
start => [ 'PVE::API2::Ceph', 'start', ['service'], { node => $nodename }, $upid_exit],
stop => [ 'PVE::API2::Ceph', 'stop', ['service'], { node => $nodename }, $upid_exit],
install => [ __PACKAGE__, 'install', [] ],
diff --git a/PVE/CephTools.pm b/PVE/CephTools.pm
index 8a9afa84..da31ccae 100644
--- a/PVE/CephTools.pm
+++ b/PVE/CephTools.pm
@@ -18,12 +18,14 @@ my $pve_mon_key_path = "/etc/pve/priv/$ccname.mon.keyring";
my $pve_ckeyring_path = "/etc/pve/priv/$ccname.client.admin.keyring";
my $ceph_bootstrap_osd_keyring = "/var/lib/ceph/bootstrap-osd/$ccname.keyring";
my $ceph_bootstrap_mds_keyring = "/var/lib/ceph/bootstrap-mds/$ccname.keyring";
+my $ceph_mds_data_dir = '/var/lib/ceph/mds';

my $ceph_service = {
ceph_bin => "/usr/bin/ceph",
ceph_mon => "/usr/bin/ceph-mon",
ceph_mgr => "/usr/bin/ceph-mgr",
- ceph_osd => "/usr/bin/ceph-osd"
+ ceph_osd => "/usr/bin/ceph-osd",
+ ceph_mds => "/usr/bin/ceph-mds",
};

my $config_hash = {
@@ -33,6 +35,7 @@ my $config_hash = {
pve_ckeyring_path => $pve_ckeyring_path,
ceph_bootstrap_osd_keyring => $ceph_bootstrap_osd_keyring,
ceph_bootstrap_mds_keyring => $ceph_bootstrap_mds_keyring,
+ ceph_mds_data_dir => $ceph_mds_data_dir,
long_rados_timeout => 60,
};

@@ -297,4 +300,139 @@ sub systemd_managed {
}
}

+sub list_local_mds_ids {
+ my $mds_list = [];
+
+ PVE::Tools::dir_glob_foreach($ceph_mds_data_dir, qr/$ccname-(\S+)/, sub {
+ my (undef, $mds_id) = @_;
+ push @$mds_list, $mds_id;
+ });
+
+ return $mds_list;
+}
+
+sub get_cluster_mds_state {
+ my ($rados) = @_;
+
+ my $mds_state = {};
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $add_state = sub {
+ my ($mds) = @_;
+
+ my $state = {};
+ $state->{addr} = $mds->{addr};
+ $state->{rank} = $mds->{rank};
+ $state->{standby_replay} = $mds->{standby_replay} ? 1 : 0;
+ $state->{state} = $mds->{state};
+
+ $mds_state->{$mds->{name}} = $state;
+ };
+
+ my $mds_dump = $rados->mon_command({ prefix => 'mds stat' });
+ my $fsmap = $mds_dump->{fsmap};
+
+
+ foreach my $mds (@{$fsmap->{standbys}}) {
+ $add_state->($mds);
+ }
+
+ my $fs_info = $fsmap->{filesystems}->[0];
+ my $active_mds = $fs_info->{mdsmap}->{info};
+
+ # normally there's only one active MDS, but we can have multiple active for
+ # different ranks (e.g., different ceph path hierarchies). So just add all.
+ foreach my $mds (values %$active_mds) {
+ $add_state->($mds);
+ }
+
+ return $mds_state;
+}
+
+sub create_mds {
+ my ($id, $rados) = @_;
+
+ # `ceph fs status` fails with numeric only ID.
+ die "ID: $id, numeric only IDs are not supported\n"
+ if $id =~ /^\d+$/;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
+ my $service_keyring = "$service_dir/keyring";
+ my $service_name = "mds.$id";
+
+ die "ceph MDS directory '$service_dir' already exists\n"
+ if -d $service_dir;
+
+ print "creating MDS directory '$service_dir'\n";
+ eval { File::Path::mkpath($service_dir) };
+ my $err = $@;
+ die "creation MDS directory '$service_dir' failed\n" if $err;
+
+ # http://docs.ceph.com/docs/luminous/install/manual-deployment/#adding-mds
+ my $priv = [
+ mon => 'allow profile mds',
+ osd => 'allow rwx',
+ mds => 'allow *',
+ ];
+
+ print "creating keys for '$service_name'\n";
+ my $output = $rados->mon_command({
+ prefix => 'auth get-or-create',
+ entity => $service_name,
+ caps => $priv,
+ format => 'plain',
+ });
+
+ PVE::Tools::file_set_contents($service_keyring, $output);
+
+ print "setting ceph as owner for service directory\n";
+ run_command(["chown", 'ceph:ceph', '-R', $service_dir]);
+
+ print "enabling service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('enable', $service_name);
+ print "starting service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('start', $service_name);
+
+ return undef;
+};
+
+sub destroy_mds {
+ my ($id, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $service_name = "mds.$id";
+ my $service_dir = "/var/lib/ceph/mds/$ccname-$id";
+
+ print "disabling service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('disable', $service_name);
+ print "stopping service 'ceph-mds\@$id.service'\n";
+ ceph_service_cmd('stop', $service_name);
+
+ if (-d $service_dir) {
+ print "removing ceph-mds directory '$service_dir'\n";
+ File::Path::remove_tree($service_dir);
+ } else {
+ warn "cannot cleanup MDS $id directory, '$service_dir' not found\n"
+ }
+
+ print "removing ceph auth for '$service_name'\n";
+ $rados->mon_command({
+ prefix => 'auth del',
+ entity => $service_name,
+ format => 'plain'
+ });
+
+ return undef;
+};
+
1;
--
2.19.1
Dominik Csapak
2018-11-23 10:02:00 UTC
1 minor thing inline, rest looks good
Post by Thomas Lamprecht
Allow to create, list and destroy a Ceph Metadata Server (MDS) over
the API and the CLI `pveceph` tool.
[...]
+sub list_local_mds_ids {
+ my $mds_list = [];
+
+ PVE::Tools::dir_glob_foreach($ceph_mds_data_dir, qr/$ccname-(\S+)/, sub {
+ my (undef, $mds_id) = @_;
+ push @$mds_list, $mds_id;
+ });
+
+ return $mds_list;
+}
this is not used afaics ? could be removed in a fixup ofc
Post by Thomas Lamprecht
[...]
Thomas Lamprecht
2018-11-22 19:34:20 UTC
Allow to create a new CephFS instance and allow to list them.

As deletion requires coordination between the active MDS and all
standby MDS next in line, this needs a bit more work. One could mark
the MDS cluster down and stop the active one; that should work, but
as destroying is quite a sensitive operation that is not often needed
in production, I deemed it better to only document this and leave the
API endpoints for the future.

For index/list I slightly transform the result of a RADOS `fs ls`
monitor command; this allows relatively easy display of a CephFS
and its backing metadata and data pools in a GUI.
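
For example, a raw `fs ls` entry like

    { 'name' => 'cephfs', 'metadata_pool' => 'cephfs_metadata',
      'metadata_pool_id' => 2, 'data_pools' => [ 'cephfs_data' ],
      'data_pool_ids' => [ 1 ] }

gets flattened for the index to (only the first data pool is kept):

    { name => 'cephfs', metadata_pool => 'cephfs_metadata',
      data_pool => 'cephfs_data' }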

While for now it's not enabled by default and marked as experimental,
this API is designed to host multiple CephFS instances - we may not
need this at all, but I did not want to limit us early. Anybody who
wants to experiment can use it after setting the respective ceph.conf
options.

When encountering errors, try to roll back. As we verified at the
beginning that we did not reuse existing pools, destroy the ones we
created.

Signed-off-by: Thomas Lamprecht <***@proxmox.com>
Co-authored-by: Alwin Antreich <***@proxmox.com>
---

changes v1 -> v2:
* allow to add the newly created CephFS directly to our storage.cfg
* better checks for create, e.g., whether any MDS is running (needed immediately for
add_storage, and sooner or later otherwise, so always check)

PVE/API2/Ceph.pm | 7 ++
PVE/API2/Ceph/FS.pm | 209 +++++++++++++++++++++++++++++++++++++++++
PVE/API2/Ceph/Makefile | 1 +
PVE/CLI/pveceph.pm | 2 +
PVE/CephTools.pm | 12 +++
5 files changed, 231 insertions(+)
create mode 100644 PVE/API2/Ceph/FS.pm

diff --git a/PVE/API2/Ceph.pm b/PVE/API2/Ceph.pm
index d3e8d665..0fc95ab0 100644
--- a/PVE/API2/Ceph.pm
+++ b/PVE/API2/Ceph.pm
@@ -548,6 +548,7 @@ use PVE::RPCEnvironment;
use PVE::Storage;
use PVE::Tools qw(run_command file_get_contents file_set_contents);

+use PVE::API2::Ceph::FS;
use PVE::API2::Ceph::MDS;
use PVE::API2::Storage::Config;

@@ -565,6 +566,11 @@ __PACKAGE__->register_method ({
path => 'mds',
});

+__PACKAGE__->register_method ({
+ subclass => "PVE::API2::Ceph::FS",
+ path => 'fs',
+});
+
__PACKAGE__->register_method ({
name => 'index',
path => '',
@@ -596,6 +602,7 @@ __PACKAGE__->register_method ({
{ name => 'mon' },
{ name => 'osd' },
{ name => 'pools' },
+ { name => 'fs' },
{ name => 'mds' },
{ name => 'stop' },
{ name => 'start' },
diff --git a/PVE/API2/Ceph/FS.pm b/PVE/API2/Ceph/FS.pm
new file mode 100644
index 00000000..2869403b
--- /dev/null
+++ b/PVE/API2/Ceph/FS.pm
@@ -0,0 +1,209 @@
+package PVE::API2::Ceph::FS;
+
+use strict;
+use warnings;
+
+use PVE::CephTools;
+use PVE::JSONSchema qw(get_standard_option);
+use PVE::RADOS;
+use PVE::RESTHandler;
+use PVE::RPCEnvironment;
+
+use PVE::API2::Storage::Config;
+
+use base qw(PVE::RESTHandler);
+
+__PACKAGE__->register_method ({
+ name => 'index',
+ path => '',
+ method => 'GET',
+ description => "Directory index.",
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Audit', 'Datastore.Audit' ], any => 1],
+ },
+ protected => 1,
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ },
+ },
+ returns => {
+ type => 'array',
+ items => {
+ type => "object",
+ properties => {
+ name => {
+ description => "The ceph filesystem name.",
+ type => 'string',
+ },
+ metadata_pool => {
+ description => "The name of the metadata pool.",
+ type => 'string',
+ },
+ data_pool => {
+ description => "The name of the data pool.",
+ type => 'string',
+ },
+ },
+ },
+ links => [ { rel => 'child', href => "{name}" } ],
+ },
+ code => sub {
+ my ($param) = @_;
+
+ my $rados = PVE::RADOS->new();
+
+ my $cephfs_list = $rados->mon_command({ prefix => "fs ls" });
+ # we get something like:
+ #{
+ # 'metadata_pool_id' => 2,
+ # 'data_pool_ids' => [ 1 ],
+ # 'metadata_pool' => 'cephfs_metadata',
+ # 'data_pools' => [ 'cephfs_data' ],
+ # 'name' => 'cephfs',
+ #}
+ # pass name for our index and
+
+ my $res = [];
+ map {
+ push @$res, {
+ name => $_->{name},
+ metadata_pool => $_->{metadata_pool},
+ data_pool => $_->{data_pools}->[0],
+ }
+ } @$cephfs_list;
+
+ return $res;
+ }
+});
+
+__PACKAGE__->register_method ({
+ name => 'createfs',
+ path => '{name}',
+ method => 'POST',
+ description => "Create a Ceph filesystem",
+ proxyto => 'node',
+ protected => 1,
+ permissions => {
+ check => ['perm', '/', [ 'Sys.Modify' ]],
+ },
+ parameters => {
+ additionalProperties => 0,
+ properties => {
+ node => get_standard_option('pve-node'),
+ name => {
+ description => "The ceph filesystem name.",
+ type => 'string',
+ default => 'cephfs',
+ optional => 1,
+ },
+ pg_num => {
+ description => "Number of placement groups for the backing data pool. The metadata pool will use a quarter of this.",
+ type => 'integer',
+ default => 64,
+ optional => 1,
+ minimum => 8,
+ maximum => 32768,
+ },
+ add_storage => {
+ description => "Configure the created CephFS as storage for this cluster.",
+ type => 'boolean',
+ optional => 1,
+ default => 0,
+ },
+ },
+ },
+ returns => { type => 'string' },
+ code => sub {
+ my ($param) = @_;
+
+ PVE::CephTools::check_ceph_inited();
+
+ my $pve_ckeyring_path = PVE::CephTools::get_config('pve_ckeyring_path');
+ die "Ceph is not fully configured - missing '$pve_ckeyring_path'\n"
+ if ! -f $pve_ckeyring_path;
+
+ my $fs_name = $param->{name} // 'cephfs';
+ my $pg_num = $param->{pg_num} // 64;
+
+ my $pool_data = "${fs_name}_data";
+ my $pool_metadata = "${fs_name}_metadata";
+
+ my $rados = PVE::RADOS->new();
+ my $ls_pools = PVE::CephTools::ls_pools();
+ my $existing_pools = { map { $_->{poolname} => 1 } @$ls_pools };
+
+ die "ceph pools '$pool_data' and/or '$pool_metadata' already exist\n"
+ if $existing_pools->{$pool_data} || $existing_pools->{$pool_metadata};
+
+ my $running_mds = PVE::CephTools::get_cluster_mds_state($rados);
+ die "no running Metadata Server (MDS) found!\n" if !scalar(keys %$running_mds);
+
+ my $worker = sub {
+ $rados = PVE::RADOS->new();
+
+ my $pool_param = {
+ application => 'cephfs',
+ pg_num => $pg_num,
+ };
+
+ my @created_pools = ();
+ eval {
+ print "creating data pool '$pool_data'...\n";
+ PVE::CephTools::create_pool($pool_data, $pool_param, $rados);
+ push @created_pools, $pool_data;
+
+ print "creating metadata pool '$pool_metadata'...\n";
+ $pool_param->{pg_num} = $pg_num >= 32 ? $pg_num / 4 : 8;
+ PVE::CephTools::create_pool($pool_metadata, $pool_param, $rados);
+ push @created_pools, $pool_metadata;
+
+ print "configuring new CephFS '$fs_name'\n";
+ $rados->mon_command({
+ prefix => "fs new",
+ fs_name => $fs_name,
+ metadata => $pool_metadata,
+ data => $pool_data,
+ format => 'plain',
+ });
+ };
+ if (my $err = $@) {
+ $@ = undef;
+
+ if (@created_pools > 0) {
+ warn "Encountered error after creating at least one pool\n";
+ # our old connection is very likely broken now, recreate
+ $rados = PVE::RADOS->new();
+ foreach my $pool (@created_pools) {
+ warn "cleaning up left over pool '$pool'\n";
+ eval { PVE::CephTools::destroy_pool($pool, $rados) };
+ warn "$@\n" if $@;
+ }
+ }
+
+ die "$err\n";
+ }
+
+ if ($param->{add_storage}) {
+ my $err;
+ eval {
+ PVE::API2::Storage::Config->create({
+ type => 'cephfs',
+ storage => $fs_name,
+ content => 'backup,iso,vztmpl',
+ })
+ };
+ die "adding storage for CephFS '$fs_name' failed, check log ".
+ "and add manually!\n$@\n" if $@;
+ }
+ };
+
+ my $rpcenv = PVE::RPCEnvironment::get();
+ my $user = $rpcenv->get_user();
+
+ return $rpcenv->fork_worker('cephfscreate', $fs_name, $user, $worker);
+ }
+});
+
+1;
diff --git a/PVE/API2/Ceph/Makefile b/PVE/API2/Ceph/Makefile
index be4d740c..59fcda71 100644
--- a/PVE/API2/Ceph/Makefile
+++ b/PVE/API2/Ceph/Makefile
@@ -1,6 +1,7 @@
include ../../../defines.mk

PERLSOURCE= \
+ FS.pm \
MDS.pm

all:
diff --git a/PVE/CLI/pveceph.pm b/PVE/CLI/pveceph.pm
index 90878d9e..57097b13 100755
--- a/PVE/CLI/pveceph.pm
+++ b/PVE/CLI/pveceph.pm
@@ -19,6 +19,7 @@ use PVE::Tools qw(run_command);
use PVE::JSONSchema qw(get_standard_option);
use PVE::CephTools;
use PVE::API2::Ceph;
+use PVE::API2::Ceph::FS;
use PVE::API2::Ceph::MDS;

use PVE::CLIHandler;
@@ -170,6 +171,7 @@ our $cmddef = {
}],
createpool => [ 'PVE::API2::Ceph', 'createpool', ['name'], { node => $nodename }],
destroypool => [ 'PVE::API2::Ceph', 'destroypool', ['name'], { node => $nodename } ],
+ createfs => [ 'PVE::API2::Ceph::FS', 'createfs', [], { node => $nodename }],
createosd => [ 'PVE::API2::CephOSD', 'createosd', ['dev'], { node => $nodename }, $upid_exit],
destroyosd => [ 'PVE::API2::CephOSD', 'destroyosd', ['osdid'], { node => $nodename }, $upid_exit],
createmon => [ 'PVE::API2::Ceph', 'createmon', [], { node => $nodename }, $upid_exit],
diff --git a/PVE/CephTools.pm b/PVE/CephTools.pm
index da31ccae..80620277 100644
--- a/PVE/CephTools.pm
+++ b/PVE/CephTools.pm
@@ -240,6 +240,18 @@ sub create_pool {

}

+sub ls_pools {
+ my ($pool, $rados) = @_;
+
+ if (!defined($rados)) {
+ $rados = PVE::RADOS->new();
+ }
+
+ my $res = $rados->mon_command({ prefix => "osd lspools" });
+
+ return $res;
+}
+
sub destroy_pool {
my ($pool, $rados) = @_;
--
2.19.1
Dominik Csapak
2018-11-23 10:02:23 UTC
one small comment inline, looks good otherwise
Post by Thomas Lamprecht
Allow to create a new CephFS instance and allow to list them.
[...]
+ my $cephfs_list = $rados->mon_command({ prefix => "fs ls" });
+ # we get something like:
+ #{
+ # 'metadata_pool_id' => 2,
+ # 'data_pool_ids' => [ 1 ],
+ # 'metadata_pool' => 'cephfs_metadata',
+ # 'data_pools' => [ 'cephfs_data' ],
+ # 'name' => 'cephfs',
+ #}
+ # pass name for our index and
looks like there was more of the comment?
Post by Thomas Lamprecht
[...]
Thomas Lamprecht
2018-11-22 19:34:22 UTC
Create/destroy MDS and create CephFS (if none is configured yet).
Can be improved, e.g., with start/stop/restart for MDS, but this should be
enough for a start.

Basic code and UI layout are based on my dc/Cluster view. We may want to split
the two grids out into separate defines; it could be a bit much to have it all
inline.

Signed-off-by: Thomas Lamprecht <***@proxmox.com>
---

new in v2

www/manager6/Makefile | 1 +
www/manager6/ceph/FS.js | 385 ++++++++++++++++++++++++++++++++++++
www/manager6/node/Config.js | 8 +
3 files changed, 394 insertions(+)
create mode 100644 www/manager6/ceph/FS.js

diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index d005d714..e75f0de6 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -93,6 +93,7 @@ JSSRC= \
panel/IPSet.js \
panel/ConfigPanel.js \
grid/BackupView.js \
+ ceph/FS.js \
ceph/Pool.js \
ceph/OSD.js \
ceph/Monitor.js \
diff --git a/www/manager6/ceph/FS.js b/www/manager6/ceph/FS.js
new file mode 100644
index 00000000..f2743a4d
--- /dev/null
+++ b/www/manager6/ceph/FS.js
@@ -0,0 +1,385 @@
+/*jslint confusion: true */
+Ext.define('PVE.CephCreateFS', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pveCephCreateFS',
+
+ showTaskViewer: true,
+ //onlineHelp: 'pve_ceph_fs',
+
+ subject: 'Ceph FS',
+ isCreate: true,
+ method: 'POST',
+
+ setFSName: function(fsName) {
+ var me = this;
+
+ if (fsName === '') {
+ fsName = 'cephfs';
+ }
+
+ me.url = "/nodes/" + me.nodename + "/ceph/fs/" + fsName;
+ },
+
+ items: [
+ {
+ xtype: 'textfield',
+ fieldLabel: gettext('Name'),
+ name: 'name',
+ value: 'cephfs',
+ listeners: {
+ change: function(f, value) {
+ this.up('pveCephCreateFS').setFSName(value);
+ }
+ },
+ submitValue: false, // already encoded in apicall URL
+ emptyText: 'cephfs'
+ },
+ {
+ xtype: 'proxmoxintegerfield',
+ fieldLabel: 'pg_num',
+ name: 'pg_num',
+ value: 64,
+ emptyText: 64,
+ minValue: 8,
+ maxValue: 32768,
+ allowBlank: false
+ },
+ {
+ xtype: 'proxmoxcheckbox',
+ fieldLabel: gettext('Add Storage'),
+ value: true,
+ name: 'add_storage'
+ }
+ ],
+
+ initComponent : function() {
+ var me = this;
+
+ if (!me.nodename) {
+ throw "no node name specified";
+ }
+
+ Ext.apply(me, {
+ url: "/nodes/" + me.nodename + "/ceph/fs/cephfs",
+ defaults: {
+ nodename: me.nodename
+ }
+ });
+
+ me.callParent();
+ }
+});
+
+Ext.define('PVE.CephCreateMDS', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pveCephCreateMDS',
+
+ showProgress: true,
+ //onlineHelp: 'pve_ceph_mds',
+
+ subject: 'Ceph MDS',
+ isCreate: true,
+ method: 'POST',
+
+ setNode: function(nodename) {
+ var me = this;
+
+ me.nodename = nodename;
+ me.url = "/nodes/" + nodename + "/ceph/mds/" + nodename;
+ },
+
+ items: [
+ {
+ xtype: 'pveNodeSelector',
+ fieldLabel: gettext('Node'),
+ selectCurNode: true,
+ submitValue: false,
+ allowBlank: false,
+ listeners: {
+ change: function(f, value) {
+ this.up('pveCephCreateMDS').setNode(value);
+ }
+ }
+ }
+ ],
+
+ initComponent : function() {
+ var me = this;
+
+ if (!me.nodename) {
+ throw "no node name specified";
+ }
+
+ Ext.apply(me, {
+ url: "/nodes/" + me.nodename + "/ceph/mds/" + me.nodename
+ });
+
+ me.callParent();
+ }
+});
+
+Ext.define('PVE.NodeCephFSPanel', {
+ extend: 'Ext.panel.Panel',
+ xtype: 'pveNodeCephFSPanel',
+ mixins: ['Proxmox.Mixin.CBind'],
+
+ title: gettext('Cluster Administration'),
+ onlineHelp: 'chapter_pvecm',
+
+ border: false,
+ defaults: {
+ border: false,
+ cbind: {
+ nodename: '{nodename}'
+ }
+ },
+
+ viewModel: {
+ parent: null,
+ data: {
+ cephfsConfigured: false,
+ mdscount: 0
+ }
+ },
+
+ /*initComponent: function() {
+ var me = this;
+ Ext.apply(me, {
+ defaults: {
+ nodename: me.nodename,
+ border: false
+ }
+ });
+
+ me.callParent();
+ },*/
+
+ items: [
+ {
+ xtype: 'grid',
+ title: gettext('CephFS'),
+ controller: {
+ xclass: 'Ext.app.ViewController',
+
+ init: function(view) {
+ view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+ autoLoad: true,
+ xtype: 'update',
+ interval: 5 * 1000,
+ autoStart: true,
+ storeid: 'pve-ceph-fs',
+ model: 'pve-ceph-fs'
+ });
+ view.setStore(Ext.create('Proxmox.data.DiffStore', {
+ rstore: view.rstore,
+ sorters: {
+ property: 'name',
+ order: 'DESC'
+ }
+ }));
+ Proxmox.Utils.monStoreErrors(view, view.rstore);
+ view.rstore.on('load', this.onLoad, this);
+ view.on('destroy', view.rstore.stopUpdate);
+ },
+
+ onCreate: function() {
+ var view = this.getView();
+ view.rstore.stopUpdate();
+ var win = Ext.create('PVE.CephCreateFS', {
+ autoShow: true,
+ nodename: view.nodename,
+ listeners: {
+ destroy: function() {
+ view.rstore.startUpdate();
+ }
+ }
+ });
+ },
+
+ onLoad: function(store, records, success) {
+ var vm = this.getViewModel();
+ if (!(success && records && records.length > 0)) {
+ vm.set('cephfsConfigured', false);
+ return;
+ }
+ vm.set('cephfsConfigured', true);
+ }
+ },
+ tbar: [
+ {
+ text: gettext('Create CephFS'),
+ reference: 'createButton',
+ handler: 'onCreate',
+ bind: {
+ // only one CephFS per Ceph cluster makes sense for now
+ disabled: '{cephfsConfigured}'
+ }
+ }
+ ],
+ columns: [
+ {
+ header: gettext('Name'),
+ flex: 1,
+ dataIndex: 'name'
+ },
+ {
+ header: 'Data Pool',
+ flex: 1,
+ dataIndex: 'data_pool'
+ },
+ {
+ header: 'Metadata Pool',
+ flex: 1,
+ dataIndex: 'metadata_pool'
+ }
+ ],
+ cbind: {
+ nodename: '{nodename}'
+ }
+ },
+ {
+ xtype: 'grid',
+ title: gettext('Metadata Servers'),
+ viewModel: {
+ data: {
+ rowSelected: false
+ }
+ },
+ controller: {
+ xclass: 'Ext.app.ViewController',
+
+ init: function(view) {
+ view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+ autoLoad: true,
+ xtype: 'update',
+ interval: 3 * 1000,
+ autoStart: true,
+ storeid: 'pve-ceph-mds',
+ model: 'pve-ceph-mds'
+ });
+ view.setStore(Ext.create('Proxmox.data.DiffStore', {
+ rstore: view.rstore,
+ sorters: {
+ property: 'id',
+ order: 'DESC'
+ }
+ }));
+ Proxmox.Utils.monStoreErrors(view, view.rstore);
+ view.rstore.on('load', this.onLoad, this);
+ view.on('destroy', view.rstore.stopUpdate);
+
+ var vm = this.getViewModel();
+ view.mon(view.selModel, "selectionchange", function() {
+ var rec = view.selModel.getSelection()[0];
+
+ vm.set('rowSelected', !!rec);
+ });
+ },
+
+ onCreateMDS: function() {
+ var view = this.getView();
+ view.rstore.stopUpdate();
+ var win = Ext.create('PVE.CephCreateMDS', {
+ autoShow: true,
+ nodename: view.nodename,
+ listeners: {
+ destroy: function() {
+ view.rstore.startUpdate();
+ }
+ }
+ });
+ },
+
+ onDestroyMDS: function() {
+ var view = this.getView();
+ var rec = view.selModel.getSelection()[0];
+
+ if (!rec.data.host) {
+ Ext.Msg.alert(gettext('Error'), "entry has no host");
+ return;
+ }
+
+ Proxmox.Utils.API2Request({
+ url: "/nodes/" + rec.data.host + "/ceph/mds/" + rec.data.name,
+ method: 'DELETE',
+ success: function(response, options) {
+ var upid = response.result.data;
+ var win = Ext.create('Proxmox.window.TaskProgress', { upid: upid });
+ win.show();
+ },
+ failure: function(response, opts) {
+ Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+ }
+ });
+ },
+
+ onLoad: function(store, records, success) {
+ var vm = this.getViewModel();
+ if (!success || !records) {
+ vm.set('mdscount', 0);
+ return;
+ }
+ vm.set('mdscount', records.length);
+ }
+ },
+ tbar: [
+ {
+ text: gettext('Create MDS'),
+ reference: 'createButton',
+ handler: 'onCreateMDS'
+ },
+ {
+ text: gettext('Destroy MDS'),
+ bind: {
+ disabled: '{!rowSelected}'
+ },
+ handler: 'onDestroyMDS'
+ }
+ ],
+ columns: [
+ {
+ header: gettext('Name'),
+ flex: 1,
+ dataIndex: 'name'
+ },
+ {
+ header: gettext('Host'),
+ flex: 1,
+ dataIndex: 'host'
+ },
+ {
+ header: gettext('Address'),
+ flex: 1,
+ dataIndex: 'addr'
+ },
+ {
+ header: gettext('State'),
+ flex: 1,
+ dataIndex: 'state'
+ }
+ ],
+ cbind: {
+ nodename: '{nodename}'
+ }
+ }
+ ]
+}, function() {
+ Ext.define('pve-ceph-mds', {
+ extend: 'Ext.data.Model',
+ fields: [ 'name', 'host', 'addr', 'state' ],
+ proxy: {
+ type: 'proxmox',
+ url: "/api2/json/nodes/localhost/ceph/mds"
+ },
+ idProperty: 'name'
+ });
+ Ext.define('pve-ceph-fs', {
+ extend: 'Ext.data.Model',
+ fields: [ 'name', 'data_pool', 'metadata_pool' ],
+ proxy: {
+ type: 'proxmox',
+ url: "/api2/json/nodes/localhost/ceph/fs"
+ },
+ idProperty: 'name'
+ });
+});
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index 8b2b802a..f9a62670 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -340,6 +340,14 @@ Ext.define('PVE.node.Config', {
groups: ['ceph'],
itemId: 'ceph-osdtree'
},
+ {
+ xtype: 'pveNodeCephFSPanel',
+ title: 'CephFS',
+ iconCls: 'fa fa-folder',
+ groups: ['ceph'],
+ nodename: nodename,
+ itemId: 'ceph-cephfspanel'
+ },
{
xtype: 'pveNodeCephPoolList',
title: 'Pools',
--
2.19.1
Dominik Csapak
2018-11-23 10:01:47 UTC
Permalink
some comments inline
Post by Thomas Lamprecht
create/destroy MDS and create CephFS (if none is configured yet).
Can be improved, e.g., start/stop/restart for MDS; this should be enough for a
starter, though.
Basic code and ui layout are based on my dc/Cluster view. We may want to split
the two grids out into separate defines; it could be a bit much to have all
inline.
---
new in v2
www/manager6/Makefile | 1 +
www/manager6/ceph/FS.js | 385 ++++++++++++++++++++++++++++++++++++
www/manager6/node/Config.js | 8 +
3 files changed, 394 insertions(+)
create mode 100644 www/manager6/ceph/FS.js
diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index d005d714..e75f0de6 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -93,6 +93,7 @@ JSSRC= \
panel/IPSet.js \
panel/ConfigPanel.js \
grid/BackupView.js \
+ ceph/FS.js \
ceph/Pool.js \
ceph/OSD.js \
ceph/Monitor.js \
diff --git a/www/manager6/ceph/FS.js b/www/manager6/ceph/FS.js
new file mode 100644
index 00000000..f2743a4d
--- /dev/null
+++ b/www/manager6/ceph/FS.js
@@ -0,0 +1,385 @@
+/*jslint confusion: true */
+Ext.define('PVE.CephCreateFS', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pveCephCreateFS',
+
+ showTaskViewer: true,
+ //onlineHelp: 'pve_ceph_fs',
+
+ subject: 'Ceph FS',
+ isCreate: true,
+ method: 'POST',
+
+ setFSName: function(fsName) {
+ var me = this;
wrong indentation
Post by Thomas Lamprecht
+
+ if (fsName === '') {
+ fsName = 'cephfs';
+ }
+
+ me.url = "/nodes/" + me.nodename + "/ceph/fs/" + fsName;
wrong indentation
Post by Thomas Lamprecht
+ },
+
+ items: [
+ {
+ xtype: 'textfield',
+ fieldLabel: gettext('Name'),
+ name: 'name',
+ value: 'cephfs',
+ listeners: {
+ change: function(f, value) {
+ this.up('pveCephCreateFS').setFSName(value);
+ }
wrong indentation
Post by Thomas Lamprecht
+ },
+ submitValue: false, // already encoded in apicall URL
+ emptyText: 'cephfs'
+ },
+ {
+ xtype: 'proxmoxintegerfield',
+ fieldLabel: 'pg_num',
+ name: 'pg_num',
+ value: 64,
+ emptyText: 64,
+ minValue: 8,
+ maxValue: 32768,
+ allowBlank: false
+ },
+ {
+ xtype: 'proxmoxcheckbox',
+ fieldLabel: gettext('Add Storage'),
+ value: true,
+ name: 'add_storage'
+ }
+ ],
+
+    initComponent : function() {
+ var me = this;
wrong indentation
Post by Thomas Lamprecht
+
+ if (!me.nodename) {
+ throw "no node name specified";
+ }
+
+ Ext.apply(me, {
+ url: "/nodes/" + me.nodename + "/ceph/fs/cephfs",
you could do a me.setFSName(); instead, this way you only have one
location where you define the (default) path
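something like this, as a rough untested sketch (setFSName would then
also need to treat an undefined name like the empty string):

    initComponent: function() {
        var me = this;

        if (!me.nodename) {
            throw "no node name specified";
        }

        // reuse the setter, so the default URL is only defined once
        me.setFSName();

        Ext.apply(me, {
            defaults: {
                nodename: me.nodename
            }
        });

        me.callParent();
    }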
Post by Thomas Lamprecht
+ defaults: {
+ nodename: me.nodename
+ }
+ });
+
+ me.callParent();
wrong indentation
Post by Thomas Lamprecht
+ }
+});
+
+Ext.define('PVE.CephCreateMDS', {
+ extend: 'Proxmox.window.Edit',
+ alias: 'widget.pveCephCreateMDS',
+
+ showProgress: true,
+ //onlineHelp: 'pve_ceph_mds',
+
+ subject: 'Ceph MDS',
+ isCreate: true,
+ method: 'POST',
+
+ setNode: function(nodename) {
+ var me = this;
+
+ me.nodename = nodename;
+ me.url = "/nodes/" + nodename + "/ceph/mds/" + nodename;
+ },
wrong indentation
Post by Thomas Lamprecht
+
+ items: [
+ {
+ xtype: 'pveNodeSelector',
+ fieldLabel: gettext('Node'),
+ selectCurNode: true,
+ submitValue: false,
+ allowBlank: false,
+ listeners: {
+ change: function(f, value) {
+ this.up('pveCephCreateMDS').setNode(value);
+ }
+ }
+ }
+ ],
+
+ initComponent : function() {
+ var me = this;
+
+ if (!me.nodename) {
+ throw "no node name specified";
+ }
+
+ Ext.apply(me, {
+ url: "/nodes/" + me.nodename + "/ceph/mds/" + me.nodename
+ });
+
also here you could call me.setNode() for the same benefits
as above
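roughly (untested sketch):

    initComponent: function() {
        var me = this;

        if (!me.nodename) {
            throw "no node name specified";
        }

        // reuse the setter, so the MDS URL is only built in one place
        me.setNode(me.nodename);

        me.callParent();
    }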
Post by Thomas Lamprecht
+ me.callParent();
+ }
+});
we could try to refactor CephCreateMon to something like
CephCreateService, since they look almost exactly the same

but we can still do this when/if we add 'create manager'
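just to sketch the direction (hypothetical class name, not meant for
this series):

    // shared base window, parameterized on the service type
    Ext.define('PVE.CephCreateService', {
        extend: 'Proxmox.window.Edit',

        showProgress: true,
        isCreate: true,
        method: 'POST',

        serviceType: undefined, // e.g. 'mon' or 'mds', set by subclass

        setNode: function(nodename) {
            var me = this;
            me.nodename = nodename;
            me.url = "/nodes/" + nodename + "/ceph/" + me.serviceType +
                "/" + nodename;
        },

        items: [
            {
                xtype: 'pveNodeSelector',
                fieldLabel: gettext('Node'),
                selectCurNode: true,
                submitValue: false,
                allowBlank: false,
                listeners: {
                    change: function(f, value) {
                        f.up('window').setNode(value);
                    }
                }
            }
        ],

        initComponent: function() {
            var me = this;

            if (!me.nodename) {
                throw "no node name specified";
            }

            me.setNode(me.nodename);
            me.callParent();
        }
    });

    Ext.define('PVE.CephCreateMDS', {
        extend: 'PVE.CephCreateService',
        alias: 'widget.pveCephCreateMDS',
        subject: 'Ceph MDS',
        serviceType: 'mds'
    });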
Post by Thomas Lamprecht
+
+Ext.define('PVE.NodeCephFSPanel', {
+ extend: 'Ext.panel.Panel',
+ xtype: 'pveNodeCephFSPanel',
+ mixins: ['Proxmox.Mixin.CBind'],
a high-level comment on this panel

does the division of controllers/viewmodel really make sense?

we have a global viewmodel which gets passed to one
and inherited to the other panel, but we have 2 distinct viewcontrollers

in the global viewmodel we have 2 properties, where
each only gets accessed/set by one of the panels?
and mdscount only ever gets set?
or am i missing something here?

i think this makes the whole thing rather confusing
which panel can access which things in the viewmodel

i would propose either:

have each panel have its own controller/viewmodel

or have one for both together

additionally i think we should try to refactor the mds list
and mon list to a 'cephservicelist' as soon as
we add a managerlist, so that each panel having its own
controller and viewmodel would make more sense
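as a rough illustration of the first variant (each grid keeps its own
viewModel, untested):

    {
        xtype: 'grid',
        title: gettext('CephFS'),
        viewModel: {
            data: {
                cephfsConfigured: false
            }
        },
        controller: {
            xclass: 'Ext.app.ViewController',
            // init and onCreate stay as in the patch
            onLoad: function(store, records, success) {
                // only this grid reads/writes cephfsConfigured now
                this.getViewModel().set('cephfsConfigured',
                    !!(success && records && records.length > 0));
            }
        },
        tbar: [
            {
                text: gettext('Create CephFS'),
                handler: 'onCreate',
                bind: {
                    disabled: '{cephfsConfigured}'
                }
            }
        ]
        // columns etc. as in the patch
    }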
Post by Thomas Lamprecht
+
+ title: gettext('Cluster Administration'),
+ onlineHelp: 'chapter_pvecm',
+
+ border: false,
+ defaults: {
+ border: false,
+ cbind: {
+ nodename: '{nodename}'
+ }
+ },
+
+ viewModel: {
+ parent: null,
+ data: {
+ cephfsConfigured: false,
+ mdscount: 0
+ }
+ },
+
+ /*initComponent: function() {
+ var me = this;
+ Ext.apply(me, {
+ defaults: {
+ nodename: me.nodename,
+ border: false
+ }
+ });
+
+ me.callParent();
+ },*/
is this meant to be still there?
if yes, i would prefer a small comment why
Post by Thomas Lamprecht
+
+ items: [
+ {
+ xtype: 'grid',
+ title: gettext('CephFS'),
+ controller: {
+ xclass: 'Ext.app.ViewController',
+
+ init: function(view) {
+ view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+ autoLoad: true,
+ xtype: 'update',
+ interval: 5 * 1000,
+ autoStart: true,
+ storeid: 'pve-ceph-fs',
+ model: 'pve-ceph-fs'
+ });
+ view.setStore(Ext.create('Proxmox.data.DiffStore', {
+ rstore: view.rstore,
+ sorters: {
+ property: 'name',
+ order: 'DESC'
+ }
+ }));
+ Proxmox.Utils.monStoreErrors(view, view.rstore);
+ view.rstore.on('load', this.onLoad, this);
+ view.on('destroy', view.rstore.stopUpdate);
+ },
+
+ onCreate: function() {
+ var view = this.getView();
+ view.rstore.stopUpdate();
+ var win = Ext.create('PVE.CephCreateFS', {
+ autoShow: true,
+ nodename: view.nodename,
+ listeners: {
+ destroy: function() {
+ view.rstore.startUpdate();
+ }
+ }
+ });
+ },
+
+ onLoad: function(store, records, success) {
+ var vm = this.getViewModel();
+ if (!(success && records && records.length > 0)) {
+ vm.set('cephfsConfigured', false);
+ return;
+ }
+ vm.set('cephfsConfigured', true);
+ }
+ },
+ tbar: [
+ {
+ text: gettext('Create CephFS'),
+ reference: 'createButton',
+ handler: 'onCreate',
+ bind: {
+ // only one CephFS per Ceph cluster makes sense for now
+ disabled: '{cephfsConfigured}'
+ }
+ }
+ ],
+ columns: [
+ {
+ header: gettext('Name'),
+ flex: 1,
+ dataIndex: 'name'
+ },
+ {
+ header: 'Data Pool',
+ flex: 1,
+ dataIndex: 'data_pool'
+ },
+ {
+ header: 'Metadata Pool',
+ flex: 1,
+ dataIndex: 'metadata_pool'
+ }
+ ],
+ cbind: {
+ nodename: '{nodename}'
+ }
+ },
+ {
+ xtype: 'grid',
+ title: gettext('Metadata Servers'),
+ viewModel: {
+ data: {
+ rowSelected: false
+ }
+ },
+ controller: {
+ xclass: 'Ext.app.ViewController',
+
+ init: function(view) {
+ view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+ autoLoad: true,
+ xtype: 'update',
+ interval: 3 * 1000,
+ autoStart: true,
+ storeid: 'pve-ceph-mds',
+ model: 'pve-ceph-mds'
+ });
+ view.setStore(Ext.create('Proxmox.data.DiffStore', {
+ rstore: view.rstore,
+ sorters: {
+ property: 'id',
+ order: 'DESC'
+ }
+ }));
+ Proxmox.Utils.monStoreErrors(view, view.rstore);
+ view.rstore.on('load', this.onLoad, this);
+ view.on('destroy', view.rstore.stopUpdate);
+
+ var vm = this.getViewModel();
+ view.mon(view.selModel, "selectionchange", function() {
+ var rec = view.selModel.getSelection()[0];
+
+ vm.set('rowSelected', !!rec);
+ });
+ },
+
+ onCreateMDS: function() {
+ var view = this.getView();
+ view.rstore.stopUpdate();
+ var win = Ext.create('PVE.CephCreateMDS', {
+ autoShow: true,
+ nodename: view.nodename,
+ listeners: {
+ destroy: function() {
+ view.rstore.startUpdate();
+ }
+ }
+ });
+ },
+
+ onDestroyMDS: function() {
+ var view = this.getView();
+ var rec = view.selModel.getSelection()[0];
+
+ if (!rec.data.host) {
+ Ext.Msg.alert(gettext('Error'), "entry has no host");
+ return;
+ }
+
+ Proxmox.Utils.API2Request({
+ url: "/nodes/" + rec.data.host + "/ceph/mds/" + rec.data.name,
+ method: 'DELETE',
+ success: function(response, options) {
+ var upid = response.result.data;
+ var win = Ext.create('Proxmox.window.TaskProgress', { upid: upid });
+ win.show();
+ },
+ failure: function(response, opts) {
+ Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+ }
+ });
+ },
+
+ onLoad: function(store, records, success) {
+ var vm = this.getViewModel();
+ if (!success || !records) {
+ vm.set('mdscount', 0);
+ return;
+ }
+ vm.set('mdscount', records.length);
+ }
+ },
+ tbar: [
+ {
+ text: gettext('Create MDS'),
+ reference: 'createButton',
+ handler: 'onCreateMDS'
+ },
+ {
+ text: gettext('Destroy MDS'),
+ bind: {
+ disabled: '{!rowSelected}'
+ },
+ handler: 'onDestroyMDS'
+ }
could you not replace that with a proxmoxstdremovebtn ?
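something along these lines, assuming the button accepts the grid's
selection model and a getUrl override like other grids do (untested):

    {
        xtype: 'proxmoxStdRemoveButton',
        text: gettext('Destroy MDS'),
        // the grid's selModel still needs to be handed in, e.g. in
        // initComponent or via a reference
        getUrl: function(rec) {
            return "/nodes/" + rec.data.host + "/ceph/mds/" + rec.data.name;
        },
        callback: function() {
            // e.g. restart the rstore updates once the task viewer closes
        }
    }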
Post by Thomas Lamprecht
+ ],
+ columns: [
+ {
+ header: gettext('Name'),
+ flex: 1,
+ dataIndex: 'name'
+ },
+ {
+ header: gettext('Host'),
+ flex: 1,
+ dataIndex: 'host'
+ },
+ {
+ header: gettext('Address'),
+ flex: 1,
+ dataIndex: 'addr'
+ },
+ {
+ header: gettext('State'),
+ flex: 1,
+ dataIndex: 'state'
+ }
+ ],
+ cbind: {
+ nodename: '{nodename}'
+ }
+ }
+ ]
+}, function() {
+ Ext.define('pve-ceph-mds', {
+ extend: 'Ext.data.Model',
+ fields: [ 'name', 'host', 'addr', 'state' ],
+ proxy: {
+ type: 'proxmox',
+ url: "/api2/json/nodes/localhost/ceph/mds"
+ },
+ idProperty: 'name'
+ });
+ Ext.define('pve-ceph-fs', {
+ extend: 'Ext.data.Model',
+ fields: [ 'name', 'data_pool', 'metadata_pool' ],
+ proxy: {
+ type: 'proxmox',
+ url: "/api2/json/nodes/localhost/ceph/fs"
+ },
+ idProperty: 'name'
+ });
+});
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index 8b2b802a..f9a62670 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -340,6 +340,14 @@ Ext.define('PVE.node.Config', {
groups: ['ceph'],
itemId: 'ceph-osdtree'
},
+ {
+ xtype: 'pveNodeCephFSPanel',
+ title: 'CephFS',
+ iconCls: 'fa fa-folder',
+ groups: ['ceph'],
+ nodename: nodename,
+ itemId: 'ceph-cephfspanel'
+ },
{
xtype: 'pveNodeCephPoolList',
title: 'Pools',
Thomas Lamprecht
2018-11-23 10:25:53 UTC
Permalink
Post by Dominik Csapak
some comments inline
thanks for your review!
Post by Dominik Csapak
Post by Thomas Lamprecht
create/destroy MDS and create CephFS (if none is configured yet).
Can be improved, e.g., start/stop/restart for MDS; this should be enough for a
starter, though.
Basic code and ui layout are based on my dc/Cluster view. We may want to split
the two grids out into separate defines; it could be a bit much to have all
inline.
---
new in v2
  www/manager6/Makefile       |   1 +
  www/manager6/ceph/FS.js     | 385 ++++++++++++++++++++++++++++++++++++
  www/manager6/node/Config.js |   8 +
  3 files changed, 394 insertions(+)
  create mode 100644 www/manager6/ceph/FS.js
diff --git a/www/manager6/Makefile b/www/manager6/Makefile
index d005d714..e75f0de6 100644
--- a/www/manager6/Makefile
+++ b/www/manager6/Makefile
@@ -93,6 +93,7 @@ JSSRC=                                      \
      panel/IPSet.js                    \
      panel/ConfigPanel.js                \
      grid/BackupView.js                \
+    ceph/FS.js                    \
      ceph/Pool.js                    \
      ceph/OSD.js                    \
      ceph/Monitor.js                    \
diff --git a/www/manager6/ceph/FS.js b/www/manager6/ceph/FS.js
new file mode 100644
index 00000000..f2743a4d
--- /dev/null
+++ b/www/manager6/ceph/FS.js
@@ -0,0 +1,385 @@
+/*jslint confusion: true */
+Ext.define('PVE.CephCreateFS', {
+    extend: 'Proxmox.window.Edit',
+    alias: 'widget.pveCephCreateFS',
+
+    showTaskViewer: true,
+    //onlineHelp: 'pve_ceph_fs',
+
+    subject: 'Ceph FS',
+    isCreate: true,
+    method: 'POST',
+
+    setFSName: function(fsName) {
+        var me = this;
wrong indentation
ah, sorry about all of them, not sure why I did not see this...
Post by Dominik Csapak
...
Post by Thomas Lamprecht
+
+    if (!me.nodename) {
+        throw "no node name specified";
+    }
+
+        Ext.apply(me, {
+        url: "/nodes/" + me.nodename + "/ceph/fs/cephfs",
you could do a me.setFSName(); instead, this way you only have one
location where you define the (default) path
will do
Post by Dominik Csapak
...
Post by Thomas Lamprecht
+    initComponent : function() {
+        var me = this;
+
+    if (!me.nodename) {
+        throw "no node name specified";
+    }
+
+        Ext.apply(me, {
+        url: "/nodes/" + me.nodename + "/ceph/mds/" + me.nodename
+        });
+
also here you could call me.setNode() for the same benefits
as above
will do
Post by Dominik Csapak
Post by Thomas Lamprecht
+        me.callParent();
+    }
+});
we could try to refactor CephCreateMon to something like
CephCreateService, since they look almost exactly the same
but we can still do this when/if we add 'create manager'
sounds OK, but later is preferred here.
Post by Dominik Csapak
Post by Thomas Lamprecht
+
+Ext.define('PVE.NodeCephFSPanel', {
+    extend: 'Ext.panel.Panel',
+    xtype: 'pveNodeCephFSPanel',
+    mixins: ['Proxmox.Mixin.CBind'],
high level comment to this panel
does the division of controllers/viewmodel really make sense?
it made sense initially, but here I do not use it anymore; will split up
Post by Dominik Csapak
we have a global viewmodel which gets passed to one
and inherited to the other panel, but we have 2 distinct viewcontrollers
in the global viewmodel we have 2 properties, where
each only gets accessed/set by one of the panels?
and mdscount only ever gets set?
or am i missing something here?
i think this makes the whole thing rather confusing
which panel can access which things in the viewmodel
have each panel have its own controller/viewmodel
for now I'll do above
Post by Dominik Csapak
or have one for both together
I do not like that too much; it intertwines both components unnecessarily
and does not really help in understanding what happens.
Post by Dominik Csapak
additionally i think we should try to refactor the mds list
and mon list to a 'cephservicelist' as soon as
we add a managerlist, so that each panel having its own
controller and viewmodel would make more sense
Post by Thomas Lamprecht
+
+    title: gettext('Cluster Administration'),
+    onlineHelp: 'chapter_pvecm',
+
+    border: false,
+    defaults: {
+    border: false,
+    cbind: {
+        nodename: '{nodename}'
+    }
+    },
+
+    viewModel: {
+    parent: null,
+    data: {
+        cephfsConfigured: false,
+        mdscount: 0
+    }
+    },
+
+    /*initComponent: function() {
+        var me = this;
+        Ext.apply(me, {
+        defaults: {
+        nodename: me.nodename,
+        border: false
+        }
+    });
+
+        me.callParent();
+    },*/
is this meant to be still there?
if yes, i would prefer a small comment why
left over, will remove
Post by Dominik Csapak
Post by Thomas Lamprecht
+
+    items: [
+    {
+        xtype: 'grid',
+        title: gettext('CephFS'),
+        controller: {
+        xclass: 'Ext.app.ViewController',
+
+        init: function(view) {
+            view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+            autoLoad: true,
+            xtype: 'update',
+            interval: 5 * 1000,
+            autoStart: true,
+            storeid: 'pve-ceph-fs',
+            model: 'pve-ceph-fs'
+            });
+            view.setStore(Ext.create('Proxmox.data.DiffStore', {
+            rstore: view.rstore,
+            sorters: {
+                property: 'name',
+                order: 'DESC'
+            }
+            }));
+            Proxmox.Utils.monStoreErrors(view, view.rstore);
+            view.rstore.on('load', this.onLoad, this);
+            view.on('destroy', view.rstore.stopUpdate);
+        },
+
+        onCreate: function() {
+            var view = this.getView();
+            view.rstore.stopUpdate();
+            var win = Ext.create('PVE.CephCreateFS', {
+            autoShow: true,
+            nodename: view.nodename,
+            listeners: {
+                destroy: function() {
+                view.rstore.startUpdate();
+                }
+            }
+            });
+        },
+
+        onLoad: function(store, records, success) {
+            var vm = this.getViewModel();
+            if (!(success && records && records.length > 0)) {
+            vm.set('cephfsConfigured', false);
+            return;
+            }
+            vm.set('cephfsConfigured', true);
+        }
+        },
+        tbar: [
+        {
+            text: gettext('Create CephFS'),
+            reference: 'createButton',
+            handler: 'onCreate',
+            bind: {
+            // only one CephFS per Ceph cluster makes sense for now
+            disabled: '{cephfsConfigured}'
+            }
+        }
+        ],
+        columns: [
+        {
+            header: gettext('Name'),
+            flex: 1,
+            dataIndex: 'name'
+        },
+        {
+            header: 'Data Pool',
+            flex: 1,
+            dataIndex: 'data_pool'
+        },
+        {
+            header: 'Metadata Pool',
+            flex: 1,
+            dataIndex: 'metadata_pool'
+        }
+        ],
+        cbind: {
+        nodename: '{nodename}'
+        }
+    },
+    {
+        xtype: 'grid',
+        title: gettext('Metadata Servers'),
+        viewModel: {
+        data: {
+            rowSelected: false
+        }
+        },
+        controller: {
+        xclass: 'Ext.app.ViewController',
+
+        init: function(view) {
+            view.rstore = Ext.create('Proxmox.data.UpdateStore', {
+            autoLoad: true,
+            xtype: 'update',
+            interval: 3 * 1000,
+            autoStart: true,
+            storeid: 'pve-ceph-mds',
+            model: 'pve-ceph-mds'
+            });
+            view.setStore(Ext.create('Proxmox.data.DiffStore', {
+            rstore: view.rstore,
+            sorters: {
+                property: 'id',
+                order: 'DESC'
+            }
+            }));
+            Proxmox.Utils.monStoreErrors(view, view.rstore);
+            view.rstore.on('load', this.onLoad, this);
+            view.on('destroy', view.rstore.stopUpdate);
+
+            var vm = this.getViewModel();
+            view.mon(view.selModel, "selectionchange", function() {
+            var rec = view.selModel.getSelection()[0];
+
+            vm.set('rowSelected', !!rec);
+            });
+        },
+
+        onCreateMDS: function() {
+            var view = this.getView();
+            view.rstore.stopUpdate();
+            var win = Ext.create('PVE.CephCreateMDS', {
+            autoShow: true,
+            nodename: view.nodename,
+            listeners: {
+                destroy: function() {
+                view.rstore.startUpdate();
+                }
+            }
+            });
+        },
+
+        onDestroyMDS: function() {
+            var view = this.getView();
+            var rec = view.selModel.getSelection()[0];
+
+            if (!rec.data.host) {
+            Ext.Msg.alert(gettext('Error'), "entry has no host");
+            return;
+            }
+
+            Proxmox.Utils.API2Request({
+            url: "/nodes/" + rec.data.host + "/ceph/mds/" + rec.data.name,
+            method: 'DELETE',
+            success: function(response, options) {
+                var upid = response.result.data;
+                var win = Ext.create('Proxmox.window.TaskProgress', { upid: upid });
+                win.show();
+            },
+            failure: function(response, opts) {
+                Ext.Msg.alert(gettext('Error'), response.htmlStatus);
+            }
+            });
+        },
+
+        onLoad: function(store, records, success) {
+            var vm = this.getViewModel();
+            if (!success || !records) {
+            vm.set('mdscount', 0);
+            return;
+            }
+            vm.set('mdscount', records.length);
+        }
+        },
+        tbar: [
+        {
+            text: gettext('Create MDS'),
+            reference: 'createButton',
+            handler: 'onCreateMDS'
+        },
+        {
+            text: gettext('Destroy MDS'),
+            bind: {
+            disabled: '{!rowSelected}'
+            },
+            handler: 'onDestroyMDS'
+        }
could you not replace that with a proxmoxstdremovebtn ?
yes, will do
Post by Dominik Csapak
Post by Thomas Lamprecht
+        ],
+        columns: [
+        {
+            header: gettext('Name'),
+            flex: 1,
+            dataIndex: 'name'
+        },
+        {
+            header: gettext('Host'),
+            flex: 1,
+            dataIndex: 'host'
+        },
+        {
+            header: gettext('Address'),
+            flex: 1,
+            dataIndex: 'addr'
+        },
+        {
+            header: gettext('State'),
+            flex: 1,
+            dataIndex: 'state'
+        }
+        ],
+        cbind: {
+        nodename: '{nodename}'
+        }
+    }
+    ]
+}, function() {
+    Ext.define('pve-ceph-mds', {
+    extend: 'Ext.data.Model',
+    fields: [ 'name', 'host', 'addr', 'state' ],
+    proxy: {
+        type: 'proxmox',
+        url: "/api2/json/nodes/localhost/ceph/mds"
+    },
+    idProperty: 'name'
+    });
+    Ext.define('pve-ceph-fs', {
+    extend: 'Ext.data.Model',
+    fields: [ 'name', 'data_pool', 'metadata_pool' ],
+    proxy: {
+        type: 'proxmox',
+        url: "/api2/json/nodes/localhost/ceph/fs"
+    },
+    idProperty: 'name'
+    });
+});
diff --git a/www/manager6/node/Config.js b/www/manager6/node/Config.js
index 8b2b802a..f9a62670 100644
--- a/www/manager6/node/Config.js
+++ b/www/manager6/node/Config.js
@@ -340,6 +340,14 @@ Ext.define('PVE.node.Config', {
              groups: ['ceph'],
              itemId: 'ceph-osdtree'
          },
+        {
+            xtype: 'pveNodeCephFSPanel',
+            title: 'CephFS',
+            iconCls: 'fa fa-folder',
+            groups: ['ceph'],
+            nodename: nodename,
+            itemId: 'ceph-cephfspanel'
+        },
          {
              xtype: 'pveNodeCephPoolList',
              title: 'Pools',
Dominik Csapak
2018-11-23 10:05:48 UTC
Permalink
oh, one high-level comment on the panel:

maybe a minheight and emptytext (e.g., 'no cephfs configured')
for the cephfs list would make sense, otherwise it looks rather weird
(zero height list with another panel directly under it)
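for example (untested sketch, exact values up for discussion):

    {
        xtype: 'grid',
        title: gettext('CephFS'),
        minHeight: 100,
        viewConfig: {
            emptyText: gettext('No CephFS configured.'),
            deferEmptyText: false
        }
        // rest of the CephFS grid as in the patch
    }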
Thomas Lamprecht
2018-11-23 10:14:17 UTC
Permalink
Post by Dominik Csapak
maybe a minheight and emptytext (e.g., 'no cephfs configured')
for the cephfs list would make sense, otherwise it looks rather weird
(zero height list with another panel directly under it)
make sense, will do