# Installing CTDB with GlusterFS on Ubuntu Server 16.04
# Install a basic system without any services, except ssh
# The system needs two network interfaces, one for the productive network and one for the heartbeat network
# 192.168.56.0 <-- productive, 192.168.57.0 <-- heartbeat
# Create an extra partition for Gluster (don't format it yet, it will be configured as LVM later)
# Be sure to use the domain controller as DNS server on all nodes. If not, you can't join the domain later.

########### Configure Gluster ############

# Prepare all nodes to install the current Gluster packages
# go to: https://launchpad.net/~gluster/+archive/ubuntu/glusterfs-3.8
# Install the repository:
root@cluster-01:~# add-apt-repository ppa:gluster/glusterfs-3.8
root@cluster-01:~# apt-get update
root@cluster-02:~# add-apt-repository ppa:gluster/glusterfs-3.8
root@cluster-02:~# apt-get update

# Install all needed packages on all nodes
root@cluster-01:~# apt-get install glusterfs-server attr thin-provisioning-tools
root@cluster-02:~# apt-get install glusterfs-server attr thin-provisioning-tools
# thin-provisioning-tools is needed for Gluster snapshots

# Edit /etc/hosts, on all nodes, for name resolution without any working DNS server
------------------------------------
127.0.0.1       localhost
#127.0.1.1      ubuntu

192.168.56.161  cluster-01.example.net cluster-01
192.168.56.162  cluster-02.example.net cluster-02
#heartbeat
192.168.57.161  c1.cluster.gluster c1
192.168.57.162  c2.cluster.gluster c2
------------------------------------

# Configuring the Gluster peers
root@cluster-01:~# gluster peer probe c2
peer probe: success.
root@cluster-02:~# gluster peer probe c1
peer probe: success.

# Creating the LVM volume on all nodes (ATTENTION: choose the right partition!)
------------------------------------
root@cluster-01:~# fdisk /dev/sda
.
.
.
Partition type
   p   primary (0 primary, 0 extended, 4 free)
   e   extended (container for logical partitions)
Select (default p): p
Partition number (1-4, default 1):
First sector (2048-4194303, default 2048):
Last sector, +sectors or +size{K,M,G,T,P} (2048-4194303, default 4194303):

Created a new partition 1 of type 'Linux' and of size 2 GiB.
.
.
.
----------------------------------

# Configure LVM and mount the volume on all nodes
root@cluster-01:~# pvcreate /dev/sda1
  Physical volume "/dev/sda1" successfully created
root@cluster-01:~# vgcreate glustergroup /dev/sda1
  Volume group "glustergroup" successfully created
root@cluster-01:~# lvcreate -L 1950M -T glustergroup/glusterpool
  Rounding up size to full physical extent 1.91 GiB
  Logical volume "glusterpool" created.
root@cluster-01:~# lvcreate -V 1900M -T glustergroup/glusterpool -n glusterv1
  Logical volume "glusterv1" created.
root@cluster-01:~# mkfs.xfs /dev/glustergroup/glusterv1
root@cluster-01:~# mkdir /gluster
root@cluster-01:~# mount /dev/glustergroup/glusterv1 /gluster
root@cluster-01:~# echo /dev/glustergroup/glusterv1 /gluster xfs defaults 0 0 >> /etc/fstab
root@cluster-01:~# mkdir /gluster/brick

# Now it's time to create the Gluster volume. Here I created a replicated volume. See gluster.org for more information.
# Do it on one node only!
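# Optional: before you create the volume, you can check that both peers are connected
# (a small sketch; the exact output depends on your Gluster version, both peers should show
# "State: Peer in Cluster (Connected)")
root@cluster-01:~# gluster peer status
root@cluster-02:~# gluster peer status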
root@cluster-01:~# gluster volume create gv1 replica 2 c1:/gluster/brick c2:/gluster/brick
volume create: gv1: success: please start the volume to access data

# See the volume info:
root@cluster-01:~# gluster volume info

Volume Name: gv1
Type: Replicate
Volume ID: 14027d36-e415-4d5d-bbdf-62c8e5f8a67f
Status: Created
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: c1:/gluster/brick
Brick2: c2:/gluster/brick
Options Reconfigured:
transport.address-family: inet
performance.readdir-ahead: on
nfs.disable: on

# "Status" is "Created"
# Now start the volume
root@cluster-01:~# gluster volume start gv1
volume start: gv1: success

# Look at the volume info again
root@cluster-01:~# gluster volume info

Volume Name: gv1
Type: Replicate
Volume ID: 14027d36-e415-4d5d-bbdf-62c8e5f8a67f
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: c1:/gluster/brick
Brick2: c2:/gluster/brick
Options Reconfigured:
transport.address-family: inet
performance.readdir-ahead: on
nfs.disable: on

# "Status" is now "Started"
# Take a look at the status
root@cluster-01:~# gluster volume status
Status of volume: gv1
Gluster process                            TCP Port  RDMA Port  Online  Pid
------------------------------------------------------------------------------
Brick c1:/gluster/brick                    49152     0          Y       4273
Brick c2:/gluster/brick                    49152     0          Y       4102
Self-heal Daemon on localhost              N/A       N/A        Y       4293
Self-heal Daemon on c2                     N/A       N/A        Y       4122

Task Status of Volume gv1
------------------------------------------------------------------------------
There are no active volume tasks

###############################################
# NEVER write any data directly on the bricks!#
###############################################

# Mount the volume on all nodes
root@cluster-01:~# mkdir /glusterfs
root@cluster-01:~# mount -t glusterfs c1:/gv1 /glusterfs
root@cluster-01:~# mount
.
.
.
c1:/gv1 on /glusterfs type fuse.glusterfs (rw,relatime,user_id=0,group_id=0,default_permissions,allow_other,max_read=131072)

### Solve the bug with mounting glusterfs during system start.
# Gluster uses a FUSE mount, but the FUSE mount is started before the network is up, so GlusterFS will not be mounted.
# I found a solution via a systemd mount unit.
# Create a file /etc/systemd/system/glusterfs.mount
# IMPORTANT! The name of the file must match your mountpoint!
#---------------------------------
[Unit]
Description = CTDB Data dir
After=network.target glusterfs-server.service
Wants=network-online.target

[Mount]
What=c1:/gv1
Where=/glusterfs
Type=glusterfs
Options=defaults,acl

[Install]
WantedBy=multi-user.target
#---------------------------------
# Then activate the new mount unit:
root@cluster-01:~# systemctl enable glusterfs.mount
# Repeat the last two steps on all nodes.
# Now the gluster filesystem will be mounted on system restart.
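# Optional: if systemd does not pick up the new mount unit right away, reload the unit files and
# check the unit (a small sketch; the unit name matches the /glusterfs mountpoint used above)
root@cluster-01:~# systemctl daemon-reload
root@cluster-01:~# systemctl status glusterfs.mount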
# Do this for all your volumes

# To test if everything will work after a reboot
root@cluster-01:~# reboot
root@cluster-02:~# reboot

# Create files and directories on any node and check them on all other nodes
root@cluster-01:~# cd /glusterfs/
root@cluster-01:/glusterfs# touch file1
root@cluster-01:/glusterfs# mkdir dir1
root@cluster-01:/glusterfs# ls -l
total 4
drwxr-xr-x 2 root root 4096 Oct  6 15:15 dir1
-rw-r--r-- 1 root root    0 Oct  6 15:15 file1
-----------------------------------------
root@cluster-02:~# cd /glusterfs
root@cluster-02:/glusterfs# ls -l
total 4
drwxr-xr-x 2 root root 4096 Oct  6 15:15 dir1
-rw-r--r-- 1 root root    0 Oct  6 15:15 file1

# Now you have a working Gluster cluster

#### Gluster snapshots ####
# Gluster uses LVM2 snapshots if the bricks are configured via LVM (as I did here)

# Take a snapshot
root@cluster-01:~# gluster snapshot create snap1 gv1
snapshot create: success: Snap snap1_GMT-2016.10.10-15.17.54 created successfully
# The snapshot can be found in /var/run/gluster/snaps/

# List all snapshots
root@cluster-01:~# gluster snapshot list
snap1_GMT-2016.10.10-15.17.54

# Info about a snapshot
root@cluster-01:~# gluster snapshot info snap1_GMT-2016.10.10-15.17.54
Snapshot                  : snap1_GMT-2016.10.10-15.17.54
Snap UUID                 : 0a9908f3-7456-4bec-8953-96fe2d84eb4f
Created                   : 2016-10-10 15:17:54
Snap Volumes:

        Snap Volume Name        : 6cb49153b5cf40d38bed0d9b1b3a8b5d
        Origin Volume name      : gv1
        Snaps taken for gv1     : 2
        Snaps available for gv1 : 254
        Status                  : Stopped

# Look at the status of the snapshots
root@cluster-01:~# gluster snapshot status

Snap Name : snap1_GMT-2016.10.10-15.17.54
Snap UUID : 0a9908f3-7456-4bec-8953-96fe2d84eb4f

        Brick Path        :   c1:/run/gluster/snaps/6cb49153b5cf40d38bed0d9b1b3a8b5d/brick1/brick
        Volume Group      :   glustergroup
        Brick Running     :   No
        Brick PID         :   N/A
        Data Percentage   :   0.59
        LV Size           :   1.86g

        Brick Path        :   c2:/run/gluster/snaps/6cb49153b5cf40d38bed0d9b1b3a8b5d/brick2/brick
        Volume Group      :   glustergroup
        Brick Running     :   No
        Brick PID         :   N/A
        Data Percentage   :   0.59
        LV Size           :   1.86g

# Before you can access the snapshot you must activate it
root@cluster-01:~# gluster snapshot activate snap1_GMT-2016.10.10-15.17.54
Snapshot activate: snap1_GMT-2016.10.10-15.17.54: Snap activated successfully

# Mount the snapshot
root@cluster-01:~# mount -t glusterfs c1:/snaps/snap1_GMT-2016.10.10-15.17.54/gv1 /mnt
# Now you can copy files from the snapshot back to the volume

# After recovering files, unmount the snapshot
root@cluster-01:~# umount /mnt

# Restore a complete snapshot
# First you MUST stop the volume
root@cluster-01:~# gluster volume stop gv1
Stopping volume will make its data inaccessible. Do you want to continue? (y/n) y
volume stop: gv1: success

# Then restore the volume
root@cluster-01:~# gluster snapshot restore snap1_GMT-2016.10.10-15.17.54
Restore operation will replace the original volume with the snapshotted volume. Do you still want to continue? (y/n) y
Snapshot restore: snap1_GMT-2016.10.10-15.17.54: Snap restored successfully

# Restart the volume
root@cluster-01:~# gluster volume start gv1
volume start: gv1: success

# After restoring a complete snapshot, the snapshot is deleted. You should create a new snapshot immediately.
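# For example, directly after the restore (the snapshot name "snap2" is just an example):
root@cluster-01:~# gluster snapshot create snap2 gv1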
# Delete a snapshot
root@cluster-01:~# gluster snapshot delete snap1_GMT-2016.10.10-15.19.29
Deleting snap will erase all the information about the snap. Do you still want to continue? (y/n) y
snapshot delete: snap1_GMT-2016.10.10-15.19.29: snap removed successfully

### That's it, Gluster is running ###

######### Configure CTDB ###########

# Install all needed packages
root@cluster-01:/glusterfs# apt-get install samba libpam-heimdal heimdal-clients ldb-tools winbind libpam-winbind smbclient libnss-winbind ctdb
root@cluster-02:/glusterfs# apt-get install samba libpam-heimdal heimdal-clients ldb-tools winbind libpam-winbind smbclient libnss-winbind ctdb

### Time is important: install "ntp" and configure it to use your domain controller as timeserver on all nodes ###

# Configure the DNS entries for the cluster
# CTDB uses DNS round-robin to distribute connecting clients over the different nodes
# The internal DNS server does not support DNS round-robin, so use bind9 as nameserver for your DCs
# see the howto: https://www.kania-online.de/wp-content/uploads/2016/10/samba-bind-dlz.txt
# Choose IPs for the cluster nodes in your productive network
# here: 192.168.56.191 and 192.168.56.192
# Create forward and reverse DNS records for all cluster IPs. Use the same name for all IPs!
root@addc-01:~# kinit administrator
administrator@EXAMPLE.NET's Password:
root@addc-01:~# samba-tool dns add addc-01 example.net ctdb-cluster A 192.168.56.191 -k yes
Record added successfully
root@addc-01:~# samba-tool dns add addc-01 example.net ctdb-cluster A 192.168.56.192 -k yes
Record added successfully
root@addc-01:~# samba-tool dns add addc-01 56.168.192.in-addr.arpa 191 PTR ctdb-cluster.example.net -k yes
Record added successfully
root@addc-01:~# samba-tool dns add addc-01 56.168.192.in-addr.arpa 192 PTR ctdb-cluster.example.net -k yes
Record added successfully

# Test the name resolution
root@addc-01:~# host ctdb-cluster
ctdb-cluster.example.net has address 192.168.56.192
ctdb-cluster.example.net has address 192.168.56.191
root@addc-01:~# host ctdb-cluster
ctdb-cluster.example.net has address 192.168.56.191
ctdb-cluster.example.net has address 192.168.56.192
# Because of DNS round-robin the order of the IPs changes

# Configuring CTDB
# All configuration is done in /etc/default/ctdb
# Change the following line:
----------------------------------------
CTDB_RECOVERY_LOCK=/glusterfs/ctdb.lock
----------------------------------------
# !Don't activate Samba at the moment!

# Create /etc/ctdb/nodes with the following content:
----------------------------------------
192.168.57.161
192.168.57.162
----------------------------------------
# Copy it to all nodes:
root@cluster-01:~# scp /etc/ctdb/nodes cluster-02:/etc/ctdb/
# These are the heartbeat IPs. This file must be the same on all nodes

# Create /etc/ctdb/public_addresses with the following content:
----------------------------------------
192.168.56.191/24 enp0s8
192.168.56.192/24 enp0s8
----------------------------------------
# Copy it to all nodes:
root@cluster-01:~# scp /etc/ctdb/public_addresses cluster-02:/etc/ctdb/
# These are the IPs for the cluster in your productive network
# CTDB will manage these IPs

# Start CTDB on all nodes:
root@cluster-01:~# systemctl start ctdb.service
root@cluster-02:~# systemctl start ctdb.service

# Run "ctdb status" until all nodes are "OK"
root@cluster-01:~# watch ctdb status
------------------------------------
Every 2.0s: ctdb status

Number of nodes:2
pnn:0 192.168.57.161   OK (THIS NODE)
pnn:1 192.168.57.162   OK
Generation:1862765093
Size:2
hash:0 lmaster:0
hash:1 lmaster:1
Recovery mode:NORMAL (0)
Recovery master:0
------------------------------------
# !Don't continue until all nodes are OK!
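# A few more optional checks at this point (a sketch; see the ctdb manpage for details):
root@cluster-01:~# ctdb ping -n all             # every node should answer
root@cluster-01:~# ctdb listnodes               # should list both heartbeat IPs
root@cluster-01:~# ls -l /glusterfs/ctdb.lock   # the recovery lock file should exist on the Gluster volume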
###### Configuring Samba #######

# To use Samba together with CTDB, Samba must be configured via the registry
# Install the following Samba packages:
apt-get install samba libpam-heimdal heimdal-clients winbind libpam-winbind smbclient libnss-winbind

# Create a new /etc/samba/smb.conf on all CTDB nodes with the following content:
------------------------------------
[global]
    clustering = yes
    include = registry
------------------------------------

# Create a directory for the first share
root@cluster-01:~# mkdir /glusterfs/data

# Create the Samba configuration:
root@cluster-01:~# net conf setparm global "workgroup" "example"
root@cluster-01:~# net conf setparm global "netbios name" "ctdb-cluster"
root@cluster-01:~# net conf setparm global "security" "ads"
root@cluster-01:~# net conf setparm global "realm" "EXAMPLE.NET"
root@cluster-01:~# net conf setparm global "idmap config *:range" "10000-19999"
root@cluster-01:~# net conf setparm global "idmap config example:backend" "rid"
root@cluster-01:~# net conf setparm global "idmap config example:range" "1000000-1999999"
root@cluster-01:~# net conf setparm global "winbind use default domain" "yes"
root@cluster-01:~# net conf setparm global "winbind refresh tickets" "yes"
root@cluster-01:~# net conf setparm global "store dos attributes" "yes"
root@cluster-01:~# net conf setparm global "map acl inherit" "yes"
root@cluster-01:~# net conf setparm global "vfs objects" "acl_xattr"
root@cluster-01:~# net conf setparm global "template shell" "/bin/bash"
root@cluster-01:~# net conf addshare data /glusterfs/data writeable=yes guest_ok=n "Cluster-storage"
root@cluster-01:~# net conf setparm data "browsable" "no"

# If you run "testparm" on any CTDB node, you will see the same configuration everywhere
root@cluster-02:~# testparm

#### ATTENTION ####
# Remove "smbd", "nmbd" and "winbind" from all runlevels on all CTDB nodes.
# CTDB will take care of all three services.
###################
root@cluster-02:~# systemctl disable smbd
smbd.service is not a native service, redirecting to systemd-sysv-install
Executing /lib/systemd/systemd-sysv-install disable smbd
insserv: warning: current start runlevel(s) (empty) of script `smbd' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `smbd' overrides LSB defaults (0 1 6).
insserv: warning: current start runlevel(s) (empty) of script `smbd' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `smbd' overrides LSB defaults (0 1 6).
root@cluster-02:~# systemctl disable nmbd
nmbd.service is not a native service, redirecting to systemd-sysv-install
Executing /lib/systemd/systemd-sysv-install disable nmbd
insserv: warning: current start runlevel(s) (empty) of script `nmbd' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `nmbd' overrides LSB defaults (0 1 6).
insserv: warning: current start runlevel(s) (empty) of script `nmbd' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `nmbd' overrides LSB defaults (0 1 6).
root@cluster-02:~# systemctl disable winbind
winbind.service is not a native service, redirecting to systemd-sysv-install
Executing /lib/systemd/systemd-sysv-install disable winbind
insserv: warning: current start runlevel(s) (empty) of script `winbind' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `winbind' overrides LSB defaults (0 1 6).
insserv: warning: current start runlevel(s) (empty) of script `winbind' overrides LSB defaults (2 3 4 5).
insserv: warning: current stop runlevel(s) (0 1 2 3 4 5 6) of script `winbind' overrides LSB defaults (0 1 6).
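# Note: "systemctl disable" only removes the services from the runlevels.
# If the daemons are already running after the installation, stop them as well (CTDB will start them later):
root@cluster-01:~# systemctl stop smbd nmbd winbind
root@cluster-02:~# systemctl stop smbd nmbd winbind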
# Copy /etc/krb5.conf from any domain controller to all CTDB nodes
root@addc-01:~# scp /etc/krb5.conf 192.168.56.161:/etc/
root@addc-01:~# scp /etc/krb5.conf 192.168.56.162:/etc/

# Now you can join the domain from one CTDB node
root@cluster-01:~# net ads join -U administrator
Enter administrator's password:
Using short domain name -- EXAMPLE
Joined 'CTDB-CLUSTER' to dns domain 'example.net'
Not doing automatic DNS update in a clustered setup.

# It's always a good idea to start "winbind" and "samba" step by step, so it is easier to find errors.
# Change /etc/default/ctdb on all CTDB nodes to start winbind first.
--------------------------------------------
# What services should CTDB manage? Default is none.
# CTDB_MANAGES_SAMBA=yes
CTDB_MANAGES_WINBIND=yes
--------------------------------------------

# After changing this file on all nodes, check the CTDB status
root@cluster-01:~# ctdb status
Number of nodes:2
pnn:0 192.168.57.161   OK (THIS NODE)
pnn:1 192.168.57.162   OK
Generation:1862765093
Size:2
hash:0 lmaster:0
hash:1 lmaster:1
Recovery mode:NORMAL (0)
Recovery master:0

# Now check the IPs on all CTDB nodes
root@cluster-01:~# ip a l enp0s8
3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 08:00:27:ad:ba:1e brd ff:ff:ff:ff:ff:ff
    inet 192.168.56.161/24 brd 192.168.56.255 scope global enp0s8
       valid_lft forever preferred_lft forever
    inet 192.168.56.191/24 brd 192.168.56.255 scope global secondary enp0s8
       valid_lft forever preferred_lft forever
    inet6 fe80::a00:27ff:fead:ba1e/64 scope link
       valid_lft forever preferred_lft forever
# As you can see, each node gets a second IP. This is one of the IPs from /etc/ctdb/public_addresses

##### DON'T use "ifconfig" to check the IPs. "ifconfig" will not show the IPs managed by CTDB! #####

# Test winbind
root@cluster-01:~# wbinfo -p
Ping to winbindd succeeded
root@cluster-02:~# wbinfo -p
Ping to winbindd succeeded

########### Before you start smbd via CTDB #####################
#
# IMPORTANT
#
# There is a misconfiguration in the Samba start logic in one of the CTDB event scripts.
# Change /etc/ctdb/events.d/50.samba, on all nodes, to the following
###############################################################
root@cluster-02:~# vi /etc/ctdb/events.d/50.samba
------------------------------
	debian)
		CTDB_SERVICE_SMB=${CTDB_SERVICE_SMB:-smbd}
		CTDB_SERVICE_NMB=${CTDB_SERVICE_NMB:-nmbd}
		;;
------------------------------
# Ubuntu starts both daemons separately, NOT via the single init script "samba"!

# Now start "samba" via CTDB.
# Change /etc/default/ctdb to the following settings:
--------------------------------------------
# What services should CTDB manage? Default is none.
CTDB_MANAGES_SAMBA=yes
CTDB_MANAGES_WINBIND=yes
--------------------------------------------
# You don't have to restart CTDB. CTDB will pick up the new configuration and start all Samba services

# Test the Samba services
root@cluster-01:~# ps ax | grep mbd
 3080 ?        Ss     0:00 /usr/sbin/nmbd -D
 3125 ?        Ss     0:00 /usr/sbin/smbd -D
 3132 ?        S      0:00 /usr/sbin/smbd -D
 3153 ?        S      0:00 /usr/sbin/smbd -D
root@cluster-02:~# ps ax | grep mbd
 5703 ?        Ss     0:00 /usr/sbin/nmbd -D
 5748 ?        Ss     0:00 /usr/sbin/smbd -D
 5758 ?        S      0:00 /usr/sbin/smbd -D
 5776 ?        S      0:00 /usr/sbin/smbd -D

# Now you should check CTDB.
# A few examples are shown below:
root@cluster-02:~# ctdb scriptstatus
17 scripts were executed last monitor cycle
00.ctdb              Status:OK    Duration:0.003 Mon Oct 10 17:01:18 2016
01.reclock           Status:OK    Duration:0.007 Mon Oct 10 17:01:18 2016
10.external          Status:DISABLED
10.interface         Status:OK    Duration:0.009 Mon Oct 10 17:01:18 2016
11.natgw             Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
11.routing           Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
13.per_ip_routing    Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
20.multipathd        Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
31.clamd             Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
40.fs_use            Status:DISABLED
40.vsftpd            Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
41.httpd             Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
49.winbind           Status:OK    Duration:0.004 Mon Oct 10 17:01:18 2016
50.samba             Status:OK    Duration:0.047 Mon Oct 10 17:01:18 2016
60.nfs               Status:OK    Duration:0.002 Mon Oct 10 17:01:18 2016
62.cnfs              Status:OK    Duration:0.002 Mon Oct 10 17:01:18 2016
70.iscsi             Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
91.lvs               Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016
99.timeout           Status:OK    Duration:0.001 Mon Oct 10 17:01:18 2016

root@cluster-02:~# ctdb ip
Public IPs on node 1
192.168.56.191 1
192.168.56.192 0

# Take a look at the manpage of ctdb for more tests.

# Change /etc/nsswitch.conf on all nodes to the following:
passwd:         compat winbind
group:          compat winbind

# Check for users and groups:
root@cluster-01:~# wbinfo -u
administrator
dns-addc-02
dns-addc-01
krbtgt
guest
u4
root@cluster-01:~# wbinfo -g
allowed rodc password replication group
enterprise read-only domain controllers
denied rodc password replication group
read-only domain controllers
group policy creator owners
ras and ias servers
domain controllers
enterprise admins
domain computers
cert publishers
dnsupdateproxy
domain admins
domain guests
schema admins
domain users
dnsadmins
root@cluster-01:~# getent passwd u4
u4:*:101106:100513:u4:/home/EXAMPLE/u4:/bin/bash

# Now you have a running CTDB cluster with load balancing via DNS round-robin

##### Configure new shares ####
##################################################
# IMPORTANT                                      #
# Always create the directory inside the cluster #
# before you create the new share.               #
#                                                #
# If you create the share first, CTDB will       #
# become unhealthy!                              #
##################################################

# Create an administrative share
root@cluster-01:~# mkdir /glusterfs/admin-share
root@cluster-01:~# chgrp 'domain admins' /glusterfs/admin-share/
root@cluster-01:~# chmod 775 /glusterfs/admin-share/
root@cluster-01:~# net conf addshare admin /glusterfs/admin-share writeable=y guest_ok=n "Adminshare"
# Now connect to the admin share from Windows, as a domain admin, and you can configure new shares for the users

####### Configuring shares with glusterfs ##########
# If you have the glusterfs VFS module, you can define shares
# with native gluster access

# First export your smb.conf from the registry into a file
root@cluster-02:~# net conf list > smb.reg

# Then edit the file and add the new share at the end of the file
root@cluster-02:~# vi smb.reg
[daten]
	comment = daten
	guest ok = no
	read only = no
	vfs objects = acl_xattr glusterfs
	glusterfs:volume = gv1
	glusterfs:logfile = /var/log/samba/glusterfs-gv1.log
	glusterfs:loglevel = 8
	glusterfs:volfile_server = cluster-01.example.net
	kernel share modes = no
	path = /data
# The "path" is always relative to your gluster volume
# In my example the physical path is "/glusterfs/data"

#############################################
# IMPORTANT!                                #
# The VFS object "glusterfs" MUST always be #
# the LAST one in the line!                 #
#############################################

# Now import the new settings
root@cluster-02:~# net conf import smb.reg
### The registry configuration will be overwritten!!
# Use the share as usual

###### Configuring shadow_copy2 and Gluster #########
# Change the smb.reg file
root@cluster-02:~# vi smb.reg
[data]
	comment = data
	guest ok = no
	read only = no
	vfs objects = acl_xattr shadow_copy2 glusterfs
	glusterfs:volume = gv1
	glusterfs:logfile = /var/log/samba/glusterfs-gv1.log
	glusterfs:loglevel = 8
	glusterfs:volfile_server = cluster-01.example.net
	kernel share modes = no
	path = /
	shadow:mountpoint = /
	shadow:snapdir = /.snaps
	shadow:basedir = /
	shadow:sort = desc
	shadow:snapprefix = ^S[A-Za-z0-9]*p$
	shadow:format = _GMT-%Y.%m.%d-%H.%M.%S

# Import the new smb.reg

# Set the needed parameter in Gluster
root@cluster-01:~# gluster volume set gv1 features.uss enable
volume set: success

# Now create a snapshot
# Because of "shadow:snapprefix", the name must start with an "S" and must end with a "p"
root@cluster-01:~# gluster snapshot create Snap gv1

# Activate the snapshot
root@cluster-01:~# gluster snapshot activate Snap_GMT-2017.01.04-12.47.19
Snapshot activate: Snap_GMT-2017.01.04-12.47.19: Snap activated successfully
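# Optional check from one of the nodes (a sketch; with "features.uss" enabled the activated snapshots
# show up under the virtual ".snaps" directory of the FUSE mount, even though a plain "ls" does not list it):
root@cluster-01:~# ls /glusterfs/.snaps/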
# Now you can access the snapshot in your share:
# do a right-click on the folder and select "Previous Versions" to get access to your snapshot

##### Configuring the recycle bin ###########
# To use the recycle bin, change smb.reg as follows
root@cluster-02:~# vi smb.reg
[data]
	comment = data
	guest ok = no
	read only = no
	vfs objects = acl_xattr shadow_copy2 recycle glusterfs
	glusterfs:volume = gv1
	glusterfs:logfile = /var/log/samba/glusterfs-gv1.log
	glusterfs:loglevel = 8
	glusterfs:volfile_server = cluster-01.example.net
	kernel share modes = no
	path = /
	shadow:snapdir = /.snaps
	shadow:basedir = /
	shadow:sort = desc
	shadow:snapprefix = ^S[A-Za-z0-9]*p$
	shadow:format = _GMT-%Y.%m.%d-%H.%M.%S
	recycle:repository = .recycle/%U
	recycle:keeptree = yes
	recycle:versions = yes
	recycle:touch = yes
	recycle:directory_mode = 0777
	recycle:subdir_mode = 0700
	recycle:exclude = *.tmp, *~, *.bak
	recycle:exclude_dir = tmp
	recycle:maxsize = 0

# Import the changed smb.reg file

# Create the folder for the recycle bin
root@cluster-02:~# mkdir /glusterfs/.recycle

# Now every user has their own subdirectory inside the recycle bin

############ Have fun #############