Merge pull request #77 from theundefined/master
Allow using the gluster recipe on an already existing filesystem
shortdudey123 authored Dec 8, 2016
2 parents 5551968 + 8475cb0 commit 01e892a
Showing 6 changed files with 94 additions and 12 deletions.
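In short, the change makes the LVM provisioning in recipes/server_setup.rb conditional on the new node['gluster']['server']['disks'] attribute: when the array is empty, the recipe skips LVM entirely and just creates the brick directory on whatever filesystem already backs the brick mount path. A minimal attribute sketch of the two modes, based on the diffs below (the device name is hypothetical):

# Existing-filesystem mode added by this PR: leave the new attribute at its
# empty default and the recipe creates the brick directory with a plain
# directory resource instead of LVM.
default['gluster']['server']['disks'] = []

# Original LVM mode: list the physical volumes and the recipe builds a
# 'gluster' volume group with one logical volume per gluster volume.
# default['gluster']['server']['disks'] = ['/dev/sdb']   # hypothetical device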
40 changes: 40 additions & 0 deletions .kitchen.yml
@@ -12,6 +12,46 @@ platforms:
  - name: centos-7.2

suites:
  - name: shareddisk-replicated1
    driver:
      ipaddress: 192.168.10.10
      vagrantfile_erb: test/integration/Vagrantfile.erb
      vm_hostname: gluster1
    run_list:
      - recipe[testcookbook::set_hostname]
      - recipe[gluster::server]
    attributes:
      gluster:
        server:
          brick_mount_path: "/data"
          volumes:
            gv0:
              peers:
                - "gluster2"
                - "gluster1"
              replica_count: 2
              volume_type: "replicated"

  - name: shareddisk-replicated2
    driver:
      ipaddress: 192.168.10.20
      vagrantfile_erb: test/integration/Vagrantfile.erb
      vm_hostname: gluster2
    run_list:
      - recipe[testcookbook::set_hostname]
      - recipe[gluster::server]
    attributes:
      gluster:
        server:
          brick_mount_path: "/data"
          volumes:
            gv0:
              peers:
                - "gluster2"
                - "gluster1"
              replica_count: 2
              volume_type: "replicated"

  - name: replicated1
    driver:
      ipaddress: 192.168.10.10
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -1,6 +1,7 @@
# gluster cookbook CHANGELOG

## Unreleased
- **[PR #77](https://github.com/shortdudey123/chef-gluster/pull/77)** - Allow using the gluster recipe on an already existing filesystem

## v5.1.0 (2016-12-02)
- **[PR #72](https://github.com/shortdudey123/chef-gluster/pull/72)** - Fix backup-volfile-server(s) in mount provider
2 changes: 2 additions & 0 deletions attributes/server.rb
@@ -46,6 +46,8 @@
# Set by the cookbook once bricks are configured and ready to use
default['gluster']['server']['bricks'] = []

default['gluster']['server']['disks'] = []

# Retry delays for attempting peering
default['gluster']['server']['peer_retries'] = 0
default['gluster']['server']['peer_retry_delay'] = 10
42 changes: 30 additions & 12 deletions recipes/server_setup.rb
@@ -32,21 +32,33 @@
 # Use either configured LVM volumes or default LVM volumes
 # Configure the LV's per gluster volume
 # Each LV is one brick
-lvm_volume_group 'gluster' do
-  physical_volumes node['gluster']['server']['disks']
-  if volume_values.attribute?('filesystem')
-    filesystem = volume_values['filesystem']
-  else
-    Chef::Log.warn('No filesystem specified, defaulting to xfs')
-    filesystem = 'xfs'
+if node['gluster']['server']['disks'].any?
+  lvm_volume_group 'gluster' do
+    physical_volumes node['gluster']['server']['disks']
+    if volume_values.attribute?('filesystem')
+      filesystem = volume_values['filesystem']
+    else
+      Chef::Log.warn('No filesystem specified, defaulting to xfs')
+      filesystem = 'xfs'
+    end
+    # Even though this says volume_name, it's actually Brick Name. At the moment this method only supports one brick per volume per server
+    logical_volume volume_name do
+      size volume_values['size']
+      filesystem filesystem
+      mount_point "#{node['gluster']['server']['brick_mount_path']}/#{volume_name}"
+    end
   end
-  # Even though this says volume_name, it's actually Brick Name. At the moment this method only supports one brick per volume per server
-  logical_volume volume_name do
-    size volume_values['size']
-    filesystem filesystem
-    mount_point "#{node['gluster']['server']['brick_mount_path']}/#{volume_name}"
+else
+  Chef::Log.warn('No disks defined for LVM, create gluster on existing filesystem')
+  directory "#{node['gluster']['server']['brick_mount_path']}/#{volume_name}" do
+    owner 'root'
+    group 'root'
+    mode '0755'
+    recursive true
+    action :create
   end
 end

 bricks << "#{node['gluster']['server']['brick_mount_path']}/#{volume_name}/brick"
 # Save the array of bricks to the node's attributes
 node.normal['gluster']['server']['volumes'][volume_name]['bricks'] = bricks
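Note that the brick path is composed the same way in both branches: with the kitchen suites' brick_mount_path of "/data" and a volume named gv0, the brick registered on the node is /data/gv0/brick, which is what the serverspec tests below assert. A tiny sketch of that interpolation, using the suite values as stand-ins:

# Hypothetical values matching the .kitchen.yml suites above
brick_mount_path = '/data'
volume_name      = 'gv0'
brick = "#{brick_mount_path}/#{volume_name}/brick"
puts brick   # => /data/gv0/brick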
@@ -92,6 +104,12 @@
 # Create option string
 force = false
 options = ''
+
+# force when gluster is on rootfs
+if system("df #{node['gluster']['server']['brick_mount_path']}/#{volume_name}/ --output=target |grep -q '^/$'")
+  Chef::Log.warn("Directory #{node['gluster']['server']['brick_mount_path']}/#{volume_name}/ on root filesystem, force creating volume #{volume_name}")
+  force = true
+end
 case volume_values['volume_type']
 when 'distributed'
   Chef::Log.warn('You have specified distributed, serious data loss can occur in this mode as files are spread randomly among the bricks')
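For context, gluster volume create refuses to place a brick on the root partition unless the command ends with force, so the new check shells out to df to detect that case and flips the force flag. A standalone sketch of the same test (assumes GNU df with --output support; the path is a stand-in):

# Hypothetical brick directory; true when it lives on the root filesystem
brick_dir = '/data/gv0'
force = false
# GNU df prints the mount point backing brick_dir; grep -q '^/$' matches the root fs
force = true if system("df #{brick_dir}/ --output=target | grep -q '^/$'")
puts "force volume creation: #{force}"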
18 changes: 18 additions & 0 deletions test/integration/shareddisk-replicated2/serverspec/server_spec.rb
@@ -0,0 +1,18 @@
require 'spec_helper'

describe 'gluster::server.rb' do
  describe command('gluster volume list') do
    its(:stdout) { should match 'gv0' }
  end

  describe command('gluster volume status') do
    its(:stdout) { should include('Brick gluster1:/data/gv0/brick') }
  end

  describe command('gluster volume info') do
    its(:stdout) { should include('Type: Replicate') }
    its(:stdout) { should include('Number of Bricks: 1 x 2 = 2') }
    its(:stdout) { should include('gluster2:/data/gv0/brick') }
    its(:stdout) { should include('gluster1:/data/gv0/brick') }
  end
end
3 changes: 3 additions & 0 deletions test/integration/shareddisk-replicated2/serverspec/spec_helper.rb
@@ -0,0 +1,3 @@
require 'serverspec'

set :backend, :exec
