Revision 0ac84a9a
b/snf-deploy/.gitignore | ||
---|---|---|
1 |
*.pyc |
|
2 |
lala.tmp |
|
3 |
kamaki-fab |
|
4 |
images/* |
|
5 |
*.pid |
|
6 |
*.monitor |
|
7 |
packages |
|
8 |
*.swp |
b/snf-deploy/MANIFEST.in | ||
---|---|---|
1 |
include README Changelog |
|
2 |
include distribute_setup.py |
b/snf-deploy/autopkg.sh | ||
---|---|---|
1 |
#!/bin/bash |
|
2 |
|
|
3 |
usage(){ |
|
4 |
|
|
5 |
echo " |
|
6 |
Usage: $0: [options] |
|
7 |
-h, --help Prints this help message |
|
8 |
--debian [branch] Local debian branch to use (default debian) |
|
9 |
--upstream [branch] Local upstream branch to use (default master) |
|
10 |
--remote [repo] Remote repo to use (default origin) |
|
11 |
--packages [dir] Where to store the created packages (default ~/packages) |
|
12 |
--validate Fetch remote repo branches and |
|
13 |
check if local are up-to-date (default false) |
|
14 |
--push Whether to push upstream (default false) |
|
15 |
" |
|
16 |
exit 1 |
|
17 |
} |
|
18 |
|
|
19 |
parse_git_branch() |
|
20 |
{ |
|
21 |
git branch 2> /dev/null | grep '^*' | sed 's/^*\ //g' |
|
22 |
} |
|
23 |
|
|
24 |
die() |
|
25 |
{ |
|
26 |
echo -e $* 1>&2 |
|
27 |
echo Aborting. |
|
28 |
exit 1 |
|
29 |
} |
|
30 |
|
|
31 |
cleanup() |
|
32 |
{ |
|
33 |
trap - EXIT |
|
34 |
|
|
35 |
echo -n Cleaning up... |
|
36 |
if [ ${#CLEANUP[*]} -gt 0 ]; then |
|
37 |
LAST_ELEMENT=$((${#CLEANUP[*]}-1)) |
|
38 |
REVERSE_INDEXES=$(seq ${LAST_ELEMENT} -1 0) |
|
39 |
for i in $REVERSE_INDEXES; do |
|
40 |
local cmd=${CLEANUP[$i]} |
|
41 |
$cmd |
|
42 |
done |
|
43 |
fi |
|
44 |
echo "done" |
|
45 |
} |
|
46 |
|
|
47 |
add_cleanup() { |
|
48 |
local cmd="" |
|
49 |
for arg; do cmd+=$(printf "%q " "$arg"); done |
|
50 |
CLEANUP+=("$cmd") |
|
51 |
} |
|
52 |
|
|
53 |
|
|
54 |
add_checkpoint() |
|
55 |
{ |
|
56 |
commit=$(git reflog | head -n1 | cut -f 1 -d " ") |
|
57 |
add_cleanup git reset --hard $commit |
|
58 |
LASTCHECKPOINT=$commit |
|
59 |
} |
|
60 |
|
|
61 |
CLEANUP=( ) |
|
62 |
|
|
63 |
|
|
64 |
TEMP=$(getopt -o h --long help,validate,push,packages:,upstream:,debian:,remote: -n 'autopkg.sh' -- "$@") |
|
65 |
|
|
66 |
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi |
|
67 |
|
|
68 |
eval set -- "$TEMP" |
|
69 |
|
|
70 |
while true ; do |
|
71 |
case "$1" in |
|
72 |
-h|--help) usage ;; |
|
73 |
--upstream) LOCALUPSTREAM=$2 ; shift 2 ;; |
|
74 |
--debian) LOCALDEBIAN=$2 ; shift 2 ;; |
|
75 |
--remote) REMOTE=$2 ; shift 2 ;; |
|
76 |
--packages) PKGAREA=$2 ; shift 2 ;; |
|
77 |
--validate) VALIDATE=true ; shift ;; |
|
78 |
--push) PUSH=true ; shift ;; |
|
79 |
--) shift ; break ;; |
|
80 |
*) echo "Internal error!" ; usage ;; |
|
81 |
esac |
|
82 |
done |
|
83 |
|
|
84 |
# The root of the git repository, no matter where we're called from |
|
85 |
TOPLEVEL="$(git rev-parse --show-toplevel)" |
|
86 |
|
|
87 |
: ${LOCALUPSTREAM:=$(parse_git_branch)} |
|
88 |
: ${LOCALDEBIAN:=debian} |
|
89 |
: ${REMOTE:=origin} |
|
90 |
: ${VALIDATE:=false} |
|
91 |
: ${PUSH:=false} |
|
92 |
|
|
93 |
: ${PKGAREA:=~/packages} |
|
94 |
: ${BACKUPAREA:=~/backup} |
|
95 |
|
|
96 |
cd "$TOPLEVEL" |
|
97 |
|
|
98 |
# Prerequisites: Test all important directories exist |
|
99 |
test -d "$PKGAREA" || die "Package area directory $PKGAREA missing" |
|
100 |
test -d "$BACKUPAREA" || die "Backup area directory $BACKUPAREA missing" |
|
101 |
|
|
102 |
# Prerequisite: Test the dialog utility is available |
|
103 |
dialog --help &>/dev/null || die "Could not run the 'dialog' utility" |
|
104 |
|
|
105 |
BUILDAREA=$(mktemp -d --tmpdir=/tmp build-area.XXX) |
|
106 |
add_cleanup rm -r $BUILDAREA |
|
107 |
|
|
108 |
echo "############################################################################" |
|
109 |
echo "Will build packages under $BUILDAREA" |
|
110 |
echo "Local upstream branch: $LOCALUPSTREAM" |
|
111 |
echo "Local debian branch: $LOCALDEBIAN" |
|
112 |
$VALIDATE && echo "Will fetch $REMOTE and check if $LOCALUPSTREAM and $LOCALDEBIAN are up-to-date" |
|
113 |
echo "############################################################################" |
|
114 |
echo "Press Enter to continue..." |
|
115 |
read |
|
116 |
|
|
117 |
MODIFIED=$(git status --short | grep -v "??") |
|
118 |
test -z "$MODIFIED" || die "error: Repository is dirty. Commit your local changes:\n $MODIFIED" |
|
119 |
|
|
120 |
set -e |
|
121 |
trap cleanup EXIT |
|
122 |
|
|
123 |
add_checkpoint |
|
124 |
|
|
125 |
# Create a temporary debian branch to do everything |
|
126 |
TMPDEBIAN=$(mktemp -u debian.XXX) |
|
127 |
git branch --track $TMPDEBIAN $LOCALDEBIAN |
|
128 |
add_cleanup git branch -D $TMPDEBIAN |
|
129 |
|
|
130 |
git checkout $TMPDEBIAN |
|
131 |
add_cleanup git checkout $LOCALUPSTREAM |
|
132 |
|
|
133 |
# Whether we are in snapshot or release mode |
|
134 |
snap=false |
|
135 |
mrgextra=-m |
|
136 |
dchextra=-R |
|
137 |
mrgmsg="Merge branch '$LOCALUPSTREAM' into $LOCALDEBIAN" |
|
138 |
dialog --yesno "Create Snapshot?" 5 20 && snap=true && dchextra=-S && mrgextra= && mrgmsg= |
|
139 |
|
|
140 |
# merge local branch into tmp branch with a nice commit message, |
|
141 |
# so it can be pushed as is to upstream debian |
|
142 |
export GIT_MERGE_AUTOEDIT=no |
|
143 |
git merge $mrgextra ${mrgextra:+"$mrgmsg"} $LOCALUPSTREAM |
|
144 |
|
|
145 |
# auto edit Debian changelog depending on Snapshot or Release mode |
|
146 |
export EDITOR=/usr/bin/vim |
|
147 |
git-dch --debian-branch=$TMPDEBIAN --git-author --ignore-regex=".*" --multimaint-merge --since=HEAD $dchextra |
|
148 |
git add debian/changelog |
|
149 |
|
|
150 |
# get version from the changelog |
|
151 |
# we add a git tag here, so setup.py sdist works as expected |
|
152 |
# FIXME: This is a workaround for the way Synnefo packages determine |
|
153 |
# the versions for their Python packages |
|
154 |
version=$(IFS="()" ; read x v x < debian/changelog ; echo $v) |
|
155 |
if ! $snap; then |
|
156 |
git commit -s -a -m "Bump new upstream version" |
|
157 |
TAGFILE=$(mktemp -t tag.XXX) |
|
158 |
add_cleanup rm $TAGFILE |
|
159 |
dialog --inputbox "New Debian Tag: " 5 30 "debian/$version" 2>$TAGFILE |
|
160 |
git tag $(<$TAGFILE) |
|
161 |
add_cleanup git tag -d $(<$TAGFILE) |
|
162 |
fi |
|
163 |
|
|
164 |
add_cleanup git reset --hard HEAD |
|
165 |
# Build all packages |
|
166 |
git-buildpackage --git-export-dir="$BUILDAREA" \ |
|
167 |
--git-upstream-branch=$LOCALUPSTREAM \ |
|
168 |
--git-debian-branch=$TMPDEBIAN \ |
|
169 |
--git-export=INDEX \ |
|
170 |
--git-ignore-new -sa |
|
171 |
|
|
172 |
# do some dirty backup |
|
173 |
# pkgarea might be needed by auto-deploy tool |
|
174 |
rm -f "$PKGAREA"/* || true |
|
175 |
cp -v "$BUILDAREA"/* "$PKGAREA"/ || true |
|
176 |
cp -v "$BUILDAREA"/* "$BACKUPAREA"/ || true |
|
177 |
|
|
178 |
|
|
179 |
|
|
180 |
function check_remote(){ |
|
181 |
|
|
182 |
git fetch $1 2>/dev/null || die "Could not fetch $1" |
|
183 |
git fetch $1 $2 2>/dev/null|| die "Could not fetch $1/$2" |
|
184 |
|
|
185 |
commits_behind=$(git rev-list $2..$1/$2 | wc -l) |
|
186 |
if [ $commits_behind -ne 0 ]; then |
|
187 |
die "Your local branch is outdated. Please run:\ngit pull --rebase $1/$2" |
|
188 |
fi |
|
189 |
|
|
190 |
|
|
191 |
} |
|
192 |
|
|
193 |
if $VALIDATE; then |
|
194 |
check_remote $REMOTE $LOCALUPSTREAM |
|
195 |
check_remote $REMOTE $LOCALDEBIAN |
|
196 |
fi |
|
197 |
|
|
198 |
|
|
199 |
# trap - EXIT |
|
200 |
# here we can push the commits to the remote debian branch as they are |
|
201 |
echo |
|
202 |
echo "#################################################" |
|
203 |
echo "## SUCCESS ##" |
|
204 |
echo "#################################################" |
|
205 |
if $PUSH; then |
|
206 |
git push --tags $REMOTE $TMPDEBIAN:$LOCALDEBIAN |
|
207 |
git push $REMOTE $LOCALUPSTREAM:$LOCALUPSTREAM |
|
208 |
fi |
|
209 |
|
|
210 |
exit 0 |
b/snf-deploy/conf/deploy.conf | ||
---|---|---|
1 |
[packages] |
|
2 |
# whether to use apt-get or local generated package found in packages dir |
|
3 |
use_local_packages = True |
|
4 |
|
|
5 |
# url to obtain latest synnefo packages. |
|
6 |
# To use them change USE_LOCAL_PACKAGES setting to yes |
|
7 |
# To get them run: snf-deploy packages |
|
8 |
package_url = http://builder.dev.grnet.gr/synnefo/packages/Squeeze/40/ |
|
9 |
|
|
10 |
[dirs] |
|
11 |
# dir to find all template files used to customize setup |
|
12 |
# in case you want to add another setting please modify the corresponding file |
|
13 |
templates = /var/lib/snf-deploy/files |
|
14 |
# dir to store local images (disk0, disk1 of the virtual cluster) |
|
15 |
images = /var/lib/snf-deploy/images |
|
16 |
# dir to store/find local packages |
|
17 |
# dir to locally save packages that will be downloaded from package_url |
|
18 |
# put here any locally created packages (useful for development) |
|
19 |
packages = /var/lib/snf-deploy/packages |
|
20 |
# dir to store pidfiles (dnsmasq, kvm) |
|
21 |
run = /var/run/snf-deploy |
|
22 |
# dir to store dnsmasq related files |
|
23 |
dns = /var/lib/snf-deploy/dnsmasq |
|
24 |
# dir to lookup fabfile and ifup script |
|
25 |
lib = /usr/lib/snf-deploy |
|
26 |
# dir to store executed commands (to enforce sequential execution) |
|
27 |
cmd = /var/run/snf-deploy/cmd |
b/snf-deploy/conf/ganeti.conf | ||
---|---|---|
1 |
[ganeti1] |
|
2 |
cluster_nodes = node1 |
|
3 |
master_node = node1 |
|
4 |
|
|
5 |
cluster_netdev = eth0 |
|
6 |
cluster_name = ganeti1 |
|
7 |
cluster_ip = 192.168.0.13 |
|
8 |
|
|
9 |
vg = autovg |
|
10 |
|
|
11 |
synnefo_public_network_subnet = 10.0.1.0/24 |
|
12 |
synnefo_public_network_gateway = 10.0.1.1 |
|
13 |
synnefo_public_network_type = CUSTOM |
|
14 |
|
|
15 |
image_dir = /srv/okeanos |
|
16 |
|
|
17 |
# To add another cluster repeat the above section |
|
18 |
# with different header and nodes |
b/snf-deploy/conf/nodes.conf | ||
---|---|---|
1 |
# please note that currently the only supported deployment is |
|
2 |
# with nodes (both ganeti and synnefo) residing in the same subnet/domain |
|
3 |
[network] |
|
4 |
domain = synnefo.live |
|
5 |
subnet = 192.168.0.0/28 |
|
6 |
gateway = 192.168.0.14 |
|
7 |
|
|
8 |
|
|
9 |
[hostnames] |
|
10 |
node1 = auto1 |
|
11 |
# node2 = auto2 |
|
12 |
|
|
13 |
[ips] |
|
14 |
node1 = 192.168.0.1 |
|
15 |
# node2 = 192.168.0.2 |
|
16 |
|
|
17 |
# This is used only in case of vcluster |
|
18 |
# needed to pass the correct dhcp responses to the virtual nodes |
|
19 |
[macs] |
|
20 |
node1 = 52:54:00:00:00:01 |
|
21 |
# node2 = 52:54:00:00:00:02 |
|
22 |
|
|
23 |
[info] |
|
24 |
# Here we define which nodes from the predefined ones to use |
|
25 |
nodes = node1 |
|
26 |
|
|
27 |
# login credentials for the nodes |
|
28 |
# please note that in case of vcluster these are preconfigured |
|
29 |
# and not editable. |
|
30 |
# in case of physical nodes all nodes should have the same login account |
|
31 |
user = root |
|
32 |
password = 12345 |
|
33 |
|
|
34 |
public_iface = eth0 |
|
35 |
vm_public_iface = eth1 |
|
36 |
vm_private_iface = eth2 |
|
37 |
|
|
38 |
# extra disk name inside the nodes |
|
39 |
# if defined, snf-deploy will create a VG for ganeti in order to support lvm storage |
|
40 |
# if not then only file disk template will be supported |
|
41 |
extra_disk = /dev/vdb |
b/snf-deploy/conf/packages.conf | ||
---|---|---|
1 |
[debian] |
|
2 |
rabbitmq-server = testing |
|
3 |
gunicorn = squeeze-backports |
|
4 |
qemu-kvm = squeeze-backports |
|
5 |
qemu = squeeze-backports |
|
6 |
python-gevent = squeeze-backports |
|
7 |
apache2 = |
|
8 |
postgresql = |
|
9 |
python-psycopg2 = |
|
10 |
python-argparse = |
|
11 |
nfs-kernel-server = squeeze-backports |
|
12 |
nfs-common = squeeze-backports |
|
13 |
bind9 = |
|
14 |
vlan = |
|
15 |
vlan = |
|
16 |
lvm2 = |
|
17 |
curl = |
|
18 |
memcached = |
|
19 |
python-memcache = |
|
20 |
bridge-utils = |
|
21 |
python-progress = |
|
22 |
ganeti-instance-debootstrap = |
|
23 |
|
|
24 |
|
|
25 |
[synnefo] |
|
26 |
snf-astakos-app = stable |
|
27 |
snf-common = stable |
|
28 |
snf-cyclades-app = stable |
|
29 |
snf-cyclades-gtools = stable |
|
30 |
snf-django-lib = stable |
|
31 |
python-astakosclient = stable |
|
32 |
python-objpool = stable |
|
33 |
snf-branding = stable |
|
34 |
snf-webproject = stable |
|
35 |
snf-pithos-app = stable |
|
36 |
snf-pithos-backend = stable |
|
37 |
snf-tools = stable |
|
38 |
python-django-south = stable |
|
39 |
|
|
40 |
|
|
41 |
[ganeti] |
|
42 |
snf-ganeti = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze |
|
43 |
ganeti-htools = 2.6.2+ippool11+hotplug5+extstorage3+rbdfix1+kvmfix2+nolvm+netxen-1~squeeze |
|
44 |
|
|
45 |
[other] |
|
46 |
snf-cloudcms = stable |
|
47 |
snf-vncauthproxy = stable |
|
48 |
snf-pithos-webclient = stable |
|
49 |
snf-image = stable |
|
50 |
snf-network = stable |
|
51 |
nfdhcpd = stable |
|
52 |
kamaki = stable |
|
53 |
python-bitarray = stable |
|
54 |
nfqueue-bindings-python = stable |
|
55 |
|
b/snf-deploy/conf/synnefo.conf | ||
---|---|---|
1 |
[cred] |
|
2 |
synnefo_user = synnefo |
|
3 |
synnefo_db_passwd = example_passw0rd |
|
4 |
synnefo_rapi_passwd = example_rapi_passw0rd |
|
5 |
synnefo_rabbitmq_passwd = example_rabbitmq_passw0rd |
|
6 |
user_email = user@synnefo.org |
|
7 |
user_name = John |
|
8 |
user_lastname = Doe |
|
9 |
user_passwd = 12345 |
|
10 |
|
|
11 |
|
|
12 |
[roles] |
|
13 |
accounts = node1 |
|
14 |
compute = node1 |
|
15 |
object-store = node1 |
|
16 |
cyclades = node1 |
|
17 |
pithos = node1 |
|
18 |
cms = node1 |
|
19 |
db = node1 |
|
20 |
mq = node1 |
|
21 |
ns = node1 |
|
22 |
client = node1 |
|
23 |
router = node1 |
|
24 |
|
|
25 |
|
|
26 |
[synnefo] |
|
27 |
pithos_dir = /srv/pithos |
|
28 |
|
|
29 |
vm_public_bridge = br0 |
|
30 |
vm_private_bridge = prv0 |
|
31 |
common_bridge = br0 |
|
32 |
|
|
33 |
debian_base_url = https://pithos.okeanos.grnet.gr/public/RDISy7sNVIJ9KIm4JkmbX4 |
b/snf-deploy/conf/vcluster.conf | ||
---|---|---|
1 |
[image] |
|
2 |
# url to get the base image. This is a debian base image with preconfigured |
|
3 |
# root password and installed rsa/dsa keys. Plus a NetworkManager hook that |
|
4 |
# changes the VM's name based on info provided by dhcp response. |
|
5 |
# To create it run: snf-deploy image |
|
6 |
squeeze_image_url = https://pithos.okeanos.grnet.gr/public/832xv |
|
7 |
ubuntu_image_url = |
|
8 |
|
|
9 |
# in order for ganeti nodes to support lvm storage (plain disk template) it will |
|
10 |
# be needed an extra disk to eventually be able to create a VG. Ganeti requires |
|
11 |
# this VG to be at least 30GB. To this end, in order for the virtual nodes to have |
|
12 |
# this extra disk an image should be created locally. There are three options: |
|
13 |
# 1. not create an extra disk (only file storage template will be supported) |
|
14 |
# 2. create an image of 30G in image dir (default /var/lib/snf-deploy/images) |
|
15 |
# using dd if=/dev/zero of=squeeze.disk1 |
|
16 |
# 3. create this image in a local VG using lvcreate -L30G squeeze.disk1 lvg |
|
17 |
# and create a symbolic link in /var/lib/snf-deploy/images |
|
18 |
|
|
19 |
# Whether to create an extra disk or not |
|
20 |
create_extra_disk = False |
|
21 |
# lvg is the name of the local VG if any |
|
22 |
lvg = |
|
23 |
|
|
24 |
# OS installed in the virtual cluster |
|
25 |
os = squeeze |
|
26 |
|
|
27 |
|
|
28 |
[cluster] |
|
29 |
# the bridge to use for the virtual cluster |
|
30 |
# on this bridge we will launch a dnsmasq and provide |
|
31 |
# fqdns needed by the cluster. |
|
32 |
# In order for cluster nodes to have internet access, the host must do NAT. |
|
33 |
# iptables -t nat -A POSTROUTING -s 192.0.0.0/28 -j MASQUERADE |
|
34 |
# ip addr add 192.0.0.14/28 dev auto_nodes_br |
|
35 |
# To create run: snf-deploy cluster |
|
36 |
bridge = auto_nodes_br |
b/snf-deploy/distribute_setup.py | ||
---|---|---|
1 |
#!python |
|
2 |
"""Bootstrap distribute installation |
|
3 |
|
|
4 |
If you want to use setuptools in your package's setup.py, just include this |
|
5 |
file in the same directory with it, and add this to the top of your setup.py:: |
|
6 |
|
|
7 |
from distribute_setup import use_setuptools |
|
8 |
use_setuptools() |
|
9 |
|
|
10 |
If you want to require a specific version of setuptools, set a download |
|
11 |
mirror, or use an alternate download directory, you can do so by supplying |
|
12 |
the appropriate options to ``use_setuptools()``. |
|
13 |
|
|
14 |
This file can also be run as a script to install or upgrade setuptools. |
|
15 |
""" |
|
16 |
import os |
|
17 |
import sys |
|
18 |
import time |
|
19 |
import fnmatch |
|
20 |
import tempfile |
|
21 |
import tarfile |
|
22 |
from distutils import log |
|
23 |
|
|
24 |
try: |
|
25 |
from site import USER_SITE |
|
26 |
except ImportError: |
|
27 |
USER_SITE = None |
|
28 |
|
|
29 |
try: |
|
30 |
import subprocess |
|
31 |
|
|
32 |
def _python_cmd(*args): |
|
33 |
args = (sys.executable,) + args |
|
34 |
return subprocess.call(args) == 0 |
|
35 |
|
|
36 |
except ImportError: |
|
37 |
# will be used for python 2.3 |
|
38 |
def _python_cmd(*args): |
|
39 |
args = (sys.executable,) + args |
|
40 |
# quoting arguments if windows |
|
41 |
if sys.platform == 'win32': |
|
42 |
def quote(arg): |
|
43 |
if ' ' in arg: |
|
44 |
return '"%s"' % arg |
|
45 |
return arg |
|
46 |
args = [quote(arg) for arg in args] |
|
47 |
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0 |
|
48 |
|
|
49 |
DEFAULT_VERSION = "0.6.10" |
|
50 |
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/" |
|
51 |
SETUPTOOLS_FAKED_VERSION = "0.6c11" |
|
52 |
|
|
53 |
SETUPTOOLS_PKG_INFO = """\ |
|
54 |
Metadata-Version: 1.0 |
|
55 |
Name: setuptools |
|
56 |
Version: %s |
|
57 |
Summary: xxxx |
|
58 |
Home-page: xxx |
|
59 |
Author: xxx |
|
60 |
Author-email: xxx |
|
61 |
License: xxx |
|
62 |
Description: xxx |
|
63 |
""" % SETUPTOOLS_FAKED_VERSION |
|
64 |
|
|
65 |
|
|
66 |
def _install(tarball): |
|
67 |
# extracting the tarball |
|
68 |
tmpdir = tempfile.mkdtemp() |
|
69 |
log.warn('Extracting in %s', tmpdir) |
|
70 |
old_wd = os.getcwd() |
|
71 |
try: |
|
72 |
os.chdir(tmpdir) |
|
73 |
tar = tarfile.open(tarball) |
|
74 |
_extractall(tar) |
|
75 |
tar.close() |
|
76 |
|
|
77 |
# going in the directory |
|
78 |
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) |
|
79 |
os.chdir(subdir) |
|
80 |
log.warn('Now working in %s', subdir) |
|
81 |
|
|
82 |
# installing |
|
83 |
log.warn('Installing Distribute') |
|
84 |
if not _python_cmd('setup.py', 'install'): |
|
85 |
log.warn('Something went wrong during the installation.') |
|
86 |
log.warn('See the error message above.') |
|
87 |
finally: |
|
88 |
os.chdir(old_wd) |
|
89 |
|
|
90 |
|
|
91 |
def _build_egg(egg, tarball, to_dir): |
|
92 |
# extracting the tarball |
|
93 |
tmpdir = tempfile.mkdtemp() |
|
94 |
log.warn('Extracting in %s', tmpdir) |
|
95 |
old_wd = os.getcwd() |
|
96 |
try: |
|
97 |
os.chdir(tmpdir) |
|
98 |
tar = tarfile.open(tarball) |
|
99 |
_extractall(tar) |
|
100 |
tar.close() |
|
101 |
|
|
102 |
# going in the directory |
|
103 |
subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0]) |
|
104 |
os.chdir(subdir) |
|
105 |
log.warn('Now working in %s', subdir) |
|
106 |
|
|
107 |
# building an egg |
|
108 |
log.warn('Building a Distribute egg in %s', to_dir) |
|
109 |
_python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir) |
|
110 |
|
|
111 |
finally: |
|
112 |
os.chdir(old_wd) |
|
113 |
# returning the result |
|
114 |
log.warn(egg) |
|
115 |
if not os.path.exists(egg): |
|
116 |
raise IOError('Could not build the egg.') |
|
117 |
|
|
118 |
|
|
119 |
def _do_download(version, download_base, to_dir, download_delay): |
|
120 |
egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg' |
|
121 |
% (version, sys.version_info[0], sys.version_info[1])) |
|
122 |
if not os.path.exists(egg): |
|
123 |
tarball = download_setuptools(version, download_base, |
|
124 |
to_dir, download_delay) |
|
125 |
_build_egg(egg, tarball, to_dir) |
|
126 |
sys.path.insert(0, egg) |
|
127 |
import setuptools |
|
128 |
setuptools.bootstrap_install_from = egg |
|
129 |
|
|
130 |
|
|
131 |
def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, |
|
132 |
to_dir=os.curdir, download_delay=15, no_fake=True): |
|
133 |
# making sure we use the absolute path |
|
134 |
to_dir = os.path.abspath(to_dir) |
|
135 |
was_imported = 'pkg_resources' in sys.modules or \ |
|
136 |
'setuptools' in sys.modules |
|
137 |
try: |
|
138 |
try: |
|
139 |
import pkg_resources |
|
140 |
if not hasattr(pkg_resources, '_distribute'): |
|
141 |
if not no_fake: |
|
142 |
_fake_setuptools() |
|
143 |
raise ImportError |
|
144 |
except ImportError: |
|
145 |
return _do_download(version, download_base, to_dir, download_delay) |
|
146 |
try: |
|
147 |
pkg_resources.require("distribute>="+version) |
|
148 |
return |
|
149 |
except pkg_resources.VersionConflict: |
|
150 |
e = sys.exc_info()[1] |
|
151 |
if was_imported: |
|
152 |
sys.stderr.write( |
|
153 |
"The required version of distribute (>=%s) is not available,\n" |
|
154 |
"and can't be installed while this script is running. Please\n" |
|
155 |
"install a more recent version first, using\n" |
|
156 |
"'easy_install -U distribute'." |
|
157 |
"\n\n(Currently using %r)\n" % (version, e.args[0])) |
|
158 |
sys.exit(2) |
|
159 |
else: |
|
160 |
del pkg_resources, sys.modules['pkg_resources'] # reload ok |
|
161 |
return _do_download(version, download_base, to_dir, |
|
162 |
download_delay) |
|
163 |
except pkg_resources.DistributionNotFound: |
|
164 |
return _do_download(version, download_base, to_dir, |
|
165 |
download_delay) |
|
166 |
finally: |
|
167 |
if not no_fake: |
|
168 |
_create_fake_setuptools_pkg_info(to_dir) |
|
169 |
|
|
170 |
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL, |
|
171 |
to_dir=os.curdir, delay=15): |
|
172 |
"""Download distribute from a specified location and return its filename |
|
173 |
|
|
174 |
`version` should be a valid distribute version number that is available |
|
175 |
as an egg for download under the `download_base` URL (which should end |
|
176 |
with a '/'). `to_dir` is the directory where the egg will be downloaded. |
|
177 |
`delay` is the number of seconds to pause before an actual download |
|
178 |
attempt. |
|
179 |
""" |
|
180 |
# making sure we use the absolute path |
|
181 |
to_dir = os.path.abspath(to_dir) |
|
182 |
try: |
|
183 |
from urllib.request import urlopen |
|
184 |
except ImportError: |
|
185 |
from urllib2 import urlopen |
|
186 |
tgz_name = "distribute-%s.tar.gz" % version |
|
187 |
url = download_base + tgz_name |
|
188 |
saveto = os.path.join(to_dir, tgz_name) |
|
189 |
src = dst = None |
|
190 |
if not os.path.exists(saveto): # Avoid repeated downloads |
|
191 |
try: |
|
192 |
log.warn("Downloading %s", url) |
|
193 |
src = urlopen(url) |
|
194 |
# Read/write all in one block, so we don't create a corrupt file |
|
195 |
# if the download is interrupted. |
|
196 |
data = src.read() |
|
197 |
dst = open(saveto, "wb") |
|
198 |
dst.write(data) |
|
199 |
finally: |
|
200 |
if src: |
|
201 |
src.close() |
|
202 |
if dst: |
|
203 |
dst.close() |
|
204 |
return os.path.realpath(saveto) |
|
205 |
|
|
206 |
def _no_sandbox(function): |
|
207 |
def __no_sandbox(*args, **kw): |
|
208 |
try: |
|
209 |
from setuptools.sandbox import DirectorySandbox |
|
210 |
if not hasattr(DirectorySandbox, '_old'): |
|
211 |
def violation(*args): |
|
212 |
pass |
|
213 |
DirectorySandbox._old = DirectorySandbox._violation |
|
214 |
DirectorySandbox._violation = violation |
|
215 |
patched = True |
|
216 |
else: |
|
217 |
patched = False |
|
218 |
except ImportError: |
|
219 |
patched = False |
|
220 |
|
|
221 |
try: |
|
222 |
return function(*args, **kw) |
|
223 |
finally: |
|
224 |
if patched: |
|
225 |
DirectorySandbox._violation = DirectorySandbox._old |
|
226 |
del DirectorySandbox._old |
|
227 |
|
|
228 |
return __no_sandbox |
|
229 |
|
|
230 |
def _patch_file(path, content): |
|
231 |
"""Will backup the file then patch it""" |
|
232 |
existing_content = open(path).read() |
|
233 |
if existing_content == content: |
|
234 |
# already patched |
|
235 |
log.warn('Already patched.') |
|
236 |
return False |
|
237 |
log.warn('Patching...') |
|
238 |
_rename_path(path) |
|
239 |
f = open(path, 'w') |
|
240 |
try: |
|
241 |
f.write(content) |
|
242 |
finally: |
|
243 |
f.close() |
|
244 |
return True |
|
245 |
|
|
246 |
_patch_file = _no_sandbox(_patch_file) |
|
247 |
|
|
248 |
def _same_content(path, content): |
|
249 |
return open(path).read() == content |
|
250 |
|
|
251 |
def _rename_path(path): |
|
252 |
new_name = path + '.OLD.%s' % time.time() |
|
253 |
log.warn('Renaming %s into %s', path, new_name) |
|
254 |
os.rename(path, new_name) |
|
255 |
return new_name |
|
256 |
|
|
257 |
def _remove_flat_installation(placeholder): |
|
258 |
if not os.path.isdir(placeholder): |
|
259 |
log.warn('Unkown installation at %s', placeholder) |
|
260 |
return False |
|
261 |
found = False |
|
262 |
for file in os.listdir(placeholder): |
|
263 |
if fnmatch.fnmatch(file, 'setuptools*.egg-info'): |
|
264 |
found = True |
|
265 |
break |
|
266 |
if not found: |
|
267 |
log.warn('Could not locate setuptools*.egg-info') |
|
268 |
return |
|
269 |
|
|
270 |
log.warn('Removing elements out of the way...') |
|
271 |
pkg_info = os.path.join(placeholder, file) |
|
272 |
if os.path.isdir(pkg_info): |
|
273 |
patched = _patch_egg_dir(pkg_info) |
|
274 |
else: |
|
275 |
patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO) |
|
276 |
|
|
277 |
if not patched: |
|
278 |
log.warn('%s already patched.', pkg_info) |
|
279 |
return False |
|
280 |
# now let's move the files out of the way |
|
281 |
for element in ('setuptools', 'pkg_resources.py', 'site.py'): |
|
282 |
element = os.path.join(placeholder, element) |
|
283 |
if os.path.exists(element): |
|
284 |
_rename_path(element) |
|
285 |
else: |
|
286 |
log.warn('Could not find the %s element of the ' |
|
287 |
'Setuptools distribution', element) |
|
288 |
return True |
|
289 |
|
|
290 |
_remove_flat_installation = _no_sandbox(_remove_flat_installation) |
|
291 |
|
|
292 |
def _after_install(dist): |
|
293 |
log.warn('After install bootstrap.') |
|
294 |
placeholder = dist.get_command_obj('install').install_purelib |
|
295 |
_create_fake_setuptools_pkg_info(placeholder) |
|
296 |
|
|
297 |
def _create_fake_setuptools_pkg_info(placeholder): |
|
298 |
if not placeholder or not os.path.exists(placeholder): |
|
299 |
log.warn('Could not find the install location') |
|
300 |
return |
|
301 |
pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1]) |
|
302 |
setuptools_file = 'setuptools-%s-py%s.egg-info' % \ |
|
303 |
(SETUPTOOLS_FAKED_VERSION, pyver) |
|
304 |
pkg_info = os.path.join(placeholder, setuptools_file) |
|
305 |
if os.path.exists(pkg_info): |
|
306 |
log.warn('%s already exists', pkg_info) |
|
307 |
return |
|
308 |
|
|
309 |
log.warn('Creating %s', pkg_info) |
|
310 |
f = open(pkg_info, 'w') |
|
311 |
try: |
|
312 |
f.write(SETUPTOOLS_PKG_INFO) |
|
313 |
finally: |
|
314 |
f.close() |
|
315 |
|
|
316 |
pth_file = os.path.join(placeholder, 'setuptools.pth') |
|
317 |
log.warn('Creating %s', pth_file) |
|
318 |
f = open(pth_file, 'w') |
|
319 |
try: |
|
320 |
f.write(os.path.join(os.curdir, setuptools_file)) |
|
321 |
finally: |
|
322 |
f.close() |
|
323 |
|
|
324 |
_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info) |
|
325 |
|
|
326 |
def _patch_egg_dir(path): |
|
327 |
# let's check if it's already patched |
|
328 |
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') |
|
329 |
if os.path.exists(pkg_info): |
|
330 |
if _same_content(pkg_info, SETUPTOOLS_PKG_INFO): |
|
331 |
log.warn('%s already patched.', pkg_info) |
|
332 |
return False |
|
333 |
_rename_path(path) |
|
334 |
os.mkdir(path) |
|
335 |
os.mkdir(os.path.join(path, 'EGG-INFO')) |
|
336 |
pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO') |
|
337 |
f = open(pkg_info, 'w') |
|
338 |
try: |
|
339 |
f.write(SETUPTOOLS_PKG_INFO) |
|
340 |
finally: |
|
341 |
f.close() |
|
342 |
return True |
|
343 |
|
|
344 |
_patch_egg_dir = _no_sandbox(_patch_egg_dir) |
|
345 |
|
|
346 |
def _before_install(): |
|
347 |
log.warn('Before install bootstrap.') |
|
348 |
_fake_setuptools() |
|
349 |
|
|
350 |
|
|
351 |
def _under_prefix(location): |
|
352 |
if 'install' not in sys.argv: |
|
353 |
return True |
|
354 |
args = sys.argv[sys.argv.index('install')+1:] |
|
355 |
for index, arg in enumerate(args): |
|
356 |
for option in ('--root', '--prefix'): |
|
357 |
if arg.startswith('%s=' % option): |
|
358 |
top_dir = arg.split('root=')[-1] |
|
359 |
return location.startswith(top_dir) |
|
360 |
elif arg == option: |
|
361 |
if len(args) > index: |
|
362 |
top_dir = args[index+1] |
|
363 |
return location.startswith(top_dir) |
|
364 |
if arg == '--user' and USER_SITE is not None: |
|
365 |
return location.startswith(USER_SITE) |
|
366 |
return True |
|
367 |
|
|
368 |
|
|
369 |
def _fake_setuptools(): |
|
370 |
log.warn('Scanning installed packages') |
|
371 |
try: |
|
372 |
import pkg_resources |
|
373 |
except ImportError: |
|
374 |
# we're cool |
|
375 |
log.warn('Setuptools or Distribute does not seem to be installed.') |
|
376 |
return |
|
377 |
ws = pkg_resources.working_set |
|
378 |
try: |
|
379 |
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools', |
|
380 |
replacement=False)) |
|
381 |
except TypeError: |
|
382 |
# old distribute API |
|
383 |
setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools')) |
|
384 |
|
|
385 |
if setuptools_dist is None: |
|
386 |
log.warn('No setuptools distribution found') |
|
387 |
return |
|
388 |
# detecting if it was already faked |
|
389 |
setuptools_location = setuptools_dist.location |
|
390 |
log.warn('Setuptools installation detected at %s', setuptools_location) |
|
391 |
|
|
392 |
# if --root or --prefix was provided, and if |
|
393 |
# setuptools is not located in them, we don't patch it |
|
394 |
if not _under_prefix(setuptools_location): |
|
395 |
log.warn('Not patching, --root or --prefix is installing Distribute' |
|
396 |
' in another location') |
|
397 |
return |
|
398 |
|
|
399 |
# let's see if its an egg |
|
400 |
if not setuptools_location.endswith('.egg'): |
|
401 |
log.warn('Non-egg installation') |
|
402 |
res = _remove_flat_installation(setuptools_location) |
|
403 |
if not res: |
|
404 |
return |
|
405 |
else: |
|
406 |
log.warn('Egg installation') |
|
407 |
pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO') |
|
408 |
if (os.path.exists(pkg_info) and |
|
409 |
_same_content(pkg_info, SETUPTOOLS_PKG_INFO)): |
|
410 |
log.warn('Already patched.') |
|
411 |
return |
|
412 |
log.warn('Patching...') |
|
413 |
# let's create a fake egg replacing setuptools one |
|
414 |
res = _patch_egg_dir(setuptools_location) |
|
415 |
if not res: |
|
416 |
return |
|
417 |
log.warn('Patched done.') |
|
418 |
_relaunch() |
|
419 |
|
|
420 |
|
|
421 |
def _relaunch(): |
|
422 |
log.warn('Relaunching...') |
|
423 |
# we have to relaunch the process |
|
424 |
# pip marker to avoid a relaunch bug |
|
425 |
if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']: |
|
426 |
sys.argv[0] = 'setup.py' |
|
427 |
args = [sys.executable] + sys.argv |
|
428 |
sys.exit(subprocess.call(args)) |
|
429 |
|
|
430 |
|
|
431 |
def _extractall(self, path=".", members=None): |
|
432 |
"""Extract all members from the archive to the current working |
|
433 |
directory and set owner, modification time and permissions on |
|
434 |
directories afterwards. `path' specifies a different directory |
|
435 |
to extract to. `members' is optional and must be a subset of the |
|
436 |
list returned by getmembers(). |
|
437 |
""" |
|
438 |
import copy |
|
439 |
import operator |
|
440 |
from tarfile import ExtractError |
|
441 |
directories = [] |
|
442 |
|
|
443 |
if members is None: |
|
444 |
members = self |
|
445 |
|
|
446 |
for tarinfo in members: |
|
447 |
if tarinfo.isdir(): |
|
448 |
# Extract directories with a safe mode. |
|
449 |
directories.append(tarinfo) |
|
450 |
tarinfo = copy.copy(tarinfo) |
|
451 |
tarinfo.mode = 448 # decimal for oct 0700 |
|
452 |
self.extract(tarinfo, path) |
|
453 |
|
|
454 |
# Reverse sort directories. |
|
455 |
if sys.version_info < (2, 4): |
|
456 |
def sorter(dir1, dir2): |
|
457 |
return cmp(dir1.name, dir2.name) |
|
458 |
directories.sort(sorter) |
|
459 |
directories.reverse() |
|
460 |
else: |
|
461 |
directories.sort(key=operator.attrgetter('name'), reverse=True) |
|
462 |
|
|
463 |
# Set correct owner, mtime and filemode on directories. |
|
464 |
for tarinfo in directories: |
|
465 |
dirpath = os.path.join(path, tarinfo.name) |
|
466 |
try: |
|
467 |
self.chown(tarinfo, dirpath) |
|
468 |
self.utime(tarinfo, dirpath) |
|
469 |
self.chmod(tarinfo, dirpath) |
|
470 |
except ExtractError: |
|
471 |
e = sys.exc_info()[1] |
|
472 |
if self.errorlevel > 1: |
|
473 |
raise |
|
474 |
else: |
|
475 |
self._dbg(1, "tarfile: %s" % e) |
|
476 |
|
|
477 |
|
|
478 |
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    # NOTE(review): ``argv`` and ``version`` are accepted but unused in
    # this body — download_setuptools() is called without arguments.
    tarball = download_setuptools()
    _install(tarball)
|
482 |
|
|
483 |
|
|
484 |
# Script entry point: run the bootstrap with the command-line arguments.
if __name__ == '__main__':
    main(sys.argv[1:])
b/snf-deploy/fabfile.py | ||
---|---|---|
1 |
from __future__ import with_statement |
|
2 |
from fabric.api import * |
|
3 |
from fabric.contrib.console import confirm |
|
4 |
from random import choice |
|
5 |
from fabric.operations import run, put |
|
6 |
import re |
|
7 |
import shutil, os |
|
8 |
from functools import wraps |
|
9 |
import imp |
|
10 |
import ConfigParser |
|
11 |
import sys |
|
12 |
import tempfile |
|
13 |
import ast |
|
14 |
from snfdeploy.lib import * |
|
15 |
from snfdeploy import massedit |
|
16 |
|
|
17 |
|
|
18 |
def setup_env(confdir="conf", packages="packages",
              templates="files", cluster_name="ganeti1", autoconf=False):
    """Load the synnefo deployment configuration and populate Fabric's env.

    Must be invoked before any other task: it builds ``env.env`` (the
    parsed deployment configuration) and ``env.roledefs`` (host lists for
    the @roles decorators used throughout this file).

    Arguments arrive as strings when given on the fab command line, which
    is why ``autoconf`` goes through ast.literal_eval.
    """
    print("Loading configuration for synnefo...")
    print(" * Using config files under %s..." % confdir)
    print(" * Using %s and %s for packages and templates accordingly..." % (packages, templates))

    # "True"/"False" string from the fab CLI -> real bool
    autoconf = ast.literal_eval(autoconf)
    conf = Conf.configure(confdir=confdir, cluster_name=cluster_name, autoconf=autoconf)
    env.env = Env(conf)

    # In autoconf mode commands run locally instead of over ssh
    # (see try_run below, which checks env.local).
    env.local = autoconf
    env.password = env.env.password
    env.user = env.env.user
    env.shell = "/bin/bash -c"

    # cms_pass: set when the CMS shares a host with another service —
    # presumably toggles CMS-specific handling elsewhere; TODO confirm
    # against the tasks that read it.
    if env.env.cms.hostname in [env.env.accounts.hostname, env.env.cyclades.hostname, env.env.pithos.hostname]:
        env.cms_pass = True
    else:
        env.cms_pass = False

    # csrf_disable: set when astakos (accounts) is co-located with
    # cyclades or pithos.
    if env.env.accounts.hostname in [env.env.cyclades.hostname, env.env.pithos.hostname]:
        env.csrf_disable = True
    else:
        env.csrf_disable = False

    # Role name -> list of host IPs, consumed by Fabric's @roles.
    env.roledefs = {
        "nodes": env.env.ips,
        "ips": env.env.ips,
        "accounts": [env.env.accounts.ip],
        "cyclades": [env.env.cyclades.ip],
        "pithos": [env.env.pithos.ip],
        "cms": [env.env.cms.ip],
        "mq": [env.env.mq.ip],
        "db": [env.env.db.ip],
        "ns": [env.env.ns.ip],
        "client": [env.env.client.ip],
        "router": [env.env.router.ip],
    }

    # LVM/DRBD are only enabled when an extra disk is configured.
    env.enable_lvm = False
    env.enable_drbd = False
    if ast.literal_eval(env.env.create_extra_disk) and env.env.extra_disk:
        env.enable_lvm = True
        env.enable_drbd = True

    env.roledefs.update({
        "ganeti": env.env.cluster_ips,
        "master": [env.env.master.ip],
    })
|
68 |
|
|
69 |
|
|
70 |
def install_package(package):
    """Install ``package`` on the current host.

    If use_local_packages is enabled and a matching .deb exists in the
    local package directory, upload and install that (falling back to
    ``apt-get -f`` to resolve dependencies).  Otherwise install via apt,
    honouring an optional per-package pin from the configuration: either
    a suite name (-t <suite>) or an exact version (pkg=version).
    """
    debug(env.host, " * Installing package %s..." % package)
    apt_install = "export DEBIAN_FRONTEND=noninteractive ;apt-get install -y --force-yes "

    if ast.literal_eval(env.env.use_local_packages):
        with settings(warn_only=True):
            deb = local("ls %s/%s*deb" % (env.env.packages, package))
            if deb:
                debug(env.host,
                      " * Package %s found in %s..." % (package, env.env.packages))
                put(deb, "/tmp/")
                # dpkg may leave unmet deps; apt-get -f repairs them.
                try_run("dpkg -i /tmp/%s*deb || " % package + apt_install + "-f")
                try_run("rm /tmp/%s*deb" % package)
                return

    # Per-package pin from the config: suite name, exact version, or empty.
    pin = getattr(env.env, package)
    if pin in ["stable", "squeeze-backports", "testing", "unstable"]:
        apt_install += " -t %s %s " % (pin, package)
    elif pin:
        apt_install += " %s=%s " % (package, pin)
    else:
        apt_install += package

    try_run(apt_install)
|
95 |
|
|
96 |
|
|
97 |
@roles("ns")
def update_ns_for_ganeti():
    """Add DNS A/PTR records for the ganeti cluster and reload bind."""
    debug(env.host, "Updating name server entries for backend %s..." % env.env.cluster.fqdn)
    update_arecord(env.env.cluster)
    update_ptrrecord(env.env.cluster)
    try_run("/etc/init.d/bind9 restart")
|
103 |
|
|
104 |
|
|
105 |
@roles("ns")
def update_ns_for_node(node):
    """Add DNS A/PTR records for a single node (by name) and reload bind."""
    info = env.env.nodes_info.get(node)
    # NOTE(review): info is None if ``node`` is unknown — the record
    # helpers would then fail; presumably callers pass valid names.
    update_arecord(info)
    update_ptrrecord(info)
    try_run("/etc/init.d/bind9 restart")
|
111 |
|
|
112 |
|
|
113 |
@roles("ns")
def update_arecord(host):
    """Append ``host``'s A record to the forward zone file for our domain."""
    filename = "/etc/bind/zones/" + env.env.domain
    # host.arecord is assumed to be a preformatted bind zone line —
    # TODO confirm against the Env/host object definition.
    cmd = """
    echo '{0}' >> {1}
    """.format(host.arecord, filename)
    try_run(cmd)
|
120 |
|
|
121 |
|
|
122 |
@roles("ns")
def update_cnamerecord(host):
    """Append ``host``'s CNAME record to the forward zone file."""
    filename = "/etc/bind/zones/" + env.env.domain
    cmd = """
    echo '{0}' >> {1}
    """.format(host.cnamerecord, filename)
    try_run(cmd)
|
129 |
|
|
130 |
|
|
131 |
@roles("ns")
def update_ptrrecord(host):
    """Append ``host``'s PTR record to the reverse zone file."""
    filename = "/etc/bind/rev/synnefo.in-addr.arpa.zone"
    cmd = """
    echo '{0}' >> {1}
    """.format(host.ptrrecord, filename)
    try_run(cmd)
|
138 |
|
|
139 |
@roles("nodes")
def apt_get_update():
    """Refresh the apt package index on every node."""
    debug(env.host, "apt-get update....")
    try_run("apt-get update")
|
143 |
|
|
144 |
@roles("ns")
def setup_ns():
    """Install bind9 and build the forward/reverse zones for the deployment.

    Renders the bind config templates, creates the zone files, registers
    CNAME records for all service roles and A/PTR records for all nodes,
    then restarts bind.
    """
    debug(env.host, "Setting up name server..")
    # WARNING: this dedicated nameserver should be removed once the
    # deployment is done — gevent picks nameservers randomly and public
    # resolvers do not know our private zone.
    apt_get_update()
    install_package("bind9")
    tmpl = "/etc/bind/named.conf.local"
    replace = {
        "domain": env.env.domain,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)

    # Forward zone: template is named example.com but is uploaded under
    # the configured domain name.
    try_run("mkdir -p /etc/bind/zones")
    tmpl = "/etc/bind/zones/example.com"
    replace = {
        "domain": env.env.domain,
        "ns_node_ip": env.env.ns.ip,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    remote = "/etc/bind/zones/" + env.env.domain
    put(custom, remote)

    # Reverse zone.
    try_run("mkdir -p /etc/bind/rev")
    tmpl = "/etc/bind/rev/synnefo.in-addr.arpa.zone"
    replace = {
        "domain": env.env.domain,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)

    # Options: allow recursion/transfer for the deployment's node IPs.
    tmpl = "/etc/bind/named.conf.options"
    replace = {
        "NODE_IPS": ";".join(env.env.ips),
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl, mode=0644)

    # CNAMEs for every service role except the nameserver itself,
    # then A/PTR records for every physical node.
    for role, info in env.env.roles.iteritems():
        if role == "ns":
            continue
        update_cnamerecord(info)
    for node, info in env.env.nodes_info.iteritems():
        update_arecord(info)
        update_ptrrecord(info)

    try_run("/etc/init.d/bind9 restart")
|
193 |
|
|
194 |
|
|
195 |
@roles("nodes")
def check_dhcp():
    """Sanity check: every configured node IP answers a single ping."""
    debug(env.host, "Checking IPs for synnefo..")
    for n, info in env.env.nodes_info.iteritems():
        try_run("ping -c 1 " + info.ip)
|
200 |
|
|
201 |
@roles("nodes")
def check_dns():
    """Sanity check: node and service-role FQDNs resolve and answer ping."""
    debug(env.host, "Checking fqdns for synnefo..")
    for n, info in env.env.nodes_info.iteritems():
        try_run("ping -c 1 " + info.fqdn)

    # Service-role FQDNs (the CNAMEs added by setup_ns).
    for n, info in env.env.roles.iteritems():
        try_run("ping -c 1 " + info.fqdn)
|
209 |
|
|
210 |
@roles("nodes")
def check_connectivity():
    """Sanity check: every node has outbound internet connectivity."""
    debug(env.host, "Checking internet connectivity..")
    try_run("ping -c 1 www.google.com")
|
214 |
|
|
215 |
|
|
216 |
@roles("nodes")
def check_ssh():
    """Sanity check: password-less ssh works from each node to every node."""
    debug(env.host, "Checking password-less ssh..")
    for n, info in env.env.nodes_info.iteritems():
        try_run("ssh " + info.fqdn + " date")
|
221 |
|
|
222 |
|
|
223 |
@roles("ips")
def add_keys():
    """Install shared root ssh keys on every host.

    Backs up any existing key files, uploads the deployment's
    authorized_keys/id_dsa/id_rsa templates, then merges any previously
    authorized keys back into authorized_keys.
    """
    debug(env.host, "Adding rsa/dsa keys..")
    try_run("mkdir -p /root/.ssh")
    # Back up existing key material before overwriting it.
    cmd = """
    for f in $(ls /root/.ssh/*); do
      cp $f $f.bak
    done
    """
    try_run(cmd)
    files = ["authorized_keys", "id_dsa", "id_dsa.pub",
             "id_rsa", "id_rsa.pub"]
    for f in files:
        tmpl = "/root/.ssh/" + f
        replace = {}
        custom = customize_settings_from_tmpl(tmpl, replace)
        put(custom, tmpl)

    # Preserve keys that were authorized before we replaced the file.
    cmd = """
    if [ -e /root/.ssh/authorized_keys.bak ]; then
      cat /root/.ssh/authorized_keys.bak >> /root/.ssh/authorized_keys
    fi
    """
    debug(env.host, "Updating exising authorized keys..")
    try_run(cmd)
|
248 |
|
|
249 |
@roles("ips")
def setup_resolv_conf():
    """Point every host's resolver at the deployment's nameserver.

    Stops NetworkManager, installs a dhclient hook that prevents DHCP
    from rewriting resolv.conf, uploads our resolv.conf, and makes the
    file immutable (chattr +i) so nothing else can change it.
    """
    debug(env.host, "Tweak /etc/resolv.conf...")
    try_run("/etc/init.d/network-manager stop")
    tmpl = "/etc/dhcp/dhclient-enter-hooks.d/nodnsupdate"
    replace = {}
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl, mode=0644)
    try_run("cp /etc/resolv.conf /etc/resolv.conf.bak")
    tmpl = "/etc/resolv.conf"
    replace = {
        "domain": env.env.domain,
        "ns_node_ip": env.env.ns.ip,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)
    # Immutable flag: prevents dhclient/NetworkManager from undoing this.
    try_run("chattr +i /etc/resolv.conf")
|
266 |
|
|
267 |
|
|
268 |
@roles("ips")
def setup_hosts():
    """Disable ssh host-key checking and normalize the 127.* hosts entry."""
    debug(env.host, "Tweaking /etc/hosts and ssh_config files...")
    try_run("echo StrictHostKeyChecking no >> /etc/ssh/ssh_config")
    # Collapse any 127.x.x.x alias lines to a plain localhost entry so
    # the host resolves its own FQDN via DNS, not /etc/hosts.
    cmd = " sed -i 's/^127.*/127.0.0.1 localhost/g' /etc/hosts "
    try_run(cmd)
|
274 |
|
|
275 |
|
|
276 |
def try_run(cmd):
    """Best-effort command execution: never aborts the deployment.

    Runs ``cmd`` locally when env.local is set (autoconf mode), otherwise
    on the current remote host.  Returns the fabric result on success and
    None on failure.
    """
    try:
        if env.local:
            return local(cmd, capture=True)
        else:
            return run(cmd)
    except (Exception, SystemExit):
        # SystemExit must be caught here: Fabric's abort-on-failure
        # raises it, and this helper's contract is "continue anyway".
        # The previous bare ``except:`` also swallowed KeyboardInterrupt,
        # making Ctrl-C unreliable — that now propagates.
        debug(env.host, "WARNING: command failed. Continuing anyway...")
|
284 |
|
|
285 |
def create_bridges():
    """Create the common network bridge used by VMs and bring it up."""
    debug(env.host, " * Creating bridges...")
    install_package("bridge-utils")
    cmd = """
    brctl addbr {0} ; ip link set {0} up
    """.format(env.env.common_bridge)
    try_run(cmd)
|
292 |
|
|
293 |
|
|
294 |
def connect_bridges():
    """Attach the public interface to the common bridge.

    NOTE(review): the actual try_run is commented out, so this task
    currently only builds the command — presumably disabled on purpose;
    confirm before re-enabling.
    """
    debug(env.host, " * Connecting bridges...")
    cmd = """
    brctl addif {0} {1}
    """.format(env.env.common_bridge, env.env.public_iface)
    #try_run(cmd)
|
300 |
|
|
301 |
|
|
302 |
@roles("ganeti")
def setup_net_infra():
    """Prepare the networking infrastructure on all ganeti nodes."""
    debug(env.host, "Setup networking infrastracture..")
    create_bridges()
    connect_bridges()
|
307 |
|
|
308 |
|
|
309 |
@roles("ganeti")
def setup_lvm():
    """Create the ganeti volume group on the extra disk (if LVM is enabled).

    No-op unless setup_env detected a configured extra disk
    (env.enable_lvm).
    """
    debug(env.host, "create volume group %s for ganeti.." % env.env.vg)
    if env.enable_lvm:
        install_package("lvm2")
        cmd = """
        pvcreate {0}
        vgcreate {1} {0}
        """.format(env.env.extra_disk, env.env.vg)
        try_run(cmd)
|
319 |
|
|
320 |
|
|
321 |
def customize_settings_from_tmpl(tmpl, replace):
    """Render template ``tmpl`` with the ``replace`` mapping.

    Copies the template (looked up under env.env.templates + tmpl) to a
    temp file and substitutes every %KEY% placeholder with its value via
    massedit.  Returns the path of the rendered temp file.

    NOTE(review): values are interpolated into a re.sub() expression, so
    values containing quotes or regex escapes would break the edit —
    acceptable for the controlled config values used here.
    """
    debug(env.host, " * Customizing template %s..." % tmpl)
    # Renamed from ``local``: that name shadowed fabric.api.local()
    # inside this function.
    src = env.env.templates + tmpl
    _, custom = tempfile.mkstemp()
    shutil.copyfile(src, custom)
    for k, v in replace.iteritems():
        # Placeholders in templates are uppercase, e.g. %DOMAIN%.
        regex = "re.sub('%{0}%', '{1}', line)".format(k.upper(), v)
        massedit.edit_files([custom], [regex], dry_run=False)

    return custom
|
331 |
|
|
332 |
|
|
333 |
@roles("nodes")
def setup_apt():
    """Configure apt sources on every node.

    Disables suggested packages, trusts the GRNET dev archive key,
    uploads the okeanos sources list and refreshes the package index.
    """
    debug(env.host, "Setting up apt sources...")
    install_package("curl")
    cmd = """
    echo 'APT::Install-Suggests "false";' >> /etc/apt/apt.conf
    curl -k https://dev.grnet.gr/files/apt-grnetdev.pub | apt-key add -
    """
    try_run(cmd)
    tmpl = "/etc/apt/sources.list.d/okeanos.list"
    replace = {}
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)
    apt_get_update()
|
347 |
|
|
348 |
|
|
349 |
@roles("cyclades", "cms", "pithos", "accounts")
def restart_services():
    """Restart gunicorn and apache2 on all web-service hosts."""
    debug(env.host, " * Restarting apache2 and gunicorn...")
    try_run("/etc/init.d/gunicorn restart")
    try_run("/etc/init.d/apache2 restart")
|
354 |
|
|
355 |
|
|
356 |
def setup_gunicorn():
    """Install gunicorn, deploy the synnefo app config and restart it."""
    debug(env.host, " * Setting up gunicorn...")
    install_package("gunicorn")
    tmpl = "/etc/gunicorn.d/synnefo"
    replace = {}
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl, mode=0644)
    try_run("/etc/init.d/gunicorn restart")
|
364 |
|
|
365 |
|
|
366 |
def setup_apache():
    """Install apache2 as the synnefo front-end (SSL + proxy to gunicorn).

    Renders the plain and SSL vhosts for this host's FQDN, enables the
    required modules/sites and restarts apache.
    """
    debug(env.host, " * Setting up apache2...")
    host_info = env.env.ips_info[env.host]
    install_package("apache2")
    tmpl = "/etc/apache2/sites-available/synnefo"
    replace = {
        "HOST": host_info.fqdn,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)
    # The SSL vhost reuses the same replacement mapping.
    tmpl = "/etc/apache2/sites-available/synnefo-ssl"
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)
    cmd = """
    a2enmod ssl
    a2enmod rewrite
    a2dissite default
    a2ensite synnefo
    a2ensite synnefo-ssl
    a2enmod headers
    a2enmod proxy_http
    a2dismod autoindex
    """
    try_run(cmd)
    try_run("/etc/init.d/apache2 restart")
|
391 |
|
|
392 |
|
|
393 |
@roles("mq")
def setup_mq():
    """Install RabbitMQ and provision the synnefo user.

    Creates the synnefo account with full permissions, removes the
    default guest user, grants administrator tags, then restarts the
    broker.
    """
    debug(env.host, "Setting up RabbitMQ...")
    install_package("rabbitmq-server")
    cmd = """
    rabbitmqctl add_user {0} {1}
    rabbitmqctl set_permissions {0} ".*" ".*" ".*"
    rabbitmqctl delete_user guest
    rabbitmqctl set_user_tags {0} administrator
    """.format(env.env.synnefo_user, env.env.synnefo_rabbitmq_passwd)
    try_run(cmd)
    try_run("/etc/init.d/rabbitmq-server restart")
|
405 |
|
|
406 |
|
|
407 |
@roles("db")
def allow_access_in_db(ip):
    """Whitelist ``ip`` in pg_hba.conf (md5 auth) and restart postgres.

    NOTE(review): the postgresql 8.4 path is hard-coded — squeeze-era
    assumption.
    """
    cmd = """
    echo host all all {0}/32 md5 >> /etc/postgresql/8.4/main/pg_hba.conf
    """.format(ip)
    try_run(cmd)
    try_run("/etc/init.d/postgresql restart")
|
414 |
|
|
415 |
@roles("db")
def setup_db():
    """Install postgresql and initialize the synnefo databases.

    Uploads and runs the db-init script as the postgres user, opens the
    server to remote connections, then restarts it.
    """
    debug(env.host, "Setting up DataBase server...")
    install_package("postgresql")

    tmpl = "/tmp/db-init.psql"
    replace = {
        "synnefo_user": env.env.synnefo_user,
        "synnefo_db_passwd": env.env.synnefo_db_passwd,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl)
    cmd = 'su - postgres -c "psql -w -f %s" ' % tmpl
    try_run(cmd)
    # Listen on all interfaces so the service hosts can connect;
    # access control is handled per-IP in allow_access_in_db().
    cmd = """
    echo "listen_addresses = '*'" >> /etc/postgresql/8.4/main/postgresql.conf
    """
    try_run(cmd)

    try_run("/etc/init.d/postgresql restart")
|
435 |
|
|
436 |
|
|
437 |
@roles("db")
def destroy_db():
    """Drop the synnefo databases (snf_apps and snf_pithos). Destructive."""
    try_run("""su - postgres -c ' psql -w -c "drop database snf_apps" '""")
    try_run("""su - postgres -c ' psql -w -c "drop database snf_pithos" '""")
|
441 |
|
|
442 |
|
|
443 |
def setup_webproject():
    """Install snf-webproject and wire it to the database.

    Verifies the DB host is reachable, installs the web stack on top of
    setup_common(), deploys webproject.conf, whitelists this host on the
    DB server, and restarts gunicorn.
    """
    debug(env.host, " * Setting up snf-webproject...")
    # Quick reachability check of the DB host (output hidden).
    with settings(hide("everything")):
        try_run("ping -c1 " + env.env.db.ip)
    setup_common()
    install_package("snf-webproject")
    install_package("python-psycopg2")
    install_package("python-gevent")
    tmpl = "/etc/synnefo/webproject.conf"
    replace = {
        "synnefo_user": env.env.synnefo_user,
        "synnefo_db_passwd": env.env.synnefo_db_passwd,
        "db_node": env.env.db.ip,
        "domain": env.env.domain,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl, mode=0644)
    # Run allow_access_in_db against the DB host while passing this
    # host's IP as the one to whitelist.
    with settings(host_string=env.env.db.hostname):
        host_info = env.env.ips_info[env.host]
        allow_access_in_db(host_info.ip)
    try_run("/etc/init.d/gunicorn restart")
|
464 |
|
|
465 |
|
|
466 |
def setup_common():
    """Install the shared synnefo base packages and deploy common.conf."""
    debug(env.host, " * Setting up snf-common...")
    host_info = env.env.ips_info[env.host]
    install_package("python-objpool")
    install_package("snf-common")
    install_package("python-astakosclient")
    install_package("snf-django-lib")
    install_package("snf-branding")
    tmpl = "/etc/synnefo/common.conf"
    replace = {
        # FIXME: EMAIL_SUBJECT_PREFIX is set to the bare hostname —
        # likely a placeholder awaiting a proper value.
        "EMAIL_SUBJECT_PREFIX": env.host,
        "domain": env.env.domain,
        "HOST": host_info.fqdn,
    }
    custom = customize_settings_from_tmpl(tmpl, replace)
    put(custom, tmpl, mode=0644)
    try_run("/etc/init.d/gunicorn restart")
|
484 |
|
|
485 |
@roles("accounts")
def astakos_loaddata():
    """Load the initial 'groups' fixture into astakos via snf-manage."""
    debug(env.host, " * Loading initial data to astakos...")
    cmd = """
    snf-manage loaddata groups
    """
    try_run(cmd)
|
492 |
|
|
493 |
|
|
494 |
@roles("accounts") |
|
495 |
def astakos_register_services(): |
|
496 |
debug(env.host, " * Register services in astakos...") |
|
497 |
cmd = """ |
|
498 |
snf-manage component-add "home" https://{0} home-icon.png |
|
499 |
snf-manage component-add "cyclades" https://{1}/cyclades/ui/ |
|
500 |
snf-manage component-add "pithos" https://{2}/pithos/ui/ |
|
501 |
snf-manage component-add "astakos" https://{3}/astakos/ui/ |
|
502 |
""".format(env.env.cms.fqdn, env.env.cyclades.fqdn, env.env.pithos.fqdn, env.env.accounts.fqdn) |
|
503 |
try_run(cmd) |
|
504 |
import_service("astakos") |
|
505 |
import_service("pithos") |
|
506 |
import_service("cyclades") |
Also available in: Unified diff