|
1 |
|
|
2 |
Showcase: create a virtual cluster from scratch
|
|
3 |
===============================================
|
|
4 |
|
|
5 |
In this section we will create a virtual cluster, from scratch.
|
|
6 |
|
|
7 |
Requirements:
|
|
8 |
|
|
9 |
* A `synnefo <http://www.synnefo.org>`_ deployment with functional *Astakos*,
|
|
10 |
*Pithos+*, *Plankton* and *Cyclades* services.
|
|
11 |
|
|
12 |
* A kamaki setup, configured with a default cloud (see how to do this with
|
|
13 |
kamaki as a
|
|
14 |
`shell command <../examplesdir/configuration.html#multiple-clouds-in-a-single-configuration>`_ ,
|
|
15 |
or a
|
|
16 |
`python library <config.html#set-a-new-cloud-name-it-new-cloud-and-set-it-as-default>`_.
|
|
17 |
|
|
18 |
* An image stored at file *./my_image.diskdump* that can run on a predefined
|
|
19 |
hardware flavor, identifiable by the flavor id *42* (see how to create an
|
|
20 |
image with the
|
|
21 |
`synnefo image creator <http://www.synnefo.org/docs/snf-image-creator/latest/index.html>`_
|
|
22 |
).
|
|
23 |
|
|
24 |
This is the pseudocode:
|
|
25 |
|
|
26 |
#. Get credentials and service endpoints, with kamaki config and the
|
|
27 |
**Astakos** *identity* and *account* services
|
|
28 |
#. Upload the image file to the **Pithos+** *object-store* service
|
|
29 |
#. Register the image file to the **Plankton** *image* service
|
|
30 |
#. Create a number of virtual servers to the **Cyclades** *compute* service
|
|
31 |
|
|
32 |
|
|
33 |
Credentials and endpoints
|
|
34 |
-------------------------
|
|
35 |
|
|
36 |
We assume that the kamaki configuration file contains at least one cloud
|
|
37 |
configuration, and this configuration is also set as the default cloud for
|
|
38 |
kamaki. A cloud configuration is basically a name for the cloud, an
|
|
39 |
authentication URL and an authentication TOKEN: the credentials we are looking
|
|
40 |
for!
|
|
41 |
|
|
42 |
This is the plan:
|
|
43 |
|
|
44 |
#. Get the credentials from the kamaki configuration
|
|
45 |
#. Initialize an AstakosClient and test the credentials
|
|
46 |
#. Get the endpoints for all services
|
|
47 |
|
|
48 |
.. code-block:: python
|
|
49 |
|
|
50 |
from sys import stderr
|
|
51 |
from kamaki.cli.config import Config, CONFIG_PATH
|
|
52 |
from kamaki.clients.astakos import AstakosClient, ClientError
|
|
53 |
|
|
54 |
# Initialize Config with default values.
|
|
55 |
cnf = Config()
|
|
56 |
|
|
57 |
# 1. Get the credentials
|
|
58 |
# Get default cloud name
|
|
59 |
try:
|
|
60 |
cloud_name = cnf.get('global', 'default_cloud')
|
|
61 |
except KeyError:
|
|
62 |
stderr.write('No default cloud set in file %s\n' % CONFIG_PATH)
|
|
63 |
raise
|
|
64 |
|
|
65 |
# Get cloud authentication URL and TOKEN
|
|
66 |
try:
|
|
67 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url')
|
|
68 |
except KeyError:
|
|
69 |
stderr.write('No authentication URL in cloud %s\n' % cloud_name)
|
|
70 |
raise
|
|
71 |
try:
|
|
72 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token')
|
|
73 |
except KeyError:
|
|
74 |
stderr.write('No token in cloud %s\n' % cloud_name)
|
|
75 |
raise
|
|
76 |
|
|
77 |
# 2. Test the credentials
|
|
78 |
# Test authentication credentials
|
|
79 |
try:
|
|
80 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN)
|
|
81 |
auth.authenticate()
|
|
82 |
except ClientError:
|
|
83 |
stderr.write('Authentication failed with url %s and token %s\n' % (
|
|
84 |
AUTH_URL, AUTH_TOKEN))
|
|
85 |
raise
|
|
86 |
|
|
87 |
# 3. Get the endpoints
|
|
88 |
# Identity, Account --> astakos
|
|
89 |
# Compute --> cyclades
|
|
90 |
# Object-store --> pithos
|
|
91 |
# Image --> plankton
|
|
92 |
try:
|
|
93 |
endpoints = dict(
|
|
94 |
astakos=AUTH_URL,
|
|
95 |
cyclades=auth.get_service_endpoints('compute')['publicURL'],
|
|
96 |
pithos=auth.get_service_endpoints('object-store')['publicURL'],
|
|
97 |
plankton=auth.get_service_endpoints('image')['publicURL']
|
|
98 |
)
|
|
99 |
user_id = auth.user_info()['id']
|
|
100 |
except ClientError:
|
|
101 |
stderr.write(
|
|
102 |
'Failed to get user id and endpoints from the identity server\n')
|
|
103 |
raise
|
|
104 |
|
|
105 |
Upload the image
|
|
106 |
----------------
|
|
107 |
|
|
108 |
We assume there is an image file at the current local directory, at
|
|
109 |
*./my_image.diskdump* and we need to upload it to a Pithos+ container. We also
|
|
110 |
assume the container does not currently exist. We will name it *images*.
|
|
111 |
|
|
112 |
This is the plan:
|
|
113 |
|
|
114 |
#. Initialize a Pithos+ client
|
|
115 |
#. Create the container *images*
|
|
116 |
#. Upload the local file to the container
|
|
117 |
|
|
118 |
.. code-block:: python
|
|
119 |
|
|
120 |
from os.path import abspath
|
|
121 |
from kamaki.clients.pithos import PithosClient
|
|
122 |
|
|
123 |
CONTAINER = 'images'
|
|
124 |
IMAGE_FILE = 'my_image.diskdump'
|
|
125 |
|
|
126 |
# 1. Initialize Pithos+ client and set account to current user
|
|
127 |
try:
|
|
128 |
pithos = PithosClient(endpoints['pithos'], AUTH_TOKEN)
|
|
129 |
except ClientError:
|
|
130 |
stderr.write('Failed to initialize a Pithos+ client\n')
|
|
131 |
raise
|
|
132 |
pithos.account = user_id
|
|
133 |
|
|
134 |
# 2. Create the container "images" and let pithos client work with that
|
|
135 |
try:
|
|
136 |
pithos.create_container(CONTAINER)
|
|
137 |
except ClientError:
|
|
138 |
stderr.write('Failed to create container "images"\n')
|
|
139 |
raise
|
|
140 |
pithos.container = CONTAINER
|
|
141 |
|
|
142 |
# 3. Upload
|
|
143 |
with open(abspath(IMAGE_FILE), 'rb') as f:
|
|
144 |
try:
|
|
145 |
pithos.upload_object(IMAGE_FILE, f)
|
|
146 |
except ClientError:
|
|
147 |
stderr.write('Failed to upload file %s to container %s\n' % (
|
|
148 |
IMAGE_FILE, CONTAINER))
|
|
149 |
raise
|
|
150 |
|
|
151 |
Register the image
|
|
152 |
------------------
|
|
153 |
|
|
154 |
Now the image is located at *pithos://<user_id>/images/my_image.diskdump*
|
|
155 |
and we want to register it to the Plankton *image* service.
|
|
156 |
|
|
157 |
.. code-block:: python
|
|
158 |
|
|
159 |
from kamaki.clients.image import ImageClient
|
|
160 |
|
|
161 |
IMAGE_NAME = 'My image'
|
|
162 |
IMAGE_LOCATION = (user_id, CONTAINER, IMAGE_FILE)
|
|
163 |
|
|
164 |
# 3.1 Initialize ImageClient
|
|
165 |
try:
|
|
166 |
plankton = ImageClient(endpoints['plankton'], AUTH_TOKEN)
|
|
167 |
except ClientError:
|
|
168 |
stderr.write('Failed to initialize the Image client\n')
|
|
169 |
raise
|
|
170 |
|
|
171 |
# 3.2 Register the image
|
|
172 |
try:
|
|
173 |
image = plankton.image_register(IMAGE_NAME, IMAGE_LOCATION)
|
|
174 |
except ClientError:
|
|
175 |
stderr.write('Failed to register image %s\n' % IMAGE_NAME)
|
|
176 |
raise
|
|
177 |
|
|
178 |
Create the virtual cluster
|
|
179 |
--------------------------
|
|
180 |
|
|
181 |
In order to build a virtual cluster, we need some information:
|
|
182 |
|
|
183 |
* an image id. We can get them from *image['id']* (the id of the image we
|
|
184 |
have just created)
|
|
185 |
* a hardware flavor. Assume we have picked the flavor with id *42*
|
|
186 |
* a set of names for our virtual servers. We will name them *cluster1*,
|
|
187 |
*cluster2*, etc.
|
|
188 |
|
|
189 |
Here is the plan:
|
|
190 |
|
|
191 |
#. Initialize a Cyclades/Compute client
|
|
192 |
#. Create a number of virtual servers. Their name should be prefixed as
|
|
193 |
"cluster"
|
|
194 |
|
|
195 |
.. code-block:: python
|
|
196 |
|
|
197 |
# 4. Create virtual cluster
|
|
198 |
from kamaki.clients.cyclades import CycladesClient
|
|
199 |
|
|
200 |
FLAVOR_ID = 42
|
|
201 |
IMAGE_ID = image['id']
|
|
202 |
CLUSTER_SIZE = 2
|
|
203 |
CLUSTER_PREFIX = 'cluster'
|
|
204 |
|
|
205 |
# 4.1 Initialize a cyclades client
|
|
206 |
try:
|
|
207 |
cyclades = CycladesClient(endpoints['cyclades'], AUTH_TOKEN)
|
|
208 |
except ClientError:
|
|
209 |
stderr.write('Failed to initialize cyclades client\n')
|
|
210 |
raise
|
|
211 |
|
|
212 |
# 4.2 Create 2 servers prefixed as "cluster"
|
|
213 |
servers = []
|
|
214 |
for i in range(1, CLUSTER_SIZE + 1):
|
|
215 |
server_name = '%s%s' % (CLUSTER_PREFIX, i)
|
|
216 |
try:
|
|
217 |
servers.append(
|
|
218 |
cyclades.create_server(server_name, FLAVOR_ID, IMAGE_ID))
|
|
219 |
except ClientError:
|
|
220 |
stderr.write('Failed while creating server %s\n' % server_name)
|
|
221 |
raise
|
|
222 |
|
|
223 |
Some improvements
|
|
224 |
-----------------
|
|
225 |
|
|
226 |
Progress Bars
|
|
227 |
'''''''''''''
|
|
228 |
|
|
229 |
Uploading an image might take a while. You can wait patiently, or you can use a
|
|
230 |
progress generator. Even better, combine a generator with the progress bar
|
|
231 |
package that comes with kamaki. The upload_object method accepts two generators
|
|
232 |
as parameters: one for calculating local file hashes and another for uploading.
|
|
233 |
|
|
234 |
.. code-block:: python
|
|
235 |
|
|
236 |
from progress.bar import Bar
|
|
237 |
|
|
238 |
def hash_gen(n):
|
|
239 |
bar = Bar('Calculating hashes...')
|
|
240 |
for i in bar.iter(range(int(n))):
|
|
241 |
yield
|
|
242 |
yield
|
|
243 |
|
|
244 |
def upload_gen(n):
|
|
245 |
bar = Bar('Uploading...')
|
|
246 |
for i in bar.iter(range(int(n))):
|
|
247 |
yield
|
|
248 |
yield
|
|
249 |
|
|
250 |
...
|
|
251 |
pithos.upload_object(
|
|
252 |
IMAGE_FILE, f, hash_cb=hash_gen, upload_cb=upload_gen)
|
|
253 |
|
|
254 |
We can create a method to produce progress bar generators, and use it in other
|
|
255 |
methods as well:
|
|
256 |
|
|
257 |
.. code-block:: python
|
|
258 |
|
|
259 |
try:
|
|
260 |
from progress.bar import Bar
|
|
261 |
|
|
262 |
def create_pb(msg):
|
|
263 |
def generator(n):
|
|
264 |
bar = Bar(msg)
|
|
265 |
for i in bar.iter(range(int(n))):
|
|
266 |
yield
|
|
267 |
yield
|
|
268 |
return generator
|
|
269 |
except ImportError:
|
|
270 |
stderr.write('Suggestion: install python-progress\n')
|
|
271 |
def create_pb(msg):
|
|
272 |
return None
|
|
273 |
|
|
274 |
...
|
|
275 |
pithos.upload_object(
|
|
276 |
IMAGE_FILE, f,
|
|
277 |
hash_cb=create_pb('Calculating hashes...'),
|
|
278 |
upload_cb=create_pb('Uploading...'))
|
|
279 |
|
|
280 |
Wait for servers to be built
|
|
281 |
''''''''''''''''''''''''''''
|
|
282 |
|
|
283 |
When a create_server method is finished successfully, a server is being built.
|
|
284 |
Usually, it takes a while for a server to be built. Fortunately, there is a wait
|
|
285 |
method in the kamaki cyclades client. It can use a progress bar too!
|
|
286 |
|
|
287 |
.. code-block:: python
|
|
288 |
|
|
289 |
# 4.2 Create 2 servers prefixed as "cluster"
|
|
290 |
...
|
|
291 |
|
|
292 |
# 4.3 Wait for servers to be built
|
|
293 |
for server in servers:
|
|
294 |
cyclades.wait_server(server['id'])
|
|
295 |
|
|
296 |
Asynchronous server creation
|
|
297 |
''''''''''''''''''''''''''''
|
|
298 |
|
|
299 |
In case of a large virtual cluster, it might be faster to spawn the servers
|
|
300 |
with asynchronous requests. Kamaki clients offer an automated mechanism for
|
|
301 |
asynchronous requests.
|
|
302 |
|
|
303 |
.. code-block:: python
|
|
304 |
|
|
305 |
# 4.2 Create 2 servers prefixed as "cluster"
|
|
306 |
create_params = [dict(
|
|
307 |
name='%s%s' % (CLUSTER_PREFIX, i),
|
|
308 |
flavor_id=FLAVOR_ID,
|
|
309 |
image_id=IMAGE_ID) for i in range(1, CLUSTER_SIZE + 1)]
|
|
310 |
try:
|
|
311 |
servers = cyclades.async_run(cyclades.create_server, create_params)
|
|
312 |
except ClientError:
|
|
313 |
stderr.write('Failed while creating servers\n')
|
|
314 |
raise
|
|
315 |
|
|
316 |
Clean up virtual cluster
|
|
317 |
''''''''''''''''''''''''
|
|
318 |
|
|
319 |
We need to clean up Cyclades from servers left from previous cluster creations.
|
|
320 |
This clean up will destroy all servers prefixed with "cluster". It will run
|
|
321 |
before the cluster creation:
|
|
322 |
|
|
323 |
.. code-block:: python
|
|
324 |
|
|
325 |
# 4.2 Clean up virtual cluster
|
|
326 |
to_delete = [server for server in cyclades.list_servers(detail=True) if (
|
|
327 |
server['name'].startswith(CLUSTER_PREFIX))]
|
|
328 |
for server in to_delete:
|
|
329 |
cyclades.delete_server(server['id'])
|
|
330 |
for server in to_delete:
|
|
331 |
cyclades.wait_server(
|
|
332 |
server['id'], server['status'],
|
|
333 |
wait_cb=create_pb('Deleting %s...' % server['name']))
|
|
334 |
|
|
335 |
# 4.3 Create 2 servers prefixed as "cluster"
|
|
336 |
...
|
|
337 |
|
|
338 |
Inject ssh keys
|
|
339 |
'''''''''''''''
|
|
340 |
|
|
341 |
When a server is created, the returned value contains a field "adminPass". This
|
|
342 |
field can be used to manually log into the server.
|
|
343 |
|
|
344 |
An easier way is to
|
|
345 |
`inject the ssh keys <../examplesdir/server.html#inject-ssh-keys-to-a-debian-server>`_
|
|
346 |
of the users who are going to use the virtual servers.
|
|
347 |
|
|
348 |
Assuming that we have collected the keys in a file named *rsa.pub*, we can
|
|
349 |
inject them into each server, with the personality argument
|
|
350 |
|
|
351 |
.. code-block:: python
|
|
352 |
|
|
353 |
SSH_KEYS = 'rsa.pub'
|
|
354 |
|
|
355 |
...
|
|
356 |
|
|
357 |
# 4.3 Create 2 servers prefixed as "cluster"
|
|
358 |
personality = None
|
|
359 |
if SSH_KEYS:
|
|
360 |
with open(SSH_KEYS) as f:
|
|
361 |
from base64 import b64encode
|
|
362 |
personality=[dict(
|
|
363 |
contents=b64encode(f.read()),
|
|
364 |
path='/root/.ssh/authorized_keys',
|
|
365 |
owner='root',
|
|
366 |
group='root',
|
|
367 |
mode='0777'), ]
|
|
368 |
create_params = [dict(
|
|
369 |
name='%s%s' % (CLUSTER_PREFIX, i),
|
|
370 |
flavor_id=FLAVOR_ID,
|
|
371 |
image_id=IMAGE_ID,
|
|
372 |
personality=personality) for i in range(1, CLUSTER_SIZE + 1)]
|
|
373 |
...
|
|
374 |
|