root / docs / developers / showcase.rst @ e6ce9ae1
History | View | Annotate | Download (27.5 kB)
1 |
|
---|---|
2 |
Showcase: create a virtual cluster from scratch |
3 |
=============================================== |
4 |
|
5 |
In this section we will create a virtual cluster, from scratch. |
6 |
|
7 |
Requirements: |
8 |
|
9 |
* A `synnefo <http://www.synnefo.org>`_ deployment with functional *Astakos*, |
10 |
*Pithos+*, *Plankton* and *Cyclades* services. |
11 |
|
12 |
* A kamaki setup, configured with a default cloud (see how to do this with |
13 |
kamaki as a |
14 |
`shell command <../examplesdir/configuration.html#multiple-clouds-in-a-single-configuration>`_ , |
15 |
or a |
16 |
`python library <config.html#set-a-new-cloud-name-it-new-cloud-and-set-it-as-default>`_. |
17 |
|
18 |
* An image stored at file *./my_image.diskdump* that can run on a predefined |
19 |
hardware flavor, identifiable by the flavor id *42* (see how to create an |
20 |
image with the |
21 |
`synnefo image creator <http://www.synnefo.org/docs/snf-image-creator/latest/index.html>`_ |
22 |
). |
23 |
|
24 |
This is the pseudocode: |
25 |
|
26 |
#. Get credentials and service endpoints, with kamaki config and the |
27 |
**Astakos** *identity* and *account* services |
28 |
#. Upload the image file to the **Pithos+** *object-store* service |
29 |
#. Register the image file to the **Plankton** *image* service |
30 |
#. Create a number of virtual servers to the **Cyclades** *compute* service |
31 |
|
32 |
|
33 |
Credentials and endpoints |
34 |
------------------------- |
35 |
|
36 |
We assume that the kamaki configuration file contains at least one cloud |
37 |
configuration, and this configuration is also set as the default cloud for |
38 |
kamaki. A cloud configuration is basically a name for the cloud, an |
39 |
authentication URL and an authentication TOKEN: the credentials we are looking |
40 |
for! |
41 |
|
42 |
This is the plan: |
43 |
|
44 |
#. Get the credentials from the kamaki configuration |
45 |
#. Initialize an AstakosClient and test the credentials |
46 |
#. Get the endpoints for all services |
47 |
|
48 |
.. code-block:: python |
49 |
|
50 |
from sys import stderr |
51 |
from kamaki.cli.config import Config, CONFIG_PATH |
52 |
from kamaki.clients import ClientError |
53 |
from kamaki.clients.astakos import AstakosClient |
54 |
|
55 |
# Initialize Config with default values. |
56 |
cnf = Config() |
57 |
|
58 |
# 1. Get the credentials |
59 |
# Get default cloud name |
60 |
cloud_name = cnf.get('global', 'default_cloud') |
61 |
assert cloud_name, 'No default_cloud in file %s\n' % CONFIG_PATH |
62 |
|
63 |
# Get cloud authentication URL and TOKEN |
64 |
try: |
65 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
66 |
except KeyError: |
67 |
stderr.write('No authentication URL in cloud %s\n' % cloud_name) |
68 |
raise |
69 |
try: |
70 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
71 |
except KeyError: |
72 |
stderr.write('No token in cloud %s\n' % cloud_name) |
73 |
raise |
74 |
|
75 |
# 2. Test the credentials |
76 |
# Test authentication credentials |
77 |
try: |
78 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
79 |
auth.authenticate() |
80 |
except ClientError: |
81 |
stderr.write('Authentication failed with url %s and token %s\n' % ( |
82 |
AUTH_URL, AUTH_TOKEN)) |
83 |
raise |
84 |
|
85 |
# 3. Get the endpoints |
86 |
# Identity, Account --> astakos |
87 |
# Compute, Network --> cyclades |
88 |
# Object-store --> pithos |
89 |
# Image --> plankton |
90 |
try: |
91 |
endpoints = dict( |
92 |
astakos=AUTH_URL, |
93 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
94 |
network=auth.get_service_endpoints('network')['publicURL'], |
95 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
96 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
97 |
) |
98 |
user_id = auth.user_info['id'] |
99 |
except ClientError: |
100 |
stderr.write( |
101 |
'Failed to get user id and endpoints from the identity server\n') |
102 |
raise |
103 |
|
104 |
# 4. Pretty print the results |
105 |
stderr.write('Endpoints for user with id %s\n' % user_id) |
106 |
for k, v in endpoints.items(): |
107 |
stderr.write('\t%s:\t%s\n' % (k, v)) |
108 |
|
109 |
The output of this script should look similar to this:: |
110 |
|
111 |
Endpoints for user with id my-us3r-1d-asdf-1234-fd324rt |
112 |
pithos: https://pithos.example.com/object-store/v1 |
113 |
plankton: https://cyclades.example.com/image/v1.0 |
114 |
network: https://cyclades.example.com/network/v2.0 |
115 |
cyclades: https://cyclades.example.com/compute/v2.0 |
116 |
astakos: https://accounts.example.com/identity/v2.0 |
117 |
|
118 |
|
119 |
|
120 |
Upload the image |
121 |
---------------- |
122 |
|
123 |
We assume there is an image file at the current local directory, at |
124 |
*./my_image.diskdump* and we need to upload it to a Pithos+ container. We also |
125 |
assume the container does not currently exist. We will name it *images*. |
126 |
|
127 |
This is the plan: |
128 |
|
129 |
#. Initialize a Pithos+ client |
130 |
#. Create the container *images* |
131 |
#. Upload the local file to the container |
132 |
|
133 |
.. code-block:: python |
134 |
|
135 |
from os.path import abspath |
136 |
from kamaki.clients.pithos import PithosClient |
137 |
|
138 |
CONTAINER = 'images' |
139 |
IMAGE_FILE = 'my_image.diskdump' |
140 |
|
141 |
|
142 |
# 1. Initialize Pithos+ client and set account to current user |
143 |
try: |
144 |
pithos = PithosClient(endpoints['pithos'], AUTH_TOKEN) |
145 |
except ClientError: |
146 |
stderr.write('Failed to initialize a Pithos+ client\n') |
147 |
raise |
148 |
pithos.account = user_id |
149 |
|
150 |
# 2. Create the container "images" and let pithos client work with that |
151 |
try: |
152 |
pithos.create_container(CONTAINER) |
153 |
except ClientError: |
154 |
stderr.write('Failed to create container %s\n' % CONTAINER) |
155 |
raise |
156 |
pithos.container = CONTAINER |
157 |
|
158 |
# 3. Upload |
159 |
with open(abspath(IMAGE_FILE)) as f: |
160 |
try: |
161 |
stderr.write('This may take a while ...') |
162 |
pithos.upload_object(IMAGE_FILE, f) |
163 |
except ClientError: |
164 |
stderr.write('Failed to upload file %s to container %s\n' % ( |
165 |
IMAGE_FILE, CONTAINER)) |
166 |
raise |
167 |
|
168 |
Register the image |
169 |
------------------ |
170 |
|
171 |
Now the image is located at *pithos://<user_id>/images/my_image.diskdump* |
172 |
and we want to register it to the Plankton *image* service. |
173 |
|
174 |
.. code-block:: python |
175 |
|
176 |
from kamaki.clients.image import ImageClient |
177 |
|
178 |
IMAGE_NAME = 'My image' |
179 |
IMAGE_LOCATION = (user_id, CONTAINER, IMAGE_FILE) |
180 |
|
181 |
# 3.1 Initialize ImageClient |
182 |
try: |
183 |
plankton = ImageClient(endpoints['plankton'], AUTH_TOKEN) |
184 |
except ClientError: |
185 |
stderr.write('Failed to initialize the Image client\n') |
186 |
raise |
187 |
|
188 |
# 3.2 Register the image |
189 |
properties = dict(osfamily='linux', root_partition='1') |
190 |
try: |
191 |
image = plankton.register(IMAGE_NAME, IMAGE_LOCATION, properties) |
192 |
except ClientError: |
193 |
stderr.write('Failed to register image %s\n' % IMAGE_NAME) |
194 |
raise |
195 |
|
196 |
Create the virtual cluster |
197 |
-------------------------- |
198 |
|
199 |
In order to build a virtual cluster, we need some information: |
200 |
|
201 |
* an image id. We can get them from *image['id']* (the id of the image we |
202 |
have just created) |
203 |
* a hardware flavor. Assume we have picked the flavor with id *42* |
204 |
* a set of names for our virtual servers. We will name them *cluster1*, |
205 |
*cluster2*, etc. |
206 |
|
207 |
Here is the plan: |
208 |
|
209 |
#. Initialize a Cyclades/Compute client |
210 |
#. Create a number of virtual servers. Their name should be prefixed as |
211 |
"cluster" |
212 |
|
213 |
.. code-block:: python |
214 |
|
215 |
# 4. Create virtual cluster |
216 |
from kamaki.clients.cyclades import CycladesClient |
217 |
|
218 |
FLAVOR_ID = 42 |
219 |
IMAGE_ID = image['id'] |
220 |
CLUSTER_SIZE = 2 |
221 |
CLUSTER_PREFIX = 'cluster' |
222 |
|
223 |
# 4.1 Initialize a cyclades client |
224 |
try: |
225 |
cyclades = CycladesClient(endpoints['cyclades'], AUTH_TOKEN) |
226 |
except ClientError: |
227 |
stderr.write('Failed to initialize cyclades client\n') |
228 |
raise |
229 |
|
230 |
# 4.2 Create 2 servers prefixed as "cluster" |
231 |
servers = [] |
232 |
for i in range(1, CLUSTER_SIZE + 1): |
233 |
server_name = '%s%s' % (CLUSTER_PREFIX, i) |
234 |
try: |
235 |
servers.append( |
236 |
cyclades.create_server(server_name, FLAVOR_ID, IMAGE_ID)) |
237 |
except ClientError: |
238 |
stderr.write('Failed while creating server %s\n' % server_name) |
239 |
raise |
240 |
|
241 |
Some improvements |
242 |
----------------- |
243 |
|
244 |
Progress Bars |
245 |
''''''''''''' |
246 |
|
247 |
Uploading an image might take a while. You can wait patiently, or you can use a |
248 |
progress generator. Even better, combine a generator with the progress bar |
249 |
package that comes with kamaki. The upload_object method accepts two generators |
250 |
as parameters: one for calculating local file hashes and another for uploading |
251 |
|
252 |
.. code-block:: python |
253 |
|
254 |
from progress.bar import Bar |
255 |
|
256 |
def hash_gen(n): |
257 |
bar = Bar('Calculating hashes...') |
258 |
for i in bar.iter(range(int(n))): |
259 |
yield |
260 |
yield |
261 |
|
262 |
def upload_gen(n): |
263 |
bar = Bar('Uploading...') |
264 |
for i in bar.iter(range(int(n))): |
265 |
yield |
266 |
yield |
267 |
|
268 |
... |
269 |
pithos.upload_object( |
270 |
IMAGE_FILE, f, hash_cb=hash_gen, upload_cb=upload_gen) |
271 |
|
272 |
We can create a method to produce progress bar generators, and use it in other |
273 |
methods as well: |
274 |
|
275 |
.. code-block:: python |
276 |
|
277 |
try: |
278 |
from progress.bar import Bar |
279 |
|
280 |
def create_pb(msg): |
281 |
def generator(n): |
282 |
bar = Bar(msg) |
283 |
for i in bar.iter(range(int(n))): |
284 |
yield |
285 |
yield |
286 |
return generator |
287 |
except ImportError: |
288 |
stderr.write('Suggestion: install python-progress\n') |
289 |
def create_pb(msg): |
290 |
return None |
291 |
|
292 |
... |
293 |
pithos.upload_object( |
294 |
IMAGE_FILE, f, |
295 |
hash_cb=create_pb('Calculating hashes...'), |
296 |
upload_cb=create_pb('Uploading...')) |
297 |
|
298 |
Wait for servers to be built |
300 |
'''''''''''''''''''''''''''' |
300 |
|
301 |
When a create_server method is finished successfully, a server is being built. |
302 |
Usually, it takes a while for a server to be built. Fortunately, there is a wait |
303 |
method in the kamaki cyclades client. It can use a progress bar too! |
304 |
|
305 |
.. code-block:: python |
306 |
|
307 |
# 4.2 Create 2 servers prefixed as "cluster" |
308 |
... |
309 |
|
310 |
# 4.3 Wait for servers to be built |
311 |
for server in servers: |
312 |
cyclades.wait_server(server['id']) |
313 |
|
314 |
Asynchronous server creation |
315 |
'''''''''''''''''''''''''''' |
316 |
|
317 |
In case of a large virtual cluster, it might be faster to spawn the servers |
318 |
with asynchronous requests. Kamaki clients offer an automated mechanism for |
319 |
asynchronous requests. |
320 |
|
321 |
.. code-block:: python |
322 |
|
323 |
# 4.2 Create 2 servers prefixed as "cluster" |
324 |
create_params = [dict( |
325 |
name='%s%s' % (CLUSTER_PREFIX, i), |
326 |
flavor_id=FLAVOR_ID, |
327 |
image_id=IMAGE_ID) for i in range(1, CLUSTER_SIZE + 1)] |
328 |
try: |
329 |
servers = cyclades.async_run(cyclades.create_server, create_params) |
330 |
except ClientError: |
331 |
stderr.write('Failed while creating servers\n') |
332 |
raise |
333 |
|
334 |
Clean up virtual cluster |
335 |
'''''''''''''''''''''''' |
336 |
|
337 |
We need to clean up Cyclades from servers left from previous cluster creations. |
338 |
This clean up will destroy all servers prefixed with "cluster". It will run |
339 |
before the cluster creation: |
340 |
|
341 |
.. code-block:: python |
342 |
|
343 |
# 4.2 Clean up virtual cluster |
344 |
to_delete = [server for server in cyclades.list_servers(detail=True) if ( |
345 |
server['name'].startswith(CLUSTER_PREFIX))] |
346 |
for server in to_delete: |
347 |
cyclades.delete_server(server['id']) |
348 |
for server in to_delete: |
349 |
cyclades.wait_server( |
350 |
server['id'], server['status'], |
351 |
wait_cb=create_pb('Deleting %s...' % server['name'])) |
352 |
|
353 |
# 4.3 Create 2 servers prefixed as "cluster" |
354 |
... |
355 |
|
356 |
Inject ssh keys |
357 |
''''''''''''''' |
358 |
|
359 |
When a server is created, the returned value contains a field "adminPass". This |
360 |
field can be used to manually log into the server. |
361 |
|
362 |
An easier way is to |
363 |
`inject the ssh keys <../examplesdir/server.html#inject-ssh-keys-to-a-debian-server>`_ |
364 |
of the users who are going to use the virtual servers. |
365 |
|
366 |
Assuming that we have collected the keys in a file named *rsa.pub*, we can |
367 |
inject them into each server, with the personality argument |
368 |
|
369 |
.. code-block:: python |
370 |
|
371 |
SSH_KEYS = 'rsa.pub' |
372 |
|
373 |
... |
374 |
|
375 |
# 4.3 Create 2 servers prefixed as "cluster" |
376 |
personality = [] |
377 |
if SSH_KEYS: |
378 |
with open(abspath(SSH_KEYS)) as f: |
379 |
personality.append(dict( |
380 |
contents=b64encode(f.read()), |
381 |
path='/root/.ssh/authorized_keys', |
382 |
owner='root', group='root', mode=0600)) |
383 |
personality.append(dict( |
384 |
contents=b64encode('StrictHostKeyChecking no'), |
385 |
path='/root/.ssh/config', |
386 |
owner='root', group='root', mode=0600)) |
387 |
|
388 |
create_params = [dict( |
389 |
name='%s%s' % (CLUSTER_PREFIX, i), |
390 |
flavor_id=FLAVOR_ID, |
391 |
image_id=IMAGE_ID, |
392 |
personality=personality) for i in range(1, CLUSTER_SIZE + 1)] |
393 |
... |
394 |
|
395 |
Save server passwords in a file |
396 |
''''''''''''''''''''''''''''''' |
397 |
|
398 |
A last touch: define a local file to store the created server information, |
399 |
including the superuser password. |
400 |
|
401 |
.. code-block:: python |
402 |
|
403 |
# 4.4 Store passwords in file |
404 |
SERVER_INFO = 'servers.txt' |
405 |
with open(abspath(SERVER_INFO), 'w+') as f: |
406 |
from json import dump |
407 |
dump(servers, f, indent=2) |
408 |
|
409 |
# 4.5 Wait for 2 servers to be built |
410 |
... |
411 |
|
412 |
Errors and logs |
413 |
''''''''''''''' |
414 |
|
415 |
Developers may use the kamaki tools for |
416 |
`error handling <clients-api.html#error-handling>`_ and |
417 |
`logging <logging.html>`_, or implement their own methods. |
418 |
|
419 |
To demonstrate, we will modify the container creation code to warn users if the |
420 |
container already exists. We need a stream logger for the warning and a |
421 |
knowledge of the expected return values for the *create_container* method. |
422 |
|
423 |
First, let's get the logger. |
424 |
|
425 |
.. code-block:: python |
426 |
|
427 |
from kamaki.cli.logger import add_stream_logger, get_logger |
428 |
|
429 |
add_stream_logger(__name__) |
430 |
log = get_logger(__name__) |
431 |
|
432 |
The *create_container* method makes an HTTP request to the pithos server. It |
433 |
considers the request successful if the status code of the response is 201 |
434 |
(created) or 202 (accepted). These status codes mean that the container has |
435 |
been created or that it was already there anyway, respectively. |
436 |
|
437 |
We will force *create_container* to raise an error in case of a 202 response. |
438 |
This can be done by instructing *create_container* to accept only 201 as a |
439 |
successful status. |
440 |
|
441 |
.. code-block:: python |
442 |
|
443 |
try: |
444 |
pithos.create_container(CONTAINER, success=(201, )) |
445 |
except ClientError as ce: |
446 |
if ce.status in (202, ): |
447 |
log.warning('Container %s already exists' % CONTAINER) |
448 |
else: |
449 |
log.debug('Failed to create container %s' % CONTAINER) |
450 |
raise |
451 |
log.info('Container %s is ready' % CONTAINER) |
452 |
|
453 |
Create a cluster from scratch |
454 |
----------------------------- |
455 |
|
456 |
We are ready to create a module that uses kamaki to create a cluster from |
457 |
scratch. We revised the code by grouping functionality in methods and using |
458 |
logging more. We also added some command line interaction candy. |
459 |
|
460 |
.. code-block:: python |
461 |
|
462 |
from sys import argv |
463 |
from os.path import abspath |
464 |
from base64 import b64encode |
465 |
from kamaki.clients import ClientError |
466 |
from kamaki.cli.logger import get_logger, add_file_logger |
467 |
from progress.bar import Bar |
468 |
from logging import DEBUG |
469 |
|
470 |
# Define loggers |
471 |
log = get_logger(__name__) |
472 |
add_file_logger('kamaki.clients', DEBUG, '%s.log' % __name__) |
473 |
add_file_logger(__name__, DEBUG, '%s.log' % __name__) |
474 |
|
475 |
# Create progress bar generator |
476 |
|
477 |
|
478 |
def create_pb(msg): |
479 |
def generator(n): |
480 |
bar = Bar(msg) |
481 |
for i in bar.iter(range(int(n))): |
482 |
yield |
483 |
yield |
484 |
return generator |
485 |
|
486 |
|
487 |
# kamaki.config |
488 |
# Identity,Account / Astakos |
489 |
|
490 |
def init_astakos(): |
491 |
from kamaki.clients.astakos import AstakosClient |
492 |
from kamaki.cli.config import Config, CONFIG_PATH |
493 |
|
494 |
print(' Get the credentials') |
495 |
cnf = Config() |
496 |
|
497 |
# Get default cloud name |
498 |
try: |
499 |
cloud_name = cnf.get('global', 'default_cloud') |
500 |
except KeyError: |
501 |
log.debug('No default cloud set in file %s' % CONFIG_PATH) |
502 |
raise |
503 |
|
504 |
try: |
505 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
506 |
except KeyError: |
507 |
log.debug('No authentication URL in cloud %s' % cloud_name) |
508 |
raise |
509 |
try: |
510 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
511 |
except KeyError: |
512 |
log.debug('No token in cloud %s' % cloud_name) |
513 |
raise |
514 |
|
515 |
print(' Test the credentials') |
516 |
try: |
517 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
518 |
auth.authenticate() |
519 |
except ClientError: |
520 |
log.debug('Authentication failed with url %s and token %s' % ( |
521 |
AUTH_URL, AUTH_TOKEN)) |
522 |
raise |
523 |
|
524 |
return auth, AUTH_TOKEN |
525 |
|
526 |
|
527 |
def endpoints_and_user_id(auth): |
528 |
print(' Get the endpoints') |
529 |
try: |
530 |
endpoints = dict( |
531 |
astakos=auth.get_service_endpoints('identity')['publicURL'], |
532 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
533 |
network=auth.get_service_endpoints('network')['publicURL'], |
534 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
535 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
536 |
) |
537 |
user_id = auth.user_info['id'] |
538 |
except ClientError: |
539 |
print('Failed to get endpoints & user_id from identity server') |
540 |
raise |
541 |
return endpoints, user_id |
542 |
|
543 |
|
544 |
# Object-store / Pithos+ |
545 |
|
546 |
def init_pithos(endpoint, token, user_id): |
547 |
from kamaki.clients.pithos import PithosClient |
548 |
|
549 |
print(' Initialize Pithos+ client and set account to user uuid') |
550 |
try: |
551 |
return PithosClient(endpoint, token, user_id) |
552 |
except ClientError: |
553 |
log.debug('Failed to initialize a Pithos+ client') |
554 |
raise |
555 |
|
556 |
|
557 |
def upload_image(pithos, container, image_path): |
558 |
|
559 |
print(' Create the container "images" and use it') |
560 |
try: |
561 |
pithos.create_container(container, success=(201, )) |
562 |
except ClientError as ce: |
563 |
if ce.status in (202, ): |
564 |
log.warning('Container %s already exists' % container) |
565 |
else: |
566 |
log.debug('Failed to create container %s' % container) |
567 |
raise |
568 |
pithos.container = container |
569 |
|
570 |
print(' Upload to "images"') |
571 |
with open(abspath(image_path)) as f: |
572 |
try: |
573 |
pithos.upload_object( |
574 |
image_path, f, |
575 |
hash_cb=create_pb(' Calculating hashes...'), |
576 |
upload_cb=create_pb(' Uploading...')) |
577 |
except ClientError: |
578 |
log.debug('Failed to upload file %s to container %s' % ( |
579 |
image_path, container)) |
580 |
raise |
581 |
|
582 |
|
583 |
# Image / Plankton |
584 |
|
585 |
def init_plankton(endpoint, token): |
586 |
from kamaki.clients.image import ImageClient |
587 |
|
588 |
print(' Initialize ImageClient') |
589 |
try: |
590 |
return ImageClient(endpoint, token) |
591 |
except ClientError: |
592 |
log.debug('Failed to initialize the Image client') |
593 |
raise |
594 |
|
595 |
|
596 |
def register_image(plankton, name, user_id, container, path, properties): |
597 |
|
598 |
image_location = (user_id, container, path) |
599 |
print(' Register the image') |
600 |
try: |
601 |
return plankton.register(name, image_location, properties) |
602 |
except ClientError: |
603 |
log.debug('Failed to register image %s' % name) |
604 |
raise |
605 |
|
606 |
|
607 |
# Compute / Cyclades |
608 |
|
609 |
def init_cyclades(endpoint, token): |
610 |
from kamaki.clients.cyclades import CycladesClient |
611 |
|
612 |
print(' Initialize a cyclades client') |
613 |
try: |
614 |
return CycladesClient(endpoint, token) |
615 |
except ClientError: |
616 |
log.debug('Failed to initialize cyclades client') |
617 |
raise |
618 |
|
619 |
|
620 |
class Cluster(object): |
621 |
|
622 |
def __init__(self, cyclades, prefix, flavor_id, image_id, size): |
623 |
self.client = cyclades |
624 |
self.prefix, self.size = prefix, int(size) |
625 |
self.flavor_id, self.image_id = flavor_id, image_id |
626 |
|
627 |
def list(self): |
628 |
return [s for s in self.client.list_servers(detail=True) if ( |
629 |
s['name'].startswith(self.prefix))] |
630 |
|
631 |
def clean_up(self): |
632 |
to_delete = self.list() |
633 |
print(' There are %s servers to clean up' % len(to_delete)) |
634 |
for server in to_delete: |
635 |
self.client.delete_server(server['id']) |
636 |
for server in to_delete: |
637 |
self.client.wait_server( |
638 |
server['id'], server['status'], |
639 |
wait_cb=create_pb(' Deleting %s...' % server['name'])) |
640 |
|
641 |
def _personality(self, ssh_keys_path='', pub_keys_path=''): |
642 |
personality = [] |
643 |
if ssh_keys_path: |
644 |
with open(abspath(ssh_keys_path)) as f: |
645 |
personality.append(dict( |
646 |
contents=b64encode(f.read()), |
647 |
path='/root/.ssh/id_rsa', |
648 |
owner='root', group='root', mode=0600)) |
649 |
if pub_keys_path: |
650 |
with open(abspath(pub_keys_path)) as f: |
651 |
personality.append(dict( |
652 |
contents=b64encode(f.read()), |
653 |
path='/root/.ssh/authorized_keys', |
654 |
owner='root', group='root', mode=0600)) |
655 |
if ssh_keys_path or pub_keys_path: |
656 |
personality.append(dict( |
657 |
contents=b64encode('StrictHostKeyChecking no'), |
658 |
path='/root/.ssh/config', |
659 |
owner='root', group='root', mode=0600)) |
660 |
return personality |
661 |
|
662 |
def create(self, ssh_k_path='', pub_k_path='', server_log_path=''): |
663 |
print('\n Create %s servers prefixed as %s' % ( |
664 |
self.size, self.prefix)) |
665 |
servers = [] |
666 |
for i in range(1, self.size + 1): |
667 |
try: |
668 |
server_name = '%s%s' % (self.prefix, i) |
669 |
|
670 |
servers.append(self.client.create_server( |
671 |
server_name, self.flavor_id, self.image_id, |
672 |
networks=[], |
673 |
personality=self._personality(ssh_k_path, pub_k_path))) |
674 |
except ClientError: |
675 |
log.debug('Failed while creating server %s' % server_name) |
676 |
raise |
677 |
|
678 |
if server_log_path: |
679 |
print(' Store passwords in file %s' % server_log_path) |
680 |
with open(abspath(server_log_path), 'w+') as f: |
681 |
from json import dump |
682 |
dump(servers, f, indent=2) |
683 |
|
684 |
print(' Wait for %s servers to be built' % self.size) |
685 |
for server in servers: |
686 |
new_status = self.client.wait_server( |
687 |
server['id'], |
688 |
wait_cb=create_pb(' Creating %s...' % server['name'])) |
689 |
print(' Status for server %s is %s' % ( |
690 |
server['name'], new_status or 'not changed yet')) |
691 |
return servers |
692 |
|
693 |
|
694 |
def main(opts): |
695 |
|
696 |
print('1. Credentials and Endpoints') |
697 |
auth, token = init_astakos() |
698 |
endpoints, user_id = endpoints_and_user_id(auth) |
699 |
|
700 |
print('2. Upload the image file') |
701 |
pithos = init_pithos(endpoints['pithos'], token, user_id) |
702 |
|
703 |
upload_image(pithos, opts.container, opts.imagefile) |
704 |
|
705 |
print('3. Register the image') |
706 |
plankton = init_plankton(endpoints['plankton'], token) |
707 |
|
708 |
image = register_image( |
709 |
plankton, 'my image', user_id, opts.container, opts.imagefile, |
710 |
properties=dict( |
711 |
osfamily=opts.osfamily, root_partition=opts.rootpartition)) |
712 |
|
713 |
print('4. Create virtual cluster') |
714 |
cluster = Cluster( |
715 |
cyclades=init_cyclades(endpoints['cyclades'], token), |
716 |
prefix=opts.prefix, |
717 |
flavor_id=opts.flavorid, |
718 |
image_id=image['id'], |
719 |
size=opts.clustersize) |
720 |
if opts.delete_stale: |
721 |
cluster.clean_up() |
722 |
servers = cluster.create( |
723 |
opts.sshkeypath, opts.pubkeypath, opts.serverlogpath) |
724 |
|
725 |
# Group servers |
726 |
cluster_servers = cluster.list() |
727 |
|
728 |
active = [s for s in cluster_servers if s['status'] == 'ACTIVE'] |
729 |
print('%s cluster servers are ACTIVE' % len(active)) |
730 |
|
731 |
attached = [s for s in cluster_servers if s['attachments']] |
732 |
print('%s cluster servers are attached to networks' % len(attached)) |
733 |
|
734 |
build = [s for s in cluster_servers if s['status'] == 'BUILD'] |
735 |
print('%s cluster servers are being built' % len(build)) |
736 |
|
737 |
error = [s for s in cluster_servers if s['status'] == 'ERROR'] |
738 |
print('%s cluster servers failed (ERROR status)' % len(error)) |
739 |
|
740 |
|
741 |
if __name__ == '__main__': |
742 |
|
743 |
# Add some interaction candy |
744 |
from optparse import OptionParser |
745 |
|
746 |
kw = {} |
747 |
kw['usage'] = '%prog [options]' |
748 |
kw['description'] = '%prog deploys a compute cluster on Synnefo w. kamaki' |
749 |
|
750 |
parser = OptionParser(**kw) |
751 |
parser.disable_interspersed_args() |
752 |
parser.add_option('--prefix', |
753 |
action='store', type='string', dest='prefix', |
754 |
help='The prefix to use for naming cluster nodes', |
755 |
default='node') |
756 |
parser.add_option('--clustersize', |
757 |
action='store', type='string', dest='clustersize', |
758 |
help='Number of virtual cluster nodes to create ', |
759 |
default=2) |
760 |
parser.add_option('--flavor-id', |
761 |
action='store', type='int', dest='flavorid', |
762 |
metavar='FLAVOR ID', |
763 |
help='Choose flavor id for the virtual hardware ' |
764 |
'of cluster nodes', |
765 |
default=42) |
766 |
parser.add_option('--image-file', |
767 |
action='store', type='string', dest='imagefile', |
768 |
metavar='IMAGE FILE PATH', |
769 |
help='The image file to upload and register ', |
770 |
default='my_image.diskdump') |
771 |
parser.add_option('--delete-stale', |
772 |
action='store_true', dest='delete_stale', |
773 |
help='Delete stale servers from previous runs, whose ' |
774 |
'name starts with the specified prefix, see ' |
775 |
'--prefix', |
776 |
default=False) |
777 |
parser.add_option('--container', |
778 |
action='store', type='string', dest='container', |
779 |
metavar='PITHOS+ CONTAINER', |
780 |
help='The Pithos+ container to store image file', |
781 |
default='images') |
782 |
parser.add_option('--ssh-key-path', |
783 |
action='store', type='string', dest='sshkeypath', |
784 |
metavar='PATH OF SSH KEYS', |
785 |
help='The ssh keys to inject to server (e.g., id_rsa) ', |
786 |
default='') |
787 |
parser.add_option('--pub-key-path', |
788 |
action='store', type='string', dest='pubkeypath', |
789 |
metavar='PATH OF PUBLIC KEYS', |
790 |
help='The public keys to inject to server', |
791 |
default='') |
792 |
parser.add_option('--server-log-path', |
793 |
action='store', type='string', dest='serverlogpath', |
794 |
metavar='FILE TO LOG THE VIRTUAL SERVERS', |
795 |
help='Where to store information on created servers ' |
796 |
'including superuser passwords', |
797 |
default='') |
798 |
parser.add_option('--image-osfamily', |
799 |
action='store', type='string', dest='osfamily', |
800 |
metavar='OS FAMILY', |
801 |
help='linux, windows, etc.', |
802 |
default='linux') |
803 |
parser.add_option('--image-root-partition', |
804 |
action='store', type='string', dest='rootpartition', |
805 |
metavar='IMAGE ROOT PARTITION', |
806 |
help='The partition where the root home is ', |
807 |
default='1') |
808 |
|
809 |
opts, args = parser.parse_args(argv[1:]) |
810 |
|
811 |
main(opts) |