root / docs / developers / showcase.rst @ 5cd1aee1
History | View | Annotate | Download (26.9 kB)
1 |
|
---|---|
2 |
Showcase: create a virtual cluster from scratch |
3 |
=============================================== |
4 |
|
5 |
In this section we will create a virtual cluster, from scratch. |
6 |
|
7 |
Requirements: |
8 |
|
9 |
* A `synnefo <http://www.synnefo.org>`_ deployment with functional *Astakos*, |
10 |
*Pithos+*, *Plankton* and *Cyclades* services. |
11 |
|
12 |
* A kamaki setup, configured with a default cloud (see how to do this with |
13 |
kamaki as a |
14 |
`shell command <../examplesdir/configuration.html#multiple-clouds-in-a-single-configuration>`_ , |
15 |
or a |
16 |
`python library <config.html#set-a-new-cloud-name-it-new-cloud-and-set-it-as-default>`_. |
17 |
|
18 |
* An image stored at file *./my_image.diskdump* that can run on a predefined |
19 |
hardware flavor, identifiable by the flavor id *42* (see how to create an |
20 |
image with the |
21 |
`synnefo image creator <http://www.synnefo.org/docs/snf-image-creator/latest/index.html>`_ |
22 |
). |
23 |
|
24 |
This is the pseudocode: |
25 |
|
26 |
#. Get credentials and service endpoints, with kamaki config and the |
27 |
**Astakos** *identity* and *account* services |
28 |
#. Upload the image file to the **Pithos+** *object-store* service |
29 |
#. Register the image file to the **Plankton** *image* service |
30 |
#. Create a number of virtual servers to the **Cyclades** *compute* service |
31 |
|
32 |
|
33 |
Credentials and endpoints |
34 |
------------------------- |
35 |
|
36 |
We assume that the kamaki configuration file contains at least one cloud |
37 |
configuration, and this configuration is also set as the default cloud for |
38 |
kamaki. A cloud configuration is basically a name for the cloud, an |
39 |
authentication URL and an authentication TOKEN: the credentials we are looking |
40 |
for! |
41 |
|
42 |
This is the plan: |
43 |
|
44 |
#. Get the credentials from the kamaki configuration |
45 |
#. Initialize an AstakosClient and test the credentials |
46 |
#. Get the endpoints for all services |
47 |
|
48 |
.. code-block:: python |
49 |
|
50 |
from sys import stderr |
51 |
from kamaki.cli.config import Config, CONFIG_PATH |
52 |
from kamaki.clients.astakos import AstakosClient, ClientError |
53 |
|
54 |
# Initialize Config with default values. |
55 |
cnf = Config() |
56 |
|
57 |
# 1. Get the credentials |
58 |
# Get default cloud name |
59 |
try: |
60 |
cloud_name = cnf.get('global', 'default_cloud') |
61 |
except KeyError: |
62 |
stderr.write('No default cloud set in file %s\n' % CONFIG_PATH) |
63 |
raise |
64 |
|
65 |
# Get cloud authentication URL and TOKEN |
66 |
try: |
67 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
68 |
except KeyError: |
69 |
stderr.write('No authentication URL in cloud %s\n' % cloud_name) |
70 |
raise |
71 |
try: |
72 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
73 |
except KeyError: |
74 |
stderr.write('No token in cloud %s\n' % cloud_name) |
75 |
raise |
76 |
|
77 |
# 2. Test the credentials |
78 |
# Test authentication credentials |
79 |
try: |
80 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
81 |
auth.authenticate() |
82 |
except ClientError: |
83 |
stderr.write('Authentication failed with url %s and token %s\n' % ( |
84 |
AUTH_URL, AUTH_TOKEN)) |
85 |
raise |
86 |
|
87 |
# 3. Get the endpoints |
88 |
# Identity, Account --> astakos |
89 |
# Compute --> cyclades |
90 |
# Object-store --> pithos |
91 |
# Image --> plankton |
92 |
try: |
93 |
endpoints = dict( |
94 |
astakos=AUTH_URL, |
95 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
96 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
97 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
98 |
) |
99 |
user_id = auth.user_info()['id'] |
100 |
except ClientError: |
101 |
stderr.write( |
102 |
'Failed to get user id and endpoints from the identity server\n') |
103 |
raise |
104 |
|
105 |
Upload the image |
106 |
---------------- |
107 |
|
108 |
We assume there is an image file at the current local directory, at |
109 |
*./my_image.diskdump* and we need to upload it to a Pithos+ container. We also |
110 |
assume the container does not currently exist. We will name it *images*. |
111 |
|
112 |
This is the plan: |
113 |
|
114 |
#. Initialize a Pithos+ client |
115 |
#. Create the container *images* |
116 |
#. Upload the local file to the container |
117 |
|
118 |
.. code-block:: python |
119 |
|
120 |
from os.path import abspath |
121 |
from kamaki.clients.pithos import PithosClient |
122 |
|
123 |
CONTAINER = 'images' |
124 |
IMAGE_FILE = 'my_image.diskdump' |
125 |
|
126 |
# 1. Initialize Pithos+ client and set account to current user |
127 |
try: |
128 |
pithos = PithosClient(endpoints['pithos'], AUTH_TOKEN) |
129 |
except ClientError: |
130 |
stderr.write('Failed to initialize a Pithos+ client\n') |
131 |
raise |
132 |
pithos.account = user_id |
133 |
|
134 |
# 2. Create the container "images" and let pithos client work with that |
135 |
try: |
136 |
pithos.create_container(CONTAINER) |
137 |
except ClientError: |
138 |
stderr.write('Failed to create container "images"\n') |
139 |
raise |
140 |
pithos.container = CONTAINER |
141 |
|
142 |
# 3. Upload |
143 |
with open(abspath(IMAGE_FILE)) as f: |
144 |
try: |
145 |
pithos.upload_object(IMAGE_FILE, f) |
146 |
except ClientError: |
147 |
stderr.write('Failed to upload file %s to container %s\n' % ( |
148 |
IMAGE_FILE, CONTAINER)) |
149 |
raise |
150 |
|
151 |
Register the image |
152 |
------------------ |
153 |
|
154 |
Now the image is located at *pithos://<user_id>/images/my_image.diskdump* |
155 |
and we want to register it to the Plankton *image* service. |
156 |
|
157 |
.. code-block:: python |
158 |
|
159 |
from kamaki.clients.image import ImageClient |
160 |
|
161 |
IMAGE_NAME = 'My image' |
162 |
IMAGE_LOCATION = (user_id, CONTAINER, IMAGE_FILE) |
163 |
|
164 |
# 3.1 Initialize ImageClient |
165 |
try: |
166 |
plankton = ImageClient(endpoints['plankton'], AUTH_TOKEN) |
167 |
except ClientError: |
168 |
stderr.write('Failed to initialize the Image client\n') |
169 |
raise |
170 |
|
171 |
# 3.2 Register the image |
172 |
properties = dict(osfamily='linux', root_partition='1') |
173 |
try: |
174 |
image = plankton.register(IMAGE_NAME, IMAGE_LOCATION, properties) |
175 |
except ClientError: |
176 |
stderr.write('Failed to register image %s\n' % IMAGE_NAME) |
177 |
raise |
178 |
|
179 |
Create the virtual cluster |
180 |
-------------------------- |
181 |
|
182 |
In order to build a virtual cluster, we need some information: |
183 |
|
184 |
* an image id. We can get them from *image['id']* (the id of the image we |
185 |
have just created) |
186 |
* a hardware flavor. Assume we have picked the flavor with id *42* |
187 |
* a set of names for our virtual servers. We will name them *cluster1*, |
188 |
*cluster2*, etc. |
189 |
|
190 |
Here is the plan: |
191 |
|
192 |
#. Initialize a Cyclades/Compute client |
193 |
#. Create a number of virtual servers. Their name should be prefixed as |
194 |
"cluster" |
195 |
|
196 |
.. code-block:: python |
197 |
|
198 |
# 4. Create virtual cluster |
199 |
from kamaki.clients.cyclades import CycladesClient |
200 |
|
201 |
FLAVOR_ID = 42 |
202 |
IMAGE_ID = image['id'] |
203 |
CLUSTER_SIZE = 2 |
204 |
CLUSTER_PREFIX = 'cluster' |
205 |
|
206 |
# 4.1 Initialize a cyclades client |
207 |
try: |
208 |
cyclades = CycladesClient(endpoints['cyclades'], AUTH_TOKEN) |
209 |
except ClientError: |
210 |
stderr.write('Failed to initialize cyclades client\n') |
211 |
raise |
212 |
|
213 |
# 4.2 Create 2 servers prefixed as "cluster" |
214 |
servers = [] |
215 |
for i in range(1, CLUSTER_SIZE + 1): |
216 |
server_name = '%s%s' % (CLUSTER_PREFIX, i) |
217 |
try: |
218 |
servers.append( |
219 |
cyclades.create_server(server_name, FLAVOR_ID, IMAGE_ID)) |
220 |
except ClientError: |
221 |
stderr.write('Failed while creating server %s\n' % server_name) |
222 |
raise |
223 |
|
224 |
Some improvements |
225 |
----------------- |
226 |
|
227 |
Progress Bars |
228 |
''''''''''''' |
229 |
|
230 |
Uploading an image might take a while. You can wait patiently, or you can use a |
231 |
progress generator. Even better, combine a generator with the progress bar |
232 |
package that comes with kamaki. The upload_object method accepts two generators |
233 |
as parameters: one for calculating local file hashes and another for uploading |
234 |
|
235 |
.. code-block:: python |
236 |
|
237 |
from progress.bar import Bar |
238 |
|
239 |
def hash_gen(n): |
240 |
bar = Bar('Calculating hashes...') |
241 |
for i in bar.iter(range(int(n))): |
242 |
yield |
243 |
yield |
244 |
|
245 |
def upload_gen(n): |
246 |
bar = Bar('Uploading...') |
247 |
for i in bar.iter(range(int(n))): |
248 |
yield |
249 |
yield |
250 |
|
251 |
... |
252 |
pithos.upload_object( |
253 |
IMAGE_FILE, f, hash_cb=hash_gen, upload_cb=upload_gen) |
254 |
|
255 |
We can create a method to produce progress bar generators, and use it in other |
256 |
methods as well: |
257 |
|
258 |
.. code-block:: python |
259 |
|
260 |
try: |
261 |
from progress.bar import Bar |
262 |
|
263 |
def create_pb(msg): |
264 |
def generator(n): |
265 |
bar=Bar(msg) |
266 |
for i in bar.iter(range(int(n))): |
267 |
yield |
268 |
yield |
269 |
return generator |
270 |
except ImportError: |
271 |
stderr.write('Suggestion: install python-progress\n') |
272 |
def create_pb(msg): |
273 |
return None |
274 |
|
275 |
... |
276 |
pithos.upload_object( |
277 |
IMAGE_FILE, f, |
278 |
hash_cb=create_pb('Calculating hashes...'), |
279 |
upload_cb=create_pb('Uploading...')) |
280 |
|
281 |
Wait for servers to be built |
282 |
'''''''''''''''''''''''''''' |
283 |
|
284 |
When a create_server method is finished successfully, a server is being built. |
285 |
Usually, it takes a while for a server to be built. Fortunately, there is a wait |
286 |
method in the kamaki cyclades client. It can use a progress bar too! |
287 |
|
288 |
.. code-block:: python |
289 |
|
290 |
# 4.2 Create 2 servers prefixed as "cluster" |
291 |
... |
292 |
|
293 |
# 4.3 Wait for servers to be built |
294 |
for server in servers: |
295 |
cyclades.wait_server(server['id']) |
296 |
|
297 |
Asynchronous server creation |
298 |
'''''''''''''''''''''''''''' |
299 |
|
300 |
In case of a large virtual cluster, it might be faster to spawn the servers |
301 |
with asynchronous requests. Kamaki clients offer an automated mechanism for |
302 |
asynchronous requests. |
303 |
|
304 |
.. code-block:: python |
305 |
|
306 |
# 4.2 Create 2 servers prefixed as "cluster" |
307 |
create_params = [dict( |
308 |
name='%s%s' % (CLUSTER_PREFIX, i), |
309 |
flavor_id=FLAVOR_ID, |
310 |
image_id=IMAGE_ID) for i in range(1, CLUSTER_SIZE + 1)] |
311 |
try: |
312 |
servers = cyclades.async_run(cyclades.create_server, create_params) |
313 |
except ClientError: |
314 |
stderr.write('Failed while creating servers\n') |
315 |
raise |
316 |
|
317 |
Clean up virtual cluster |
318 |
'''''''''''''''''''''''' |
319 |
|
320 |
We need to clean up Cyclades from servers left from previous cluster creations. |
321 |
This clean up will destroy all servers prefixed with "cluster". It will run |
322 |
before the cluster creation: |
323 |
|
324 |
.. code-block:: python |
325 |
|
326 |
# 4.2 Clean up virtual cluster |
327 |
to_delete = [server for server in cyclades.list_servers(detail=True) if ( |
328 |
server['name'].startswith(CLUSTER_PREFIX))] |
329 |
for server in to_delete: |
330 |
cyclades.delete_server(server['id']) |
331 |
for server in to_delete: |
332 |
cyclades.wait_server( |
333 |
server['id'], server['status'], |
334 |
wait_cb=create_pb('Deleting %s...' % server['name'])) |
335 |
|
336 |
# 4.3 Create 2 servers prefixed as "cluster" |
337 |
... |
338 |
|
339 |
Inject ssh keys |
340 |
''''''''''''''' |
341 |
|
342 |
When a server is created, the returned value contains a field "adminPass". This |
343 |
field can be used to manually log into the server. |
344 |
|
345 |
An easier way is to |
346 |
`inject the ssh keys <../examplesdir/server.html#inject-ssh-keys-to-a-debian-server>`_ |
347 |
of the users who are going to use the virtual servers. |
348 |
|
349 |
Assuming that we have collected the keys in a file named *rsa.pub*, we can |
350 |
inject them into each server, with the personality argument |
351 |
|
352 |
.. code-block:: python |
353 |
|
354 |
SSH_KEYS = 'rsa.pub' |
355 |
|
356 |
... |
357 |
|
358 |
# 4.3 Create 2 servers prefixed as "cluster" |
359 |
personality = [] |
360 |
if SSH_KEYS: |
361 |
with open(abspath(SSH_KEYS)) as f: |
362 |
personality.append(dict( |
363 |
contents=b64encode(f.read()), |
364 |
path='/root/.ssh/authorized_keys', |
365 |
owner='root', group='root', mode=0600)) |
366 |
personality.append(dict( |
367 |
contents=b64encode('StrictHostKeyChecking no'), |
368 |
path='/root/.ssh/config', |
369 |
owner='root', group='root', mode=0600)) |
370 |
|
371 |
create_params = [dict( |
372 |
name='%s%s' % (CLUSTER_PREFIX, i), |
373 |
flavor_id=FLAVOR_ID, |
374 |
image_id=IMAGE_ID, |
375 |
personality=personality) for i in range(1, CLUSTER_SIZE + 1)] |
376 |
... |
377 |
|
378 |
Save server passwords in a file |
379 |
''''''''''''''''''''''''''''''' |
380 |
|
381 |
A last touch: define a local file to store the created server information, |
382 |
including the superuser password. |
383 |
|
384 |
.. code-block:: python |
385 |
|
386 |
# 4.4 Store passwords in file |
387 |
SERVER_INFO = 'servers.txt' |
388 |
with open(abspath(SERVER_INFO), 'w+') as f: |
389 |
from json import dump |
390 |
dump(servers, f, intend=2) |
391 |
|
392 |
# 4.5 Wait for 2 servers to be built |
393 |
... |
394 |
|
395 |
Errors and logs |
396 |
''''''''''''''' |
397 |
|
398 |
Developers may use the kamaki tools for |
399 |
`error handling <clients-api.html#error-handling>`_ and |
400 |
`logging <logging.html>`_, or implement their own methods. |
401 |
|
402 |
To demonstrate, we will modify the container creation code to warn users if the |
403 |
container already exists. We need a stream logger for the warning and a |
404 |
knowledge of the expected return values for the *create_container* method. |
405 |
|
406 |
First, let's get the logger. |
407 |
|
408 |
.. code-block:: python |
409 |
|
410 |
from kamaki.cli.logger import add_stream_logger, get_logger |
411 |
|
412 |
add_stream_logger(__name__) |
413 |
log = get_logger(__name__) |
414 |
|
415 |
The *create_container* method makes an HTTP request to the pithos server. It |
416 |
considers the request successful if the status code of the response is 201 |
417 |
(created) or 202 (accepted). These status codes mean that the container has |
418 |
been created or that it was already there anyway, respectively. |
419 |
|
420 |
We will force *create_container* to raise an error in case of a 202 response. |
421 |
This can be done by instructing *create_container* to accept only 201 as a |
422 |
successful status. |
423 |
|
424 |
.. code-block:: python |
425 |
|
426 |
try: |
427 |
pithos.create_container(CONTAINER, success=(201, )) |
428 |
except ClientError as ce: |
429 |
if ce.status in (202, ): |
430 |
log.warning('Container %s already exists' % CONTAINER) |
431 |
else: |
432 |
log.debug('Failed to create container %s' % CONTAINER) |
433 |
raise |
434 |
log.info('Container %s is ready' % CONTAINER) |
435 |
|
436 |
Create a cluster from scratch |
437 |
----------------------------- |
438 |
|
439 |
We are ready to create a module that uses kamaki to create a cluster from |
440 |
scratch. We revised the code by grouping functionality in methods and using |
441 |
logging more. We also added some command line interaction candy. |
442 |
|
443 |
.. code-block:: python |
444 |
|
445 |
#!/usr/bin/env python |
446 |
|
447 |
from sys import argv |
448 |
from os.path import abspath |
449 |
from base64 import b64encode |
450 |
from kamaki.clients import ClientError |
451 |
from kamaki.cli.logger import get_logger, add_file_logger |
452 |
from logging import DEBUG |
453 |
|
454 |
# Define loggers |
455 |
log = get_logger(__name__) |
456 |
add_file_logger('kamaki.clients', DEBUG, '%s.log' % __name__) |
457 |
add_file_logger(__name__, DEBUG, '%s.log' % __name__) |
458 |
|
459 |
# Create progress bar generator |
460 |
try: |
461 |
from progress.bar import Bar |
462 |
|
463 |
def create_pb(msg): |
464 |
def generator(n): |
465 |
bar=Bar(msg) |
466 |
for i in bar.iter(range(int(n))): |
467 |
yield |
468 |
yield |
469 |
return generator |
470 |
except ImportError: |
471 |
log.warning('Suggestion: install python-progress') |
472 |
def create_pb(msg): |
473 |
return None |
474 |
|
475 |
|
476 |
# kamaki.config |
477 |
# Identity,Account / Astakos |
478 |
|
479 |
def init_astakos(): |
480 |
from kamaki.clients.astakos import AstakosClient |
481 |
from kamaki.cli.config import Config, CONFIG_PATH |
482 |
|
483 |
print(' Get the credentials') |
484 |
cnf = Config() |
485 |
|
486 |
# Get default cloud name |
487 |
try: |
488 |
cloud_name = cnf.get('global', 'default_cloud') |
489 |
except KeyError: |
490 |
log.debug('No default cloud set in file %s' % CONFIG_PATH) |
491 |
raise |
492 |
|
493 |
try: |
494 |
AUTH_URL = cnf.get_cloud(cloud_name, 'url') |
495 |
except KeyError: |
496 |
log.debug('No authentication URL in cloud %s' % cloud_name) |
497 |
raise |
498 |
try: |
499 |
AUTH_TOKEN = cnf.get_cloud(cloud_name, 'token') |
500 |
except KeyError: |
501 |
log.debug('No token in cloud %s' % cloud_name) |
502 |
raise |
503 |
|
504 |
print(' Test the credentials') |
505 |
try: |
506 |
auth = AstakosClient(AUTH_URL, AUTH_TOKEN) |
507 |
auth.authenticate() |
508 |
except ClientError: |
509 |
log.debug('Authentication failed with url %s and token %s' % ( |
510 |
AUTH_URL, AUTH_TOKEN)) |
511 |
raise |
512 |
|
513 |
return auth, AUTH_TOKEN |
514 |
|
515 |
|
516 |
def endpoints_and_user_id(auth): |
517 |
print(' Get the endpoints') |
518 |
try: |
519 |
endpoints = dict( |
520 |
astakos=auth.get_service_endpoints('identity')['publicURL'], |
521 |
cyclades=auth.get_service_endpoints('compute')['publicURL'], |
522 |
pithos=auth.get_service_endpoints('object-store')['publicURL'], |
523 |
plankton=auth.get_service_endpoints('image')['publicURL'] |
524 |
) |
525 |
user_id = auth.user_info()['id'] |
526 |
except ClientError: |
527 |
print('Failed to get endpoints & user_id from identity server') |
528 |
raise |
529 |
return endpoints, user_id |
530 |
|
531 |
|
532 |
# Object-store / Pithos+ |
533 |
|
534 |
def init_pithos(endpoint, token, user_id): |
535 |
from kamaki.clients.pithos import PithosClient |
536 |
|
537 |
print(' Initialize Pithos+ client and set account to user uuid') |
538 |
try: |
539 |
return PithosClient(endpoint, token, user_id) |
540 |
except ClientError: |
541 |
log.debug('Failed to initialize a Pithos+ client') |
542 |
raise |
543 |
|
544 |
|
545 |
def upload_image(pithos, container, image_path): |
546 |
|
547 |
print(' Create the container "images" and use it') |
548 |
try: |
549 |
pithos.create_container(container, success=(201, )) |
550 |
except ClientError as ce: |
551 |
if ce.status in (202, ): |
552 |
log.warning('Container %s already exists' % container) |
553 |
else: |
554 |
log.debug('Failed to create container %s' % container) |
555 |
raise |
556 |
pithos.container = container |
557 |
|
558 |
print(' Upload to "images"') |
559 |
with open(abspath(image_path)) as f: |
560 |
try: |
561 |
pithos.upload_object( |
562 |
image_path, f, |
563 |
hash_cb=create_pb(' Calculating hashes...'), |
564 |
upload_cb=create_pb(' Uploading...')) |
565 |
except ClientError: |
566 |
log.debug('Failed to upload file %s to container %s' % ( |
567 |
image_path, container)) |
568 |
raise |
569 |
|
570 |
|
571 |
# Image / Plankton |
572 |
|
573 |
def init_plankton(endpoint, token): |
574 |
from kamaki.clients.image import ImageClient |
575 |
|
576 |
print(' Initialize ImageClient') |
577 |
try: |
578 |
return ImageClient(endpoint, token) |
579 |
except ClientError: |
580 |
log.debug('Failed to initialize the Image client') |
581 |
raise |
582 |
|
583 |
|
584 |
def register_image(plankton, name, user_id, container, path, properties): |
585 |
|
586 |
image_location = (user_id, container, path) |
587 |
print(' Register the image') |
588 |
try: |
589 |
return plankton.register(name, image_location, properties) |
590 |
except ClientError: |
591 |
log.debug('Failed to register image %s' % name) |
592 |
raise |
593 |
|
594 |
|
595 |
# Compute / Cyclades |
596 |
|
597 |
def init_cyclades(endpoint, token): |
598 |
from kamaki.clients.cyclades import CycladesClient |
599 |
|
600 |
print(' Initialize a cyclades client') |
601 |
try: |
602 |
return CycladesClient(endpoint, token) |
603 |
except ClientError: |
604 |
log.debug('Failed to initialize cyclades client') |
605 |
raise |
606 |
|
607 |
|
608 |
class Cluster(object): |
609 |
|
610 |
def __init__(self, cyclades, prefix, flavor_id, image_id, size): |
611 |
self.client = cyclades |
612 |
self.prefix, self.size = prefix, int(size) |
613 |
self.flavor_id, self.image_id = flavor_id, image_id |
614 |
|
615 |
def list(self): |
616 |
return [s for s in self.client.list_servers(detail=True) if ( |
617 |
s['name'].startswith(self.prefix))] |
618 |
|
619 |
def clean_up(self): |
620 |
to_delete = self.list() |
621 |
print(' There are %s servers to clean up' % len(to_delete)) |
622 |
for server in to_delete: |
623 |
self.client.delete_server(server['id']) |
624 |
for server in to_delete: |
625 |
self.client.wait_server( |
626 |
server['id'], server['status'], |
627 |
wait_cb=create_pb(' Deleting %s...' % server['name'])) |
628 |
|
629 |
def _personality(self, ssh_keys_path='', pub_keys_path=''): |
630 |
personality = [] |
631 |
if ssh_keys_path: |
632 |
with open(abspath(ssh_keys_path)) as f: |
633 |
personality.append(dict( |
634 |
contents=b64encode(f.read()), |
635 |
path='/root/.ssh/id_rsa', |
636 |
owner='root', group='root', mode=0600)) |
637 |
if pub_keys_path: |
638 |
with open(abspath(pub_keys_path)) as f: |
639 |
personality.append(dict( |
640 |
contents=b64encode(f.read()), |
641 |
path='/root/.ssh/authorized_keys', |
642 |
owner='root', group='root', mode=0600)) |
643 |
if ssh_keys_path or pub_keys_path: |
644 |
personality.append(dict( |
645 |
contents=b64encode('StrictHostKeyChecking no'), |
646 |
path='/root/.ssh/config', |
647 |
owner='root', group='root', mode=0600)) |
648 |
return personality |
649 |
|
650 |
def create(self, ssh_k_path='', pub_k_path='', server_log_path=''): |
651 |
print('\n Create %s servers prefixed as %s' % ( |
652 |
self.size, self.prefix)) |
653 |
servers = [] |
654 |
for i in range(1, self.size + 1): |
655 |
try: |
656 |
server_name = '%s%s' % (self.prefix, i) |
657 |
servers.append(self.client.create_server( |
658 |
server_name, self.flavor_id, self.image_id, |
659 |
personality=self._personality(ssh_k_path, pub_k_path))) |
660 |
except ClientError: |
661 |
log.debug('Failed while creating server %s' % server_name) |
662 |
raise |
663 |
|
664 |
if server_log_path: |
665 |
print(' Store passwords in file %s' % server_log_path) |
666 |
with open(abspath(server_log_path), 'w+') as f: |
667 |
from json import dump |
668 |
dump(servers, f, indent=2) |
669 |
|
670 |
print(' Wait for %s servers to be built' % self.size) |
671 |
for server in servers: |
672 |
new_status = self.client.wait_server( |
673 |
server['id'], |
674 |
wait_cb=create_pb(' Creating %s...' % server['name'])) |
675 |
print(' Status for server %s is %s' % ( |
676 |
server['name'], new_status or 'not changed yet')) |
677 |
return servers |
678 |
|
679 |
|
680 |
def main(opts): |
681 |
|
682 |
print('1. Credentials and Endpoints') |
683 |
auth, token = init_astakos() |
684 |
endpoints, user_id = endpoints_and_user_id(auth) |
685 |
|
686 |
print('2. Upload the image file') |
687 |
pithos = init_pithos(endpoints['pithos'], token, user_id) |
688 |
|
689 |
upload_image(pithos, opts.container, opts.imagefile) |
690 |
|
691 |
print('3. Register the image') |
692 |
plankton = init_plankton(endpoints['plankton'], token) |
693 |
|
694 |
image = register_image( |
695 |
plankton, 'my image', user_id, opts.container, opts.imagefile, |
696 |
properties=dict( |
697 |
osfamily=opts.osfamily, root_partition=opts.rootpartition)) |
698 |
|
699 |
print('4. Create virtual cluster') |
700 |
cluster = Cluster( |
701 |
cyclades=init_cyclades(endpoints['cyclades'], token), |
702 |
prefix=opts.prefix, |
703 |
flavor_id=opts.flavorid, |
704 |
image_id=image['id'], |
705 |
size=opts.clustersize) |
706 |
if opts.delete_stale: |
707 |
cluster.clean_up() |
708 |
servers = cluster.create( |
709 |
opts.sshkeypath, opts.pubkeypath, opts.serverlogpath) |
710 |
|
711 |
# Group servers |
712 |
cluster_servers = cluster.list() |
713 |
|
714 |
active = [s for s in cluster_servers if s['status'] == 'ACTIVE'] |
715 |
print('%s cluster servers are ACTIVE' % len(active)) |
716 |
|
717 |
attached = [s for s in cluster_servers if s['attachments']] |
718 |
print('%s cluster servers are attached to networks' % len(attached)) |
719 |
|
720 |
build = [s for s in cluster_servers if s['status'] == 'BUILD'] |
721 |
print('%s cluster servers are being built' % len(build)) |
722 |
|
723 |
error = [s for s in cluster_servers if s['status'] in ('ERROR', )] |
724 |
print('%s cluster servers failed (ERROR status)' % len(error)) |
725 |
|
726 |
|
727 |
if __name__ == '__main__': |
728 |
|
729 |
# Add some interaction candy |
730 |
from optparse import OptionParser |
731 |
|
732 |
kw = {} |
733 |
kw['usage'] = '%prog [options]' |
734 |
kw['description'] = '%prog deploys a compute cluster on Synnefo w. kamaki' |
735 |
|
736 |
parser = OptionParser(**kw) |
737 |
parser.disable_interspersed_args() |
738 |
parser.add_option('--prefix', |
739 |
action='store', type='string', dest='prefix', |
740 |
help='The prefix to use for naming cluster nodes', |
741 |
default='cluster') |
742 |
parser.add_option('--clustersize', |
743 |
action='store', type='string', dest='clustersize', |
744 |
help='Number of virtual cluster nodes to create ', |
745 |
default=2) |
746 |
parser.add_option('--flavor-id', |
747 |
action='store', type='int', dest='flavorid', |
748 |
metavar='FLAVOR ID', |
749 |
help='Choose flavor id for the virtual hardware ' |
750 |
'of cluster nodes', |
751 |
default=42) |
752 |
parser.add_option('--image-file', |
753 |
action='store', type='string', dest='imagefile', |
754 |
metavar='IMAGE FILE PATH', |
755 |
help='The image file to upload and register ', |
756 |
default='my_image.diskdump') |
757 |
parser.add_option('--delete-stale', |
758 |
action='store_true', dest='delete_stale', |
759 |
help='Delete stale servers from previous runs, whose ' |
760 |
'name starts with the specified prefix, see ' |
761 |
'--prefix', |
762 |
default=False) |
763 |
parser.add_option('--container', |
764 |
action='store', type='string', dest='container', |
765 |
metavar='PITHOS+ CONTAINER', |
766 |
help='The Pithos+ container to store image file', |
767 |
default='images') |
768 |
parser.add_option('--ssh-key-path', |
769 |
action='store', type='string', dest='sshkeypath', |
770 |
metavar='PATH OF SSH KEYS', |
771 |
help='The ssh keys to inject to server (e.g., id_rsa) ', |
772 |
default='') |
773 |
parser.add_option('--pub-key-path', |
774 |
action='store', type='string', dest='pubkeypath', |
775 |
metavar='PATH OF PUBLIC KEYS', |
776 |
help='The public keys to inject to server', |
777 |
default='') |
778 |
parser.add_option('--server-log-path', |
779 |
action='store', type='string', dest='serverlogpath', |
780 |
metavar='FILE TO LOG THE VIRTUAL SERVERS', |
781 |
help='Where to store information on created servers ' |
782 |
'including superuser passwords', |
783 |
default='') |
784 |
parser.add_option('--image-osfamily', |
785 |
action='store', type='string', dest='osfamily', |
786 |
metavar='OS FAMILY', |
787 |
help='linux, windows, etc.', |
788 |
default='linux') |
789 |
parser.add_option('--image-root-partition', |
790 |
action='store', type='string', dest='rootpartition', |
791 |
metavar='IMAGE ROOT PARTITION', |
792 |
help='The partition where the root home is ', |
793 |
default='1') |
794 |
|
795 |
opts, args = parser.parse_args(argv[1:]) |
796 |
|
797 |
main(opts) |
798 |
|