Statistics
| Branch: | Tag: | Revision:

root / daemons / ganeti-noded @ 4dd42c9d

History | View | Annotate | Download (21.5 kB)

1
#!/usr/bin/python
2
#
3

    
4
# Copyright (C) 2006, 2007 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Ganeti node daemon"""
23

    
24
# functions in this module need to have a given name structure, so:
25
# pylint: disable-msg=C0103
26

    
27
import os
28
import sys
29
import traceback
30
import SocketServer
31
import errno
32
import logging
33
import signal
34

    
35
from optparse import OptionParser
36

    
37
from ganeti import backend
38
from ganeti import constants
39
from ganeti import objects
40
from ganeti import errors
41
from ganeti import jstore
42
from ganeti import daemon
43
from ganeti import http
44
from ganeti import utils
45

    
46
import ganeti.http.server
47

    
48

    
49
# Global job-queue lock; set in main() via jstore.InitAndVerifyQueue and
# acquired/released by the _RequireJobQueueLock decorator.
queue_lock = None
50

    
51

    
52
def _RequireJobQueueLock(fn):
  """Decorator serializing job queue manipulating functions.

  Wraps *fn* so that the global C{queue_lock} is held in exclusive mode
  for the duration of the call and is always released afterwards, even
  if *fn* raises.

  """
  lock_timeout = 10

  def _locked_call(*args, **kwargs):
    # Several children may run at the same time, so take the lock
    # exclusively, blocking for at most lock_timeout seconds.
    queue_lock.Exclusive(blocking=True, timeout=lock_timeout)
    try:
      return fn(*args, **kwargs)
    finally:
      queue_lock.Unlock()

  return _locked_call
68

    
69

    
70
class NodeHttpServer(http.server.HttpServer):
  """The server implementation.

  This class holds all methods exposed over the RPC interface.  Each
  incoming request is dispatched by L{HandleRequest} to the
  C{perspective_*} static method named after the request path.

  """
  def __init__(self, *args, **kwargs):
    http.server.HttpServer.__init__(self, *args, **kwargs)
    # Remember the daemon's pid so that HandleRequest can send SIGTERM
    # to it when a QuitGanetiException is raised.
    self.noded_pid = os.getpid()

  def HandleRequest(self, req):
    """Handle a request.

    Only HTTP PUT is accepted.  The request path (minus the leading
    slash) selects the C{perspective_*} method to run, which receives
    the request body as its C{params} argument.

    @return: a (success, payload) 2-tuple

    """
    if req.request_method.upper() != http.HTTP_PUT:
      raise http.HttpBadRequest()

    path = req.request_path
    if path.startswith("/"):
      path = path[1:]

    method = getattr(self, "perspective_%s" % path, None)
    if method is None:
      raise http.HttpNotFound()

    try:
      rvalue = method(req.request_body)
      if not isinstance(rvalue, tuple):
        return (False, "Invalid result from backend function: expected"
                " tuple, got %s" % type(rvalue))
      elif len(rvalue) != 2:
        return (False, "Invalid result from backend function: expected"
                " 2-element tuple, got tuple of length %d" % len(rvalue))
      else:
        return rvalue

    except backend.RPCFail, err:
      # our custom failure exception; str(err) works fine if the
      # exception was constructed with a single argument, and in
      # this case, err.message == err.args[0] == str(err)
      return (False, str(err))
    except errors.QuitGanetiException, err:
      # Tell parent to quit
      logging.info("Shutting down the node daemon, arguments: %s",
                   str(err.args))
      os.kill(self.noded_pid, signal.SIGTERM)
      # And return the error's arguments, which must be already in
      # correct tuple format
      return err.args
    except Exception, err:
      # catch-all boundary: log the traceback and report the failure
      # back to the caller instead of letting the request die
      logging.exception("Error in RPC call")
      return False, "Error while executing backend function: %s" % str(err)

  # the new block devices  --------------------------

  @staticmethod
  def perspective_blockdev_create(params):
    """Create a block device.

    """
    bdev_s, size, owner, on_primary, info = params
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevCreate(bdev, size, owner, on_primary, info)

  @staticmethod
  def perspective_blockdev_remove(params):
    """Remove a block device.

    """
    bdev_s = params[0]
    bdev = objects.Disk.FromDict(bdev_s)
    return backend.BlockdevRemove(bdev)

  @staticmethod
  def perspective_blockdev_rename(params):
    """Rename a block device.

    """
    devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params]
    return backend.BlockdevRename(devlist)

  @staticmethod
  def perspective_blockdev_assemble(params):
    """Assemble a block device.

    """
    bdev_s, owner, on_primary = params
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevAssemble(bdev, owner, on_primary)

  @staticmethod
  def perspective_blockdev_shutdown(params):
    """Shutdown a block device.

    """
    bdev_s = params[0]
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevShutdown(bdev)

  @staticmethod
  def perspective_blockdev_addchildren(params):
    """Add a child to a mirror device.

    Note: this is only valid for mirror devices. It's the caller's duty
    to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevAddchildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_removechildren(params):
    """Remove a child from a mirror device.

    This is only valid for mirror devices, of course. It's the callers
    duty to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevRemovechildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_getmirrorstatus(params):
    """Return the mirror status for a list of disks.

    """
    disks = [objects.Disk.FromDict(dsk_s)
            for dsk_s in params]
    return backend.BlockdevGetmirrorstatus(disks)

  @staticmethod
  def perspective_blockdev_find(params):
    """Expose the FindBlockDevice functionality for a disk.

    This will try to find but not activate a disk.

    """
    disk = objects.Disk.FromDict(params[0])
    return backend.BlockdevFind(disk)

  @staticmethod
  def perspective_blockdev_snapshot(params):
    """Create a snapshot device.

    Note that this is only valid for LVM disks, if we get passed
    something else we raise an exception. The snapshot device can be
    removed by calling the generic block device remove call.

    """
    cfbd = objects.Disk.FromDict(params[0])
    return backend.BlockdevSnapshot(cfbd)

  @staticmethod
  def perspective_blockdev_grow(params):
    """Grow a stack of devices.

    """
    cfbd = objects.Disk.FromDict(params[0])
    amount = params[1]
    return backend.BlockdevGrow(cfbd, amount)

  @staticmethod
  def perspective_blockdev_close(params):
    """Closes the given block devices.

    """
    disks = [objects.Disk.FromDict(cf) for cf in params[1]]
    return backend.BlockdevClose(params[0], disks)

  # blockdev/drbd specific methods ----------

  @staticmethod
  def perspective_drbd_disconnect_net(params):
    """Disconnects the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdDisconnectNet(nodes_ip, disks)

  @staticmethod
  def perspective_drbd_attach_net(params):
    """Attaches the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks, instance_name, multimaster = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdAttachNet(nodes_ip, disks,
                                     instance_name, multimaster)

  @staticmethod
  def perspective_drbd_wait_sync(params):
    """Wait until DRBD disks are synched.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdWaitSync(nodes_ip, disks)

  # export/import  --------------------------

  @staticmethod
  def perspective_snapshot_export(params):
    """Export a given snapshot.

    """
    disk = objects.Disk.FromDict(params[0])
    dest_node = params[1]
    instance = objects.Instance.FromDict(params[2])
    cluster_name = params[3]
    dev_idx = params[4]
    return backend.ExportSnapshot(disk, dest_node, instance,
                                  cluster_name, dev_idx)

  @staticmethod
  def perspective_finalize_export(params):
    """Expose the finalize export functionality.

    """
    instance = objects.Instance.FromDict(params[0])
    snap_disks = [objects.Disk.FromDict(str_data)
                  for str_data in params[1]]
    return backend.FinalizeExport(instance, snap_disks)

  @staticmethod
  def perspective_export_info(params):
    """Query information about an existing export on this node.

    The given path may not contain an export, in which case we return
    None.

    """
    path = params[0]
    return backend.ExportInfo(path)

  @staticmethod
  def perspective_export_list(params):
    """List the available exports on this node.

    Note that as opposed to export_info, which may query data about an
    export in any path, this only queries the standard Ganeti path
    (constants.EXPORT_DIR).

    """
    return backend.ListExports()

  @staticmethod
  def perspective_export_remove(params):
    """Remove an export.

    """
    export = params[0]
    return backend.RemoveExport(export)

  # volume  --------------------------

  @staticmethod
  def perspective_volume_list(params):
    """Query the list of logical volumes in a given volume group.

    """
    vgname = params[0]
    return True, backend.GetVolumeList(vgname)

  @staticmethod
  def perspective_vg_list(params):
    """Query the list of volume groups.

    """
    return backend.ListVolumeGroups()

  # bridge  --------------------------

  @staticmethod
  def perspective_bridges_exist(params):
    """Check if all bridges given exist on this node.

    """
    bridges_list = params[0]
    return backend.BridgesExist(bridges_list)

  # instance  --------------------------

  @staticmethod
  def perspective_instance_os_add(params):
    """Install an OS on a given instance.

    """
    inst_s = params[0]
    inst = objects.Instance.FromDict(inst_s)
    reinstall = params[1]
    return backend.InstanceOsAdd(inst, reinstall)

  @staticmethod
  def perspective_instance_run_rename(params):
    """Runs the OS rename script for an instance.

    """
    inst_s, old_name = params
    inst = objects.Instance.FromDict(inst_s)
    return backend.RunRenameInstance(inst, old_name)

  @staticmethod
  def perspective_instance_os_import(params):
    """Run the import function of an OS onto a given instance.

    """
    inst_s, src_node, src_images, cluster_name = params
    inst = objects.Instance.FromDict(inst_s)
    return backend.ImportOSIntoInstance(inst, src_node, src_images,
                                        cluster_name)

  @staticmethod
  def perspective_instance_shutdown(params):
    """Shutdown an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.InstanceShutdown(instance)

  @staticmethod
  def perspective_instance_start(params):
    """Start an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.StartInstance(instance)

  @staticmethod
  def perspective_migration_info(params):
    """Gather information about an instance to be migrated.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.MigrationInfo(instance)

  @staticmethod
  def perspective_accept_instance(params):
    """Prepare the node to accept an instance.

    """
    instance, info, target = params
    instance = objects.Instance.FromDict(instance)
    return backend.AcceptInstance(instance, info, target)

  @staticmethod
  def perspective_finalize_migration(params):
    """Finalize the instance migration.

    """
    instance, info, success = params
    instance = objects.Instance.FromDict(instance)
    return backend.FinalizeMigration(instance, info, success)

  @staticmethod
  def perspective_instance_migrate(params):
    """Migrates an instance.

    """
    instance, target, live = params
    instance = objects.Instance.FromDict(instance)
    return backend.MigrateInstance(instance, target, live)

  @staticmethod
  def perspective_instance_reboot(params):
    """Reboot an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    reboot_type = params[1]
    return backend.InstanceReboot(instance, reboot_type)

  @staticmethod
  def perspective_instance_info(params):
    """Query instance information.

    """
    return backend.GetInstanceInfo(params[0], params[1])

  @staticmethod
  def perspective_instance_migratable(params):
    """Query whether the specified instance can be migrated.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.GetInstanceMigratable(instance)

  @staticmethod
  def perspective_all_instances_info(params):
    """Query information about all instances.

    """
    return backend.GetAllInstancesInfo(params[0])

  @staticmethod
  def perspective_instance_list(params):
    """Query the list of running instances.

    """
    return True, backend.GetInstanceList(params[0])

  # node --------------------------

  @staticmethod
  def perspective_node_tcp_ping(params):
    """Do a TcpPing on the remote node.

    """
    # params: (source, target, port, timeout, live_port_needed)
    return utils.TcpPing(params[1], params[2], timeout=params[3],
                         live_port_needed=params[4], source=params[0])

  @staticmethod
  def perspective_node_has_ip_address(params):
    """Checks if a node has the given ip address.

    """
    return True, utils.OwnIpAddress(params[0])

  @staticmethod
  def perspective_node_info(params):
    """Query node information.

    """
    vgname, hypervisor_type = params
    return backend.GetNodeInfo(vgname, hypervisor_type)

  @staticmethod
  def perspective_node_add(params):
    """Complete the registration of this node in the cluster.

    """
    return backend.AddNode(params[0], params[1], params[2],
                           params[3], params[4], params[5])

  @staticmethod
  def perspective_node_verify(params):
    """Run a verify sequence on this node.

    """
    return backend.VerifyNode(params[0], params[1])

  @staticmethod
  def perspective_node_start_master(params):
    """Promote this node to master status.

    """
    return backend.StartMaster(params[0])

  @staticmethod
  def perspective_node_stop_master(params):
    """Demote this node from master status.

    """
    return backend.StopMaster(params[0])

  @staticmethod
  def perspective_node_leave_cluster(params):
    """Cleanup after leaving a cluster.

    """
    return backend.LeaveCluster()

  @staticmethod
  def perspective_node_volumes(params):
    """Query the list of all logical volume groups.

    """
    return backend.NodeVolumes()

  @staticmethod
  def perspective_node_demote_from_mc(params):
    """Demote a node from the master candidate role.

    """
    return backend.DemoteFromMC()


  @staticmethod
  def perspective_node_powercycle(params):
    """Tries to powercycle the node.

    """
    hypervisor_type = params[0]
    return backend.PowercycleNode(hypervisor_type)


  # cluster --------------------------

  @staticmethod
  def perspective_version(params):
    """Query version information.

    """
    return True, constants.PROTOCOL_VERSION

  @staticmethod
  def perspective_upload_file(params):
    """Upload a file.

    Note that the backend implementation imposes strict rules on which
    files are accepted.

    """
    return backend.UploadFile(*params)

  @staticmethod
  def perspective_master_info(params):
    """Query master information.

    """
    return backend.GetMasterInfo()

  @staticmethod
  def perspective_write_ssconf_files(params):
    """Write ssconf files.

    """
    (values,) = params
    return backend.WriteSsconfFiles(values)

  # os -----------------------

  @staticmethod
  def perspective_os_diagnose(params):
    """Query detailed information about existing OSes.

    """
    return backend.DiagnoseOS()

  @staticmethod
  def perspective_os_get(params):
    """Query information about a given OS.

    """
    name = params[0]
    os_obj = backend.OSFromDisk(name)
    return True, os_obj.ToDict()

  # hooks -----------------------

  @staticmethod
  def perspective_hooks_runner(params):
    """Run hook scripts.

    """
    hpath, phase, env = params
    hr = backend.HooksRunner()
    return hr.RunHooks(hpath, phase, env)

  # iallocator -----------------

  @staticmethod
  def perspective_iallocator_runner(params):
    """Run an iallocator script.

    """
    name, idata = params
    iar = backend.IAllocatorRunner()
    return iar.Run(name, idata)

  # test -----------------------

  @staticmethod
  def perspective_test_delay(params):
    """Run test delay.

    """
    duration = params[0]
    return utils.TestDelay(duration)

  # file storage ---------------

  @staticmethod
  def perspective_file_storage_dir_create(params):
    """Create the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.CreateFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_remove(params):
    """Remove the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.RemoveFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_rename(params):
    """Rename the file storage directory.

    """
    old_file_storage_dir = params[0]
    new_file_storage_dir = params[1]
    return backend.RenameFileStorageDir(old_file_storage_dir,
                                        new_file_storage_dir)

  # jobs ------------------------

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_update(params):
    """Update job queue.

    """
    (file_name, content) = params
    return backend.JobQueueUpdate(file_name, content)

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_purge(params):
    """Purge job queue.

    """
    return backend.JobQueuePurge()

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_rename(params):
    """Rename a job queue file.

    """
    # TODO: What if a file fails to rename?
    return True, [backend.JobQueueRename(old, new) for old, new in params]

  @staticmethod
  def perspective_jobqueue_set_drain(params):
    """Set/unset the queue drain flag.

    """
    drain_flag = params[0]
    return backend.JobQueueSetDrainFlag(drain_flag)


  # hypervisor ---------------

  @staticmethod
  def perspective_hypervisor_validate_params(params):
    """Validate the hypervisor parameters.

    """
    (hvname, hvparams) = params
    return backend.ValidateHVParams(hvname, hvparams)
737

    
738

    
739
def ParseOptions():
  """Build the option parser and parse the node daemon's command line.

  @return: (options, args) as from OptionParser.parse_args()

  """
  version_string = "%%prog (ganeti) %s" % constants.RELEASE_VERSION
  op_parser = OptionParser(description="Ganeti node daemon",
                           usage="%prog [-f] [-d] [-b ADDRESS]",
                           version=version_string)

  # -f keeps the daemon in the foreground by *clearing* the fork flag
  op_parser.add_option("-f", "--foreground", dest="fork",
                       help="Don't detach from the current terminal",
                       default=True, action="store_false")
  op_parser.add_option("-d", "--debug", dest="debug",
                       help="Enable some debug messages",
                       default=False, action="store_true")
  op_parser.add_option("-b", "--bind", dest="bind_address",
                       help="Bind address",
                       default="", metavar="ADDRESS")

  return op_parser.parse_args()
762

    
763

    
764
def main():
765
  """Main function for the node daemon.
766

    
767
  """
768
  global queue_lock
769

    
770
  options, args = ParseOptions()
771
  utils.debug = options.debug
772

    
773
  if options.fork:
774
    utils.CloseFDs()
775

    
776
  for fname in (constants.SSL_CERT_FILE,):
777
    if not os.path.isfile(fname):
778
      print "config %s not there, will not run." % fname
779
      sys.exit(5)
780

    
781
  try:
782
    port = utils.GetNodeDaemonPort()
783
  except errors.ConfigurationError, err:
784
    print "Cluster configuration incomplete: '%s'" % str(err)
785
    sys.exit(5)
786

    
787
  dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
788
  dirs.append((constants.LOG_OS_DIR, 0750))
789
  dirs.append((constants.LOCK_DIR, 1777))
790
  utils.EnsureDirs(dirs)
791

    
792
  # become a daemon
793
  if options.fork:
794
    utils.Daemonize(logfile=constants.LOG_NODESERVER)
795

    
796
  utils.WritePidFile(constants.NODED_PID)
797
  try:
798
    utils.SetupLogging(logfile=constants.LOG_NODESERVER, debug=options.debug,
799
                       stderr_logging=not options.fork)
800
    logging.info("ganeti node daemon startup")
801

    
802
    # Read SSL certificate
803
    ssl_params = http.HttpSslParams(ssl_key_path=constants.SSL_CERT_FILE,
804
                                    ssl_cert_path=constants.SSL_CERT_FILE)
805

    
806
    # Prepare job queue
807
    queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
808

    
809
    mainloop = daemon.Mainloop()
810
    server = NodeHttpServer(mainloop, options.bind_address, port,
811
                            ssl_params=ssl_params, ssl_verify_peer=True)
812
    server.Start()
813
    try:
814
      mainloop.Run()
815
    finally:
816
      server.Stop()
817
  finally:
818
    utils.RemovePidFile(constants.NODED_PID)
819

    
820

    
821
# Script entry point: run the node daemon when executed directly.
if __name__ == '__main__':
  main()