#!/usr/bin/python
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Ganeti node daemon"""

# functions in this module need to have a given name structure, so:
# pylint: disable-msg=C0103

import os
import sys
import traceback
import SocketServer
import errno
import logging
import signal

from optparse import OptionParser

from ganeti import backend
from ganeti import constants
from ganeti import objects
from ganeti import errors
from ganeti import jstore
from ganeti import daemon
from ganeti import http
from ganeti import utils

import ganeti.http.server


queue_lock = None
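# queue_lock is set up in main() via jstore.InitAndVerifyQueue() and is
# acquired by the _RequireJobQueueLock decorator below.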


def _RequireJobQueueLock(fn):
  """Decorator for job queue manipulating functions.

  """
  QUEUE_LOCK_TIMEOUT = 10

  def wrapper(*args, **kwargs):
    # Locking in exclusive, blocking mode because there could be several
    # children running at the same time. Waiting up to 10 seconds.
    queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
    try:
      return fn(*args, **kwargs)
    finally:
      queue_lock.Unlock()

  return wrapper


class NodeHttpServer(http.server.HttpServer):
  """The server implementation.

  This class holds all methods exposed over the RPC interface.

  """
  def __init__(self, *args, **kwargs):
    http.server.HttpServer.__init__(self, *args, **kwargs)
    self.noded_pid = os.getpid()

  def HandleRequest(self, req):
    """Handle a request.

    """
    if req.request_method.upper() != http.HTTP_PUT:
      raise http.HttpBadRequest()

    path = req.request_path
    if path.startswith("/"):
      path = path[1:]

    method = getattr(self, "perspective_%s" % path, None)
    if method is None:
      raise http.HttpNotFound()

    try:
      try:
        return method(req.request_body)
      except:
        logging.exception("Error in RPC call")
        raise
    except errors.QuitGanetiException, err:
      # Tell parent to quit
      os.kill(self.noded_pid, signal.SIGTERM)
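      # The exception is deliberately swallowed: sending SIGTERM to the PID
      # recorded at construction time asks the whole daemon to shut down.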

  # the new block devices  --------------------------

  @staticmethod
  def perspective_blockdev_create(params):
    """Create a block device.

    """
    bdev_s, size, owner, on_primary, info = params
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.CreateBlockDevice(bdev, size, owner, on_primary, info)

  @staticmethod
  def perspective_blockdev_remove(params):
    """Remove a block device.

    """
    bdev_s = params[0]
    bdev = objects.Disk.FromDict(bdev_s)
    return backend.RemoveBlockDevice(bdev)

  @staticmethod
  def perspective_blockdev_rename(params):
    """Rename a block device.

    """
    devlist = [(objects.Disk.FromDict(ds), uid) for ds, uid in params]
    return backend.RenameBlockDevices(devlist)

  @staticmethod
  def perspective_blockdev_assemble(params):
    """Assemble a block device.

    """
    bdev_s, owner, on_primary = params
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.AssembleBlockDevice(bdev, owner, on_primary)

  @staticmethod
  def perspective_blockdev_shutdown(params):
    """Shutdown a block device.

    """
    bdev_s = params[0]
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.ShutdownBlockDevice(bdev)

  @staticmethod
  def perspective_blockdev_addchildren(params):
    """Add a child to a mirror device.

    Note: this is only valid for mirror devices. It's the caller's duty
    to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.MirrorAddChildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_removechildren(params):
    """Remove a child from a mirror device.

    This is only valid for mirror devices, of course. It's the caller's
    duty to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.MirrorRemoveChildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_getmirrorstatus(params):
    """Return the mirror status for a list of disks.

    """
    disks = [objects.Disk.FromDict(dsk_s)
             for dsk_s in params]
    return backend.GetMirrorStatus(disks)

  @staticmethod
  def perspective_blockdev_find(params):
    """Expose the FindBlockDevice functionality for a disk.

    This will try to find but not activate a disk.

    """
    disk = objects.Disk.FromDict(params[0])
    return backend.CallBlockdevFind(disk)

  @staticmethod
  def perspective_blockdev_snapshot(params):
    """Create a snapshot device.

    Note that this is only valid for LVM disks; if we get passed
    something else we raise an exception. The snapshot device can be
    removed by calling the generic block device remove call.

    """
    cfbd = objects.Disk.FromDict(params[0])
    return backend.SnapshotBlockDevice(cfbd)

  @staticmethod
  def perspective_blockdev_grow(params):
    """Grow a stack of devices.

    """
    cfbd = objects.Disk.FromDict(params[0])
    amount = params[1]
    return backend.GrowBlockDevice(cfbd, amount)

  @staticmethod
  def perspective_blockdev_close(params):
    """Closes the given block devices.

    """
    disks = [objects.Disk.FromDict(cf) for cf in params[1]]
    return backend.CloseBlockDevices(params[0], disks)

  # blockdev/drbd specific methods ----------

  @staticmethod
  def perspective_drbd_disconnect_net(params):
    """Disconnects the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdDisconnectNet(nodes_ip, disks)

  @staticmethod
  def perspective_drbd_attach_net(params):
    """Attaches the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks, instance_name, multimaster = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdAttachNet(nodes_ip, disks, instance_name, multimaster)

  @staticmethod
  def perspective_drbd_wait_sync(params):
    """Wait until DRBD disks are synched.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    nodes_ip, disks = params
    disks = [objects.Disk.FromDict(cf) for cf in disks]
    return backend.DrbdWaitSync(nodes_ip, disks)

  # export/import  --------------------------

  @staticmethod
  def perspective_snapshot_export(params):
    """Export a given snapshot.

    """
    disk = objects.Disk.FromDict(params[0])
    dest_node = params[1]
    instance = objects.Instance.FromDict(params[2])
    cluster_name = params[3]
    dev_idx = params[4]
    return backend.ExportSnapshot(disk, dest_node, instance,
                                  cluster_name, dev_idx)

  @staticmethod
  def perspective_finalize_export(params):
    """Expose the finalize export functionality.

    """
    instance = objects.Instance.FromDict(params[0])
    snap_disks = [objects.Disk.FromDict(str_data)
                  for str_data in params[1]]
    return backend.FinalizeExport(instance, snap_disks)

  @staticmethod
  def perspective_export_info(params):
    """Query information about an existing export on this node.

    The given path may not contain an export, in which case we return
    None.

    """
    path = params[0]
    einfo = backend.ExportInfo(path)
    if einfo is None:
      return einfo
    return einfo.Dumps()

  @staticmethod
  def perspective_export_list(params):
    """List the available exports on this node.

    Note that as opposed to export_info, which may query data about an
    export in any path, this only queries the standard Ganeti path
    (constants.EXPORT_DIR).

    """
    return backend.ListExports()

  @staticmethod
  def perspective_export_remove(params):
    """Remove an export.

    """
    export = params[0]
    return backend.RemoveExport(export)

  # volume  --------------------------

  @staticmethod
  def perspective_volume_list(params):
    """Query the list of logical volumes in a given volume group.

    """
    vgname = params[0]
    return backend.GetVolumeList(vgname)

  @staticmethod
  def perspective_vg_list(params):
    """Query the list of volume groups.

    """
    return backend.ListVolumeGroups()

  # bridge  --------------------------

  @staticmethod
  def perspective_bridges_exist(params):
    """Check if all bridges given exist on this node.

    """
    bridges_list = params[0]
    return backend.BridgesExist(bridges_list)

  # instance  --------------------------

  @staticmethod
  def perspective_instance_os_add(params):
    """Install an OS on a given instance.

    """
    inst_s = params[0]
    inst = objects.Instance.FromDict(inst_s)
    return backend.InstanceOsAdd(inst)

  @staticmethod
  def perspective_instance_run_rename(params):
    """Runs the OS rename script for an instance.

    """
    inst_s, old_name = params
    inst = objects.Instance.FromDict(inst_s)
    return backend.RunRenameInstance(inst, old_name)

  @staticmethod
  def perspective_instance_os_import(params):
    """Run the import function of an OS onto a given instance.

    """
    inst_s, src_node, src_images, cluster_name = params
    inst = objects.Instance.FromDict(inst_s)
    return backend.ImportOSIntoInstance(inst, src_node, src_images,
                                        cluster_name)

  @staticmethod
  def perspective_instance_shutdown(params):
    """Shutdown an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.ShutdownInstance(instance)

  @staticmethod
  def perspective_instance_start(params):
    """Start an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    extra_args = params[1]
    return backend.StartInstance(instance, extra_args)

  @staticmethod
  def perspective_migration_info(params):
    """Gather information about an instance to be migrated.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.MigrationInfo(instance)

  @staticmethod
  def perspective_accept_instance(params):
    """Prepare the node to accept an instance.

    """
    instance, info, target = params
    instance = objects.Instance.FromDict(instance)
    return backend.AcceptInstance(instance, info, target)

  @staticmethod
  def perspective_finalize_migration(params):
    """Finalize the instance migration.

    """
    instance, info, success = params
    instance = objects.Instance.FromDict(instance)
    return backend.FinalizeMigration(instance, info, success)

  @staticmethod
  def perspective_instance_migrate(params):
    """Migrates an instance.

    """
    instance, target, live = params
    instance = objects.Instance.FromDict(instance)
    return backend.MigrateInstance(instance, target, live)

  @staticmethod
  def perspective_instance_reboot(params):
    """Reboot an instance.

    """
    instance = objects.Instance.FromDict(params[0])
    reboot_type = params[1]
    extra_args = params[2]
    return backend.RebootInstance(instance, reboot_type, extra_args)

  @staticmethod
  def perspective_instance_info(params):
    """Query instance information.

    """
    return backend.GetInstanceInfo(params[0], params[1])

  @staticmethod
  def perspective_instance_migratable(params):
    """Query whether the specified instance can be migrated.

    """
    instance = objects.Instance.FromDict(params[0])
    return backend.GetInstanceMigratable(instance)

  @staticmethod
  def perspective_all_instances_info(params):
    """Query information about all instances.

    """
    return backend.GetAllInstancesInfo(params[0])

  @staticmethod
  def perspective_instance_list(params):
    """Query the list of running instances.

    """
    return backend.GetInstanceList(params[0])

  # node --------------------------

  @staticmethod
  def perspective_node_tcp_ping(params):
    """Do a TcpPing on the remote node.

    """
    return utils.TcpPing(params[1], params[2], timeout=params[3],
487
                         live_port_needed=params[4], source=params[0])
488

    
489
  @staticmethod
490
  def perspective_node_has_ip_address(params):
491
    """Checks if a node has the given ip address.
492

    
493
    """
494
    return utils.OwnIpAddress(params[0])
495

    
496
  @staticmethod
497
  def perspective_node_info(params):
498
    """Query node information.
499

    
500
    """
501
    vgname, hypervisor_type = params
502
    return backend.GetNodeInfo(vgname, hypervisor_type)
503

    
504
  @staticmethod
505
  def perspective_node_add(params):
506
    """Complete the registration of this node in the cluster.
507

    
508
    """
509
    return backend.AddNode(params[0], params[1], params[2],
510
                           params[3], params[4], params[5])
511

    
512
  @staticmethod
513
  def perspective_node_verify(params):
514
    """Run a verify sequence on this node.
515

    
516
    """
517
    return backend.VerifyNode(params[0], params[1])
518

    
519
  @staticmethod
520
  def perspective_node_start_master(params):
521
    """Promote this node to master status.
522

    
523
    """
524
    return backend.StartMaster(params[0])
525

    
526
  @staticmethod
527
  def perspective_node_stop_master(params):
528
    """Demote this node from master status.
529

    
530
    """
531
    return backend.StopMaster(params[0])
532

    
533
  @staticmethod
534
  def perspective_node_leave_cluster(params):
535
    """Cleanup after leaving a cluster.
536

    
537
    """
538
    return backend.LeaveCluster()
539

    
540
  @staticmethod
541
  def perspective_node_volumes(params):
542
    """Query the list of all logical volume groups.
543

    
544
    """
545
    return backend.NodeVolumes()
546

    
547
  @staticmethod
548
  def perspective_node_demote_from_mc(params):
549
    """Demote a node from the master candidate role.
550

    
551
    """
552
    return backend.DemoteFromMC()


  # cluster --------------------------

  @staticmethod
  def perspective_version(params):
    """Query version information.

    """
    return constants.PROTOCOL_VERSION

  @staticmethod
  def perspective_upload_file(params):
    """Upload a file.

    Note that the backend implementation imposes strict rules on which
    files are accepted.

    """
    return backend.UploadFile(*params)

  @staticmethod
  def perspective_master_info(params):
    """Query master information.

    """
    return backend.GetMasterInfo()

  @staticmethod
  def perspective_write_ssconf_files(params):
    """Write ssconf files.

    """
    (values,) = params
    return backend.WriteSsconfFiles(values)

  # os -----------------------

  @staticmethod
  def perspective_os_diagnose(params):
    """Query detailed information about existing OSes.

    """
    return [os.ToDict() for os in backend.DiagnoseOS()]

  @staticmethod
  def perspective_os_get(params):
    """Query information about a given OS.

    """
    name = params[0]
    try:
      os_obj = backend.OSFromDisk(name)
    except errors.InvalidOS, err:
      os_obj = objects.OS.FromInvalidOS(err)
    return os_obj.ToDict()

  # hooks -----------------------

  @staticmethod
  def perspective_hooks_runner(params):
    """Run hook scripts.

    """
    hpath, phase, env = params
    hr = backend.HooksRunner()
    return hr.RunHooks(hpath, phase, env)

  # iallocator -----------------

  @staticmethod
  def perspective_iallocator_runner(params):
    """Run an iallocator script.

    """
    name, idata = params
    iar = backend.IAllocatorRunner()
    return iar.Run(name, idata)

  # test -----------------------

  @staticmethod
  def perspective_test_delay(params):
    """Run test delay.

    """
    duration = params[0]
    return utils.TestDelay(duration)

  # file storage ---------------

  @staticmethod
  def perspective_file_storage_dir_create(params):
    """Create the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.CreateFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_remove(params):
    """Remove the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.RemoveFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_rename(params):
    """Rename the file storage directory.

    """
    old_file_storage_dir = params[0]
    new_file_storage_dir = params[1]
    return backend.RenameFileStorageDir(old_file_storage_dir,
                                        new_file_storage_dir)

  # jobs ------------------------

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_update(params):
    """Update job queue.

    """
    (file_name, content) = params
    return backend.JobQueueUpdate(file_name, content)

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_purge(params):
    """Purge job queue.

    """
    return backend.JobQueuePurge()

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_rename(params):
    """Rename a job queue file.

    """
    # TODO: What if a file fails to rename?
    return [backend.JobQueueRename(old, new) for old, new in params]

  @staticmethod
  def perspective_jobqueue_set_drain(params):
    """Set/unset the queue drain flag.

    """
    drain_flag = params[0]
    return backend.JobQueueSetDrainFlag(drain_flag)


  # hypervisor ---------------

  @staticmethod
  def perspective_hypervisor_validate_params(params):
    """Validate the hypervisor parameters.

    """
    (hvname, hvparams) = params
    return backend.ValidateHVParams(hvname, hvparams)


def ParseOptions():
  """Parse the command line options.

  @return: (options, args) as from OptionParser.parse_args()

  """
  parser = OptionParser(description="Ganeti node daemon",
                        usage="%prog [-f] [-d]",
                        version="%%prog (ganeti) %s" %
                        constants.RELEASE_VERSION)

  parser.add_option("-f", "--foreground", dest="fork",
                    help="Don't detach from the current terminal",
                    default=True, action="store_false")
  parser.add_option("-d", "--debug", dest="debug",
                    help="Enable some debug messages",
                    default=False, action="store_true")
  options, args = parser.parse_args()
  return options, args


def EnsureRuntimeEnvironment():
  """Ensure our run-time environment is complete.

  Currently this creates directories which could be missing, either
  due to directories being on a tmpfs mount, or due to incomplete
  packaging.

  """
  dirs = [(val, constants.RUN_DIRS_MODE) for val in constants.SUB_RUN_DIRS]
  dirs.append((constants.LOG_OS_DIR, 0750))
  for dir_name, dir_mode in dirs:
    if not os.path.exists(dir_name):
      try:
        os.mkdir(dir_name, dir_mode)
      except EnvironmentError, err:
        if err.errno != errno.EEXIST:
755
          print ("Node setup wrong, cannot create directory '%s': %s" %
756
                 (dir_name, err))
757
          sys.exit(5)
758
    if not os.path.isdir(dir_name):
759
      print ("Node setup wrong, '%s' is not a directory" % dir_name)
760
      sys.exit(5)
761

    
762

    
763
def main():
764
  """Main function for the node daemon.
765

    
766
  """
767
  global queue_lock
768

    
769
  options, args = ParseOptions()
770
  utils.debug = options.debug
771

    
772
  if options.fork:
773
    utils.CloseFDs()
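    # Close inherited file descriptors before the daemonization that happens
    # further down when running in the background.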

  for fname in (constants.SSL_CERT_FILE,):
    if not os.path.isfile(fname):
      print "config %s not there, will not run." % fname
      sys.exit(5)

  try:
    port = utils.GetNodeDaemonPort()
  except errors.ConfigurationError, err:
    print "Cluster configuration incomplete: '%s'" % str(err)
    sys.exit(5)

  EnsureRuntimeEnvironment()

  # become a daemon
  if options.fork:
    utils.Daemonize(logfile=constants.LOG_NODESERVER)

  utils.WritePidFile(constants.NODED_PID)
  try:
    utils.SetupLogging(logfile=constants.LOG_NODESERVER, debug=options.debug,
                       stderr_logging=not options.fork)
    logging.info("ganeti node daemon startup")

    # Read SSL certificate
    ssl_params = http.HttpSslParams(ssl_key_path=constants.SSL_CERT_FILE,
                                    ssl_cert_path=constants.SSL_CERT_FILE)
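    # The same path is passed for key and certificate, so SSL_CERT_FILE is
    # expected to contain both combined.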

    # Prepare job queue
    queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
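    # This sets the module-level queue_lock used by _RequireJobQueueLock.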

    mainloop = daemon.Mainloop()
    server = NodeHttpServer(mainloop, "", port,
                            ssl_params=ssl_params, ssl_verify_peer=True)
    server.Start()
    try:
      mainloop.Run()
    finally:
      server.Stop()
  finally:
    utils.RemovePidFile(constants.NODED_PID)


if __name__ == '__main__':
  main()