# has to exist in order for the sphinx module to be loaded
# successfully, but we certainly don't want the docs to be rebuilt if
# it changes
-doc/html/index.html: $(docrst) $(docdot) doc/conf.py configure.ac \
+doc/html/index.html: $(docrst) doc/conf.py configure.ac \
$(RUN_IN_TEMPDIR) lib/build/sphinx_ext.py \
lib/build/shell_example_lexer.py lib/opcodes.py lib/ht.py \
| $(BUILT_PYTHON_SOURCES)
cat $<; \
} > $@
-docdot = \
- doc/arch-2.0.dot \
- doc/design-2.1-lock-acquire.dot \
- doc/design-2.1-lock-release.dot
-
# Things to build but not to install (add it to EXTRA_DIST if it should be
# distributed)
noinst_DATA = \
devel/webserver \
tools/kvm-ifup.in \
tools/vcluster-setup.in \
- $(docdot) \
$(docrst) \
doc/conf.py \
doc/html \
+++ /dev/null
-digraph "ganeti-2.0-architecture" {
- compound=false
- concentrate=true
- mclimit=100.0
- nslimit=100.0
- edge[fontsize="8" fontname="Helvetica-Oblique"]
- node[width="0" height="0" fontsize="12" fontcolor="black" shape=rect]
-
- subgraph outside {
- rclient[label="external clients"]
- label="Outside the cluster"
- }
-
- subgraph cluster_inside {
- label="ganeti cluster"
- labeljust=l
- subgraph cluster_master_node {
- label="master node"
- rapi[label="RAPI daemon"]
- cli[label="CLI"]
- watcher[label="Watcher"]
- burnin[label="Burnin"]
- masterd[shape=record style=filled label="{ <luxi> luxi endpoint | master I/O thread | job queue | {<w1> worker| <w2> worker | <w3> worker }}"]
- {rapi;cli;watcher;burnin} -> masterd:luxi [label="LUXI" labelpos=100]
- }
-
- subgraph cluster_nodes {
- label="nodes"
- noded1 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
- noded2 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
- noded3 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
- }
- masterd:w2 -> {noded1;noded2;noded3} [label="node RPC"]
- cli -> {noded1;noded2;noded3} [label="SSH"]
- }
-
- rclient -> rapi [label="RAPI protocol"]
-}
The new design will change the cluster architecture to:
-.. graphviz:: arch-2.0.dot
+.. digraph:: "ganeti-2.0-architecture"
+
+ compound=false
+ concentrate=true
+ mclimit=100.0
+ nslimit=100.0
+ edge[fontsize="8" fontname="Helvetica-Oblique"]
+ node[width="0" height="0" fontsize="12" fontcolor="black" shape=rect]
+
+ subgraph outside {
+ rclient[label="external clients"]
+ label="Outside the cluster"
+ }
+
+ subgraph cluster_inside {
+ label="ganeti cluster"
+ labeljust=l
+ subgraph cluster_master_node {
+ label="master node"
+ rapi[label="RAPI daemon"]
+ cli[label="CLI"]
+ watcher[label="Watcher"]
+ burnin[label="Burnin"]
+ masterd[shape=record style=filled label="{ <luxi> luxi endpoint | master I/O thread | job queue | {<w1> worker| <w2> worker | <w3> worker }}"]
+ {rapi;cli;watcher;burnin} -> masterd:luxi [label="LUXI" labelpos=100]
+ }
+
+ subgraph cluster_nodes {
+ label="nodes"
+ noded1 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
+ noded2 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
+ noded3 [shape=record label="{ RPC listener | Disk management | Network management | Hypervisor } "]
+ }
+ masterd:w2 -> {noded1;noded2;noded3} [label="node RPC"]
+ cli -> {noded1;noded2;noded3} [label="SSH"]
+ }
+
+ rclient -> rapi [label="RAPI protocol"]
This differs from the 1.2 architecture by the addition of the master
daemon, which will be the only entity to talk to the node daemons.
+++ /dev/null
-digraph "design-2.1-lock-acquire" {
- graph[fontsize=8, fontname="Helvetica"]
- node[fontsize=8, fontname="Helvetica", width="0", height="0"]
- edge[fontsize=8, fontname="Helvetica"]
-
- /* Actions */
- abort[label="Abort\n(couldn't acquire)"]
- acquire[label="Acquire lock"]
- add_to_queue[label="Add condition to queue"]
- wait[label="Wait for notification"]
- remove_from_queue[label="Remove from queue"]
-
- /* Conditions */
- alone[label="Empty queue\nand can acquire?", shape=diamond]
- have_timeout[label="Do I have\ntimeout?", shape=diamond]
- top_of_queue_and_can_acquire[
- label="On top of queue and\ncan acquire lock?",
- shape=diamond,
- ]
-
- /* Lines */
- alone->acquire[label="Yes"]
- alone->add_to_queue[label="No"]
-
- have_timeout->abort[label="Yes"]
- have_timeout->wait[label="No"]
-
- top_of_queue_and_can_acquire->acquire[label="Yes"]
- top_of_queue_and_can_acquire->have_timeout[label="No"]
-
- add_to_queue->wait
- wait->top_of_queue_and_can_acquire
- acquire->remove_from_queue
-}
+++ /dev/null
-digraph "design-2.1-lock-release" {
- graph[fontsize=8, fontname="Helvetica"]
- node[fontsize=8, fontname="Helvetica", width="0", height="0"]
- edge[fontsize=8, fontname="Helvetica"]
-
- /* Actions */
- remove_from_owners[label="Remove from owner list"]
- notify[label="Notify topmost"]
- swap_shared[label="Swap shared conditions"]
- success[label="Success"]
-
- /* Conditions */
- have_pending[label="Any pending\nacquires?", shape=diamond]
- was_active_queue[
- label="Was active condition\nfor shared acquires?",
- shape=diamond,
- ]
-
- /* Lines */
- remove_from_owners->have_pending
-
- have_pending->notify[label="Yes"]
- have_pending->success[label="No"]
-
- notify->was_active_queue
-
- was_active_queue->swap_shared[label="Yes"]
- was_active_queue->success[label="No"]
-
- swap_shared->success
-}
acquires and no current holders. The caller can have the lock
immediately.
-.. graphviz:: design-2.1-lock-acquire.dot
+.. digraph:: "design-2.1-lock-acquire"
+
+ graph[fontsize=8, fontname="Helvetica"]
+ node[fontsize=8, fontname="Helvetica", width="0", height="0"]
+ edge[fontsize=8, fontname="Helvetica"]
+
+ /* Actions */
+ abort[label="Abort\n(couldn't acquire)"]
+ acquire[label="Acquire lock"]
+ add_to_queue[label="Add condition to queue"]
+ wait[label="Wait for notification"]
+ remove_from_queue[label="Remove from queue"]
+
+ /* Conditions */
+ alone[label="Empty queue\nand can acquire?", shape=diamond]
+ have_timeout[label="Do I have\ntimeout?", shape=diamond]
+ top_of_queue_and_can_acquire[
+ label="On top of queue and\ncan acquire lock?",
+ shape=diamond,
+ ]
+
+ /* Lines */
+ alone->acquire[label="Yes"]
+ alone->add_to_queue[label="No"]
+
+ have_timeout->abort[label="Yes"]
+ have_timeout->wait[label="No"]
+
+ top_of_queue_and_can_acquire->acquire[label="Yes"]
+ top_of_queue_and_can_acquire->have_timeout[label="No"]
+
+ add_to_queue->wait
+ wait->top_of_queue_and_can_acquire
+ acquire->remove_from_queue
Release
*******
exclusive locks by forcing consecutive shared acquires to wait in the
queue.
-.. graphviz:: design-2.1-lock-release.dot
+.. digraph:: "design-2.1-lock-release"
+
+ graph[fontsize=8, fontname="Helvetica"]
+ node[fontsize=8, fontname="Helvetica", width="0", height="0"]
+ edge[fontsize=8, fontname="Helvetica"]
+
+ /* Actions */
+ remove_from_owners[label="Remove from owner list"]
+ notify[label="Notify topmost"]
+ swap_shared[label="Swap shared conditions"]
+ success[label="Success"]
+
+ /* Conditions */
+ have_pending[label="Any pending\nacquires?", shape=diamond]
+ was_active_queue[
+ label="Was active condition\nfor shared acquires?",
+ shape=diamond,
+ ]
+
+ /* Lines */
+ remove_from_owners->have_pending
+
+ have_pending->notify[label="Yes"]
+ have_pending->success[label="No"]
+
+ notify->was_active_queue
+
+ was_active_queue->swap_shared[label="Yes"]
+ was_active_queue->success[label="No"]
+
+ swap_shared->success
Delete