     return result


-# Network LUs
 class LUNetworkAdd(LogicalUnit):
   """Logical unit for creating networks.

...

   def ExpandNames(self):
     self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
-    self.needed_locks = {}
+
+    if self.op.conflicts_check:
+      self.share_locks[locking.LEVEL_NODE] = 1
+      self.needed_locks = {
+        locking.LEVEL_NODE: locking.ALL_SET,
+        }
+    else:
+      self.needed_locks = {}
+
     self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid

   def CheckPrereq(self):
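Putting the added and unchanged lines together, LUNetworkAdd.ExpandNames after this hunk reads roughly as the sketch below. It is reassembled from the hunk only, and the inline comments are an interpretation of the change (the conflict check later in this LU walks every node's IPs), not part of the patch.

  def ExpandNames(self):
    self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    if self.op.conflicts_check:
      # The conflict check has to look at every node's IPs, so node locks
      # are taken shared over the whole node list.
      self.share_locks[locking.LEVEL_NODE] = 1
      self.needed_locks = {
        locking.LEVEL_NODE: locking.ALL_SET,
        }
    else:
      # Without the check, no node locks are needed at all.
      self.needed_locks = {}

    self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid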
...
     # Check if we need to reserve the nodes and the cluster master IP
     # These may not be allocated to any instances in routed mode, as
     # they wouldn't function anyway.
-    for node in self.cfg.GetAllNodesInfo().values():
-      for ip in [node.primary_ip, node.secondary_ip]:
-        try:
-          pool.Reserve(ip)
-          self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
+    if self.op.conflicts_check:
+      for node in self.cfg.GetAllNodesInfo().values():
+        for ip in [node.primary_ip, node.secondary_ip]:
+          try:
+            pool.Reserve(ip)
+            self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)

-        except errors.AddressPoolError:
-          pass
+          except errors.AddressPoolError:
+            pass

-    master_ip = self.cfg.GetClusterInfo().master_ip
-    try:
-      pool.Reserve(master_ip)
-      self.LogInfo("Reserved cluster master IP (%s)", master_ip)
-    except errors.AddressPoolError:
-      pass
+      master_ip = self.cfg.GetClusterInfo().master_ip
+      try:
+        pool.Reserve(master_ip)
+        self.LogInfo("Reserved cluster master IP (%s)", master_ip)
+      except errors.AddressPoolError:
+        pass

     if self.op.add_reserved_ips:
       for ip in self.op.add_reserved_ips:
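With the removed lines stripped out, the new reservation logic in this hunk reads roughly like the sketch below. The comments are an interpretation, assuming pool.Reserve() raises errors.AddressPoolError for an address it cannot reserve; they are not part of the patch.

    if self.op.conflicts_check:
      # Best-effort reservation of every node's primary and secondary IP in
      # the new pool; addresses the pool refuses are silently skipped.
      for node in self.cfg.GetAllNodesInfo().values():
        for ip in [node.primary_ip, node.secondary_ip]:
          try:
            pool.Reserve(ip)
            self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
          except errors.AddressPoolError:
            pass

      # The cluster master IP gets the same best-effort treatment.
      master_ip = self.cfg.GetClusterInfo().master_ip
      try:
        pool.Reserve(master_ip)
        self.LogInfo("Reserved cluster master IP (%s)", master_ip)
      except errors.AddressPoolError:
        pass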
...
     if not self.network_uuid:
       raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
                                  errors.ECODE_INVAL)
+
+    self.share_locks[locking.LEVEL_NODEGROUP] = 1
     self.needed_locks = {
       locking.LEVEL_NETWORK: [self.network_uuid],
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
       }

   def CheckPrereq(self):
...
       raise errors.OpPrereqError("Group %s does not exist" %
                                  self.group_name, errors.ECODE_INVAL)

+    self.share_locks[locking.LEVEL_INSTANCE] = 1
     self.needed_locks = {
       locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
-    self.share_locks[locking.LEVEL_INSTANCE] = 1

   def DeclareLocks(self, level):
     if level == locking.LEVEL_INSTANCE:
...

       # Lock instances optimistically, needs verification once group lock has
       # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+      if self.op.conflicts_check:
+        self.needed_locks[locking.LEVEL_INSTANCE] = \
+          self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]

   def BuildHooksEnv(self):
     ret = {
...
                                  self.group_name, errors.ECODE_INVAL)

     self.needed_locks = {
-      locking.LEVEL_INSTANCE: [],
       locking.LEVEL_NODEGROUP: [self.group_uuid],
       }
     self.share_locks[locking.LEVEL_INSTANCE] = 1
...

       # Lock instances optimistically, needs verification once group lock has
       # been acquired
-      self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+      if self.op.conflicts_check:
+        self.needed_locks[locking.LEVEL_INSTANCE] = \
+          self.cfg.GetNodeGroupInstances(self.group_uuid)

   def BuildHooksEnv(self):
     ret = {
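Every hunk above branches on a new self.op.conflicts_check attribute; the opcode side of that change is not part of this diff. As a purely hypothetical illustration, a caller that wants to skip the conflict checks when adding a network might submit something along these lines (the opcode class and the other field names are assumptions; only the conflicts_check flag is implied by the LU code above):

# Hypothetical sketch -- opcode name and the network fields are assumptions,
# not shown in this diff; only the conflicts_check flag is implied above.
op = opcodes.OpNetworkAdd(network_name="example-net",
                          network="192.0.2.0/24",
                          conflicts_check=False)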