Revision cf048aea

This revision introduces a new cluster field, max_running_jobs, with a default of luxidMaximalRunningJobsDefault (20): the field is added to the Python and Haskell cluster objects, filled in during configuration upgrade, and stripped again on downgrade.
--- a/lib/objects.py
+++ b/lib/objects.py
@@ -1577,6 +1577,7 @@
     "disk_state_static",
     "enabled_disk_templates",
     "candidate_certs",
+    "max_running_jobs",
     ] + _TIMESTAMPS + _UUID
 
   def UpgradeConfig(self):
@@ -1702,6 +1703,9 @@
     if self.candidate_certs is None:
       self.candidate_certs = {}
 
+    if self.max_running_jobs is None:
+      self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
+
   @property
   def primary_hypervisor(self):
     """The first hypervisor is the primary.
--- a/src/Ganeti/Constants.hs
+++ b/src/Ganeti/Constants.hs
@@ -3958,6 +3958,12 @@
 luxidMaximalRunningJobs :: Int
 luxidMaximalRunningJobs = 20
 
+-- | The default value for the maximal number of jobs to be running at the same
+-- time. Once the maximal number is reached, new jobs will just be queued and
+-- only started, once some of the other jobs have finished.
+luxidMaximalRunningJobsDefault :: Int
+luxidMaximalRunningJobsDefault = 20
+
 -- * Confd
 
 confdProtocolVersion :: Int
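Ganeti generates its Python constants module from these Haskell definitions, which is why the objects.py hunk above can refer to constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT. A sketch of the camelCase-to-upper-snake renaming that connects the two names (the regex is an assumption for illustration; the real conversion lives in the build tooling):

    import re

    def hs_to_py_name(name):
        # Insert "_" before each interior capital, then upper-case the result.
        return re.sub(r"(?<!^)(?=[A-Z])", "_", name).upper()

    assert (hs_to_py_name("luxidMaximalRunningJobsDefault")
            == "LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT")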
--- a/src/Ganeti/Objects.hs
+++ b/src/Ganeti/Objects.hs
@@ -706,6 +706,7 @@
   , simpleField "ipolicy" [t| FilledIPolicy |]
   , simpleField "enabled_disk_templates" [t| [DiskTemplate] |]
   , simpleField "candidate_certs" [t| CandidateCertificates |]
+  , simpleField "max_running_jobs" [t| Int |]
   ]
   ++ timeStampFields
   ++ uuidFields
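The simpleField declaration tells the Haskell side how to (de)serialize the attribute in the JSON cluster object, so both daemons now read and write the same key that lib/objects.py exposes. A hedged illustration of the resulting cluster fragment in config.data (the surrounding keys are abridged and the exact layout is an assumption):

    import json

    # Only "max_running_jobs" is new in this revision; the other keys are
    # shown merely as context.
    cluster_fragment = {
        "enabled_disk_templates": ["plain", "drbd"],
        "candidate_certs": {},
        "max_running_jobs": 20,
    }
    print(json.dumps(cluster_fragment, indent=2))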
--- a/tools/cfgupgrade
+++ b/tools/cfgupgrade
@@ -414,6 +414,8 @@
   for param in ["gluster", "gluster_storage_dir"]:
     if param in cluster:
       del cluster[param]
+  if "max_running_jobs" in cluster:
+    del cluster["max_running_jobs"]
 
 
 def DowngradeGroups(config_data):
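The downgrade path is the mirror image of UpgradeConfig: a previous release does not know the new key, so cfgupgrade deletes it before writing a configuration that release can load. A minimal sketch of that step (DowngradeCluster and the config layout are hypothetical stand-ins for the real cfgupgrade code):

    def DowngradeCluster(config_data):
        # Strip keys the previous release does not understand.
        cluster = config_data.get("cluster", {})
        if "max_running_jobs" in cluster:
            del cluster["max_running_jobs"]

    cfg = {"cluster": {"candidate_certs": {}, "max_running_jobs": 20}}
    DowngradeCluster(cfg)
    assert "max_running_jobs" not in cfg["cluster"]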