Revision ec5c88dc
b/tools/burnin | ||
---|---|---|
69 | 69 |
self.instances = [] |
70 | 70 |
self.to_rem = [] |
71 | 71 |
self.opts = None |
72 |
self.cl = cli.GetClient() |
|
72 | 73 |
self.ParseOptions() |
73 | 74 |
self.GetState() |
74 | 75 |
|
... | ... | |
def ExecOp(self, op):
  """Run one opcode to completion.

  The feedback buffer is reset first, then the opcode is submitted
  via the shared client connection (``self.cl``) and its result is
  returned.

  """
  self.ClearFeedbackBuf()
  result = cli.SubmitOpCode(op, feedback_fn=self.Feedback, cl=self.cl)
  return result
95 |
|
|
96 |
def ExecJobSet(self, jobs):
  """Submit a set of jobs and wait until every one has finished.

  Returns the list of per-job results if all jobs are successful.
  Otherwise, OpExecError will be raised from within cli.py.

  """
  self.ClearFeedbackBuf()
  submitted = []
  for job in jobs:
    submitted.append(cli.SendJob(job, cl=self.cl))
  Log("- Submitted job IDs %s" % ", ".join(submitted))
  results = []
  for job_id in submitted:
    Log("- Waiting for job %s" % job_id)
    results.append(cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback))
  return results
|
94 | 113 |
|
95 | 114 |
def ParseOptions(self): |
96 | 115 |
"""Parses the command line options. |
... | ... | |
149 | 168 |
help="Perform the allocation using an iallocator" |
150 | 169 |
" instead of fixed node spread (node restrictions no" |
151 | 170 |
" longer apply, therefore -n/--nodes must not be used") |
171 |
parser.add_option("-p", "--parallel", default=False, action="store_true", |
|
172 |
dest="parallel", |
|
173 |
help="Enable parallelization of some operations in" |
|
174 |
" order to speed burnin or to test granular locking") |
|
152 | 175 |
|
153 | 176 |
options, args = parser.parse_args() |
154 | 177 |
if len(args) < 1 or options.os is None: |
... | ... | |
206 | 229 |
mytor = izip(cycle(self.nodes), |
207 | 230 |
islice(cycle(self.nodes), 1, None), |
208 | 231 |
self.instances) |
232 |
jobset = [] |
|
209 | 233 |
for pnode, snode, instance in mytor: |
210 | 234 |
if self.opts.iallocator: |
211 | 235 |
pnode = snode = None |
... | ... | |
240 | 264 |
hvm_nic_type=constants.HT_HVM_NIC_RTL8139, |
241 | 265 |
hvm_disk_type=constants.HT_HVM_DEV_IOEMU) |
242 | 266 |
|
243 |
self.ExecOp(op) |
|
244 |
self.to_rem.append(instance) |
|
267 |
if self.opts.parallel: |
|
268 |
jobset.append([op]) |
|
269 |
# FIXME: here we should not append to to_rem unconditionally, |
|
270 |
# but only when the job is successful |
|
271 |
self.to_rem.append(instance) |
|
272 |
else: |
|
273 |
self.ExecOp(op) |
|
274 |
self.to_rem.append(instance) |
|
275 |
if self.opts.parallel: |
|
276 |
self.ExecJobSet(jobset) |
|
245 | 277 |
|
246 | 278 |
def ReplaceDisks1D8(self): |
247 | 279 |
"""Replace disks on primary and secondary for drbd8.""" |
Also available in: Unified diff