Revision b1ffe1eb qa/qa_instance.py

--- a/qa/qa_instance.py
+++ b/qa/qa_instance.py
@@ -239,15 +239,16 @@
              r'\s+primary:\s+(/dev/drbd\d+)\s+')
   drbddevs = re.findall(pattern, output, re.M)
 
-  # Deactivate disks on secondary node
   halted_disks = []
-  cmds = []
-  for name in node2disk[[node2_full, node_full][int(onmaster)]]:
-    halted_disks.append(name)
-    cmds.append(sq(["echo", "offline"]) + " >%s" % _GetDiskStatePath(name))
-  AssertEqual(StartSSH([node2, node][int(onmaster)]['primary'],
-                       '; '.join(cmds)).wait(), 0)
   try:
+    # Deactivate disks
+    cmds = []
+    for name in node2disk[[node2_full, node_full][int(onmaster)]]:
+      halted_disks.append(name)
+      cmds.append(sq(["echo", "offline"]) + " >%s" % _GetDiskStatePath(name))
+    AssertEqual(StartSSH([node2, node][int(onmaster)]['primary'],
+                         ' && '.join(cmds)).wait(), 0)
+
     # Write something to the disks and give some time to notice the problem
     cmds = []
     for disk in devpath:
@@ -257,6 +258,10 @@
       AssertEqual(StartSSH(node['primary'], ' && '.join(cmds)).wait(), 0)
       time.sleep(3)
 
+    for name in drbddevs:
+      cmd = ['drbdsetup', name, 'show']
+      AssertEqual(StartSSH(node['primary'], sq(cmd)).wait(), 0)
+
     # For manual checks
     cmd = ['gnt-instance', 'info', instance['name']]
     AssertEqual(StartSSH(master['primary'], sq(cmd)).wait(), 0)
@@ -269,15 +274,25 @@
     AssertEqual(StartSSH([node2, node][int(onmaster)]['primary'],
                          '; '.join(cmds)).wait(), 0)
 
+  if onmaster:
+    for name in drbddevs:
+      cmd = ['drbdsetup', name, 'detach']
+      AssertEqual(StartSSH(node['primary'], sq(cmd)).wait(), 0)
+  else:
+    for name in drbddevs:
+      cmd = ['drbdsetup', name, 'disconnect']
+      AssertEqual(StartSSH(node2['primary'], sq(cmd)).wait(), 0)
+
+  # Make sure disks are up again
+  #cmd = ['gnt-instance', 'activate-disks', instance['name']]
+  #AssertEqual(StartSSH(master['primary'], sq(cmd)).wait(), 0)
+
   # Restart instance
   cmd = ['gnt-instance', 'shutdown', instance['name']]
   AssertEqual(StartSSH(master['primary'], sq(cmd)).wait(), 0)
 
-  cmd = ['gnt-instance', 'startup', '--force', instance['name']]
-  AssertEqual(StartSSH(master['primary'], sq(cmd)).wait(), 0)
-
-  # Make sure disks are up again
-  cmd = ['gnt-instance', 'activate-disks', instance['name']]
+  #cmd = ['gnt-instance', 'startup', '--force', instance['name']]
+  cmd = ['gnt-instance', 'startup', instance['name']]
   AssertEqual(StartSSH(master['primary'], sq(cmd)).wait(), 0)
 
   cmd = ['gnt-cluster', 'verify']
@@ -289,7 +304,7 @@
   qa_utils.PrintError("Disk failure on primary node cannot be "
                       "tested due to potential crashes.")
   # The following can cause crashes, thus it's disabled until fixed
-  #return _TestInstanceDiskFailure(instance, node, node2, True)
+  return _TestInstanceDiskFailure(instance, node, node2, True)
 
 
 def TestInstanceSecondaryDiskFailure(instance, node, node2):
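One change worth illustrating is the switch from '; '.join(cmds) to ' && '.join(cmds) when the disks are taken offline: joined with ';', the remote shell reports only the exit status of the last sub-command, while '&&' aborts the chain on the first failure so the AssertEqual(..., 0) check actually notices it. Below is a minimal sketch of that shell behaviour; it uses subprocess with the placeholder commands false/true instead of the real StartSSH call and the echo-to-disk-state writes, so everything in it is illustrative rather than part of the revision.

import subprocess

# Two sub-commands: the first fails, the second succeeds.
cmds = ["false", "true"]

# Joined with '; ' the shell runs both commands and returns the exit
# status of the last one, so the earlier failure is silently swallowed.
rc_semicolon = subprocess.call("; ".join(cmds), shell=True)

# Joined with ' && ' the chain stops at the first failure and the overall
# status is non-zero, which an AssertEqual(..., 0) style check would catch.
rc_and = subprocess.call(" && ".join(cmds), shell=True)

print(rc_semicolon, rc_and)  # prints: 0 1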
