, testLoader
, testTypes
, testCLI
+ , testJSON
+ , testLUXI
+ , testSsconf
) where
import Test.QuickCheck
import qualified Data.Set as Set
import Data.Maybe
import Control.Monad
+import Control.Applicative
import qualified System.Console.GetOpt as GetOpt
import qualified Text.JSON as J
import qualified Data.Map
import qualified Ganeti.OpCodes as OpCodes
import qualified Ganeti.Jobs as Jobs
-import qualified Ganeti.Luxi
+import qualified Ganeti.Luxi as Luxi
+import qualified Ganeti.Ssconf as Ssconf
import qualified Ganeti.HTools.CLI as CLI
import qualified Ganeti.HTools.Cluster as Cluster
import qualified Ganeti.HTools.Container as Container
import qualified Ganeti.HTools.Instance as Instance
import qualified Ganeti.HTools.JSON as JSON
import qualified Ganeti.HTools.Loader as Loader
-import qualified Ganeti.HTools.Luxi
+import qualified Ganeti.HTools.Luxi as HTools.Luxi
import qualified Ganeti.HTools.Node as Node
import qualified Ganeti.HTools.Group as Group
import qualified Ganeti.HTools.PeerMap as PeerMap
-- | Maximum spindle ratio, used as the spindle ratio in the default
-- test instance policy (high enough not to constrain the tests).
maxSpindleRatio :: Double
maxSpindleRatio = 1024.0
+-- | Max nodes, used just to limit arbitrary instances for smaller
+-- opcode definitions (e.g. list of nodes in OpTestDelay).
+maxNodes :: Int
+maxNodes = 32
+
+-- | Max opcodes or jobs in a submit job and submit many jobs
+-- (keeps the generated submissions small).
+maxOpCodes :: Int
+maxOpCodes = 16
+
-- | The list of all known disk templates, derived from the
-- 'Bounded'\/'Enum' instances (used by several properties below).
allDiskTemplates :: [Types.DiskTemplate]
allDiskTemplates = [minBound..maxBound]
, Types.iSpecDiskSize = 0
, Types.iSpecDiskCount = 0
, Types.iSpecNicCount = 0
+ , Types.iSpecSpindleUse = 0
}
, Types.iPolicyMaxSpec = Types.ISpec { Types.iSpecMemorySize = maxBound
, Types.iSpecCpuCount = maxBound
, Types.iSpecDiskSize = maxBound
, Types.iSpecDiskCount = C.maxDisks
, Types.iSpecNicCount = C.maxNics
+ , Types.iSpecSpindleUse = maxBound
}
, Types.iPolicyStdSpec = Types.ISpec { Types.iSpecMemorySize = Types.unitMem
, Types.iSpecCpuCount = Types.unitCpu
, Types.iSpecDiskSize = Types.unitDsk
, Types.iSpecDiskCount = 1
, Types.iSpecNicCount = 1
+ , Types.iSpecSpindleUse = 1
}
- , Types.iPolicyDiskTemplates = [Types.DTDrbd8, Types.DTPlain]
+ , Types.iPolicyDiskTemplates = [minBound..maxBound]
, Types.iPolicyVcpuRatio = maxVcpuRatio -- somewhat random value, high
-- enough to not impact us
, Types.iPolicySpindleRatio = maxSpindleRatio
-- | Create an instance given its spec.
createInstance mem dsk vcpus =
Instance.create "inst-unnamed" mem dsk vcpus Types.Running [] True (-1) (-1)
- Types.DTDrbd8
+ Types.DTDrbd8 1
-- | Create a small cluster by repeating a node spec.
makeSmallCluster :: Node.Node -> Int -> Node.List
newelem <- arbitrary `suchThat` (`notElem` lst)
return (newelem:lst)) [] [1..cnt]
+-- | Checks if an instance is mirrored, i.e. has any mirror type
+-- other than 'Types.MirrorNone'.
+isMirrored :: Instance.Instance -> Bool
+isMirrored = (/= Types.MirrorNone) . Instance.mirrorType
+
+-- | Returns the possible change node types for a disk template.
+-- Non-mirrored instances cannot change nodes, hence the empty list.
+evacModeOptions :: Types.MirrorType -> [Types.EvacMode]
+evacModeOptions Types.MirrorNone = []
+evacModeOptions Types.MirrorInternal = [minBound..maxBound] -- DRBD can do all
+evacModeOptions Types.MirrorExternal = [Types.ChangePrimary, Types.ChangeAll]
+
-- * Arbitrary instances
-- | Defines a DNS name: between 1 and 64 characters drawn from the
-- 'DNSChar' character set.
getName :: Gen String
getName = do
n <- choose (1, 64)
- dn <- vector n::Gen [DNSChar]
+ dn <- vector n
return (map dnsGetChar dn)
-- | Generates an entire FQDN: 1 to 4 dot-separated DNS name
-- components.
getFQDN :: Gen String
getFQDN = do
ncomps <- choose (1, 4)
- names <- mapM (const getName) [1..ncomps::Int]
+ names <- vectorOf ncomps getName
return $ intercalate "." names
+-- | Combinator that generates a 'Maybe' using a sub-combinator.
+-- 'Nothing' and 'Just' are produced with equal probability (the
+-- arbitrary 'Bool' decides which).
+getMaybe :: Gen a -> Gen (Maybe a)
+getMaybe subgen = do
+ bool <- arbitrary
+ if bool
+ then Just <$> subgen
+ else return Nothing
+
+-- | Generates a fields list of 1 to 32 names. This uses the same
+-- character set as a DNS name (just for simplicity).
+getFields :: Gen [String]
+getFields = do
+ n <- choose (1, 32)
+ vectorOf n getName
+
-- | Defines a tag type: a newtype wrapper over 'Char' for characters
-- valid in tags.
newtype TagChar = TagChar { tagGetChar :: Char }
pn <- arbitrary
sn <- arbitrary
vcpus <- choose (0, lim_cpu)
- return $ Instance.create name mem dsk vcpus run_st [] True pn sn
- Types.DTDrbd8
+ dt <- arbitrary
+ return $ Instance.create name mem dsk vcpus run_st [] True pn sn dt 1
-- | Generates an instance smaller than a node.
genInstanceSmallerThanNode :: Node.Node -> Gen Instance.Instance
]
case op_id of
"OP_TEST_DELAY" ->
- liftM3 OpCodes.OpTestDelay arbitrary arbitrary arbitrary
+ OpCodes.OpTestDelay <$> arbitrary <*> arbitrary
+ <*> resize maxNodes (listOf getFQDN)
"OP_INSTANCE_REPLACE_DISKS" ->
- liftM5 OpCodes.OpInstanceReplaceDisks arbitrary arbitrary
- arbitrary arbitrary arbitrary
+ OpCodes.OpInstanceReplaceDisks <$> getFQDN <*> getMaybe getFQDN <*>
+ arbitrary <*> resize C.maxDisks arbitrary <*> getMaybe getName
"OP_INSTANCE_FAILOVER" ->
- liftM3 OpCodes.OpInstanceFailover arbitrary arbitrary
- arbitrary
+ OpCodes.OpInstanceFailover <$> getFQDN <*> arbitrary <*>
+ getMaybe getFQDN
"OP_INSTANCE_MIGRATE" ->
- liftM5 OpCodes.OpInstanceMigrate arbitrary arbitrary
- arbitrary arbitrary arbitrary
+ OpCodes.OpInstanceMigrate <$> getFQDN <*> arbitrary <*>
+ arbitrary <*> arbitrary <*> getMaybe getFQDN
_ -> fail "Wrong opcode"
instance Arbitrary Jobs.OpStatus where
-- | Generates 'OpGood' or 'OpFail' (chosen by an arbitrary 'Bool'),
-- each wrapping an arbitrary payload.
instance Arbitrary a => Arbitrary (Types.OpResult a) where
arbitrary = arbitrary >>= \c ->
if c
- then liftM Types.OpGood arbitrary
- else liftM Types.OpFail arbitrary
+ then Types.OpGood <$> arbitrary
+ else Types.OpFail <$> arbitrary
instance Arbitrary Types.ISpec where
arbitrary = do
dsk_s <- arbitrary::Gen (NonNegative Int)
cpu_c <- arbitrary::Gen (NonNegative Int)
nic_c <- arbitrary::Gen (NonNegative Int)
+ su <- arbitrary::Gen (NonNegative Int)
return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
, Types.iSpecCpuCount = fromIntegral cpu_c
, Types.iSpecDiskSize = fromIntegral dsk_s
, Types.iSpecDiskCount = fromIntegral dsk_c
, Types.iSpecNicCount = fromIntegral nic_c
+ , Types.iSpecSpindleUse = fromIntegral su
}
-- | Generates an ispec bigger than the given one.
dsk_s <- choose (Types.iSpecDiskSize imin, maxBound)
cpu_c <- choose (Types.iSpecCpuCount imin, maxBound)
nic_c <- choose (Types.iSpecNicCount imin, maxBound)
+ su <- choose (Types.iSpecSpindleUse imin, maxBound)
return Types.ISpec { Types.iSpecMemorySize = fromIntegral mem_s
, Types.iSpecCpuCount = fromIntegral cpu_c
, Types.iSpecDiskSize = fromIntegral dsk_s
, Types.iSpecDiskCount = fromIntegral dsk_c
, Types.iSpecNicCount = fromIntegral nic_c
+ , Types.iSpecSpindleUse = fromIntegral su
}
instance Arbitrary Types.IPolicy where
cndlist = flist ++ tlist ++ [undefined]
-- | Tests 'Utils.parseUnit': lowercase suffixes (m\/g\/t) are binary
-- multiples (identity\/1024\/1048576), uppercase suffixes (M\/G\/T) are
-- SI (decimal) units converted to MiB, and unknown suffixes fail.
prop_Utils_parseUnit (NonNegative n) =
- Utils.parseUnit (show n) == Types.Ok n &&
- Utils.parseUnit (show n ++ "m") == Types.Ok n &&
- (case Utils.parseUnit (show n ++ "M") of
- Types.Ok m -> if n > 0
- then m < n -- for positive values, X MB is < than X MiB
- else m == 0 -- but for 0, 0 MB == 0 MiB
- Types.Bad _ -> False) &&
- Utils.parseUnit (show n ++ "g") == Types.Ok (n*1024) &&
- Utils.parseUnit (show n ++ "t") == Types.Ok (n*1048576) &&
- Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int)
- where _types = n::Int
+ Utils.parseUnit (show n) ==? Types.Ok n .&&.
+ Utils.parseUnit (show n ++ "m") ==? Types.Ok n .&&.
+ Utils.parseUnit (show n ++ "M") ==? Types.Ok (truncate n_mb::Int) .&&.
+ Utils.parseUnit (show n ++ "g") ==? Types.Ok (n*1024) .&&.
+ Utils.parseUnit (show n ++ "G") ==? Types.Ok (truncate n_gb::Int) .&&.
+ Utils.parseUnit (show n ++ "t") ==? Types.Ok (n*1048576) .&&.
+ Utils.parseUnit (show n ++ "T") ==? Types.Ok (truncate n_tb::Int) .&&.
+ printTestCase "Internal error/overflow?"
+ (n_mb >=0 && n_gb >= 0 && n_tb >= 0) .&&.
+ property (Types.isBad (Utils.parseUnit (show n ++ "x")::Types.Result Int))
+ where _types = (n::Int)
+ -- decimal MB converted to (binary) MiB, computed in Rational
+ -- to avoid fixed-size integer overflow for large n
+ n_mb = (fromIntegral n::Rational) * 1000 * 1000 / 1024 / 1024
+ n_gb = n_mb * 1000
+ n_tb = n_gb * 1000
-- | Test list for the Utils module.
testSuite "Utils"
-- | We test that in a cluster, given a random node, we can find it by
-- its name and alias, as long as all names and aliases are unique,
-- and that we fail to find a non-existing name.
-prop_Container_findByName node =
+prop_Container_findByName =
+ forAll (genNode (Just 1) Nothing) $ \node ->
forAll (choose (1, 20)) $ \ cnt ->
forAll (choose (0, cnt - 1)) $ \ fidx ->
forAll (genUniquesList (cnt * 2)) $ \ allnames ->
$ zip names nodes
nl' = Container.fromList nodes'
target = snd (nodes' !! fidx)
- in Container.findByName nl' (Node.name target) == Just target &&
- Container.findByName nl' (Node.alias target) == Just target &&
- isNothing (Container.findByName nl' othername)
+ in Container.findByName nl' (Node.name target) ==? Just target .&&.
+ Container.findByName nl' (Node.alias target) ==? Just target .&&.
+ printTestCase "Found non-existing name"
+ (isNothing (Container.findByName nl' othername))
testSuite "Container"
[ 'prop_Container_addTwo
prop_Text_Load_Instance name mem dsk vcpus status
(NonEmpty pnode) snode
- (NonNegative pdx) (NonNegative sdx) autobal dt =
+ (NonNegative pdx) (NonNegative sdx) autobal dt su =
pnode /= snode && pdx /= sdx ==>
let vcpus_s = show vcpus
dsk_s = show dsk
mem_s = show mem
+ su_s = show su
status_s = Types.instanceStatusToRaw status
ndx = if null snode
then [(pnode, pdx)]
sdt = Types.diskTemplateToRaw dt
inst = Text.loadInst nl
[name, mem_s, dsk_s, vcpus_s, status_s,
- sbal, pnode, snode, sdt, tags]
+ sbal, pnode, snode, sdt, tags, su_s]
fail1 = Text.loadInst nl
[name, mem_s, dsk_s, vcpus_s, status_s,
sbal, pnode, pnode, tags]
then Node.noSecondary
else sdx) &&
Instance.autoBalance i == autobal &&
+ Instance.spindleUse i == su &&
Types.isBad fail1
prop_Text_Load_InstanceFail ktn fields =
- length fields /= 10 ==>
+ length fields /= 10 && length fields /= 11 ==>
case Text.loadInst nl fields of
Types.Ok _ -> failTest "Managed to load instance from invalid data"
Types.Bad msg -> printTestCase ("Unrecognised error message: " ++ msg) $
-- | Check that loading a node from a field list of the wrong length
-- (/= 8) fails.
prop_Text_Load_NodeFail fields =
length fields /= 8 ==> isNothing $ Text.loadNode Data.Map.empty fields
-prop_Text_NodeLSIdempotent node =
- (Text.loadNode defGroupAssoc.
- Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==?
- Just (Node.name n, n)
- -- override failN1 to what loadNode returns by default
- where n = Node.setPolicy Types.defIPolicy $
- node { Node.failN1 = True, Node.offline = False }
+-- | Check that serialising a node to text and loading it back yields
+-- the same node (after normalising the fields loadNode defaults).
+prop_Text_NodeLSIdempotent =
+ forAll (genNode (Just 1) Nothing) $ \node ->
+ -- override failN1 to what loadNode returns by default
+ let n = Node.setPolicy Types.defIPolicy $
+ node { Node.failN1 = True, Node.offline = False }
+ in
+ (Text.loadNode defGroupAssoc.
+ Utils.sepSplit '|' . Text.serializeNode defGroupList) n ==?
+ Just (Node.name n, n)
prop_Text_ISpecIdempotent ispec =
case Text.loadISpec "dummy" . Utils.sepSplit ',' .
-- small cluster sizes.
prop_Text_CreateSerialise =
forAll genTags $ \ctags ->
- forAll (choose (1, 2)) $ \reqnodes ->
forAll (choose (1, 20)) $ \maxiter ->
forAll (choose (2, 10)) $ \count ->
forAll genOnlineNode $ \node ->
forAll (genInstanceSmallerThanNode node) $ \inst ->
- let inst' = Instance.setMovable inst (reqnodes == 2)
- nl = makeSmallCluster node count
+ let nl = makeSmallCluster node count
+ reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
in case Cluster.genAllocNodes defGroupList nl reqnodes True >>= \allocn ->
- Cluster.iterateAlloc nl Container.empty (Just maxiter) inst' allocn [] []
+ Cluster.iterateAlloc nl Container.empty (Just maxiter) inst allocn [] []
of
Types.Bad msg -> failTest $ "Failed to allocate: " ++ msg
Types.Ok (_, _, _, [], _) -> printTestCase
inst' = setInstanceSmallerThanNode node inst
inst'' = inst' { Instance.mem = Instance.mem inst }
+-- | Check that adding a primary instance with too much disk fails
+-- with type FailDisk.
prop_Node_addPriFD node inst =
+ -- restrict to node-local storage templates — presumably the ones
+ -- charged against the node's free disk; TODO confirm vs Node.addPri
+ forAll (elements Instance.localStorageTemplates) $ \dt ->
Instance.dsk inst >= Node.fDsk node && not (Node.failN1 node) ==>
- case Node.addPri node inst'' of
- Types.OpFail Types.FailDisk -> True
- _ -> False
- where _types = (node::Node.Node, inst::Instance.Instance)
- inst' = setInstanceSmallerThanNode node inst
- inst'' = inst' { Instance.dsk = Instance.dsk inst }
+ let inst' = setInstanceSmallerThanNode node inst
+ inst'' = inst' { Instance.dsk = Instance.dsk inst
+ , Instance.diskTemplate = dt }
+ in case Node.addPri node inst'' of
+ Types.OpFail Types.FailDisk -> True
+ _ -> False
+-- | Check that adding a primary instance with too many VCPUs fails
+-- with type FailCPU.
prop_Node_addPriFC =
forAll (choose (1, maxCpu)) $ \extra ->
forAll genOnlineNode $ \node ->
-- | Check that an offline instance with reasonable disk size but
-- extra mem/cpu can always be added as a primary.
-prop_Node_addOffline (NonNegative extra_mem) (NonNegative extra_cpu) pdx =
+prop_Node_addOfflinePri (NonNegative extra_mem) (NonNegative extra_cpu) =
forAll genOnlineNode $ \node ->
forAll (genInstanceSmallerThanNode node) $ \inst ->
let inst' = inst { Instance.runSt = Types.AdminOffline
, Instance.mem = Node.availMem node + extra_mem
, Instance.vcpus = Node.availCpu node + extra_cpu }
- in case (Node.addPri node inst', Node.addSec node inst' pdx) of
- (Types.OpGood _, Types.OpGood _) -> property True
+ in case Node.addPri node inst' of
+ Types.OpGood _ -> property True
+ v -> failTest $ "Expected OpGood, but got: " ++ show v
+
+-- | Check that an offline instance with reasonable disk size but
+-- extra mem/cpu can always be added as a (DRBD) secondary.
+prop_Node_addOfflineSec (NonNegative extra_mem) (NonNegative extra_cpu) pdx =
+ forAll genOnlineNode $ \node ->
+ forAll (genInstanceSmallerThanNode node) $ \inst ->
+ let inst' = inst { Instance.runSt = Types.AdminOffline
+ , Instance.mem = Node.availMem node + extra_mem
+ , Instance.vcpus = Node.availCpu node + extra_cpu
+ , Instance.diskTemplate = Types.DTDrbd8 }
+ in case Node.addSec node inst' pdx of
+ Types.OpGood _ -> property True
-- NOTE(review): message still says "OpGood/OpGood" from the old
-- combined Pri+Sec test; only a single OpGood is expected here
v -> failTest $ "Expected OpGood/OpGood, but got: " ++ show v
-- | Checks for memory reservation changes.
prop_Node_rMem inst =
not (Instance.isOffline inst) ==>
- forAll (arbitrary `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
+ forAll (genOnlineNode `suchThat` ((> Types.unitMem) . Node.fMem)) $ \node ->
-- ab = auto_balance, nb = non-auto_balance
-- we use -1 as the primary node of the instance
- let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True }
+ let inst' = inst { Instance.pNode = -1, Instance.autoBalance = True
+ , Instance.diskTemplate = Types.DTDrbd8 }
inst_ab = setInstanceSmallerThanNode node inst'
inst_nb = inst_ab { Instance.autoBalance = False }
-- now we have the two instances, identical except the
forAll (genInstanceSmallerThanNode node) $ \inst ->
let pdx = Node.idx node + 1
inst' = Instance.setPri inst pdx
- in case Node.addSec node inst' pdx of
- Types.OpGood node' -> Node.removeSec node' inst' ==? node
+ inst'' = inst' { Instance.diskTemplate = Types.DTDrbd8 }
+ in case Node.addSec node inst'' pdx of
+ Types.OpGood node' -> Node.removeSec node' inst'' ==? node
_ -> failTest "Can't add instance"
testSuite "Node"
, 'prop_Node_addPriFD
, 'prop_Node_addPriFC
, 'prop_Node_addSec
- , 'prop_Node_addOffline
+ , 'prop_Node_addOfflinePri
+ , 'prop_Node_addOfflineSec
, 'prop_Node_rMem
, 'prop_Node_setMdsk
, 'prop_Node_tagMaps_idempotent
forAll (choose (5, 20)) $ \count ->
forAll genOnlineNode $ \node ->
let (nl, il, inst') = makeSmallEmptyCluster node count inst
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
+ in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
Cluster.tryAlloc nl il inst' of
Types.Bad _ -> False
Types.Ok as ->
-- computed allocation statistics are correct.
prop_ClusterCanTieredAlloc inst =
forAll (choose (2, 5)) $ \count ->
- forAll (choose (1, 2)) $ \rqnodes ->
forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
let nl = makeSmallCluster node count
il = Container.empty
+ rqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
allocnodes = Cluster.genAllocNodes defGroupList nl rqnodes True
in case allocnodes >>= \allocnodes' ->
Cluster.tieredAlloc nl il (Just 1) inst allocnodes' [] [] of
-- and allocate an instance on it.
genClusterAlloc count node inst =
let nl = makeSmallCluster node count
- in case Cluster.genAllocNodes defGroupList nl 2 True >>=
+ reqnodes = Instance.requiredNodes $ Instance.diskTemplate inst
+ in case Cluster.genAllocNodes defGroupList nl reqnodes True >>=
Cluster.tryAlloc nl Container.empty inst of
Types.Bad _ -> Types.Bad "Can't allocate"
Types.Ok as ->
-- | Check that allocating a mirrored instance on a small cluster and
-- then relocating it via 'IAlloc.processRelocate' succeeds.
prop_ClusterAllocRelocate =
forAll (choose (4, 8)) $ \count ->
forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
- forAll (genInstanceSmallerThanNode node) $ \inst ->
+ forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
case genClusterAlloc count node inst of
Types.Bad msg -> failTest msg
Types.Ok (nl, il, inst') ->
case IAlloc.processRelocate defGroupList nl il
- (Instance.idx inst) 1 [Instance.sNode inst'] of
- Types.Ok _ -> printTestCase "??" True -- huh, how to make
- -- this nicer...
+ (Instance.idx inst) 1
+ -- for DRBD pass the secondary node, otherwise the
+ -- primary — presumably the node to relocate away
+ -- from; TODO confirm against processRelocate
+ [(if Instance.diskTemplate inst' == Types.DTDrbd8
+ then Instance.sNode
+ else Instance.pNode) inst'] of
+ Types.Ok _ -> property True
Types.Bad msg -> failTest $ "Failed to relocate: " ++ msg
-- | Helper property checker for the result of a nodeEvac or
-- | Check that allocating a mirrored instance and then evacuating it
-- works for every evacuation mode valid for its mirror type (see
-- 'evacModeOptions').
prop_ClusterAllocEvacuate =
forAll (choose (4, 8)) $ \count ->
forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
- forAll (genInstanceSmallerThanNode node) $ \inst ->
+ forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
case genClusterAlloc count node inst of
Types.Bad msg -> failTest msg
Types.Ok (nl, il, inst') ->
conjoin $ map (\mode -> check_EvacMode defGroup inst' $
Cluster.tryNodeEvac defGroupList nl il mode
- [Instance.idx inst']) [minBound..maxBound]
+ [Instance.idx inst']) .
+ evacModeOptions .
+ Instance.mirrorType $ inst'
-- | Checks that on a 4-8 node cluster with two node groups, once we
-- allocate an instance on the first node group, we can also change
prop_ClusterAllocChangeGroup =
forAll (choose (4, 8)) $ \count ->
forAll (genOnlineNode `suchThat` (isNodeBig 4)) $ \node ->
- forAll (genInstanceSmallerThanNode node) $ \inst ->
+ forAll (genInstanceSmallerThanNode node `suchThat` isMirrored) $ \inst ->
case genClusterAlloc count node inst of
Types.Bad msg -> failTest msg
Types.Ok (nl, il, inst') ->
, 'prop_CLI_StringArg
, 'prop_CLI_stdopts
]
+
+-- * JSON tests
+
+-- | Check that a list of 'Int's round-trips through 'JSON.toArray'.
+prop_JSON_toArray :: [Int] -> Property
+prop_JSON_toArray intarr =
+ let arr = map J.showJSON intarr in
+ case JSON.toArray (J.JSArray arr) of
+ Types.Ok arr' -> arr ==? arr'
+ Types.Bad err -> failTest $ "Failed to parse array: " ++ err
+
+-- | Check that 'JSON.toArray' fails on non-array JSON values.
+prop_JSON_toArrayFail :: Int -> String -> Bool -> Property
+prop_JSON_toArrayFail i s b =
+ -- poor man's instance Arbitrary JSValue
+ forAll (elements [J.showJSON i, J.showJSON s, J.showJSON b]) $ \item ->
+ case JSON.toArray item of
+ Types.Bad _ -> property True
+ Types.Ok result -> failTest $ "Unexpected parse, got " ++ show result
+
+testSuite "JSON"
+ [ 'prop_JSON_toArray
+ , 'prop_JSON_toArrayFail
+ ]
+
+-- * Luxi tests
+
+-- | Any Luxi request constructor, chosen via 'elements' over the
+-- 'Bounded' range.
+instance Arbitrary Luxi.LuxiReq where
+ arbitrary = elements [minBound..maxBound]
+
+-- | Any query-via-Luxi resource, chosen the same way.
+instance Arbitrary Luxi.QrViaLuxi where
+ arbitrary = elements [minBound..maxBound]
+
+-- | Generates a Luxi operation for an arbitrary request type, using
+-- the DNS name/field generators for the string-typed arguments.
+instance Arbitrary Luxi.LuxiOp where
+ arbitrary = do
+ lreq <- arbitrary
+ case lreq of
+ Luxi.ReqQuery -> Luxi.Query <$> arbitrary <*> getFields <*> arbitrary
+ Luxi.ReqQueryNodes -> Luxi.QueryNodes <$> (listOf getFQDN) <*>
+ getFields <*> arbitrary
+ Luxi.ReqQueryGroups -> Luxi.QueryGroups <$> arbitrary <*>
+ arbitrary <*> arbitrary
+ Luxi.ReqQueryInstances -> Luxi.QueryInstances <$> (listOf getFQDN) <*>
+ getFields <*> arbitrary
+ Luxi.ReqQueryJobs -> Luxi.QueryJobs <$> arbitrary <*> getFields
+ Luxi.ReqQueryExports -> Luxi.QueryExports <$>
+ (listOf getFQDN) <*> arbitrary
+ Luxi.ReqQueryConfigValues -> Luxi.QueryConfigValues <$> getFields
+ Luxi.ReqQueryClusterInfo -> pure Luxi.QueryClusterInfo
+ Luxi.ReqQueryTags -> Luxi.QueryTags <$> getName <*> getFQDN
+ Luxi.ReqSubmitJob -> Luxi.SubmitJob <$> (resize maxOpCodes arbitrary)
+ Luxi.ReqSubmitManyJobs -> Luxi.SubmitManyJobs <$>
+ (resize maxOpCodes arbitrary)
+ -- NOTE(review): the two JSNull arguments are fixed
+ -- placeholders; presumably previous job/log info — confirm
+ -- against the WaitForJobChange definition
+ Luxi.ReqWaitForJobChange -> Luxi.WaitForJobChange <$> arbitrary <*>
+ getFields <*> pure J.JSNull <*>
+ pure J.JSNull <*> arbitrary
+ Luxi.ReqArchiveJob -> Luxi.ArchiveJob <$> arbitrary
+ Luxi.ReqAutoArchiveJobs -> Luxi.AutoArchiveJobs <$> arbitrary <*>
+ arbitrary
+ Luxi.ReqCancelJob -> Luxi.CancelJob <$> arbitrary
+ Luxi.ReqSetDrainFlag -> Luxi.SetDrainFlag <$> arbitrary
+ Luxi.ReqSetWatcherPause -> Luxi.SetWatcherPause <$> arbitrary
+
+-- | Simple check that encoding/decoding of LuxiOp works
+-- (buildCall/validateCall/decodeCall round-trip).
+prop_Luxi_CallEncoding :: Luxi.LuxiOp -> Property
+prop_Luxi_CallEncoding op =
+ (Luxi.validateCall (Luxi.buildCall op) >>= Luxi.decodeCall) ==? Types.Ok op
+
+testSuite "LUXI"
+ [ 'prop_Luxi_CallEncoding
+ ]
+
+-- * Ssconf tests
+
+-- | Any ssconf key, chosen via 'elements' over the 'Bounded' range.
+instance Arbitrary Ssconf.SSKey where
+ arbitrary = elements [minBound..maxBound]
+
+-- | Check that the filename generated for any ssconf key (with an
+-- empty directory prefix) starts with the standard ssconf prefix.
+prop_Ssconf_filename key =
+ printTestCase "Key doesn't start with correct prefix" $
+ Ssconf.sSFilePrefix `isPrefixOf` Ssconf.keyToFilename (Just "") key
+
+testSuite "Ssconf"
+ [ 'prop_Ssconf_filename
+ ]