1"""
2Management of Pacemaker/Corosync clusters with PCS
3==================================================
4
5A state module to manage Pacemaker/Corosync clusters
6with the Pacemaker/Corosync configuration system (PCS)
7
.. versionadded:: 2016.11.0
9
10:depends: pcs
11
12Walkthrough of a complete PCS cluster setup:
13http://clusterlabs.org/doc/en-US/Pacemaker/1.1/html/Clusters_from_Scratch/
14
15Requirements:
16    PCS is installed, pcs service is started and
17    the password for the hacluster user is set and known.
18
19Remark on the cibname variable used in the examples:
20    The use of the cibname variable is optional.
21    Use it only if you want to deploy your changes into a cibfile first and then push it.
    This only makes sense if you want to deploy multiple changes (which depend on each other) at once to the cluster.
23
24At first the cibfile must be created:
25
26.. code-block:: yaml
27
28    mysql_pcs__cib_present_cib_for_galera:
29        pcs.cib_present:
30            - cibname: cib_for_galera
31            - scope: None
32            - extra_args: None
33
34Then the cibfile can be modified by creating resources (creating only 1 resource for demonstration, see also 7.):
35
36.. code-block:: yaml
37
38    mysql_pcs__resource_present_galera:
39        pcs.resource_present:
40            - resource_id: galera
41            - resource_type: "ocf:heartbeat:galera"
42            - resource_options:
43                - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
44                - '--master'
45            - cibname: cib_for_galera
46
47After modifying the cibfile, it can be pushed to the live CIB in the cluster:
48
49.. code-block:: yaml
50
51    mysql_pcs__cib_pushed_cib_for_galera:
52        pcs.cib_pushed:
53            - cibname: cib_for_galera
54            - scope: None
55            - extra_args: None
56
57Create a cluster from scratch:
58
591. This authorizes nodes to each other. It probably won't work with Ubuntu as
60    it rolls out a default cluster that needs to be destroyed before the
61    new cluster can be created. This is a little complicated so it's best
62    to just run the cluster_setup below in most cases.:
63
64   .. code-block:: yaml
65
66       pcs_auth__auth:
67           pcs.auth:
68               - nodes:
69                   - node1.example.com
70                   - node2.example.com
71               - pcsuser: hacluster
72               - pcspasswd: hoonetorg
73
74
752. Do the initial cluster setup:
76
77   .. code-block:: yaml
78
79       pcs_setup__setup:
80           pcs.cluster_setup:
81               - nodes:
82                   - node1.example.com
83                   - node2.example.com
84               - pcsclustername: pcscluster
85               - extra_args:
86                   - '--start'
87                   - '--enable'
88               - pcsuser: hacluster
89               - pcspasswd: hoonetorg
90
913. Optional: Set cluster properties:
92
93   .. code-block:: yaml
94
95       pcs_properties__prop_has_value_no-quorum-policy:
96           pcs.prop_has_value:
97               - prop: no-quorum-policy
98               - value: ignore
99               - cibname: cib_for_cluster_settings
100
1014. Optional: Set resource defaults:
102
103   .. code-block:: yaml
104
105       pcs_properties__resource_defaults_to_resource-stickiness:
106           pcs.resource_defaults_to:
107               - default: resource-stickiness
108               - value: 100
109               - cibname: cib_for_cluster_settings
110
1115. Optional: Set resource op defaults:
112
113   .. code-block:: yaml
114
115       pcs_properties__resource_op_defaults_to_monitor-interval:
116           pcs.resource_op_defaults_to:
117               - op_default: monitor-interval
118               - value: 60s
119               - cibname: cib_for_cluster_settings
120
1216. Configure Fencing (!is often not optional on production ready cluster!):
122
123   .. code-block:: yaml
124
125       pcs_stonith__created_eps_fence:
126           pcs.stonith_present:
127               - stonith_id: eps_fence
128               - stonith_device_type: fence_eps
129               - stonith_device_options:
130                   - 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
131                   - 'ipaddr=myepsdevice.example.org'
132                   - 'power_wait=5'
133                   - 'verbose=1'
134                   - 'debug=/var/log/pcsd/eps_fence.log'
135                   - 'login=hidden'
136                   - 'passwd=hoonetorg'
137               - cibname: cib_for_stonith
138
1397. Add resources to your cluster:
140
141   .. code-block:: yaml
142
143       mysql_pcs__resource_present_galera:
144           pcs.resource_present:
145               - resource_id: galera
146               - resource_type: "ocf:heartbeat:galera"
147               - resource_options:
148                   - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
                   - '--master'
               - cibname: cib_for_galera
151
1528. Optional: Add constraints (locations, colocations, orders):
153
154   .. code-block:: yaml
155
156       haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
157           pcs.constraint_present:
158               - constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
159               - constraint_type: colocation
160               - constraint_options:
161                   - 'add'
162                   - 'vip_galera'
163                   - 'with'
164                   - 'haproxy-clone'
165               - cibname: cib_for_haproxy
166
167.. versionadded:: 2016.3.0
168"""
169
170import logging
171import os
172
173import salt.utils.files
174import salt.utils.path
175import salt.utils.stringutils
176
177log = logging.getLogger(__name__)
178
179
def __virtual__():
    """
    Only load this state module when the ``pcs`` binary is available.
    """
    if not salt.utils.path.which("pcs"):
        return (False, "Unable to locate command: pcs")
    return "pcs"
187
188
def _file_read(path):
    """
    Read a file and return its content as unicode text.

    path
        path of the file to read

    Returns the file content, or ``False`` if the file does not exist.
    """
    content = False
    if os.path.exists(path):
        # "r" is sufficient for a pure read and, unlike the old "r+",
        # does not require write permission on the file.
        with salt.utils.files.fopen(path, "r") as fp_:
            content = salt.utils.stringutils.to_unicode(fp_.read())
        # note: the with-block closes the file; no explicit close needed
    return content
199
200
def _file_write(path, content):
    """
    Write content to a file, creating it or truncating existing content.

    path
        destination file path
    content
        text to write (converted to the native str type)
    """
    # The context manager closes the file on exit; the previous explicit
    # fp_.close() after the with-block was a redundant no-op.
    with salt.utils.files.fopen(path, "w+") as fp_:
        fp_.write(salt.utils.stringutils.to_str(content))
208
209
def _get_cibpath():
    """
    Return the directory on the minion where cached CIB files are stored.
    """
    path = os.path.join(__opts__["cachedir"], "pcs", __env__)
    log.trace("cibpath: %s", path)
    return path
217
218
def _get_cibfile(cibname):
    """
    Build the full path of the cached CIB file for the given CIB name.
    """
    path = os.path.join(_get_cibpath(), "{}.cib".format(cibname))
    log.trace("cibfile: %s", path)
    return path
226
227
def _get_cibfile_tmp(cibname):
    """
    Build the full path of the temporary working copy of a cached CIB file.
    """
    tmpfile = _get_cibfile(cibname) + ".tmp"
    log.trace("cibfile_tmp: %s", tmpfile)
    return tmpfile
235
236
def _get_cibfile_cksum(cibname):
    """
    Build the full path of the checksum file belonging to a cached CIB file.
    """
    cksumfile = _get_cibfile(cibname) + ".cksum"
    log.trace("cibfile_cksum: %s", cksumfile)
    return cksumfile
244
245
def _get_node_list_for_version(nodes):
    """
    Normalize a node list for the installed pcs version.

    pcs releases older than 0.10 report hostnames in lower case, while
    newer versions keep the proper case.  This accommodates the old
    behaviour by lowercasing the node names for old pcs releases.
    """
    pcs_version = __salt__["pkg.version"]("pcs")
    if __salt__["pkg.version_cmp"](pcs_version, "0.10") == -1:
        log.info("Node list converted to lower case for backward compatibility")
        return [node.lower() for node in nodes]
    return nodes
258
259
def _item_present(
    name,
    item,
    item_id,
    item_type,
    show="show",
    create="create",
    extra_args=None,
    cibname=None,
):
    """
    Ensure that an item is created

    Generic worker behind the public ``*_present`` states: it queries the
    current configuration via ``pcs.item_show``, decides whether the item
    already exists (using one of three matching strategies, see below) and
    creates it via ``pcs.item_create`` only when required.

    name
        Irrelevant, not used
    item
        config, property, resource, constraint etc.
    item_id
        id of the item
    item_type
        item type
    show
        show command (probably None, default: show)
    create
        create command (create or set f.e., default: create)
    extra_args
        additional options for the pcs command
    cibname
        use a cached CIB-file named like cibname instead of the live CIB
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    # assume the item must be created until one of the checks below proves
    # it already exists
    item_create_required = True

    # operate on a cached CIB file instead of the live CIB when requested
    cibfile = None
    if isinstance(cibname, str):
        cibfile = _get_cibfile(cibname)

    if not isinstance(extra_args, (list, tuple)):
        extra_args = []

    # split off key and value (item_id contains =)
    item_id_key = item_id
    item_id_value = None
    if "=" in item_id:
        item_id_key = item_id.split("=")[0].strip()
        # everything after the first "=" is the value (value itself may
        # contain further "=" characters)
        item_id_value = item_id.replace(item_id.split("=")[0] + "=", "").strip()
        log.trace("item_id_key=%s item_id_value=%s", item_id_key, item_id_value)

    # constraints, properties, resource defaults or resource op defaults
    # do not support specifying an id on 'show' command
    item_id_show = item_id
    if item in ["constraint"] or "=" in item_id:
        item_id_show = None

    is_existing = __salt__["pcs.item_show"](
        item=item, item_id=item_id_show, item_type=item_type, show=show, cibfile=cibfile
    )
    log.trace(
        "Output of pcs.item_show item=%s item_id=%s item_type=%s cibfile=%s: %s",
        item,
        item_id_show,
        item_type,
        cibfile,
        is_existing,
    )

    # matching strategy 1:
    # key,value pairs (item_id contains =) - match key and value
    if item_id_value is not None:
        for line in is_existing["stdout"].splitlines():
            # only consider "key: value" lines with exactly one colon
            if len(line.split(":")) in [2]:
                key = line.split(":")[0].strip()
                value = line.split(":")[1].strip()
                if item_id_key in [key]:
                    if item_id_value in [value]:
                        item_create_required = False

    # matching strategy 2:
    # constraints match on '(id:<id>)'
    elif item in ["constraint"]:
        for line in is_existing["stdout"].splitlines():
            if "(id:{})".format(item_id) in line:
                item_create_required = False

    # matching strategy 3:
    # item_id was provided,
    # return code 0 indicates, that resource already exists
    else:
        if is_existing["retcode"] in [0]:
            item_create_required = False

    if not item_create_required:
        ret["comment"] += "{} {} ({}) is already existing\n".format(
            str(item), str(item_id), str(item_type)
        )
        return ret

    # test=True: report the pending change without touching the cluster
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] += "{} {} ({}) is set to be created\n".format(
            str(item), str(item_id), str(item_type)
        )
        return ret

    item_create = __salt__["pcs.item_create"](
        item=item,
        item_id=item_id,
        item_type=item_type,
        create=create,
        extra_args=extra_args,
        cibfile=cibfile,
    )

    log.trace("Output of pcs.item_create: %s", item_create)

    if item_create["retcode"] in [0]:
        ret["comment"] += "Created {} {} ({})\n".format(item, item_id, item_type)
        ret["changes"].update({item_id: {"old": "", "new": str(item_id)}})
    else:
        ret["result"] = False
        ret["comment"] += "Failed to create {} {} ({})\n".format(
            item, item_id, item_type
        )

    log.trace("ret: %s", ret)

    return ret
384
385
def auth(name, nodes, pcsuser="hacluster", pcspasswd="hacluster", extra_args=None):
    """
    Ensure all nodes are authorized to the cluster

    name
        Irrelevant, not used (recommended: pcs_auth__auth)
    nodes
        a list of nodes which should be authorized to the cluster
    pcsuser
        user for communication with pcs (default: hacluster)
    pcspasswd
        password for pcsuser (default: hacluster)
    extra_args
        list of extra args for the 'pcs cluster auth' command, there are none so it's here for compatibility.

    Example:

    .. code-block:: yaml

        pcs_auth__auth:
            pcs.auth:
                - nodes:
                    - node1.example.com
                    - node2.example.com
                - pcsuser: hacluster
                - pcspasswd: hoonetorg
                - extra_args: []
    """

    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    auth_required = False

    # older pcs versions report lowercase hostnames
    nodes = _get_node_list_for_version(nodes)

    authorized = __salt__["pcs.is_auth"](
        nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd
    )
    log.trace("Output of pcs.is_auth: %s", authorized)

    # Parse "node: state" lines into a dict.  Skip any line without a
    # colon (blank or informational lines) — indexing split(":")[1] on
    # such a line would raise an IndexError.
    authorized_dict = {}
    for line in authorized["stdout"].splitlines():
        if ":" not in line:
            continue
        node = line.split(":")[0].strip()
        auth_state = line.split(":")[1].strip()
        if node in nodes:
            authorized_dict.update({node: auth_state})
    log.trace("authorized_dict: %s", authorized_dict)

    for node in nodes:
        if node in authorized_dict and (
            authorized_dict[node] == "Already authorized"
            or authorized_dict[node] == "Authorized"
        ):
            ret["comment"] += "Node {} is already authorized\n".format(node)
        else:
            auth_required = True
            if __opts__["test"]:
                ret["comment"] += "Node is set to authorize: {}\n".format(node)

    if not auth_required:
        return ret

    # test=True: report pending authorization without running pcs auth
    if __opts__["test"]:
        ret["result"] = None
        return ret

    authorize = __salt__["pcs.auth"](
        nodes=nodes, pcsuser=pcsuser, pcspasswd=pcspasswd, extra_args=extra_args
    )
    log.trace("Output of pcs.auth: %s", authorize)

    # same "node: state" parsing (and colon guard) as above
    authorize_dict = {}
    for line in authorize["stdout"].splitlines():
        if ":" not in line:
            continue
        node = line.split(":")[0].strip()
        auth_state = line.split(":")[1].strip()
        if node in nodes:
            authorize_dict.update({node: auth_state})
    log.trace("authorize_dict: %s", authorize_dict)

    for node in nodes:
        if node in authorize_dict and authorize_dict[node] == "Authorized":
            ret["comment"] += "Authorized {}\n".format(node)
            ret["changes"].update({node: {"old": "", "new": "Authorized"}})
        else:
            ret["result"] = False
            if node in authorized_dict:
                ret[
                    "comment"
                ] += "Authorization check for node {} returned: {}\n".format(
                    node, authorized_dict[node]
                )
            if node in authorize_dict:
                ret["comment"] += "Failed to authorize {} with error {}\n".format(
                    node, authorize_dict[node]
                )

    return ret
482
483
def cluster_setup(
    name,
    nodes,
    pcsclustername="pcscluster",
    extra_args=None,
    pcsuser="hacluster",
    pcspasswd="hacluster",
    pcs_auth_extra_args=None,
    wipe_default=False,
):
    """
    Setup Pacemaker cluster on nodes.
    Should be run on one cluster node only to avoid race conditions.
    This performs auth as well as setup so can be run in place of the auth state.
    It is recommended not to run auth on Debian/Ubuntu for a new cluster and just
    to run this because of the initial cluster config that is installed on
    Ubuntu/Debian by default.


    name
        Irrelevant, not used (recommended: pcs_setup__setup)
    nodes
        a list of nodes which should be set up
    pcsclustername
        Name of the Pacemaker cluster
    extra_args
        list of extra args for the \'pcs cluster setup\' command
    pcsuser
        The username for authenticating the cluster (default: hacluster)
    pcspasswd
        The password for authenticating the cluster (default: hacluster)
    pcs_auth_extra_args
        Extra args to be passed to the auth function in case of reauth.
    wipe_default
        This removes the files that are installed with Debian based operating systems.

    Example:

    .. code-block:: yaml

        pcs_setup__setup:
            pcs.cluster_setup:
                - nodes:
                    - node1.example.com
                    - node2.example.com
                - pcsclustername: pcscluster
                - extra_args:
                    - '--start'
                    - '--enable'
                - pcsuser: hacluster
                - pcspasswd: hoonetorg
    """

    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    setup_required = False

    # Query the current cluster configuration; the cluster counts as
    # already set up when its "Cluster Name" matches pcsclustername.
    config_show = __salt__["pcs.config_show"]()
    log.trace("Output of pcs.config_show: %s", config_show)

    for line in config_show["stdout"].splitlines():
        # only consider "key: value" lines with exactly one colon
        if len(line.split(":")) in [2]:
            key = line.split(":")[0].strip()
            value = line.split(":")[1].strip()
            if key in ["Cluster Name"]:
                if value in [pcsclustername]:
                    ret["comment"] += "Cluster {} is already set up\n".format(
                        pcsclustername
                    )
                else:
                    setup_required = True
                    if __opts__["test"]:
                        ret["comment"] += "Cluster {} is set to set up\n".format(
                            pcsclustername
                        )

    if not setup_required:
        log.info("No setup required")
        return ret

    # test=True: report the pending setup without changing anything
    if __opts__["test"]:
        ret["result"] = None
        return ret

    # Debian based distros deploy corosync with some initial cluster setup.
    # The following detects if it's a Debian based distro and then stops Corosync
    # and removes the config files. I've put this here because trying to do all this in the
    # state file can break running clusters and can also take quite a long time to debug.

    log.debug("OS_Family: %s", __grains__.get("os_family"))
    if __grains__.get("os_family") == "Debian" and wipe_default:
        __salt__["file.remove"]("/etc/corosync/corosync.conf")
        __salt__["file.remove"]("/var/lib/pacemaker/cib/cib.xml")
        __salt__["service.stop"]("corosync")
        # wiping the default config invalidates any prior auth, so re-auth
        auth("pcs_auth__auth", nodes, pcsuser, pcspasswd, pcs_auth_extra_args)

    # older pcs versions report lowercase hostnames
    nodes = _get_node_list_for_version(nodes)

    if not isinstance(extra_args, (list, tuple)):
        extra_args = []

    setup = __salt__["pcs.cluster_setup"](
        nodes=nodes, pcsclustername=pcsclustername, extra_args=extra_args
    )
    log.trace("Output of pcs.cluster_setup: %s", setup)

    # collect per-node setup states from "node: state" output lines
    setup_dict = {}
    for line in setup["stdout"].splitlines():
        log.trace("line: %s", line)
        log.trace("line.split(:).len: %s", len(line.split(":")))
        if len(line.split(":")) in [2]:
            node = line.split(":")[0].strip()
            setup_state = line.split(":")[1].strip()
            if node in nodes:
                setup_dict.update({node: setup_state})

    log.trace("setup_dict: %s", setup_dict)

    for node in nodes:
        # different pcs versions report success with different wordings
        if node in setup_dict and setup_dict[node] in [
            "Succeeded",
            "Success",
            "Cluster enabled",
        ]:
            ret["comment"] += "Set up {}\n".format(node)
            ret["changes"].update({node: {"old": "", "new": "Setup"}})
        else:
            ret["result"] = False
            ret["comment"] += "Failed to setup {}\n".format(node)
            if node in setup_dict:
                ret["comment"] += "{}: setup_dict: {}\n".format(node, setup_dict[node])
            ret["comment"] += str(setup)

    log.trace("ret: %s", ret)

    return ret
619
620
def cluster_node_present(name, node, extra_args=None):
    """
    Add a node to the Pacemaker cluster via PCS
    Should be run on one cluster node only
    (there may be races)
    Can only be run on a already setup/added node

    name
        Irrelevant, not used (recommended: pcs_setup__node_add_{{node}})
    node
        node that should be added
    extra_args
        list of extra args for the \'pcs cluster node add\' command

    Example:

    .. code-block:: yaml

        pcs_setup__node_add_node1.example.com:
            pcs.cluster_node_present:
                - node: node1.example.com
                - extra_args:
                    - '--start'
                    - '--enable'
    """

    ret = {"name": name, "result": True, "comment": "", "changes": {}}
    node_add_required = True
    current_nodes = []

    # check corosync membership to see whether the node is already part
    # of the cluster
    is_member_cmd = ["pcs", "status", "nodes", "corosync"]
    is_member = __salt__["cmd.run_all"](
        is_member_cmd, output_loglevel="trace", python_shell=False
    )
    log.trace("Output of pcs status nodes corosync: %s", is_member)

    for line in is_member["stdout"].splitlines():
        # only consider "key: value" lines with exactly one colon
        if len(line.split(":")) in [2]:
            key = line.split(":")[0].strip()
            value = line.split(":")[1].strip()
            if key in ["Offline", "Online"]:
                if len(value.split()) > 0:
                    if node in value.split():
                        node_add_required = False
                        ret[
                            "comment"
                        ] += "Node {} is already member of the cluster\n".format(node)
                    else:
                        # remember existing members: corosync.conf must be
                        # updated on each of them when the new node is added
                        current_nodes += value.split()

    if not node_add_required:
        return ret

    # test=True: report the pending addition without changing anything
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] += "Node {} is set to be added to the cluster\n".format(node)
        return ret

    if not isinstance(extra_args, (list, tuple)):
        extra_args = []

    node_add = __salt__["pcs.cluster_node_add"](node=node, extra_args=extra_args)
    log.trace("Output of pcs.cluster_node_add: %s", node_add)

    # collect per-node states from "node: state" output lines
    node_add_dict = {}
    for line in node_add["stdout"].splitlines():
        log.trace("line: %s", line)
        log.trace("line.split(:).len: %s", len(line.split(":")))
        if len(line.split(":")) in [2]:
            current_node = line.split(":")[0].strip()
            current_node_add_state = line.split(":")[1].strip()
            if current_node in current_nodes + [node]:
                node_add_dict.update({current_node: current_node_add_state})
    log.trace("node_add_dict: %s", node_add_dict)

    # every pre-existing member must report "Corosync updated"
    for current_node in current_nodes:
        if current_node in node_add_dict:
            if node_add_dict[current_node] not in ["Corosync updated"]:
                ret["result"] = False
                ret["comment"] += "Failed to update corosync.conf on node {}\n".format(
                    current_node
                )
                ret["comment"] += "{}: node_add_dict: {}\n".format(
                    current_node, node_add_dict[current_node]
                )
        else:
            ret["result"] = False
            ret["comment"] += "Failed to update corosync.conf on node {}\n".format(
                current_node
            )

    if node in node_add_dict and node_add_dict[node] in ["Succeeded", "Success"]:
        ret["comment"] += "Added node {}\n".format(node)
        ret["changes"].update({node: {"old": "", "new": "Added"}})
    else:
        ret["result"] = False
        # fixed message: a space was missing between "node" and the name
        ret["comment"] += "Failed to add node {}\n".format(node)
        if node in node_add_dict:
            ret["comment"] += "{}: node_add_dict: {}\n".format(
                node, node_add_dict[node]
            )
        ret["comment"] += str(node_add)

    log.trace("ret: %s", ret)

    return ret
727
728
def cib_present(name, cibname, scope=None, extra_args=None):
    """
    Ensure that a CIB-file with the content of the current live CIB is created

    Dumps the live CIB to a temporary file, then updates both the cached
    CIB file and a companion checksum file only when they differ from the
    live CIB.  The checksum is later used by pcs.cib_pushed to detect
    whether the cached CIB was modified.

    Should be run on one cluster node only
    (there may be races)

    name
        Irrelevant, not used (recommended: {{formulaname}}__cib_present_{{cibname}})
    cibname
        name/path of the file containing the CIB
    scope
        specific section of the CIB (default: None)
    extra_args
        additional options for creating the CIB-file

    Example:

    .. code-block:: yaml

        mysql_pcs__cib_present_cib_for_galera:
            pcs.cib_present:
                - cibname: cib_for_galera
                - scope: None
                - extra_args: None
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    # hash algorithm used for comparing CIB file contents
    cib_hash_form = "sha256"

    cib_create_required = False
    cib_cksum_required = False
    cib_required = False

    cibpath = _get_cibpath()
    cibfile = _get_cibfile(cibname)
    cibfile_tmp = _get_cibfile_tmp(cibname)
    cibfile_cksum = _get_cibfile_cksum(cibname)

    if not os.path.exists(cibpath):
        os.makedirs(cibpath)

    if not isinstance(extra_args, (list, tuple)):
        extra_args = []

    # start from a clean temp file before dumping the live CIB into it
    if os.path.exists(cibfile_tmp):
        __salt__["file.remove"](cibfile_tmp)

    cib_create = __salt__["pcs.cib_create"](
        cibfile=cibfile_tmp, scope=scope, extra_args=extra_args
    )
    log.trace("Output of pcs.cib_create: %s", cib_create)

    if cib_create["retcode"] not in [0] or not os.path.exists(cibfile_tmp):
        ret["result"] = False
        ret["comment"] += "Failed to get live CIB\n"
        return ret

    # hash of the freshly dumped live CIB, stored as "<form>:<digest>"
    cib_hash_live = "{}:{}".format(
        cib_hash_form, __salt__["file.get_hash"](path=cibfile_tmp, form=cib_hash_form)
    )
    log.trace("cib_hash_live: %s", cib_hash_live)

    # previously recorded checksum (False if the checksum file is missing)
    cib_hash_cur = _file_read(path=cibfile_cksum)

    if cib_hash_cur not in [cib_hash_live]:
        cib_cksum_required = True

    log.trace("cib_hash_cur: %s", cib_hash_cur)

    # cached CIB file missing or its content differs from the live CIB
    if not os.path.exists(cibfile) or not __salt__["file.check_hash"](
        path=cibfile, file_hash=cib_hash_live
    ):
        cib_create_required = True

    if cib_cksum_required or cib_create_required:
        cib_required = True

    if not cib_create_required:
        # temp dump no longer needed when the cached CIB is up to date
        __salt__["file.remove"](cibfile_tmp)
        ret["comment"] += "CIB {} is already equal to the live CIB\n".format(cibname)

    if not cib_cksum_required:
        ret["comment"] += "CIB {} checksum is correct\n".format(cibname)

    if not cib_required:
        return ret

    # test=True: report pending changes and clean up the temp dump
    if __opts__["test"]:
        __salt__["file.remove"](cibfile_tmp)
        ret["result"] = None
        if cib_create_required:
            ret["comment"] += "CIB {} is set to be created/updated\n".format(cibname)
        if cib_cksum_required:
            ret["comment"] += "CIB {} checksum is set to be created/updated\n".format(
                cibname
            )
        return ret

    if cib_create_required:
        # promote the temp dump to the cached CIB file
        __salt__["file.move"](cibfile_tmp, cibfile)

        # verify the move produced a file matching the live CIB hash
        if __salt__["file.check_hash"](path=cibfile, file_hash=cib_hash_live):
            ret["comment"] += "Created/updated CIB {}\n".format(cibname)
            ret["changes"].update({"cibfile": cibfile})
        else:
            ret["result"] = False
            ret["comment"] += "Failed to create/update CIB {}\n".format(cibname)

    if cib_cksum_required:
        # record the live hash so pcs.cib_pushed can detect modifications
        _file_write(cibfile_cksum, cib_hash_live)

        if _file_read(cibfile_cksum) in [cib_hash_live]:
            ret["comment"] += "Created/updated checksum {} of CIB {}\n".format(
                cib_hash_live, cibname
            )
            ret["changes"].update({"cibcksum": cib_hash_live})
        else:
            ret["result"] = False
            ret["comment"] += "Failed to create/update checksum {} CIB {}\n".format(
                cib_hash_live, cibname
            )

    log.trace("ret: %s", ret)

    return ret
855
856
def cib_pushed(name, cibname, scope=None, extra_args=None):
    """
    Push a cached CIB-file to the live cluster CIB, but only if its
    content was modified after pcs.cib_present created it (detected via
    the checksum file written by pcs.cib_present).

    Should be run on one cluster node only
    (there may be races)

    name
        Irrelevant, not used (recommended: {{formulaname}}__cib_pushed_{{cibname}})
    cibname
        name/path of the file containing the CIB
    scope
        specific section of the CIB
    extra_args
        additional options for creating the CIB-file

    Example:

    .. code-block:: yaml

        mysql_pcs__cib_pushed_cib_for_galera:
            pcs.cib_pushed:
                - cibname: cib_for_galera
                - scope: None
                - extra_args: None
    """
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    hash_form = "sha256"
    cibfile = _get_cibfile(cibname)
    cksumfile = _get_cibfile_cksum(cibname)

    if not isinstance(extra_args, (list, tuple)):
        extra_args = []

    if not os.path.exists(cibfile):
        ret["result"] = False
        ret["comment"] += "CIB-file {} does not exist\n".format(cibfile)
        return ret

    current_hash = "{}:{}".format(
        hash_form, __salt__["file.get_hash"](path=cibfile, form=hash_form)
    )
    log.trace("cib_hash_cibfile: %s", current_hash)

    if _file_read(cksumfile) in [current_hash]:
        # hash still matches the one recorded at creation time: no push needed
        ret[
            "comment"
        ] += "CIB {} is not changed since creation through pcs.cib_present\n".format(
            cibname
        )
        return ret

    # test=True: report the pending push without touching the live CIB
    if __opts__["test"]:
        ret["result"] = None
        ret["comment"] += "CIB {} is set to be pushed as the new live CIB\n".format(
            cibname
        )
        return ret

    push_result = __salt__["pcs.cib_push"](
        cibfile=cibfile, scope=scope, extra_args=extra_args
    )
    log.trace("Output of pcs.cib_push: %s", push_result)

    if push_result["retcode"] in [0]:
        ret["comment"] += "Pushed CIB {}\n".format(cibname)
        ret["changes"].update({"cibfile_pushed": cibfile})
    else:
        ret["result"] = False
        ret["comment"] += "Failed to push CIB {}\n".format(cibname)

    log.trace("ret: %s", ret)

    return ret
938
939
def prop_has_value(name, prop, value, extra_args=None, cibname=None):
    """
    Ensure a cluster property is set to the requested value

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.

    name
        Irrelevant, not used (recommended: pcs_properties__prop_has_value_{{prop}})
    prop
        name of the property
    value
        value of the property
    extra_args
        additional options for the pcs property command
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_properties__prop_has_value_no-quorum-policy:
            pcs.prop_has_value:
                - prop: no-quorum-policy
                - value: ignore
                - cibname: cib_for_cluster_settings
    """
    # Properties are expressed to pcs as a single "key=value" token.
    prop_assignment = f"{prop}={value}"
    return _item_present(
        name=name,
        item="property",
        item_id=prop_assignment,
        item_type=None,
        create="set",
        extra_args=extra_args,
        cibname=cibname,
    )
977
978
def resource_defaults_to(name, default, value, extra_args=None, cibname=None):
    """
    Ensure a cluster-wide resource default is set to the requested value

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.
    Requires a functional pacemaker/corosync on the node.

    name
        Irrelevant, not used (recommended: pcs_properties__resource_defaults_to_{{default}})
    default
        name of the default resource property
    value
        value of the default resource property
    extra_args
        additional options for the pcs command
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_properties__resource_defaults_to_resource-stickiness:
            pcs.resource_defaults_to:
                - default: resource-stickiness
                - value: 100
                - cibname: cib_for_cluster_settings
    """
    # Defaults are expressed to pcs as a single "key=value" token and are
    # both listed and created via the "defaults" subcommand.
    default_assignment = f"{default}={value}"
    return _item_present(
        name=name,
        item="resource",
        item_id=default_assignment,
        item_type=None,
        show="defaults",
        create="defaults",
        extra_args=extra_args,
        cibname=cibname,
    )
1018
1019
def resource_op_defaults_to(name, op_default, value, extra_args=None, cibname=None):
    """
    Ensure a cluster-wide resource operation default is set to the requested value

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.
    Requires a functional pacemaker/corosync on the node.

    name
        Irrelevant, not used (recommended: pcs_properties__resource_op_defaults_to_{{op_default}})
    op_default
        name of the operation default resource property
    value
        value of the operation default resource property
    extra_args
        additional options for the pcs command
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_properties__resource_op_defaults_to_monitor-interval:
            pcs.resource_op_defaults_to:
                - op_default: monitor-interval
                - value: 60s
                - cibname: cib_for_cluster_settings
    """
    # Operation defaults live behind the "op defaults" subcommand pair,
    # hence the two-element show/create lists.
    op_default_assignment = f"{op_default}={value}"
    return _item_present(
        name=name,
        item="resource",
        item_id=op_default_assignment,
        item_type=None,
        show=["op", "defaults"],
        create=["op", "defaults"],
        extra_args=extra_args,
        cibname=cibname,
    )
1059
1060
def stonith_present(
    name, stonith_id, stonith_device_type, stonith_device_options=None, cibname=None
):
    """
    Ensure a STONITH (fencing) resource exists in the cluster

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.
    Requires a functional pacemaker/corosync on the node.

    name
        Irrelevant, not used (recommended: pcs_stonith__created_{{stonith_id}})
    stonith_id
        name for the stonith resource
    stonith_device_type
        name of the stonith agent fence_eps, fence_xvm f.e.
    stonith_device_options
        additional options for creating the stonith resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        pcs_stonith__created_eps_fence:
            pcs.stonith_present:
                - stonith_id: eps_fence
                - stonith_device_type: fence_eps
                - stonith_device_options:
                    - 'pcmk_host_map=node1.example.org:01;node2.example.org:02'
                    - 'ipaddr=myepsdevice.example.org'
                    - 'power_wait=5'
                    - 'verbose=1'
                    - 'debug=/var/log/pcsd/eps_fence.log'
                    - 'login=hidden'
                    - 'passwd=hoonetorg'
                - cibname: cib_for_stonith
    """
    # Delegate to the generic item handler; "stonith" selects the
    # corresponding pcs subcommand.
    return _item_present(
        name=name,
        item="stonith",
        item_id=stonith_id,
        item_type=stonith_device_type,
        extra_args=stonith_device_options,
        cibname=cibname,
    )
1108
1109
def resource_present(
    name, resource_id, resource_type, resource_options=None, cibname=None
):
    """
    Ensure a cluster resource exists

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.
    Requires a functional pacemaker/corosync on the node.

    name
        Irrelevant, not used (recommended: {{formulaname}}__resource_present_{{resource_id}})
    resource_id
        name for the resource
    resource_type
        resource type (f.e. ocf:heartbeat:IPaddr2 or VirtualIP)
    resource_options
        additional options for creating the resource
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        mysql_pcs__resource_present_galera:
            pcs.resource_present:
                - resource_id: galera
                - resource_type: "ocf:heartbeat:galera"
                - resource_options:
                    - 'wsrep_cluster_address=gcomm://node1.example.org,node2.example.org,node3.example.org'
                    - '--master'
                - cibname: cib_for_galera
    """
    # Delegate to the generic item handler; "resource" selects the
    # corresponding pcs subcommand.
    return _item_present(
        name=name,
        item="resource",
        item_id=resource_id,
        item_type=resource_type,
        extra_args=resource_options,
        cibname=cibname,
    )
1152
1153
def constraint_present(
    name, constraint_id, constraint_type, constraint_options=None, cibname=None
):
    """
    Ensure a constraint exists in the cluster

    Run this on a single cluster node only, since concurrent
    runs on several nodes may race against each other.
    Requires a functional pacemaker/corosync on the node.

    name
        Irrelevant, not used (recommended: {{formulaname}}__constraint_present_{{constraint_id}})
    constraint_id
        name for the constraint (try first to create manually to find out the autocreated name)
    constraint_type
        constraint type (location, colocation, order)
    constraint_options
        options for creating the constraint
    cibname
        use a cached CIB-file named like cibname instead of the live CIB

    Example:

    .. code-block:: yaml

        haproxy_pcs__constraint_present_colocation-vip_galera-haproxy-clone-INFINITY:
            pcs.constraint_present:
                - constraint_id: colocation-vip_galera-haproxy-clone-INFINITY
                - constraint_type: colocation
                - constraint_options:
                    - 'add'
                    - 'vip_galera'
                    - 'with'
                    - 'haproxy-clone'
                - cibname: cib_for_haproxy
    """
    # Constraints use no explicit create verb (create=None); everything
    # else is handled by the generic item handler.
    return _item_present(
        name=name,
        item="constraint",
        item_id=constraint_id,
        item_type=constraint_type,
        create=None,
        extra_args=constraint_options,
        cibname=cibname,
    )
1199