1#!/usr/local/bin/python3.8
2
3# (c) 2020, NetApp, Inc
4# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
5from __future__ import absolute_import, division, print_function
6
7__metaclass__ = type
8
9DOCUMENTATION = """
10module: na_santricity_snapshot
short_description: NetApp E-Series storage system's snapshots.
description: Manage the NetApp E-Series storage system's snapshots.
13author: Nathan Swartz (@ndswartz)
14extends_documentation_fragment:
15    - netapp_eseries.santricity.santricity.santricity_doc
16options:
17  state:
18    description:
19      - When I(state==absent) ensures the I(type) has been removed.
20      - When I(state==present) ensures the I(type) is available.
21      - When I(state==rollback) the consistency group will be rolled back to the point-in-time snapshot images selected by I(pit_name or pit_timestamp).
22      - I(state==rollback) will always return changed since it is not possible to evaluate the current state of the base volume in relation to a snapshot image.
23    type: str
24    choices:
25      - absent
26      - present
27      - rollback
28    default: present
29    required: false
30  type:
31    description:
32      - Type of snapshot object to effect.
33      - Group indicates a snapshot consistency group; consistency groups may have one or more base volume members which are defined in I(volumes).
34      - Pit indicates a snapshot consistency group point-in-time image(s); a snapshot image will be taken of each base volume when I(state==present).
35      - Warning! When I(state==absent and type==pit), I(pit_name) or I(pit_timestamp) must be defined and all point-in-time images created prior to the
36        selection will also be deleted.
37      - View indicates a consistency group snapshot volume of particular point-in-time image(s); snapshot volumes will be created for each base volume member.
38      - Views are created from images from a single point-in-time so once created they cannot be modified.
39    type: str
40    default: group
41    choices:
42      - group
43      - pit
44      - view
45    required: false
46  group_name:
47    description:
48      - Name of the snapshot consistency group or snapshot volume.
49      - Be sure to use different names for snapshot consistency groups and snapshot volumes to avoid name conflicts.
50    type: str
51    required: true
52  volumes:
53    description:
54      - Details for each consistency group base volume for defining reserve capacity, preferred reserve capacity storage pool, and snapshot volume options.
55      - When I(state==present and type==group) the volume entries will be used to add or remove base volume from a snapshot consistency group.
56      - When I(state==present and type==view) the volume entries will be used to select images from a point-in-time for their respective snapshot volumes.
57      - If I(state==present and type==view) and I(volume) is not specified then all volumes will be selected with the defaults.
58      - Views are created from images from a single point-in-time so once created they cannot be modified.
59      - When I(state==rollback) then I(volumes) can be used to specify which base volumes to rollback; otherwise all consistency group volumes will rollback.
60    type: list
61    required: false
62    suboptions:
63      volume:
64        description:
65          - Base volume for consistency group.
66        type: str
67        required: true
68      reserve_capacity_pct:
69        description:
70          - Percentage of base volume capacity to reserve for snapshot copy-on-writes (COW).
71          - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes.
72        type: int
73        default: 40
74        required: false
75      preferred_reserve_storage_pool:
76        description:
77          - Preferred storage pool or volume group for the reserve capacity volume.
78          - The base volume's storage pool or volume group will be selected by default if not defined.
79          - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes
80        type: str
81        required: false
82      snapshot_volume_writable:
83        description:
84          - Whether snapshot volume of base volume images should be writable.
85        type: bool
86        default: true
87        required: false
88      snapshot_volume_validate:
89        description:
90          - Whether snapshot volume should be validated which includes both a media scan and parity validation.
91        type: bool
92        default: false
93        required: false
      snapshot_volume_host:
        description:
          - Host or host group to map snapshot volume.
        type: str
        required: false
      snapshot_volume_lun:
        description:
          - Logical unit number (LUN) for the snapshot volume's host or host group mapping.
        type: int
        required: false
99  maximum_snapshots:
100    description:
101      - Total number of snapshot images to maintain.
102    type: int
103    default: 32
104    required: false
105  reserve_capacity_pct:
106    description:
107      - Default percentage of base volume capacity to reserve for snapshot copy-on-writes (COW).
108      - Used to define reserve capacity for both snapshot consistency group volume members and snapshot volumes.
109    type: int
110    default: 40
111    required: false
112  preferred_reserve_storage_pool:
113    description:
114      - Default preferred storage pool or volume group for the reserve capacity volume.
115      - The base volume's storage pool or volume group will be selected by default if not defined.
116      - Used to specify storage pool or volume group for both snapshot consistency group volume members and snapshot volumes
117    type: str
118    required: false
119  alert_threshold_pct:
120    description:
121      - Percent of filled reserve capacity to issue alert.
122    type: int
123    default: 75
124    required: false
125  reserve_capacity_full_policy:
126    description:
127      - Policy for full reserve capacity.
128      - Purge deletes the oldest snapshot image for the base volume in the consistency group.
129      - Reject writes to base volume (keep snapshot images valid).
130    choices:
131      - purge
132      - reject
133    type: str
134    default: purge
135    required: false
136  rollback_priority:
137    description:
138      - Storage system priority given to restoring snapshot point in time.
139    type: str
140    choices:
141      - highest
142      - high
143      - medium
144      - low
145      - lowest
146    default: medium
147    required: false
148  rollback_backup:
149    description:
150      - Whether a point-in-time snapshot should be taken prior to performing a rollback.
151    type: bool
152    default: true
153    required: false
154  pit_name:
155    description:
156      - Name of a consistency group's snapshot images.
157    type: str
158    required: false
159  pit_description:
160    description:
161      - Arbitrary description for a consistency group's snapshot images
162    type: str
163    required: false
164  pit_timestamp:
165    description:
166      - Snapshot image timestamp in the YYYY-MM-DD HH:MM:SS (AM|PM) (hours, minutes, seconds, and day-period are optional)
167      - Define only as much time as necessary to distinguish the desired snapshot image from the others.
168      - 24 hour time will be assumed if day-period indicator (AM, PM) is not specified.
      - The terms newest and oldest may be used to select the newest and oldest consistency group images.
170      - Mutually exclusive with I(pit_name or pit_description)
171    type: str
172    required: false
173  view_name:
174    description:
175      - Consistency group snapshot volume group.
      - Required when I(type==view) or when ensuring the view's absence when I(state==absent).
177    type: str
178    required: false
179  view_host:
180    description:
181      - Default host or host group to map snapshot volumes.
182    type: str
183    required: false
184  view_writable:
185    description:
186      - Default whether snapshot volumes should be writable.
187    type: bool
188    default: true
189    required: false
190  view_validate:
191    description:
      - Default whether snapshot volumes should be validated.
193    type: bool
194    default: false
195    required: false
196notes:
  - Key-value pairs are used to keep track of snapshot names and descriptions since the snapshot point-in-time images do not have metadata associated with their
198    data structures; therefore, it is necessary to clean out old keys that are no longer associated with an actual image. This cleaning action is performed each
199    time this module is executed.
200"""
201EXAMPLES = """
202- name: Ensure snapshot consistency group exists.
203  na_santricity_snapshot:
204    ssid: "1"
205    api_url: https://192.168.1.100:8443/devmgr/v2
206    api_username: admin
207    api_password: adminpass
208    state: present
209    type: group
210    group_name: snapshot_group1
211    volumes:
212      - volume: vol1
213        reserve_capacity_pct: 20
214        preferred_reserve_storage_pool: vg1
215      - volume: vol2
216        reserve_capacity_pct: 30
217      - volume: vol3
218    alert_threshold_pct: 80
219    maximum_snapshots: 30
220- name: Take the current consistency group's base volumes point-in-time snapshot images.
221  na_santricity_snapshot:
222    ssid: "1"
223    api_url: https://192.168.1.100:8443/devmgr/v2
224    api_username: admin
225    api_password: adminpass
226    state: present
227    type: pit
228    group_name: snapshot_group1
229    pit_name: pit1
230    pit_description: Initial consistency group's point-in-time snapshot images.
231- name: Ensure snapshot consistency group view exists and is mapped to host group.
232  na_santricity_snapshot:
233    ssid: "1"
234    api_url: https://192.168.1.100:8443/devmgr/v2
235    api_username: admin
236    api_password: adminpass
237    state: present
238    type: view
239    group_name: snapshot_group1
240    pit_name: pit1
241    view_name: view1
242    view_host: view1_hosts_group
243    volumes:
244      - volume: vol1
245        reserve_capacity_pct: 20
246        preferred_reserve_storage_pool: vg4
247        snapshot_volume_writable: false
248        snapshot_volume_validate: true
249      - volume: vol2
250        reserve_capacity_pct: 20
251        preferred_reserve_storage_pool: vg4
252        snapshot_volume_writable: true
253        snapshot_volume_validate: true
254      - volume: vol3
255        reserve_capacity_pct: 20
256        preferred_reserve_storage_pool: vg4
257        snapshot_volume_writable: false
258        snapshot_volume_validate: true
259    alert_threshold_pct: 80
260    maximum_snapshots: 30
261- name: Rollback base volumes to consistency group's point-in-time pit1.
262  na_santricity_snapshot:
263    ssid: "1"
264    api_url: https://192.168.1.100:8443/devmgr/v2
265    api_username: admin
266    api_password: adminpass
    state: rollback
    type: group
    group_name: snapshot_group1
    pit_name: pit1
    rollback_backup: true
    rollback_priority: high
273- name: Ensure snapshot consistency group view no longer exists.
274  na_santricity_snapshot:
275    ssid: "1"
276    api_url: https://192.168.1.100:8443/devmgr/v2
277    api_username: admin
278    api_password: adminpass
279    state: absent
280    type: view
281    group_name: snapshot_group1
282    view_name: view1
283- name: Ensure that the consistency group's base volumes point-in-time snapshot images pit1 no longer exists.
284  na_santricity_snapshot:
285    ssid: "1"
286    api_url: https://192.168.1.100:8443/devmgr/v2
287    api_username: admin
288    api_password: adminpass
289    state: absent
    type: pit
291    group_name: snapshot_group1
292    pit_name: pit1
293- name: Ensure snapshot consistency group no longer exists.
294  na_santricity_snapshot:
295    ssid: "1"
296    api_url: https://192.168.1.100:8443/devmgr/v2
297    api_username: admin
298    api_password: adminpass
299    state: absent
300    type: group
301    group_name: snapshot_group1
302"""
303RETURN = """
304changed:
305  description: Whether changes have been made.
306  type: bool
307  returned: always
308group_changes:
309  description: All changes performed to the consistency group.
310  type: dict
311  returned: always
312deleted_metadata_keys:
313  description: Keys that were purged from the key-value datastore.
314  type: list
315  returned: always
316"""
317from datetime import datetime
318import re
319from time import sleep
320
321from ansible_collections.netapp_eseries.santricity.plugins.module_utils.santricity import NetAppESeriesModule
322
323
324class NetAppESeriesSnapshot(NetAppESeriesModule):
325    def __init__(self):
326        ansible_options = dict(state=dict(type="str", default="present", choices=["absent", "present", "rollback"], required=False),
327                               type=dict(type="str", default="group", choices=["group", "pit", "view"], required=False),
328                               group_name=dict(type="str", required=True),
329                               volumes=dict(type="list", required=False,
330                                            suboptions=dict(volume=dict(type="str", required=True),
331                                                            reserve_capacity_pct=dict(type="int", default=40, required=False),
332                                                            preferred_reserve_storage_pool=dict(type="str", required=False),
333                                                            snapshot_volume_writable=dict(type="bool", default=True, required=False),
334                                                            snapshot_volume_validate=dict(type="bool", default=False, required=False),
335                                                            snapshot_volume_host=dict(type="str", default=None, required=False),
336                                                            snapshot_volume_lun=dict(type="int", default=None, required=False))),
337                               maximum_snapshots=dict(type="int", default=32, required=False),
338                               reserve_capacity_pct=dict(type="int", default=40, required=False),
339                               preferred_reserve_storage_pool=dict(type="str", required=False),
340                               alert_threshold_pct=dict(type="int", default=75, required=False),
341                               reserve_capacity_full_policy=dict(type="str", default="purge", choices=["purge", "reject"], required=False),
342                               rollback_priority=dict(type="str", default="medium", choices=["highest", "high", "medium", "low", "lowest"], required=False),
343                               rollback_backup=dict(type="bool", default=True, required=False),
344                               pit_name=dict(type="str", required=False),
345                               pit_description=dict(type="str", required=False),
346                               pit_timestamp=dict(type="str", required=False),
347                               view_name=dict(type="str", required=False),
348                               view_host=dict(type="str", default=None, required=False),
349                               view_writable=dict(type="bool", default=True, required=False),
350                               view_validate=dict(type="bool", default=False, required=False))
351
352        super(NetAppESeriesSnapshot, self).__init__(ansible_options=ansible_options,
353                                                    web_services_version="05.00.0000.0000",
354                                                    supports_check_mode=True)
355        args = self.module.params
356        self.state = args["state"]
357        self.type = args["type"]
358        self.group_name = args["group_name"]
359        self.maximum_snapshots = args["maximum_snapshots"]
360        self.reserve_capacity_pct = args["reserve_capacity_pct"]
361        self.preferred_reserve_storage_pool = args["preferred_reserve_storage_pool"]
362        self.alert_threshold_pct = args["alert_threshold_pct"]
363        self.reserve_capacity_full_policy = "purgepit" if args["reserve_capacity_full_policy"] == "purge" else "failbasewrites"
364        self.rollback_priority = args["rollback_priority"]
365        self.rollback_backup = args["rollback_backup"]
366        self.rollback_priority = args["rollback_priority"]
367        self.pit_name = args["pit_name"]
368        self.pit_description = args["pit_description"]
369        self.view_name = args["view_name"]
370        self.view_host = args["view_host"]
371        self.view_writable = args["view_writable"]
372        self.view_validate = args["view_validate"]
373
374        # Complete volume definitions.
375        self.volumes = {}
376        if args["volumes"]:
377            for volume_info in args["volumes"]:
378                reserve_capacity_pct = volume_info["reserve_capacity_pct"] if "reserve_capacity_pct" in volume_info else self.reserve_capacity_pct
379                snapshot_volume_writable = volume_info["snapshot_volume_writable"] if "snapshot_volume_writable" in volume_info else self.view_writable
380                snapshot_volume_validate = volume_info["snapshot_volume_validate"] if "snapshot_volume_validate" in volume_info else self.view_validate
381                snapshot_volume_host = volume_info["snapshot_volume_host"] if "snapshot_volume_host" in volume_info else self.view_host
382                snapshot_volume_lun = volume_info["snapshot_volume_lun"] if "snapshot_volume_lun" in volume_info else None
383                if "preferred_reserve_storage_pool" in volume_info and volume_info["preferred_reserve_storage_pool"]:
384                    preferred_reserve_storage_pool = volume_info["preferred_reserve_storage_pool"]
385                else:
386                    preferred_reserve_storage_pool = self.preferred_reserve_storage_pool
387
388                self.volumes.update({volume_info["volume"]: {"reserve_capacity_pct": reserve_capacity_pct,
389                                                             "preferred_reserve_storage_pool": preferred_reserve_storage_pool,
390                                                             "snapshot_volume_writable": snapshot_volume_writable,
391                                                             "snapshot_volume_validate": snapshot_volume_validate,
392                                                             "snapshot_volume_host": snapshot_volume_host,
393                                                             "snapshot_volume_lun": snapshot_volume_lun}})
394
395        # Check and convert pit_timestamp to datetime object. volume: snap-vol1
396        self.pit_timestamp = None
397        self.pit_timestamp_tokens = 0
398        if args["pit_timestamp"]:
399            if args["pit_timestamp"] in ["newest", "oldest"]:
400                self.pit_timestamp = args["pit_timestamp"]
401            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
402                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M:%S %p")
403                self.pit_timestamp_tokens = 6
404            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
405                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I:%M %p")
406                self.pit_timestamp_tokens = 5
407            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2} (AM|PM|am|pm)", args["pit_timestamp"]):
408                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %I %p")
409                self.pit_timestamp_tokens = 4
410            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2}", args["pit_timestamp"]):
411                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M:%S")
412                self.pit_timestamp_tokens = 6
413            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}", args["pit_timestamp"]):
414                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H:%M")
415                self.pit_timestamp_tokens = 5
416            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}", args["pit_timestamp"]):
417                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d %H")
418                self.pit_timestamp_tokens = 4
419            elif re.match("[0-9]{4}-[0-9]{2}-[0-9]{2}", args["pit_timestamp"]):
420                self.pit_timestamp = datetime.strptime(args["pit_timestamp"], "%Y-%m-%d")
421                self.pit_timestamp_tokens = 3
422            else:
423                self.module.fail_json(msg="Invalid argument! pit_timestamp must be in the form YYYY-MM-DD HH:MM:SS (AM|PM) (time portion is optional)."
424                                          " Array [%s]." % self.ssid)
425
426        # Check for required arguments
427        if self.state == "present":
428            if self.type == "group":
429                if not self.volumes:
430                    self.module.fail_json(msg="Missing argument! Volumes must be defined to create a snapshot consistency group."
431                                              " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
432            elif self.type == "pit":
433                if self.pit_timestamp and self.pit_name:
434                    self.module.fail_json(msg="Invalid arguments! Either define pit_name with or without pit_description or pit_timestamp."
435                                              " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
436
437            elif self.type == "view":
438                if not self.view_name:
439                    self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view."
440                                              " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
441                if not (self.pit_name or self.pit_timestamp):
442                    self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
443                                              " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
444        elif self.state == "rollback":
445            if not (self.pit_name or self.pit_timestamp):
446                self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
447                                          " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
448        else:
449            if self.type == "pit":
450                if self.pit_name and self.pit_timestamp:
451                    self.module.fail_json(msg="Invalid arguments! Either define pit_name or pit_timestamp."
452                                              " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
453                if not (self.pit_name or self.pit_timestamp):
454                    self.module.fail_json(msg="Missing argument! Either pit_name or pit_timestamp must be defined to create a consistency group point-in-time"
455                                              " snapshot. Group [%s]. Array [%s]" % (self.group_name, self.ssid))
456            elif self.type == "view":
457                if not self.view_name:
458                    self.module.fail_json(msg="Missing argument! view_name must be defined to create a snapshot consistency group view."
459                                              " Group [%s]. Array [%s]" % (self.group_name, self.ssid))
460
461        # Check whether request needs to be forwarded on to the controller web services rest api.
462        self.url_path_prefix = ""
463        if not self.is_embedded():
464            if self.ssid == "0" or self.ssid.lower() == "proxy":
465                self.module.fail_json(msg="Snapshot is not a valid operation for SANtricity Web Services Proxy! ssid cannot be '0' or 'proxy'."
466                                          " Array [%s]" % self.ssid)
467            self.url_path_prefix = "storage-systems/%s/forward/devmgr/v2/" % self.ssid
468
469        self.cache = {"get_consistency_group": {},
470                      "get_all_storage_pools_by_id": {},
471                      "get_all_storage_pools_by_name": {},
472                      "get_all_volumes_by_id": {},
473                      "get_all_volumes_by_name": {},
474                      "get_all_hosts_and_hostgroups_by_name": {},
475                      "get_all_hosts_and_hostgroups_by_id": {},
476                      "get_mapping_by_id": {},
477                      "get_mapping_by_name": {},
478                      "get_all_concat_volumes_by_id": {},
479                      "get_pit_images_by_timestamp": {},
480                      "get_pit_images_by_name": {},
481                      "get_pit_images_metadata": {},
482                      "get_unused_pit_key_values": [],
483                      "get_pit_info": None,
484                      "get_consistency_group_view": {},
485                      "view_changes_required": []}
486
487    def get_all_storage_pools_by_id(self):
488        """Retrieve and return all storage pools/volume groups."""
489        if not self.cache["get_all_storage_pools_by_id"]:
490            try:
491                rc, storage_pools = self.request("storage-systems/%s/storage-pools" % self.ssid)
492
493                for storage_pool in storage_pools:
494                    self.cache["get_all_storage_pools_by_id"].update({storage_pool["id"]: storage_pool})
495                    self.cache["get_all_storage_pools_by_name"].update({storage_pool["name"]: storage_pool})
496            except Exception as error:
497                self.module.fail_json(msg="Failed to retrieve volumes! Error [%s]. Array [%s]." % (error, self.ssid))
498
499        return self.cache["get_all_storage_pools_by_id"]
500
501    def get_all_storage_pools_by_name(self):
502        """Retrieve and return all storage pools/volume groups."""
503        if not self.cache["get_all_storage_pools_by_name"]:
504            self.get_all_storage_pools_by_id()
505
506        return self.cache["get_all_storage_pools_by_name"]
507
508    def get_all_volumes_by_id(self):
509        """Retrieve and return a dictionary of all thick and thin volumes keyed by id."""
510        if not self.cache["get_all_volumes_by_id"]:
511            try:
512                rc, thick_volumes = self.request("storage-systems/%s/volumes" % self.ssid)
513                rc, thin_volumes = self.request("storage-systems/%s/thin-volumes" % self.ssid)
514
515                for volume in thick_volumes + thin_volumes:
516                    self.cache["get_all_volumes_by_id"].update({volume["id"]: volume})
517                    self.cache["get_all_volumes_by_name"].update({volume["name"]: volume})
518            except Exception as error:
519                self.module.fail_json(msg="Failed to retrieve volumes! Error [%s]. Array [%s]." % (error, self.ssid))
520
521        return self.cache["get_all_volumes_by_id"]
522
523    def get_all_volumes_by_name(self):
524        """Retrieve and return a dictionary of all thick and thin volumes keyed by name."""
525        if not self.cache["get_all_volumes_by_name"]:
526            self.get_all_volumes_by_id()
527
528        return self.cache["get_all_volumes_by_name"]
529
530    def get_all_hosts_and_hostgroups_by_id(self):
531        """Retrieve and return a dictionary of all host and host groups keyed by name."""
532        if not self.cache["get_all_hosts_and_hostgroups_by_id"]:
533            try:
534                rc, hostgroups = self.request("storage-systems/%s/host-groups" % self.ssid)
535                # hostgroup_by_id = {hostgroup["id"]: hostgroup for hostgroup in hostgroups}
536                hostgroup_by_id = dict((hostgroup["id"], hostgroup) for hostgroup in hostgroups)
537
538                rc, hosts = self.request("storage-systems/%s/hosts" % self.ssid)
539                for host in hosts:
540                    if host["clusterRef"] != "0000000000000000000000000000000000000000":
541                        hostgroup_name = hostgroup_by_id[host["clusterRef"]]["name"]
542
543                        if host["clusterRef"] not in self.cache["get_all_hosts_and_hostgroups_by_id"].keys():
544                            hostgroup_by_id[host["clusterRef"]].update({"hostgroup": True, "host_ids": [host["id"]]})
545                            self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["clusterRef"]: hostgroup_by_id[host["clusterRef"]]})
546                            self.cache["get_all_hosts_and_hostgroups_by_name"].update({hostgroup_name: hostgroup_by_id[host["clusterRef"]]})
547                        else:
548                            self.cache["get_all_hosts_and_hostgroups_by_id"][host["clusterRef"]]["host_ids"].append(host["id"])
549                            self.cache["get_all_hosts_and_hostgroups_by_name"][hostgroup_name]["host_ids"].append(host["id"])
550
551                    self.cache["get_all_hosts_and_hostgroups_by_id"].update({host["id"]: host, "hostgroup": False})
552                    self.cache["get_all_hosts_and_hostgroups_by_name"].update({host["name"]: host, "hostgroup": False})
553            except Exception as error:
554                self.module.fail_json(msg="Failed to retrieve all host and host group objects! Error [%s]. Array [%s]." % (error, self.ssid))
555
556        return self.cache["get_all_hosts_and_hostgroups_by_id"]
557
558    def get_all_hosts_and_hostgroups_by_name(self):
559        """Retrieve and return a dictionary of all thick and thin volumes keyed by name."""
560        if not self.cache["get_all_hosts_and_hostgroups_by_name"]:
561            self.get_all_hosts_and_hostgroups_by_id()
562
563        return self.cache["get_all_hosts_and_hostgroups_by_name"]
564
565    def get_mapping_by_id(self):
566        """Retrieve and return a dictionary of """
567        if not self.cache["get_mapping_by_id"]:
568            existing_hosts_and_hostgroups_by_id = self.get_all_hosts_and_hostgroups_by_id()
569            existing_hosts_and_hostgroups_by_name = self.get_all_hosts_and_hostgroups_by_name()
570            try:
571                rc, mappings = self.request("storage-systems/%s/volume-mappings" % self.ssid)
572
573                for mapping in mappings:
574                    host_ids = [mapping["mapRef"]]
575                    map_entry = {mapping["lun"]: mapping["volumeRef"]}
576
577                    if mapping["type"] == "cluster":
578                        host_ids = existing_hosts_and_hostgroups_by_id[mapping["mapRef"]]["host_ids"]
579                        if mapping["mapRef"] in self.cache["get_mapping_by_id"].keys():
580                            self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry)
581                            self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry)
582                        else:
583                            self.cache["get_mapping_by_id"].update({mapping["mapRef"]: map_entry})
584                            self.cache["get_mapping_by_name"].update({mapping["mapRef"]: map_entry})
585
586                    for host_id in host_ids:
587                        if host_id in self.cache["get_mapping_by_id"].keys():
588                            self.cache["get_mapping_by_id"][mapping["mapRef"]].update(map_entry)
589                            self.cache["get_mapping_by_name"][mapping["mapRef"]].update(map_entry)
590                        else:
591                            self.cache["get_mapping_by_id"].update({host_id: map_entry})
592                            self.cache["get_mapping_by_name"].update({host_id: map_entry})
593            except Exception as error:
594                self.module.fail_json(msg="Failed to retrieve all volume map definitions! Error [%s]. Array [%s]." % (error, self.ssid))
595
596        return self.cache["get_mapping_by_id"]
597
598    def get_mapping_by_name(self):
599        """Retrieve and return a dictionary of """
600        if not self.cache["get_mapping_by_name"]:
601            self.get_mapping_by_id()
602
603        return self.cache["get_mapping_by_name"]
604
605    def get_all_concat_volumes_by_id(self):
606        """Retrieve and return a dictionary of all thick and thin volumes keyed by id."""
607        if not self.cache["get_all_concat_volumes_by_id"]:
608            try:
609                rc, concat_volumes = self.request("storage-systems/%s/repositories/concat" % self.ssid)
610
611                for volume in concat_volumes:
612                    self.cache["get_all_concat_volumes_by_id"].update({volume["id"]: volume})
613            except Exception as error:
614                self.module.fail_json(msg="Failed to retrieve reserve capacity volumes! Error [%s]. Array [%s]." % (error, self.ssid))
615
616        return self.cache["get_all_concat_volumes_by_id"]
617
    def get_consistency_group(self):
        """Retrieve consistency groups and return information on the expected group.

        Returns a cached dictionary describing the consistency group whose label equals self.group_name
        with the keys consistency_group_id, alert_threshold_pct, maximum_snapshots, rollback_priority,
        reserve_capacity_full_policy, sequence_numbers and base_volumes (one entry per member volume).
        The cache value is returned unchanged (presumably empty) when no group with that label exists.
        """
        # NOTE(review): the volume lookup runs on every call, even when the group info is already cached.
        existing_volumes = self.get_all_volumes_by_id()

        if not self.cache["get_consistency_group"]:
            try:
                rc, consistency_groups = self.request("storage-systems/%s/consistency-groups" % self.ssid)

                for consistency_group in consistency_groups:
                    if consistency_group["label"] == self.group_name:
                        rc, member_volumes = self.request("storage-systems/%s/consistency-groups/%s/member-volumes" % (self.ssid, consistency_group["id"]))

                        self.cache["get_consistency_group"].update({"consistency_group_id": consistency_group["cgRef"],
                                                                    "alert_threshold_pct": consistency_group["fullWarnThreshold"],
                                                                    "maximum_snapshots": consistency_group["autoDeleteLimit"],
                                                                    "rollback_priority": consistency_group["rollbackPriority"],
                                                                    "reserve_capacity_full_policy": consistency_group["repFullPolicy"],
                                                                    "sequence_numbers": consistency_group["uniqueSequenceNumber"],
                                                                    "base_volumes": []})

                        # Collect per-member base volume details, including the repository (concat) volume info.
                        for member_volume in member_volumes:
                            base_volume = existing_volumes[member_volume["volumeId"]]
                            base_volume_size_b = int(base_volume["totalSizeInBytes"])
                            total_reserve_capacity_b = int(member_volume["totalRepositoryCapacity"])
                            # Reserve capacity expressed as a whole-number percentage of the base volume size.
                            reserve_capacity_pct = int(round(float(total_reserve_capacity_b) / float(base_volume_size_b) * 100))

                            rc, concat = self.request("storage-systems/%s/repositories/concat/%s" % (self.ssid, member_volume["repositoryVolume"]))

                            self.cache["get_consistency_group"]["base_volumes"].append({"name": base_volume["name"],
                                                                                        "id": base_volume["id"],
                                                                                        "base_volume_size_b": base_volume_size_b,
                                                                                        "total_reserve_capacity_b": total_reserve_capacity_b,
                                                                                        "reserve_capacity_pct": reserve_capacity_pct,
                                                                                        "repository_volume_info": concat})
                        # Group labels match at most once; stop scanning the remaining groups.
                        break

            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve snapshot consistency groups! Error [%s]. Array [%s]." % (error, self.ssid))

        return self.cache["get_consistency_group"]
658
    def get_candidate(self, volume_name, volume_info):
        """Return candidate for volume.

        Resolves the base volume's storage pool and the preferred reserve storage pool (defaulting to
        the base volume's own pool), requests a reserve capacity (concat) volume candidate from the
        array, and records the accepted candidate in volume_info["candidate"].

        :param str volume_name: name of the base volume to find a reserve capacity candidate for.
        :param dict volume_info: volume option dictionary; updated in place with name, id, storage pool
            information, resolved preferred_reserve_storage_pool id, and the chosen candidate.
        :return dict: the updated volume_info dictionary.
        """
        existing_storage_pools_by_id = self.get_all_storage_pools_by_id()
        existing_storage_pools_by_name = self.get_all_storage_pools_by_name()
        existing_volumes_by_name = self.get_all_volumes_by_name()

        if volume_name in existing_volumes_by_name:
            base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"]
            base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"]

            # Default the reserve capacity location to the base volume's own pool unless overridden.
            preferred_reserve_storage_pool = base_volume_storage_pool_id
            if volume_info["preferred_reserve_storage_pool"]:
                if volume_info["preferred_reserve_storage_pool"] in existing_storage_pools_by_name:
                    preferred_reserve_storage_pool = existing_storage_pools_by_name[volume_info["preferred_reserve_storage_pool"]]["id"]
                else:
                    self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]."
                                              " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid))

            volume_info.update({"name": volume_name,
                                "id": existing_volumes_by_name[volume_name]["id"],
                                "storage_pool_name": base_volume_storage_pool_name,
                                "storage_pool_id": base_volume_storage_pool_id,
                                "preferred_reserve_storage_pool": preferred_reserve_storage_pool,
                                "candidate": None})

        else:
            self.module.fail_json(msg="Volume does not exist! Volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))

        candidate_request = {"candidateRequest": {"baseVolumeRef": volume_info["id"],
                                                  "percentCapacity": volume_info["reserve_capacity_pct"],
                                                  "concatVolumeType": "snapshot"}}
        try:
            rc, candidates = self.request("storage-systems/%s/repositories/concat/single" % self.ssid, method="POST", data=candidate_request)
            # Accept only a candidate located in the preferred pool; the for/else fails when none matches.
            for candidate in candidates:
                if candidate["volumeGroupId"] == volume_info["preferred_reserve_storage_pool"]:
                    volume_info["candidate"] = candidate
                    break
            else:
                self.module.fail_json(msg="Failed to retrieve capacity volume candidate in preferred storage pool or volume group!"
                                          " Volume [%s]. Group [%s]. Array [%s]." % (volume_info["name"], self.group_name, self.ssid))
        except Exception as error:
            self.module.fail_json(msg="Failed to get reserve capacity candidates!"
                                      " Volumes %s. Group [%s]. Array [%s]. Error [%s]" % (volume_info["name"], self.group_name, self.ssid, error))

        return volume_info
704
705    def get_pit_images_metadata(self):
706        """Retrieve and return consistency group snapshot images' metadata keyed on timestamps."""
707        if not self.cache["get_pit_images_metadata"]:
708            try:
709                rc, key_values = self.request(self.url_path_prefix + "key-values")
710
711                for entry in key_values:
712                    if re.search("ansible\\|%s\\|" % self.group_name, entry["key"]):
713                        name = entry["key"].replace("ansible|%s|" % self.group_name, "")
714                        values = entry["value"].split("|")
715                        if len(values) == 3:
716                            timestamp, image_id, description = values
717                            self.cache["get_pit_images_metadata"].update({timestamp: {"name": name, "description": description}})
718
719            except Exception as error:
720                self.module.fail_json(msg="Failed to retrieve consistency group snapshot images metadata!  Array [%s]. Error [%s]." % (self.ssid, error))
721
722        return self.cache["get_pit_images_metadata"]
723
    def get_pit_images_by_timestamp(self):
        """Retrieve and return snapshot images.

        Builds a cached dictionary keyed by datetime timestamp; each value contains the point-in-time
        sequence number and the list of image info dictionaries captured at that time (one per base
        volume). Images whose metadata records a name are additionally registered in the
        get_pit_images_by_name cache as a side effect.
        """
        if not self.cache["get_pit_images_by_timestamp"]:
            group_id = self.get_consistency_group()["consistency_group_id"]
            images_metadata = self.get_pit_images_metadata()
            existing_volumes_by_id = self.get_all_volumes_by_id()

            try:
                rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id))
                for image_info in images:

                    # Fall back to empty metadata when no Ansible-recorded name/description exists for this image.
                    metadata = {"id": "", "name": "", "description": ""}
                    if image_info["pitTimestamp"] in images_metadata.keys():
                        metadata = images_metadata[image_info["pitTimestamp"]]

                    timestamp = datetime.fromtimestamp(int(image_info["pitTimestamp"]))
                    info = {"id": image_info["id"],
                            "name": metadata["name"],
                            "timestamp": timestamp,
                            "description": metadata["description"],
                            "sequence_number": image_info["pitSequenceNumber"],
                            "base_volume_id": image_info["baseVol"],
                            "base_volume_name": existing_volumes_by_id[image_info["baseVol"]]["name"],
                            "image_info": image_info}

                    # Group all images captured at the same point in time under a single timestamp entry.
                    if timestamp not in self.cache["get_pit_images_by_timestamp"].keys():
                        self.cache["get_pit_images_by_timestamp"].update({timestamp: {"sequence_number": image_info["pitSequenceNumber"], "images": [info]}})
                        if metadata["name"]:
                            self.cache["get_pit_images_by_name"].update({metadata["name"]: {"sequence_number": image_info["pitSequenceNumber"],
                                                                                            "images": [info]}})
                    else:
                        self.cache["get_pit_images_by_timestamp"][timestamp]["images"].append(info)
                        if metadata["name"]:
                            self.cache["get_pit_images_by_name"][metadata["name"]]["images"].append(info)

            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve consistency group snapshot images!"
                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))

        return self.cache["get_pit_images_by_timestamp"]
764
765    def get_pit_images_by_name(self):
766        """Retrieve and return snapshot images."""
767        if not self.cache["get_pit_images_by_name"]:
768            self.get_pit_images_by_timestamp()
769
770        return self.cache["get_pit_images_by_name"]
771
772    def get_unused_pit_key(self):
773        """Determine all embedded pit key-values that do not match existing snapshot images."""
774        if not self.cache["get_unused_pit_key_values"]:
775            try:
776                rc, images = self.request("storage-systems/%s/snapshot-images" % self.ssid)
777                rc, key_values = self.request("key-values")
778
779                for key_value in key_values:
780                    key = key_value["key"]
781                    value = key_value["value"]
782                    if re.match("ansible\\|.*\\|.*", value):
783                        for image in images:
784                            if str(image["pitTimestamp"]) == value.split("|")[0]:
785                                break
786                        else:
787                            self.cache["get_unused_pit_key_values"].append(key)
788            except Exception as error:
789                self.module.warn("Failed to retrieve all snapshots to determine all key-value pairs that do no match a point-in-time snapshot images!"
790                                 " Array [%s]. Error [%s]." % (self.ssid, error))
791
792        return self.cache["get_unused_pit_key_values"]
793
    def get_pit_info(self):
        """Determine consistency group's snapshot images base on provided arguments (pit_name or timestamp).

        Resolves the requested point-in-time image group either by metadata name (pit_name), by the
        special timestamp keywords "newest"/"oldest" (via the group's sequence numbers), or by matching
        a timestamp down to the precision the user supplied. Fails when a required image cannot be
        found or when a timestamp ambiguously matches multiple sequence numbers.
        """

        def _check_timestamp(timestamp):
            """Check whether timestamp matches I(pit_timestamp) to the precision the user supplied."""
            # pit_timestamp_tokens counts the date/time fields provided; fields beyond that count
            # (hour/minute/second) are not compared.
            return (self.pit_timestamp.year == timestamp.year and
                    self.pit_timestamp.month == timestamp.month and
                    self.pit_timestamp.day == timestamp.day and
                    (self.pit_timestamp_tokens < 4 or self.pit_timestamp.hour == timestamp.hour) and
                    (self.pit_timestamp_tokens < 5 or self.pit_timestamp.minute == timestamp.minute) and
                    (self.pit_timestamp_tokens < 6 or self.pit_timestamp.second == timestamp.second))

        if self.cache["get_pit_info"] is None:
            group = self.get_consistency_group()
            pit_images_by_timestamp = self.get_pit_images_by_timestamp()
            pit_images_by_name = self.get_pit_images_by_name()

            if self.pit_name:
                if self.pit_name in pit_images_by_name.keys():
                    self.cache["get_pit_info"] = pit_images_by_name[self.pit_name]

                    # When both name and timestamp are supplied, every image under the name must match the timestamp.
                    if self.pit_timestamp:
                        for image in self.cache["get_pit_info"]["images"]:
                            if not _check_timestamp(image["timestamp"]):
                                self.module.fail_json(msg="Snapshot image does not exist that matches both name and supplied timestamp!"
                                                          " Group [%s]. Image [%s]. Array [%s]." % (self.group_name, image, self.ssid))
            elif self.pit_timestamp and pit_images_by_timestamp:
                sequence_number = None
                if self.pit_timestamp == "newest":
                    # The last recorded sequence number identifies the most recent point-in-time.
                    sequence_number = group["sequence_numbers"][-1]

                    for image_timestamp in pit_images_by_timestamp.keys():
                        if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
                            self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
                            break
                elif self.pit_timestamp == "oldest":
                    sequence_number = group["sequence_numbers"][0]
                    for image_timestamp in pit_images_by_timestamp.keys():
                        if int(pit_images_by_timestamp[image_timestamp]["sequence_number"]) == int(sequence_number):
                            self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]
                            break
                else:
                    # A partial timestamp may match several image groups; they must all share one sequence number.
                    for image_timestamp in pit_images_by_timestamp.keys():
                        if _check_timestamp(image_timestamp):
                            if sequence_number and sequence_number != pit_images_by_timestamp[image_timestamp]["sequence_number"]:
                                self.module.fail_json(msg="Multiple snapshot images match the provided timestamp and do not have the same sequence number!"
                                                          " Group [%s]. Array [%s]." % (self.group_name, self.ssid))

                            sequence_number = pit_images_by_timestamp[image_timestamp]["sequence_number"]
                            self.cache["get_pit_info"] = pit_images_by_timestamp[image_timestamp]

        # NOTE(review): failure is suppressed when state == "absent" or type == "pit" (images may
        #   legitimately not exist in those cases) -- confirm the combined guard matches intended semantics.
        if self.state != "absent" and self.type != "pit" and self.cache["get_pit_info"] is None:
            self.module.fail_json(msg="Snapshot consistency group point-in-time image does not exist! Name [%s]. Timestamp [%s]. Group [%s]."
                                      " Array [%s]." % (self.pit_name, self.pit_timestamp, self.group_name, self.ssid))

        return self.cache["get_pit_info"]
850
851    def create_changes_required(self):
852        """Determine the required state changes for creating a new consistency group."""
853        changes = {"create_group": {"name": self.group_name,
854                                    "alert_threshold_pct": self.alert_threshold_pct,
855                                    "maximum_snapshots": self.maximum_snapshots,
856                                    "reserve_capacity_full_policy": self.reserve_capacity_full_policy,
857                                    "rollback_priority": self.rollback_priority},
858                   "add_volumes": self.volumes}
859
860        return changes
861
862    def update_changes_required(self):
863        """Determine the required state changes for updating an existing consistency group."""
864        group = self.get_consistency_group()
865        changes = {"update_group": {},
866                   "add_volumes": [],
867                   "remove_volumes": [],
868                   "expand_reserve_capacity": [],
869                   "trim_reserve_capacity": []}
870
871        # Check if consistency group settings need to be updated.
872        if group["alert_threshold_pct"] != self.alert_threshold_pct:
873            changes["update_group"].update({"alert_threshold_pct": self.alert_threshold_pct})
874        if group["maximum_snapshots"] != self.maximum_snapshots:
875            changes["update_group"].update({"maximum_snapshots": self.maximum_snapshots})
876        if group["rollback_priority"] != self.rollback_priority:
877            changes["update_group"].update({"rollback_priority": self.rollback_priority})
878        if group["reserve_capacity_full_policy"] != self.reserve_capacity_full_policy:
879            changes["update_group"].update({"reserve_capacity_full_policy": self.reserve_capacity_full_policy})
880
881        # Check if base volumes need to be added or removed from consistency group.
882        # remaining_base_volumes = {base_volumes["name"]: base_volumes for base_volumes in group["base_volumes"]}  # NOT python2.6 compatible
883        remaining_base_volumes = dict((base_volumes["name"], base_volumes) for base_volumes in group["base_volumes"])
884        add_volumes = {}
885        expand_volumes = {}
886
887        for volume_name, volume_info in self.volumes.items():
888            reserve_capacity_pct = volume_info["reserve_capacity_pct"]
889            if volume_name in remaining_base_volumes:
890
891                # Check if reserve capacity needs to be expanded or trimmed.
892                base_volume_reserve_capacity_pct = remaining_base_volumes[volume_name]["reserve_capacity_pct"]
893                if reserve_capacity_pct > base_volume_reserve_capacity_pct:
894                    expand_reserve_capacity_pct = reserve_capacity_pct - base_volume_reserve_capacity_pct
895                    expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
896                                                         "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
897                                                         "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}})
898
899                elif reserve_capacity_pct < base_volume_reserve_capacity_pct:
900                    existing_volumes_by_id = self.get_all_volumes_by_id()
901                    existing_volumes_by_name = self.get_all_volumes_by_name()
902                    existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id()
903                    trim_pct = base_volume_reserve_capacity_pct - reserve_capacity_pct
904
905                    # Check whether there are any snapshot images; if there are then throw an exception indicating that a trim operation
906                    #   cannot be done when snapshots exist.
907                    for timestamp, image in self.get_pit_images_by_timestamp():
908                        if existing_volumes_by_id(image["base_volume_id"])["name"] == volume_name:
909                            self.module.fail_json(msg="Reserve capacity cannot be trimmed when snapshot images exist for base volume!"
910                                                      " Base volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
911
912                    # Collect information about all that needs to be trimmed to meet or exceed required trim percentage.
913                    concat_volume_id = remaining_base_volumes[volume_name]["repository_volume_info"]["id"]
914                    concat_volume_info = existing_concat_volumes_by_id[concat_volume_id]
915                    base_volume_info = existing_volumes_by_name[volume_name]
916                    base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"])
917
918                    total_member_volume_size_bytes = 0
919                    member_volumes_to_trim = []
920                    for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])):
921                        member_volume_info = existing_volumes_by_id[member_volume_id]
922                        member_volumes_to_trim.append(member_volume_info)
923
924                        total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"])
925                        total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100)
926
927                        if total_trimmed_size_pct >= trim_pct:
928                            changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1})
929
930                            # Expand after trim if needed.
931                            if total_trimmed_size_pct > trim_pct:
932                                expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
933                                expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
934                                                                     "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
935                                                                     "reserve_volume_id": remaining_base_volumes[volume_name]["repository_volume_info"]["id"]}})
936                            break
937                    else:
938                        initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
939                        minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
940                        self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. Base volume [%s]. "
941                                                  "Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name, self.group_name, self.ssid))
942
943                remaining_base_volumes.pop(volume_name)
944            else:
945                add_volumes.update({volume_name: {"reserve_capacity_pct": reserve_capacity_pct,
946                                                  "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"]}})
947
948        changes["add_volumes"] = add_volumes
949        changes["expand_reserve_capacity"] = expand_volumes
950        changes["remove_volumes"] = remaining_base_volumes
951        return changes
952
    def get_consistency_group_view(self):
        """Determine and return consistency group view.

        Returns the cached view (collection of snapshot volumes) named self.view_name belonging to the
        consistency group, augmented with a "snapshot_volumes" list of the snapshot volumes that are
        members of that view; the cache value is returned unchanged when no such view exists.
        """
        group_id = self.get_consistency_group()["consistency_group_id"]

        if not self.cache["get_consistency_group_view"]:
            try:
                rc, views = self.request("storage-systems/%s/consistency-groups/%s/views" % (self.ssid, group_id))

                # Check for existing view (collection of snapshot volumes for a consistency group) within consistency group.
                for view in views:
                    if view["name"] == self.view_name:
                        self.cache["get_consistency_group_view"] = view
                        self.cache["get_consistency_group_view"].update({"snapshot_volumes": []})

                        # Determine snapshot volumes associated with view.
                        try:
                            rc, snapshot_volumes = self.request("storage-systems/%s/snapshot-volumes" % self.ssid)

                            # Keep only member snapshot volumes that reference this view.
                            for snapshot_volume in snapshot_volumes:
                                if (snapshot_volume["membership"] and
                                        snapshot_volume["membership"]["viewType"] == "member" and
                                        snapshot_volume["membership"]["cgViewRef"] == view["cgViewRef"]):
                                    self.cache["get_consistency_group_view"]["snapshot_volumes"].append(snapshot_volume)
                        except Exception as error:
                            self.module.fail_json(msg="Failed to retrieve host mapping information!."
                                                      " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
            except Exception as error:
                self.module.fail_json(msg="Failed to retrieve consistency group's views!"
                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))

        return self.cache["get_consistency_group_view"]
984
985    def create_view_changes_required(self):
986        """Determine whether snapshot consistency group point-in-time view needs to be created."""
987        changes = {}
988        snapshot_images_info = self.get_pit_info()
989        changes.update({"name": self.view_name,
990                        "sequence_number": snapshot_images_info["sequence_number"],
991                        "images": snapshot_images_info["images"],
992                        "volumes": self.volumes})
993
994        return changes
995
996    def update_view_changes_required(self):
997        """Determine the changes required for snapshot consistency group point-in-time view."""
998        changes = {"expand_reserve_capacity": [],
999                   "trim_reserve_capacity": [],
1000                   "map_snapshot_volumes_mapping": [],
1001                   "unmap_snapshot_volumes_mapping": [],
1002                   "move_snapshot_volumes_mapping": [],
1003                   "update_snapshot_volumes_writable": []}
1004        view = self.get_consistency_group_view()
1005        host_objects_by_name = self.get_all_hosts_and_hostgroups_by_name()
1006        host_objects_by_id = self.get_all_hosts_and_hostgroups_by_id()
1007        existing_volumes_by_id = self.get_all_volumes_by_id()
1008        if view:
1009            if len(view["snapshot_volumes"]) != len(self.volumes):
1010                self.module.fail_json(msg="Cannot add or remove snapshot volumes once view is created! Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1011
1012            expand_volumes = {}
1013            writable_volumes = {}
1014            for snapshot_volume in view["snapshot_volumes"]:
1015                for volume_name, volume_info in self.volumes.items():
1016                    if existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] == volume_name:
1017
1018                        # Check snapshot volume needs mapped to host or hostgroup.
1019                        if volume_info["snapshot_volume_host"] and not snapshot_volume["listOfMappings"]:
1020                            changes["map_snapshot_volumes_mapping"].append({"mappableObjectId": snapshot_volume["id"],
1021                                                                            "lun": volume_info["snapshot_volume_lun"],
1022                                                                            "targetId": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]})
1023
1024                        # Check snapshot volume needs unmapped to host or hostgroup.
1025                        elif not volume_info["snapshot_volume_host"] and snapshot_volume["listOfMappings"]:
1026                            changes["unmap_snapshot_volumes_mapping"].append({"snapshot_volume_name": snapshot_volume["name"],
1027                                                                              "lun_mapping_reference": snapshot_volume["listOfMappings"][0]["lunMappingRef"]})
1028
1029                        # Check host mapping needs moved
1030                        elif (snapshot_volume["listOfMappings"] and
1031                              ((volume_info["snapshot_volume_host"] != host_objects_by_id[snapshot_volume["listOfMappings"][0]["mapRef"]]["name"]) or
1032                               (volume_info["snapshot_volume_lun"] != snapshot_volume["listOfMappings"][0]["lun"]))):
1033                            changes["move_snapshot_volumes_mapping"].append({"lunMappingRef": snapshot_volume["listOfMappings"][0]["lunMappingRef"],
1034                                                                             "lun": volume_info["snapshot_volume_lun"],
1035                                                                             "mapRef": host_objects_by_name[volume_info["snapshot_volume_host"]]["id"]})
1036                        # Check writable mode
1037                        if volume_info["snapshot_volume_writable"] != (snapshot_volume["accessMode"] == "readWrite"):
1038                            volume_info.update({"snapshot_volume_id": snapshot_volume["id"]})
1039                            writable_volumes.update({volume_name: volume_info})
1040
1041                        # Check reserve capacity.
1042                        if volume_info["snapshot_volume_writable"] and snapshot_volume["accessMode"] == "readWrite":
1043                            current_reserve_capacity_pct = int(round(float(snapshot_volume["repositoryCapacity"]) /
1044                                                                     float(snapshot_volume["baseVolumeCapacity"]) * 100))
1045                            if volume_info["reserve_capacity_pct"] > current_reserve_capacity_pct:
1046                                expand_reserve_capacity_pct = volume_info["reserve_capacity_pct"] - current_reserve_capacity_pct
1047                                expand_volumes.update({volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
1048                                                                     "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
1049                                                                     "reserve_volume_id": snapshot_volume["repositoryVolume"]}})
1050
1051                            elif volume_info["reserve_capacity_pct"] < current_reserve_capacity_pct:
1052                                existing_volumes_by_id = self.get_all_volumes_by_id()
1053                                existing_volumes_by_name = self.get_all_volumes_by_name()
1054                                existing_concat_volumes_by_id = self.get_all_concat_volumes_by_id()
1055                                trim_pct = current_reserve_capacity_pct - volume_info["reserve_capacity_pct"]
1056
1057                                # Collect information about all that needs to be trimmed to meet or exceed required trim percentage.
1058                                concat_volume_id = snapshot_volume["repositoryVolume"]
1059                                concat_volume_info = existing_concat_volumes_by_id[concat_volume_id]
1060                                base_volume_info = existing_volumes_by_name[volume_name]
1061                                base_volume_size_bytes = int(base_volume_info["totalSizeInBytes"])
1062
1063                                total_member_volume_size_bytes = 0
1064                                member_volumes_to_trim = []
1065                                for trim_count, member_volume_id in enumerate(reversed(concat_volume_info["memberRefs"][1:])):
1066                                    member_volume_info = existing_volumes_by_id[member_volume_id]
1067                                    member_volumes_to_trim.append(member_volume_info)
1068
1069                                    total_member_volume_size_bytes += int(member_volume_info["totalSizeInBytes"])
1070                                    total_trimmed_size_pct = round(total_member_volume_size_bytes / base_volume_size_bytes * 100)
1071
1072                                    if total_trimmed_size_pct >= trim_pct:
1073                                        changes["trim_reserve_capacity"].append({"concat_volume_id": concat_volume_id, "trim_count": trim_count + 1})
1074
1075                                        # Expand after trim if needed.
1076                                        if total_trimmed_size_pct > trim_pct:
1077                                            expand_reserve_capacity_pct = total_trimmed_size_pct - trim_pct
1078                                            expand_volumes.update({
1079                                                volume_name: {"reserve_capacity_pct": expand_reserve_capacity_pct,
1080                                                              "preferred_reserve_storage_pool": volume_info["preferred_reserve_storage_pool"],
1081                                                              "reserve_volume_id": snapshot_volume["repositoryVolume"]}})
1082                                        break
1083                                else:
1084                                    initial_reserve_volume_info = existing_volumes_by_id[concat_volume_info["memberRefs"][0]]
1085                                    minimum_capacity_pct = round(int(initial_reserve_volume_info["totalSizeInBytes"]) / base_volume_size_bytes * 100)
1086                                    self.module.fail_json(msg="Cannot delete initial reserve capacity volume! Minimum reserve capacity percent [%s]. "
1087                                                              "Base volume [%s]. Group [%s]. Array [%s]." % (minimum_capacity_pct, volume_name,
1088                                                                                                             self.group_name, self.ssid))
1089            changes.update({"expand_reserve_capacity": expand_volumes,
1090                            "update_snapshot_volumes_writable": writable_volumes})
1091        return changes
1092
1093    def rollback_changes_required(self):
1094        """Determine the changes required for snapshot consistency group point-in-time rollback."""
1095        return self.get_pit_info()
1096
1097    def remove_snapshot_consistency_group(self, info):
1098        """remove a new snapshot consistency group."""
1099        try:
1100            rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, info["consistency_group_id"]), method="DELETE")
1101        except Exception as error:
1102            self.module.fail_json(msg="Failed to remove snapshot consistency group! Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1103
1104    def create_snapshot_consistency_group(self, group_info):
1105        """Create a new snapshot consistency group."""
1106        consistency_group_request = {"name": self.group_name,
1107                                     "fullWarnThresholdPercent": group_info["alert_threshold_pct"],
1108                                     "autoDeleteThreshold": group_info["maximum_snapshots"],
1109                                     "repositoryFullPolicy": group_info["reserve_capacity_full_policy"],
1110                                     "rollbackPriority": group_info["rollback_priority"]}
1111
1112        try:
1113            rc, group = self.request("storage-systems/%s/consistency-groups" % self.ssid, method="POST", data=consistency_group_request)
1114            self.cache["get_consistency_group"].update({"consistency_group_id": group["cgRef"]})
1115        except Exception as error:
1116            self.module.fail_json(msg="Failed to remove snapshot consistency group! Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1117
1118    def update_snapshot_consistency_group(self, group_info):
1119        """Create a new snapshot consistency group."""
1120        group_id = self.get_consistency_group()["consistency_group_id"]
1121        consistency_group_request = {"name": self.group_name}
1122        if "alert_threshold_pct" in group_info.keys():
1123            consistency_group_request.update({"fullWarnThresholdPercent": group_info["alert_threshold_pct"]})
1124        if "maximum_snapshots" in group_info.keys():
1125            consistency_group_request.update({"autoDeleteThreshold": group_info["maximum_snapshots"]})
1126        if "reserve_capacity_full_policy" in group_info.keys():
1127            consistency_group_request.update({"repositoryFullPolicy": group_info["reserve_capacity_full_policy"]})
1128        if "rollback_priority" in group_info.keys():
1129            consistency_group_request.update({"rollbackPriority": group_info["rollback_priority"]})
1130
1131        try:
1132            rc, group = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST", data=consistency_group_request)
1133            return group["cgRef"]
1134        except Exception as error:
1135            self.module.fail_json(msg="Failed to remove snapshot consistency group! Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1136
1137    def add_base_volumes(self, volumes):
1138        """Add base volume(s) to the consistency group."""
1139        group_id = self.get_consistency_group()["consistency_group_id"]
1140        member_volume_request = {"volumeToCandidates": {}}
1141
1142        for volume_name, volume_info in volumes.items():
1143            candidate = self.get_candidate(volume_name, volume_info)
1144            member_volume_request["volumeToCandidates"].update({volume_info["id"]: candidate["candidate"]["candidate"]})
1145
1146        try:
1147            rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/batch" % (self.ssid, group_id),
1148                                    method="POST", data=member_volume_request)
1149        except Exception as error:
1150            self.module.fail_json(msg="Failed to add reserve capacity volume! Base volumes %s. Group [%s]. Error [%s]."
1151                                      " Array [%s]." % (", ".join([volume for volume in member_volume_request.keys()]), self.group_name, error, self.ssid))
1152
1153    def remove_base_volumes(self, volume_info_list):
1154        """Add base volume(s) to the consistency group."""
1155        group_id = self.get_consistency_group()["consistency_group_id"]
1156
1157        for name, info in volume_info_list.items():
1158            try:
1159                rc, resp = self.request("storage-systems/%s/consistency-groups/%s/member-volumes/%s" % (self.ssid, group_id, info["id"]), method="DELETE")
1160            except Exception as error:
1161                self.module.fail_json(msg="Failed to remove reserve capacity volume! Base volume [%s]. Group [%s]. Error [%s]. "
1162                                          "Array [%s]." % (name, self.group_name, error, self.ssid))
1163
1164    def expand_reserve_capacities(self, reserve_volumes):
1165        """Expand base volume(s) reserve capacity."""
1166        for volume_name, volume_info in reserve_volumes.items():
1167            candidate = self.get_candidate(volume_name, volume_info)
1168            expand_request = {"repositoryRef": volume_info["reserve_volume_id"],
1169                              "expansionCandidate": candidate["candidate"]["candidate"]}
1170            try:
1171                rc, resp = self.request("/storage-systems/%s/repositories/concat/%s/expand" % (self.ssid, volume_info["reserve_volume_id"]),
1172                                        method="POST", data=expand_request)
1173            except Exception as error:
1174                self.module.fail_json(msg="Failed to expand reserve capacity volume! Group [%s]. Error [%s]. Array [%s]." % (self.group_name, error, self.ssid))
1175
1176    def trim_reserve_capacities(self, trim_reserve_volume_info_list):
1177        """trim base volume(s) reserve capacity."""
1178        for info in trim_reserve_volume_info_list:
1179            trim_request = {"concatVol": info["concat_volume_id"],
1180                            "trimCount": info["trim_count"],
1181                            "retainRepositoryMembers": False}
1182            try:
1183                rc, trim = self.request("storage-systems/%s/symbol/trimConcatVolume?verboseErrorResponse=true" % self.ssid, method="POST", data=trim_request)
1184            except Exception as error:
1185                self.module.fail_json(msg="Failed to trim reserve capacity. Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1186
    def create_pit_images(self):
        """Generate snapshot image(s) for the base volumes in the consistency group.

        A single POST creates one point-in-time image per base volume member. When
        I(pit_name) is supplied, a key-value metadata record (timestamp|name|description)
        is also stored so the image can later be selected by name.
        """
        group_id = self.get_consistency_group()["consistency_group_id"]

        try:
            rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots" % (self.ssid, group_id), method="POST")

            # Embedded web services should store the pit_image metadata since sending it to the proxy will be written to it instead.
            if self.pit_name:
                try:
                    # Key format: "ansible|<group>|<pit_name>"; value format: "<timestamp>|<pit_name>|<description>".
                    rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="POST",
                                                  data="%s|%s|%s" % (images[0]["pitTimestamp"], self.pit_name, self.pit_description))
                except Exception as error:
                    # fail_json terminates the module, so the outer handler below is not re-entered.
                    self.module.fail_json(msg="Failed to create metadata for snapshot images!"
                                              " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
        except Exception as error:
            self.module.fail_json(msg="Failed to create consistency group snapshot images!"
                                      " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1205
1206    def remove_pit_images(self, pit_info):
1207        """Remove selected snapshot point-in-time images."""
1208        group_id = self.get_consistency_group()["consistency_group_id"]
1209
1210        pit_sequence_number = int(pit_info["sequence_number"])
1211        sequence_numbers = set(int(pit_image["sequence_number"]) for timestamp, pit_image in self.get_pit_images_by_timestamp().items()
1212                               if int(pit_image["sequence_number"]) < pit_sequence_number)
1213        sequence_numbers.add(pit_sequence_number)
1214
1215        for sequence_number in sorted(sequence_numbers):
1216
1217            try:
1218                rc, images = self.request("storage-systems/%s/consistency-groups/%s/snapshots/%s" % (self.ssid, group_id, sequence_number), method="DELETE")
1219            except Exception as error:
1220                self.module.fail_json(msg="Failed to create consistency group snapshot images!"
1221                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1222
1223        # Embedded web services should store the pit_image metadata since sending it to the proxy will be written to it instead.
1224        if self.pit_name:
1225            try:
1226                rc, key_values = self.request(self.url_path_prefix + "key-values/ansible|%s|%s" % (self.group_name, self.pit_name), method="DELETE")
1227            except Exception as error:
1228                self.module.fail_json(msg="Failed to delete metadata for snapshot images!"
1229                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1230
1231    def cleanup_old_pit_metadata(self, keys):
1232        """Delete unused point-in-time image metadata."""
1233        for key in keys:
1234            try:
1235                rc, images = self.request("key-values/%s" % key, method="DELETE")
1236            except Exception as error:
1237                self.module.fail_json(msg="Failed to purge unused point-in-time image metadata! Key [%s]. Array [%s]."
1238                                          " Error [%s]." % (key, self.ssid, error))
1239
    def create_view(self, view_info):
        """Generate consistency group view.

        Creates a snapshot volume for each base volume member from the selected
        point-in-time images and, when a host was specified for a member, maps the
        resulting snapshot volume to that host or host group.

        :param dict view_info: view creation details containing "name", "sequence_number",
            "volumes" (base volume name to volume information) and "images" (point-in-time
            image information including "base_volume_name" and "id").
        """
        group_id = self.get_consistency_group()["consistency_group_id"]
        view_request = {"name": view_info["name"],
                        "pitSequenceNumber": view_info["sequence_number"],
                        "requests": []}

        for volume_name, volume_info in view_info["volumes"].items():
            # Only writable snapshot volumes require a reserve-capacity candidate.
            candidate = None
            if volume_info["snapshot_volume_writable"]:
                candidate = self.get_candidate(volume_name, volume_info)

            # Match the base volume to its point-in-time image; the for-else fails when no image matches.
            for image in view_info["images"]:
                if volume_name == image["base_volume_name"]:
                    view_request["requests"].append({"pitId": image["id"],
                                                     "candidate": candidate["candidate"]["candidate"] if candidate else None,
                                                     "accessMode": "readWrite" if volume_info["snapshot_volume_writable"] else "readOnly",
                                                     "scanMedia": volume_info["snapshot_volume_validate"],
                                                     "validateParity": volume_info["snapshot_volume_validate"]})
                    break
            else:
                self.module.fail_json(msg="Base volume does not exist! Volume [%s]. Group [%s]. Array [%s]." % (volume_name, self.group_name, self.ssid))
        try:
            rc, images = self.request("storage-systems/%s/consistency-groups/%s/views/batch" % (self.ssid, group_id), method="POST", data=view_request)

            # Determine snapshot volume mappings
            view = self.get_consistency_group_view()
            existing_volumes_by_id = self.get_all_volumes_by_id()
            existing_hosts_by_name = self.get_all_hosts_and_hostgroups_by_name()
            for volume_name, volume_info in self.volumes.items():
                if volume_info["snapshot_volume_host"]:
                    # Locate the snapshot volume that was created for this base volume and map it.
                    for snapshot_volume in view["snapshot_volumes"]:
                        if volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"]:
                            snapshot_volume_map_request = {"mappableObjectId": snapshot_volume["id"],
                                                           "lun": volume_info["snapshot_volume_lun"],
                                                           "targetId": existing_hosts_by_name[volume_info["snapshot_volume_host"]]["id"]}
                            try:
                                rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=snapshot_volume_map_request)
                            except Exception as error:
                                # fail_json terminates the module, so the outer handler below is not re-entered.
                                self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]."
                                                          " Error [%s]" % (snapshot_volume["name"], self.view_name, self.group_name, self.ssid, error))
                            break
        except Exception as error:
            self.module.fail_json(msg="Failed to create consistency group snapshot volumes!"
                                      " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1285
1286    def map_view(self, map_information_list):
1287        """Map consistency group point-in-time snapshot volumes to host or host group."""
1288        existing_volumes = self.get_all_volumes_by_id()
1289        existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id()
1290        for map_request in map_information_list:
1291            try:
1292                rc, mapping = self.request("storage-systems/%s/volume-mappings" % self.ssid, method="POST", data=map_request)
1293            except Exception as error:
1294                self.module.fail_json(msg="Failed to map snapshot volume! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]."
1295                                          " Error [%s]." % (existing_volumes[map_request["mappableObjectId"]],
1296                                                            existing_host_or_hostgroups[map_request["targetId"]],
1297                                                            map_request["lun"], self.group_name, self.ssid, error))
1298
1299    def unmap_view(self, unmap_info_list):
1300        """Unmap consistency group point-in-time snapshot volumes from host or host group."""
1301        for unmap_info in unmap_info_list:
1302            try:
1303                rc, unmap = self.request("storage-systems/%s/volume-mappings/%s" % (self.ssid, unmap_info["lun_mapping_reference"]), method="DELETE")
1304            except Exception as error:
1305                self.module.fail_json(msg="Failed to unmap snapshot volume! Snapshot volume [%s]. View [%s]. Group [%s]. Array [%s]."
1306                                          " Error [%s]." % (unmap_info["snapshot_volume_name"], self.view_name, self.group_name, self.ssid, error))
1307
1308    def move_view_mapping(self, map_information_list):
1309        """Move consistency group point-in-time snapshot volumes to a different host or host group."""
1310        existing_volumes = self.get_all_volumes_by_id()
1311        existing_host_or_hostgroups = self.get_all_hosts_and_hostgroups_by_id()
1312        for map_request in map_information_list:
1313            try:
1314                rc, mapping = self.request("storage-systems/%s/symbol/moveLUNMapping?verboseErrorResponse=true" % self.ssid, method="POST", data=map_request)
1315            except Exception as error:
1316                self.module.fail_json(msg="Failed to move snapshot volume mapping! Snapshot volume [%s]. Target [%s]. Lun [%s]. Group [%s]. Array [%s]."
1317                                          " Error [%s]." % (existing_volumes[map_request["mappableObjectId"]],
1318                                                            existing_host_or_hostgroups[map_request["targetId"]],
1319                                                            map_request["lun"], self.group_name, self.ssid, error))
1320
1321    def convert_view_to_writable(self, convert_view_information_list):
1322        """Make consistency group point-in-time snapshot volumes writable."""
1323        for volume_name, volume_info in convert_view_information_list.items():
1324            candidate = self.get_candidate(volume_name, volume_info)
1325            convert_request = {"fullThreshold": self.alert_threshold_pct,
1326                               "repositoryCandidate": candidate["candidate"]["candidate"]}
1327            try:
1328                rc, convert = self.request("/storage-systems/%s/snapshot-volumes/%s/convertReadOnly" % (self.ssid, volume_info["snapshot_volume_id"]),
1329                                           method="POST", data=convert_request)
1330            except Exception as error:
1331                self.module.fail_json(msg="Failed to convert snapshot volume to read/write! Snapshot volume [%s]. View [%s] Group [%s]. Array [%s]."
1332                                          " Error [%s]." % (volume_info["snapshot_volume_id"], self.view_name, self.group_name, self.ssid, error))
1333
1334    def remove_view(self, view_id):
1335        """Remove a consistency group view."""
1336        group_id = self.get_consistency_group()["consistency_group_id"]
1337
1338        try:
1339            rc, images = self.request("storage-systems/%s/consistency-groups/%s/views/%s" % (self.ssid, group_id, view_id), method="DELETE")
1340        except Exception as error:
1341            self.module.fail_json(msg="Failed to create consistency group snapshot volumes!"
1342                                      " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1343
1344    def rollback(self, rollback_info):
1345        """Rollback consistency group base volumes to point-in-time snapshot images."""
1346        group_info = self.get_consistency_group()
1347        group_id = group_info["consistency_group_id"]
1348
1349        if self.rollback_backup:
1350            self.create_pit_images()
1351
1352        # Ensure consistency group rollback priority is set correctly prior to rollback.
1353        if self.rollback_priority:
1354            try:
1355                rc, resp = self.request("storage-systems/%s/consistency-groups/%s" % (self.ssid, group_id), method="POST",
1356                                        data={"rollbackPriority": self.rollback_priority})
1357            except Exception as error:
1358                self.module.fail_json(msg="Failed to updated consistency group rollback priority!"
1359                                          " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1360
1361        try:
1362            rc, resp = self.request("storage-systems/%s/symbol/startPITRollback" % self.ssid, method="POST",
1363                                    data={"pitRef": [image["id"] for image in rollback_info["images"]]})
1364        except Exception as error:
1365            self.module.fail_json(msg="Failed to initiate rollback operations!" " Group [%s]. Array [%s]. Error [%s]." % (self.group_name, self.ssid, error))
1366
    def complete_volume_definitions(self):
        """Determine the complete self.volumes structure.

        Populates self.volumes with every base volume in the consistency group when none
        were supplied, defaults/validates each entry's preferred reserve storage pool, and,
        when creating a view, resolves each snapshot volume's LUN (reusing an existing
        mapping's LUN or selecting the next unused LUN for the target host).
        """
        group = self.get_consistency_group()

        # Default to all consistency group base volumes when the task specified none.
        if not self.volumes:
            for volume in group["base_volumes"]:
                self.volumes.update({volume["name"]: {"reserve_capacity_pct": self.reserve_capacity_pct,
                                                      "preferred_reserve_storage_pool": self.preferred_reserve_storage_pool,
                                                      "snapshot_volume_writable": self.view_writable,
                                                      "snapshot_volume_validate": self.view_validate,
                                                      "snapshot_volume_host": self.view_host,
                                                      "snapshot_volume_lun": None}})

        # Ensure a preferred_reserve_storage_pool has been selected
        existing_storage_pools_by_id = self.get_all_storage_pools_by_id()
        existing_storage_pools_by_name = self.get_all_storage_pools_by_name()
        existing_volumes_by_name = self.get_all_volumes_by_name()
        existing_volumes_by_id = self.get_all_volumes_by_id()
        existing_mappings = self.get_mapping_by_id()
        existing_host_and_hostgroup_by_id = self.get_all_hosts_and_hostgroups_by_id()
        existing_host_and_hostgroup_by_name = self.get_all_hosts_and_hostgroups_by_name()
        for volume_name, volume_info in self.volumes.items():
            base_volume_storage_pool_id = existing_volumes_by_name[volume_name]["volumeGroupRef"]
            base_volume_storage_pool_name = existing_storage_pools_by_id[base_volume_storage_pool_id]["name"]

            # Check storage group information.
            # Default the reserve storage pool to the base volume's own pool; otherwise verify it exists.
            if not volume_info["preferred_reserve_storage_pool"]:
                volume_info["preferred_reserve_storage_pool"] = base_volume_storage_pool_name
            elif volume_info["preferred_reserve_storage_pool"] not in existing_storage_pools_by_name.keys():
                self.module.fail_json(msg="Preferred storage pool or volume group does not exist! Storage pool [%s]. Group [%s]."
                                          " Array [%s]." % (volume_info["preferred_reserve_storage_pool"], self.group_name, self.ssid))

            # Check host mapping information
            if self.state == "present" and self.type == "view":
                view_info = self.get_consistency_group_view()

                if volume_info["snapshot_volume_host"]:
                    if volume_info["snapshot_volume_host"] not in existing_host_and_hostgroup_by_name:
                        self.module.fail_json(msg="Specified host or host group does not exist! Host [%s]. Group [%s]."
                                                  " Array [%s]." % (volume_info["snapshot_volume_host"], self.group_name, self.ssid))

                    if not volume_info["snapshot_volume_lun"]:
                        if view_info:
                            # Reuse the LUN of an existing snapshot volume mapping for the same base volume and host.
                            for snapshot_volume in view_info["snapshot_volumes"]:
                                if snapshot_volume["listOfMappings"]:
                                    mapping = snapshot_volume["listOfMappings"][0]
                                    if (volume_name == existing_volumes_by_id[snapshot_volume["baseVol"]]["name"] and
                                            volume_info["snapshot_volume_host"] == existing_host_and_hostgroup_by_id[mapping["mapRef"]]["name"]):
                                        volume_info["snapshot_volume_lun"] = mapping["lun"]
                                        break
                            else:
                                # No existing mapping matched: pick the next unused LUN (1-99) for the target host,
                                # recording it in existing_mappings so later volumes do not reuse it.
                                host_id = existing_host_and_hostgroup_by_name[volume_info["snapshot_volume_host"]]["id"]
                                for next_lun in range(1, 100):

                                    if host_id not in existing_mappings.keys():
                                        existing_mappings.update({host_id: {}})

                                    if next_lun not in existing_mappings[host_id].keys():
                                        volume_info["snapshot_volume_lun"] = next_lun
                                        existing_mappings[host_id].update({next_lun: None})
                                        break
1428
1429    def apply(self):
1430        """Apply any required snapshot state changes."""
1431        changes_required = False
1432        group = self.get_consistency_group()
1433        group_changes = {}
1434
1435        # Determine which changes are required.
1436        if group:
1437
1438            # Determine whether changes are required.
1439            if self.state == "absent":
1440                if self.type == "group":
1441                    if self.group_name:
1442                        changes_required = True
1443                elif self.type == "pit":
1444                    group_changes = self.get_pit_info()
1445                    if group_changes:
1446                        changes_required = True
1447                elif self.type == "view":
1448                    group_changes = self.get_consistency_group_view()
1449                    if group_changes:
1450                        changes_required = True
1451
1452            elif self.state == "present":
1453                self.complete_volume_definitions()
1454
1455                if self.type == "group":
1456                    group_changes = self.update_changes_required()
1457                    if (group_changes["update_group"] or
1458                            group_changes["add_volumes"] or
1459                            group_changes["remove_volumes"] or
1460                            group_changes["expand_reserve_capacity"] or
1461                            group_changes["trim_reserve_capacity"]):
1462                        changes_required = True
1463
1464                elif self.type == "pit":
1465                    changes_required = True
1466
1467                elif self.type == "view":
1468                    if self.get_consistency_group_view():
1469                        group_changes = self.update_view_changes_required()
1470                        if (group_changes["expand_reserve_capacity"] or
1471                                group_changes["trim_reserve_capacity"] or
1472                                group_changes["map_snapshot_volumes_mapping"] or
1473                                group_changes["unmap_snapshot_volumes_mapping"] or
1474                                group_changes["move_snapshot_volumes_mapping"] or
1475                                group_changes["update_snapshot_volumes_writable"]):
1476                            changes_required = True
1477                    else:
1478                        group_changes = self.create_view_changes_required()
1479                        changes_required = True
1480
1481            elif self.state == "rollback":
1482                self.complete_volume_definitions()
1483                if not self.volumes:
1484                    for volume in group["base_volumes"]:
1485                        self.volumes.update({volume["name"]: None})
1486                group_changes = self.rollback_changes_required()
1487                if group_changes:
1488                    changes_required = True
1489
1490        else:
1491            if self.state == "present":
1492                if self.type == "group":
1493                    self.complete_volume_definitions()
1494                    group_changes = self.create_changes_required()
1495                    changes_required = True
1496                elif self.type == "pit":
1497                    self.module.fail_json(msg="Snapshot point-in-time images cannot be taken when the snapshot consistency group does not exist!"
1498                                              " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1499                elif self.type == "view":
1500                    self.module.fail_json(msg="Snapshot view cannot be created when the snapshot consistency group does not exist!"
1501                                              " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1502            elif self.state == "rollback":
1503                self.module.fail_json(msg="Rollback operation is not available when the snapshot consistency group does not exist!"
1504                                          " Group [%s]. Array [%s]." % (self.group_name, self.ssid))
1505
1506        # Determine if they're any key-value pairs that need to be cleaned up since snapshot pit images were deleted outside of this module.
1507        unused_pit_keys = self.get_unused_pit_key()
1508
1509        # Apply any required changes.
1510        if (changes_required or unused_pit_keys) and not self.module.check_mode:
1511            if group:
1512                if self.state == "absent":
1513                    if self.type == "group":
1514                        self.remove_snapshot_consistency_group(group)
1515                    elif self.type == "pit":
1516                        self.remove_pit_images(group_changes)
1517                    elif self.type == "view":
1518                        self.remove_view(group_changes["id"])
1519
1520                elif self.state == "present":
1521
1522                    if self.type == "group":
1523                        if group_changes["update_group"]:
1524                            self.update_snapshot_consistency_group(group_changes["update_group"])
1525                        if group_changes["add_volumes"]:
1526                            self.add_base_volumes(group_changes["add_volumes"])
1527                        if group_changes["remove_volumes"]:
1528                            self.remove_base_volumes(group_changes["remove_volumes"])
1529                        if group_changes["trim_reserve_capacity"]:
1530                            self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
1531                            if group_changes["expand_reserve_capacity"]:
1532                                sleep(15)
1533                        if group_changes["expand_reserve_capacity"]:
1534                            self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
1535
1536                    elif self.type == "pit":
1537                        self.create_pit_images()
1538
1539                    elif self.type == "view":
1540                        view = self.get_consistency_group_view()
1541                        if view:
1542                            if group_changes["trim_reserve_capacity"]:
1543                                self.trim_reserve_capacities(group_changes["trim_reserve_capacity"])
1544                                if group_changes["expand_reserve_capacity"]:
1545                                    sleep(15)
1546                            if group_changes["expand_reserve_capacity"]:
1547                                self.expand_reserve_capacities(group_changes["expand_reserve_capacity"])
1548                            if group_changes["map_snapshot_volumes_mapping"]:
1549                                self.map_view(group_changes["map_snapshot_volumes_mapping"])
1550                            if group_changes["unmap_snapshot_volumes_mapping"]:
1551                                self.unmap_view(group_changes["unmap_snapshot_volumes_mapping"])
1552                            if group_changes["move_snapshot_volumes_mapping"]:
1553                                self.move_view_mapping(group_changes["move_snapshot_volumes_mapping"])
1554                            if group_changes["update_snapshot_volumes_writable"]:
1555                                self.convert_view_to_writable(group_changes["update_snapshot_volumes_writable"])
1556                        else:
1557                            self.create_view(group_changes)
1558
1559                elif self.state == "rollback":
1560                    self.rollback(group_changes)
1561
1562            elif self.type == "group":
1563                self.create_snapshot_consistency_group(group_changes["create_group"])
1564                self.add_base_volumes(group_changes["add_volumes"])
1565
1566            if unused_pit_keys:
1567                self.cleanup_old_pit_metadata()
1568
1569        self.module.exit_json(changed=changes_required, group_changes=group_changes, deleted_metadata_keys=unused_pit_keys)
1570
1571
def main():
    """Module entry point: build the snapshot manager and apply the requested state."""
    NetAppESeriesSnapshot().apply()


if __name__ == "__main__":
    main()
1579