1"""
Copyright 2019 Ettus Research, A National Instruments Brand
3
4SPDX-License-Identifier: GPL-3.0-or-later
5
6RFNoC image builder: All the algorithms required to turn either a YAML
7description or a GRC file into an rfnoc_image_core.v file.
8"""
9
10from collections import deque
11from collections import OrderedDict
12
13import logging
14import os
15import re
16import sys
17
18import mako.lookup
19import mako.template
20from mako import exceptions
21from ruamel import yaml
22
### DATA ######################################################################
# Directory under the FPGA repo where the device directories are
USRP3_TOP_DIR = os.path.join('usrp3', 'top')

# Directory under the FPGA repo where the shared RFNoC HDL sources are
USRP3_LIB_RFNOC_DIR = os.path.join('usrp3', 'lib', 'rfnoc')

# Subdirectory for the core YAML files
RFNOC_CORE_DIR = os.path.join('rfnoc', 'core')

# Path to the system's bash executable
BASH_EXECUTABLE = '/bin/bash' # FIXME this should come from somewhere

# Map device names to the corresponding directory under usrp3/top
# (several device variants share one build directory, e.g. n300/n310/n320
# all build out of n3xx)
DEVICE_DIR_MAP = {
    'x300': 'x300',
    'x310': 'x300',
    'e300': 'e300',
    'e310': 'e31x',
    'e320': 'e320',
    'n300': 'n3xx',
    'n310': 'n3xx',
    'n320': 'n3xx',
}

# Picks the default make target per device
# NOTE(review): 'e300' has no entry here -- presumably it always requires an
# explicit target on the command line; confirm against the build flow.
DEVICE_DEFAULTTARGET_MAP = {
    'x300': 'X300_HG',
    'x310': 'X310_HG',
    'e310': 'E310_SG3',
    'e320': 'E320_1G',
    'n300': 'N300_HG',
    'n310': 'N310_HG',
    'n320': 'N320_XG',
}
57
58
59# Adapted from code found at
60# https://stackoverflow.com/questions/5121931/
61#     in-python-how-can-you-load-yaml-mappings-as-ordereddicts
62# (Accessed 17 October 2019)
def ordered_load(stream, Loader=yaml.SafeLoader, object_pairs_hook=OrderedDict):
    """
    Load YAML from stream while preserving mapping order.

    Every YAML mapping is constructed through *object_pairs_hook*
    (OrderedDict by default) so the entry order of the file is retained
    even on Python versions whose dict does not guarantee insertion order.

    :param stream: file-like object or string with the YAML document
    :param Loader: base loader class to derive the ordered loader from
    :param object_pairs_hook: callable building a mapping from (key, value) pairs
    :return: the loaded document with ordered mappings
    """
    def _construct_ordered_mapping(loader, node):
        # Resolve YAML merge keys before collecting the pairs.
        loader.flatten_mapping(node)
        return object_pairs_hook(loader.construct_pairs(node))

    class _OrderedLoader(Loader):
        pass

    _OrderedLoader.add_constructor(
        yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
        _construct_ordered_mapping)
    return yaml.load(stream, _OrderedLoader)
77
def split(iterable, function):
    """
    Split an iterable by condition. Matching items are returned in the first
    deque of the returned tuple, unmatched items in the second.

    :param iterable: an iterable to split
    :param function: an expression that returns True/False for iterable values
    :return: 2-tuple with deque for matching/non-matching items
    """
    matching = deque()
    rest = deque()
    for item in iterable:
        bucket = matching if function(item) else rest
        bucket.append(item)
    return matching, rest
93
94
def expand_io_port_desc(io_ports, signatures):
    """
    Add a 'wires' entry to each io port dictionary entry.

    The wire list is the complete set of wires for the specific port
    according to the information in the signature file. Each wire entry
    contains:
    * fully resolved wire name (the name from the signature, or the renamed
      version if the io port carries a 'rename' pattern/repl pair; patterns
      may use back references to retain parts of the original name)
    * width in bits
    * direction as input/output, derived from the port's drive
      (master/broadcaster vs. slave/listener) combined with the wire's
      from-master/to-master type

    :param io_ports: io port dictionary from yml configuration
    :param signatures: signature description from yml configuration
    :return: None (io_ports is modified in place)
    """
    # drive -> wire type -> Verilog direction keyword (padded for alignment)
    direction_map = {
        "master": {"from-master": "input ", "to-master": "output"},
        "slave": {"from-master": "output", "to-master": "input "},
        "broadcaster": {None: "input "},
        "listener": {None: "output"},
    }
    for io_port in io_ports.values():
        expanded = []
        for sig in signatures[io_port["type"]]["ports"]:
            direction = direction_map[io_port["drive"]][sig.get("type", None)]
            wire_name = sig["name"]
            if "rename" in io_port:
                rename = io_port["rename"]
                wire_name = re.sub(
                    rename["pattern"], rename["repl"], wire_name, 1)
            expanded.append({
                "direction": direction,
                "width": sig.get("width", 1),
                "name": wire_name,
            })
        io_port["wires"] = expanded
132
133# pylint: disable=too-few-public-methods
class IOConfig:
    """
    Configuration object built from a yml description.

    Every top level entry of the configuration dictionary becomes a member
    variable of this object. If the configuration contains an io_ports
    section, each port additionally receives its wire list derived from the
    signature file. Resolving the wires here means malformed yml
    configurations fail in this script, which is easier to track and debug
    than a failure inside the mako template engine.
    """
    def __init__(self, config, signatures):
        # Promote every top-level configuration entry to an attribute.
        self.__dict__.update(**config)
        if hasattr(self, "io_ports"):
            expand_io_port_desc(self.io_ports, signatures)
151
152
class ImageBuilderConfig:
    """
    Holds the complete image configuration settings. This includes
    * the image configuration itself as it is passed to the script
    * all noc block configurations found by the script
    * device configuration information as found in the bsp.yml of the device
      information passed to the script.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, config, blocks, device):
        """
        :param config: image configuration dictionary (parsed YAML/GRC)
        :param blocks: dictionary of noc block descriptions (IOConfig objects)
        :param device: BSP configuration object of the target device
        """
        self.noc_blocks = OrderedDict()
        self.stream_endpoints = OrderedDict()
        self.connections = []
        self.clk_domains = []
        self.block_ports = OrderedDict()
        self.io_ports = OrderedDict()
        self.clocks = OrderedDict()
        self.block_con = []
        self.io_port_con_ms = []
        self.io_port_con_bl = []
        self.clk_domain_con = []
        # read configuration from config dictionary (overrides the defaults
        # above for every key present in the configuration)
        self.__dict__.update(**config)
        self.blocks = blocks
        self.device = device
        self._update_sep_defaults()
        self._set_indices()
        self._collect_noc_ports()
        self._collect_io_ports()
        self._collect_clocks()
        self.pick_connections()
        self.pick_clk_domains()

    def _update_sep_defaults(self):
        """
        Update any missing stream endpoint attributes with default values
        (one data port in each direction).
        """
        for sep in self.stream_endpoints.values():
            sep.setdefault("num_data_i", 1)
            sep.setdefault("num_data_o", 1)

    def _set_indices(self):
        """
        Add an index for each port of each stream endpoint and noc block.
        These indices are used to generate static_router.hex. Index 0 is
        reserved; stream endpoints start at 1, noc blocks follow after the
        last stream endpoint.
        """
        start = 1
        i = 0  # keep i defined even if there are no stream endpoints
        for i, sep in enumerate(self.stream_endpoints.values()):
            sep["index"] = i + start
        start = start + i + 1
        for i, block in enumerate(self.noc_blocks.values()):
            block["index"] = start + i

    def _collect_noc_ports(self):
        """
        Create lookup table for noc blocks. The key is a tuple of block
        name, port name and flow direction. If any block port has num_ports > 1
        then unroll that port into multiple ports of the same name plus a
        number to make its name unique.
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            # Update per-instance parameters
            if not hasattr(desc, "parameters"):
                setattr(desc, "parameters", {})
            if "parameters" not in block:
                block["parameters"] = OrderedDict()
            # Iterate over a copy of the keys: invalid parameters are
            # deleted from the dict while iterating, which would otherwise
            # raise "dictionary changed size during iteration".
            for key in list(block["parameters"].keys()):
                if key not in desc.parameters:
                    logging.error("Unknown parameter %s for block %s", key, name)
                    del block["parameters"][key]
            # Fill in defaults for parameters the instance did not set
            for param, value in desc.parameters.items():
                if param not in block["parameters"]:
                    block["parameters"][param] = value
            # Generate list of block ports, adding 'index' to each port's dict
            for direction in ("inputs", "outputs"):
                index = 0
                for port_name, port_info in desc.data[direction].items():
                    num_ports = 1
                    if "num_ports" in port_info:
                        num_ports = port_info["num_ports"]

                    # If num_ports isn't an integer, it could be an expression
                    # using values from the parameters section (e.g.,
                    # NUM_PORTS*NUM_BRANCHES for a stream-splitting block).
                    # If the parameter doesn't resolve to an integer, treat it
                    # as an expression that needs to be evaluated, hopefully to
                    # an integer.
                    if not isinstance(num_ports, int):
                        # Create a regex to find identifiers.
                        regex_ident = re.compile(r'[A-Za-z_][A-Za-z0-9_]*')

                        # Get a list of all identifiers in the num_ports
                        # expression and iterate over them all
                        idents = re.finditer(regex_ident, num_ports)
                        for ident in idents:
                            # If the identifier represents a valid parameter
                            # in the block, replace the identifier text with
                            # the value of the parameter. If no matching
                            # parameter is found, just leave the text in
                            # place. That may result in an exception being
                            # thrown from eval(), but we'll catch it and
                            # report an error a bit later on.
                            if ident[0] in block["parameters"]:
                                val = str(block["parameters"][ident[0]])
                                num_ports = re.sub(ident[0], val, num_ports)

                        # Now, with identifiers resolved to parameter values,
                        # attempt to evaluate the expression. If eval() fails,
                        # we'll catch the exception, num_ports will remain non-
                        # integral, and the if statement after the exception
                        # is caught will inform the user.
                        # NOTE: the expression comes from a block description
                        # YAML file shipped with the build tree, not from
                        # untrusted input.
                        try:
                            num_ports = eval(num_ports)
                        except Exception:
                            # Leave num_ports non-integral; reported below.
                            pass

                    # Make sure the parameter resolved to a number
                    if not isinstance(num_ports, int):
                        logging.error(
                            "'num_ports' of port '%s' on block '%s' "
                            "resolved to invalid value of '%s'",
                            port_name, name, str(num_ports))
                        sys.exit(1)
                    if num_ports < 1 or num_ports > 64:
                        logging.error(
                            "'num_ports' of port '%s' on block '%s' "
                            "has invalid value '%s', must be in [1, 64]",
                            port_name, name, str(num_ports))
                        sys.exit(1)
                    if "num_ports" in port_info:
                        # If num_ports was a variable in the YAML, unroll into
                        # multiple ports
                        for i in range(num_ports):
                            new_port_info = port_info.copy()
                            new_port_info['index'] = index
                            index = index + 1
                            self.block_ports.update({(name, port_name + "_" \
                                + str(i), direction[:-1]) : new_port_info})
                    else:
                        port_info['index'] = index
                        self.block_ports.update(
                            {(name, port_name, direction[:-1]) : port_info})
                        index = index + 1
        # Stream endpoints contribute in<N>/out<N> data ports as well
        ports = self.stream_endpoints
        for sep in self.stream_endpoints:
            inputs = {(sep, "in%d" % port, "input") :
                      ports[sep] for port in range(ports[sep]["num_data_i"])}
            self.block_ports.update(inputs)
            outputs = {(sep, "out%d" % port, "output") :
                       ports[sep] for port in range(ports[sep]["num_data_o"])}
            self.block_ports.update(outputs)

    def _collect_io_ports(self):
        """
        Create lookup table for io ports. The key is a tuple of block name
        (_device_ for io ports of the bsp), the io port name and flow
        direction.
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            if hasattr(desc, "io_ports"):
                self.io_ports.update({
                    (name, io, desc.io_ports[io]["drive"]):
                    desc.io_ports[io] for io in desc.io_ports})
        self.io_ports.update({
            ("_device_", io, self.device.io_ports[io]["drive"]):
            self.device.io_ports[io] for io in self.device.io_ports})

    def _collect_clocks(self):
        """
        Create lookup table for clocks. The key is a tuple of block name
        (_device_ for clocks of the bsp), the clock name and flow
        direction
        """
        for name, block in self.noc_blocks.items():
            desc = self.blocks[block["block_desc"]]
            if hasattr(desc, "clocks"):
                self.clocks.update({
                    (name, clk["name"]): clk for clk in desc.clocks})
        if hasattr(self.device, "clocks"):
            self.clocks.update({
                ("_device_", clk["name"]): clk for clk in self.device.clocks})
        # Add the implied clocks for the BSP (always present, no fixed freq)
        self.clocks[("_device_", "rfnoc_ctrl")] = {"freq": '[]', "name": "rfnoc_ctrl"}
        self.clocks[("_device_", "rfnoc_chdr")] = {"freq": '[]', "name": "rfnoc_chdr"}

    def pick_clk_domains(self):
        """
        Filter clock domain list into a local list for easier access.
        Remaining connection items are printed as error and execution is
        aborted. Likewise, checks for unconnected clocks.
        """
        (self.clk_domain_con, self.clk_domains) = split(
            self.clk_domains, lambda con:
            (con["srcblk"], con["srcport"]) in self.clocks and
            (con["dstblk"], con["dstport"]) in self.clocks)

        # Check if there are unconnected clocks. The device's own clocks and
        # the implied rfnoc_ctrl/rfnoc_chdr clocks need no connection.
        connected = [(con["dstblk"], con["dstport"]) for con in self.clk_domain_con]
        unconnected = []
        for clk in self.clocks:
            if clk[0] != "_device_" and \
               clk[1] not in ["rfnoc_ctrl", "rfnoc_chdr"] and \
               clk not in connected:
                unconnected.append(clk)
        if unconnected:
            logging.error("%d unresolved clk domain(s)", len(unconnected))
            for clk in unconnected:
                logging.error("    %s:%s", clk[0], clk[1])
            logging.error("Please specify the clock(s) to connect")
            sys.exit(1)

        if self.clk_domains:
            logging.error("%d Unresolved clk domain(s)", len(self.clk_domains))

            for connection in self.clk_domains:
                logging.error("    (%s-%s -> %s-%s)",
                              connection["srcblk"], connection["srcport"],
                              connection["dstblk"], connection["dstport"])
            logging.error("Source or destination domain not found")
            sys.exit(1)

    def pick_connections(self):
        """
        Sort connection list into three local lists for
         * input => output (block port to block port)
         * master => slave (io port to io port)
         * broadcaster => listener (io port to io port)
        Remaining connection items are printed as error and execution is
        aborted. Possible reasons are
         * undeclared block or io port
         * connection direction wrong (e.g. output => input)
         * mixed connection type (e.g. master => listener)
        """
        # Helpers returning the port keys with the requested flow direction
        # (avoid shadowing the builtin 'type' in the parameter name).
        block_types = lambda direction: filter(
            lambda key: key[2] == direction, self.block_ports)
        io_types = lambda drive: filter(
            lambda key: key[2] == drive, self.io_ports)
        (self.block_con, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "output") in block_types("output") and
            (con["dstblk"], con["dstport"], "input") in block_types("input"))
        (self.io_port_con_ms, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "master") in io_types("master") and
            (con["dstblk"], con["dstport"], "slave") in io_types("slave"))
        (self.io_port_con_bl, self.connections) = split(
            self.connections, lambda con:
            (con["srcblk"], con["srcport"], "broadcaster") in io_types("broadcaster") and
            (con["dstblk"], con["dstport"], "listener") in io_types("listener"))

        if self.connections:
            logging.error("%d Unresolved connection(s)", len(self.connections))

            for connection in self.connections:
                logging.error("    (%s-%s -> %s-%s)",
                              connection["srcblk"], connection["srcport"],
                              connection["dstblk"], connection["dstport"])
            logging.debug("    Make sure block ports are connected output "
                          "(src) to input (dst)")
            logging.debug("    Available block ports for connections:")
            for block in self.block_ports:
                logging.debug("        %s", (block,))
            logging.debug("    Make sure io ports are connected master      "
                          "(src) to slave    (dst)")
            logging.debug("                                  or broadcaster "
                          "(src) to listener (dst)")
            logging.debug("    Available io ports for connections:")
            for io_port in self.io_ports:
                # Use debug level, consistent with the block port listing
                logging.debug("        %s", (io_port,))
            sys.exit(1)
427
def load_config(filename):
    """
    Load a yml configuration file.

    Configuration files are searched in the folder returned by
    get_config_path. This method logs an error and exits on IO failure.

    :param filename: yml configuration to load
    :return: configuration content as dictionary
    """
    dirname, basename = os.path.split(filename)
    try:
        with open(filename) as stream:
            logging.info(
                "Using %s from %s.", basename, os.path.normpath(dirname))
            return ordered_load(stream)
    except IOError:
        logging.error("%s misses %s", os.path.normpath(dirname), basename)
        sys.exit(1)
448
def device_config(config_path, device):
    """
    Load device config from bsp.yml

    Location of bsp.yml is derived from the device chosen in the arguments

    :param config_path: location of core configuration files
    :param device: device to build for
    :return: device configuration as dictionary
    """
    bsp_filename = "%s_bsp.yml" % device.lower()
    return load_config(os.path.join(config_path, bsp_filename))
460
461
def io_signatures(config_path):
    """
    Load IO signatures from io_signatures.yml

    :param config_path: location of core configuration files
    :return: IO signatures as dictionary
    """
    signature_file = os.path.join(config_path, "io_signatures.yml")
    return load_config(signature_file)
470
471
def read_grc_block_configs(path):
    """
    Read RFNoC block configurations used by GNU Radio Companion.

    :param path: location of grc block configuration files
    :return: dictionary of blocks (id mapped to description)
    """
    result = {}
    for root, _dirs, names in os.walk(path):
        for name in names:
            if not re.match(r".*\.block\.yml", name):
                continue
            with open(os.path.join(root, name)) as stream:
                config = ordered_load(stream)
            result[config["id"]] = config
    return result
488
489
def convert_to_image_config(grc, grc_config_path):
    """
    Convert a GNU Radio Companion grc dictionary into an image configuration.

    :param grc: parsed grc file content
    :param grc_config_path: location of grc block configuration files
    :return: image configuration as it would be returned by image_config(args),
             or None if the grc file does not contain exactly one device
    """
    grc_blocks = read_grc_block_configs(grc_config_path)
    # Partition the grc blocks by their declared type; anything without a
    # block representation is filtered out here.
    seps = {}
    blocks = {}
    devices = []
    for item in grc["blocks"]:
        item_type = item["parameters"]["type"]
        if item_type == 'sep':
            seps[item["name"]] = item
        elif item_type == 'block':
            blocks[item["name"]] = item
        elif item_type == 'device':
            devices.append(item)
    if len(devices) != 1:
        logging.error("More than one or no device found in grc file")
        return None
    device = devices[0]

    result = {
        "schema": "rfnoc_imagebuilder",
        "copyright": "Ettus Research, A National Instruments Brand",
        "license": "SPDX-License-Identifier: LGPL-3.0-or-later",
        "version": 1.0,
        "rfnoc_version": 1.0}
    # NOTE: grc top-level 'parameter' blocks are currently not translated
    # into the image configuration.

    result["stream_endpoints"] = {}
    for sep in seps.values():
        params = sep["parameters"]
        result["stream_endpoints"][sep["name"]] = {
            "ctrl": bool(params["ctrl"]),
            "data": bool(params["data"]),
            "buff_size": int(params["buff_size"])}

    result["noc_blocks"] = {}
    for block in blocks.values():
        result["noc_blocks"][block["name"]] = {
            "block_desc": block["parameters"]["desc"]}

    # Clock outputs of the device block (message ports in grc terms)
    device_clocks = {
        port["id"]: port for port in grc_blocks[device['id']]["outputs"]
        if port["dtype"] == "message"}

    # Rename the device block to the reserved _device_ name, in the
    # connection list as well as in the block itself.
    for connection in grc["connections"]:
        if connection[0] == device["name"]:
            connection[0] = "_device_"
        if connection[2] == device["name"]:
            connection[2] = "_device_"
    device["name"] = "_device_"

    # Connections originating from a device clock are clock domain
    # connections; everything else is a regular data/io connection.
    (clk_connections, connections) = split(
        grc["connections"], lambda con:
        con[0] == device["name"] and con[1] in device_clocks)

    def _as_connection(con):
        # grc connections are 4-tuples (src block, src port, dst block, dst port)
        return {"srcblk":  con[0],
                "srcport": con[1],
                "dstblk":  con[2],
                "dstport": con[3]}

    result["connections"] = [_as_connection(con) for con in connections]
    result["clk_domains"] = [_as_connection(con) for con in clk_connections]

    return result
569
570
def collect_module_paths(config_path, include_paths):
    """
    Create a list of directories that contain noc block configuration files.

    :param config_path: root path holding the core configuration files
    :param include_paths: additional OOT module roots to search
    :return: list of noc block directories
    """
    # The built-in rfnoc blocks come first, then one 'blocks' folder per
    # OOT include path.
    block_dirs = [os.path.join(config_path, 'rfnoc', 'blocks')]
    for include_path in include_paths:
        block_dirs.append(os.path.join(include_path, 'blocks'))
    return block_dirs
581
582
def read_block_descriptions(signatures, *paths):
    """
    Recursively search all paths for noc block definitions.

    Only yml files whose content declares the 'rfnoc_modtool_args' schema
    are picked up; other yml files are silently ignored.

    :param signatures: signature passed to IOConfig initialization
    :param paths: paths to be searched
    :return: dictionary of noc blocks. Key is filename of the block, value
             is an IOConfig object
    """
    blocks = OrderedDict()
    for path in paths:
        # os.walk already descends into all subdirectories, so a single
        # walk per path covers the whole tree. (An earlier version also
        # recursed manually per subdirectory, but that call dropped the
        # 'signatures' argument and therefore never found anything.)
        for root, _dirs, files in os.walk(path):
            for filename in files:
                if re.match(r".*\.yml$", filename):
                    with open(os.path.join(root, filename)) as stream:
                        block = ordered_load(stream)
                        if "schema" in block and \
                                block["schema"] == "rfnoc_modtool_args":
                            logging.info("Adding block description from "
                                         "%s (%s).", filename, os.path.normpath(root))
                            blocks[filename] = IOConfig(block, signatures)
    return blocks
607
608
def write_edges(config, destination):
    """
    Write the edges description file. The file is a simple text file. Each
    line contains 8 hexadecimal digits.
    First line is the number of following entries.
    Starting with the second line each line describes a port to port connection
    The 32 bit value has 16 bit for each node where the node is represented by
    10 bit for the block number and 6 bit for the port number.
    :param config: ImageBuilderConfig derived from script parameter
    :param destination: folder to write the file (next to device top level files
    :return: None
    """
    logging.info("Writing static routing table to %s", destination)
    with open(destination, "w") as stream:
        stream.write("%08X\n" % len(config.block_con))
        for connection in config.block_con:
            if connection["srcblk"] in config.stream_endpoints:
                sep = config.stream_endpoints[connection["srcblk"]]
                # \d+ (not \d): endpoints may have 10 or more data ports,
                # and "out12" must not silently parse as port 1.
                index_match = re.match(r"out(\d+)", connection["srcport"])
                if not index_match:
                    logging.error("Port %s is invalid on endpoint %s",
                                  connection["srcport"], connection["srcblk"])
                    # Abort: continuing would crash on index_match.group()
                    sys.exit(1)
                port_index = int(index_match.group(1))
                # Verify index < num_data_o
                if port_index >= sep["num_data_o"]:
                    logging.error("Port %s exceeds num_data_o for endpoint %s",
                                  connection["srcport"], connection["srcblk"])
                    # Abort instead of writing a corrupt routing table
                    sys.exit(1)
                src = (sep["index"], port_index)
            else:
                key = (connection["srcblk"], connection["srcport"], "output")
                src = (config.noc_blocks[connection["srcblk"]]["index"],
                       config.block_ports[key]["index"])
            if connection["dstblk"] in config.stream_endpoints:
                sep = config.stream_endpoints[connection["dstblk"]]
                index_match = re.match(r"in(\d+)", connection["dstport"])
                if not index_match:
                    logging.error("Port %s is invalid on endpoint %s",
                                  connection["dstport"], connection["dstblk"])
                    sys.exit(1)
                # Verify index < num_data_i
                port_index = int(index_match.group(1))
                if port_index >= sep["num_data_i"]:
                    logging.error("Port %s exceeds num_data_i for endpoint %s",
                                  connection["dstport"], connection["dstblk"])
                    sys.exit(1)
                dst = (sep["index"], port_index)
            else:
                key = (connection["dstblk"], connection["dstport"], "input")
                dst = (config.noc_blocks[connection["dstblk"]]["index"],
                       config.block_ports[key]["index"])
            logging.debug("%s-%s (%d,%d) => %s-%s (%d,%d)",
                          connection["srcblk"], connection["srcport"],
                          src[0], src[1],
                          connection["dstblk"], connection["dstport"],
                          dst[0], dst[1])
            # Pack both nodes into one 32 bit word: (block << 6 | port) per
            # node, source in the upper 16 bit, destination in the lower.
            stream.write("%08x\n" %
                         ((((src[0] << 6) | src[1]) << 16) |
                          ((dst[0] << 6) | dst[1])))
665
666
def write_verilog(config, destination, source, source_hash):
    """
    Generates rfnoc_image_core.v file for the device.

    Mako templates from local template folder are used to generate the image
    core file. The template engine does not do any computation on the script
    parameter. Instead all necessary dependencies are resolved in this script
    to enforce early failure which is easier to track than errors in the
    template engine.
    :param config: ImageBuilderConfig derived from script parameter
    :param destination: Filepath to write to
    :param source: source file (YAML/GRC) the image core is generated from
    :param source_hash: hash of the source file, passed into the template
    :return: None
    """
    template_dir = os.path.join(os.path.dirname(__file__), "templates")
    lookup = mako.lookup.TemplateLookup(directories=[template_dir])
    tpl_filename = os.path.join(template_dir, "rfnoc_image_core.v.mako")
    tpl = mako.template.Template(
        filename=tpl_filename,
        lookup=lookup,
        strict_undefined=True)

    try:
        block = tpl.render(**{
            "config": config,
            "source": source,
            "source_hash": source_hash,
            })
    # Catch Exception instead of a bare except so SystemExit and
    # KeyboardInterrupt still propagate; template errors are printed via
    # mako's error template for readable tracebacks.
    except Exception:
        print(exceptions.text_error_template().render())
        sys.exit(1)

    logging.info("Writing image core to %s", destination)
    with open(destination, "w") as image_core_file:
        image_core_file.write(block)
701
702
def write_build_env():
    """
    Placeholder: intentionally does nothing yet.

    # TODO update Makefile entries according to used blocks
    :return: None
    """
708
709
def build(fpga_path, device, image_core_path, edge_file, **args):
    """
    Call FPGA toolchain to actually build the image

    :param fpga_path: A path that holds the FPGA IP sources.
    :param device: The device to build for.
    :param image_core_path: Path to the generated image core Verilog file.
    :param edge_file: Path to the generated static router table file.
    :param **args: Additional options
                   target: The target to build (leave empty for default).
                   clean_all: passed to Makefile
                   GUI: passed to Makefile
                   source: The source of the build (YAML or GRC file path)
                   include_paths: List of paths to OOT modules
                   extra_makefile_srcs: An additional list of paths to modules
                   that don't follow the OOT module layout. These paths must
                   point directly to a Makefile.srcs file.
    :return: exit value of build process
    """
    # Remember the *caller's* working directory so we can restore it later
    # (the previous os.path.dirname(__file__) restored the module dir instead).
    cwd = os.getcwd()
    build_dir = os.path.join(
        get_top_path(os.path.abspath(fpga_path)), target_dir(device))
    if not os.path.isdir(build_dir):
        logging.error("Not a valid directory: %s", build_dir)
        return 1
    # Every OOT include path is expected to provide fpga/Makefile.srcs
    makefile_src_paths = [
        os.path.join(
            os.path.abspath(os.path.normpath(x)),
            os.path.join('fpga', 'Makefile.srcs'))
        for x in args.get("include_paths", [])
    ] + args.get("extra_makefile_srcs", [])
    logging.debug("Temporarily changing working directory to %s", build_dir)
    os.chdir(build_dir)
    make_cmd = ". ./setupenv.sh "
    if args.get("clean_all"):
        make_cmd = make_cmd + "&& make cleanall "
    # BUGFIX: default to None (not "") so default_target() can fall back to
    # the per-device default make target when no target was requested.
    target = args.get("target")
    make_cmd = make_cmd + "&& make " + default_target(device, target)
    make_cmd += " IMAGE_CORE={} EDGE_FILE={}".format(image_core_path,
                                                     edge_file)
    if makefile_src_paths:
        make_cmd += " RFNOC_OOT_MAKEFILE_SRCS=" + "\\ ".join(makefile_src_paths)
    if args.get("GUI"):
        make_cmd = make_cmd + " GUI=1"
    logging.info("Launching build with the following settings:")
    logging.info(" * Build Directory: %s", build_dir)
    logging.info(" * Target: %s", target)
    logging.info(" * Image Core File: %s", image_core_path)
    logging.info(" * Edge Table File: %s", edge_file)
    # Wrap it into a bash call:
    make_cmd = '{bash} -c "{cmd}"'.format(bash=BASH_EXECUTABLE, cmd=make_cmd)
    logging.debug("Executing the following command: %s", make_cmd)
    try:
        ret_val = os.system(make_cmd)
    finally:
        # Restore the original working directory even if something raised.
        os.chdir(cwd)
    return ret_val
763
764
def target_dir(device):
    """
    Target directory derived from chosen device
    :param device: device to build for
    :return: target directory (relative path)
    """
    try:
        return DEVICE_DIR_MAP[device.lower()]
    except KeyError:
        logging.error("Unsupported device %s. Supported devices are %s",
                      device, DEVICE_DIR_MAP.keys())
        sys.exit(1)
776
def default_target(device, target):
    """
    Return the requested build target, or — when no target was specified —
    the default build target for the given device.
    """
    if target is not None:
        return target
    return DEVICE_DEFAULTTARGET_MAP.get(device.lower())
785
def get_top_path(fpga_root):
    """
    Return the directory containing the FPGA top-level sources, i.e. the
    usrp3/top subdirectory below the given FPGA repository root.
    """
    return os.path.join(fpga_root, USRP3_TOP_DIR)
791
def get_core_config_path(config_path):
    """
    Return the directory holding the core configuration YAML files
    (the rfnoc/core subdirectory of the given configuration path).
    """
    return os.path.join(config_path, RFNOC_CORE_DIR)
797
def generate_image_core_path(output_path, device, source):
    """
    Creates the path where the image core file gets to be stored.

    output_path: If not None, this is returned unchanged
    device: Device type string, used to generate the default file name
    source: Otherwise, the directory of this file is combined with a
            default file name
    """
    if output_path is None:
        source_dir = os.path.dirname(
            os.path.abspath(os.path.normpath(source)))
        return os.path.join(
            source_dir, "{}_rfnoc_image_core.v".format(device))
    return output_path
810
def generate_edge_file_path(output_path, device, source):
    """
    Creates a valid path for the edge file to get stored.

    output_path: If not None, this is returned unchanged
    device: Device type string, used to generate the default file name
    source: Otherwise, the directory of this file is combined with a
            default file name
    """
    if output_path is None:
        base_dir = os.path.dirname(
            os.path.abspath(os.path.normpath(source)))
        return os.path.join(
            base_dir, "{}_static_router.hex".format(device))
    return output_path
823
824
def build_image(config, fpga_path, config_path, device, **args):
    """
    Generate image dependent Verilog code and trigger Xilinx toolchain, if
    requested.

    :param config: A dictionary containing the image configuration options.
                   This must obey the rfnoc_imagebuilder_args schema.
    :param fpga_path: A path that holds the FPGA IP sources.
    :param device: The device to build for.
    :param **args: Additional options including
                   target: The target to build (leave empty for default).
                   generate_only: Do not build the code after generation.
                   clean_all: passed to Makefile
                   GUI: passed to Makefile
                   include_paths: Paths to additional blocks
    :return: Exit result of build process or 0 if generate-only is given.
    """
    logging.info("Selected device %s", device)
    source = args.get('source')
    image_core_path = generate_image_core_path(
        args.get('output_path'), device, source)
    edge_file = generate_edge_file_path(
        args.get('router_hex_path'), device, source)

    logging.debug("Image core output file: %s", image_core_path)
    logging.debug("Edge output file: %s", edge_file)

    # Load the device-specific core configuration and the IO signatures.
    core_config_path = get_core_config_path(config_path)
    signatures_conf = io_signatures(core_config_path)
    device_conf = IOConfig(
        device_config(core_config_path, device), signatures_conf)

    # Find and parse all available block descriptors.
    block_paths = collect_module_paths(
        config_path, args.get('include_paths', []))
    logging.debug("Looking for block descriptors in:")
    for block_path in block_paths:
        logging.debug("    %s", os.path.normpath(block_path))
    blocks = read_block_descriptions(signatures_conf, *block_paths)

    builder_conf = ImageBuilderConfig(config, blocks, device_conf)

    # Emit the generated artifacts.
    write_edges(builder_conf, edge_file)
    write_verilog(
        builder_conf,
        image_core_path,
        source=source,
        source_hash=args.get('source_hash'))
    write_build_env()

    if args.get("generate_only"):
        logging.info("Skip build (generate only option given)")
        return 0

    # Check if the YAML files require additional Makefile.srcs
    extra_makefile_srcs = set()
    for block_info in builder_conf.noc_blocks.values():
        block_desc = blocks[block_info['block_desc']]
        if hasattr(block_desc, 'makefile_srcs'):
            rendered_path = mako.template.Template(
                block_desc.makefile_srcs).render(**{
                    "fpga_lib_dir": os.path.join(
                        fpga_path, USRP3_LIB_RFNOC_DIR),
                })
            if rendered_path not in extra_makefile_srcs:
                logging.debug(
                    "Adding additional Makefile.srcs path: %s", rendered_path)
                extra_makefile_srcs.add(rendered_path)
    args['extra_makefile_srcs'] = list(extra_makefile_srcs)
    return build(fpga_path, device, image_core_path, edge_file, **args)
891