1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2018 Arm Limited. All rights reserved.
4  *
5  * Coresight Address Translation Unit support
6  *
7  * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
8  */
9 
10 #include <linux/amba/bus.h>
11 #include <linux/device.h>
12 #include <linux/dma-mapping.h>
13 #include <linux/io.h>
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 
17 #include "coresight-catu.h"
18 #include "coresight-priv.h"
19 #include "coresight-tmc.h"
20 
/* Retrieve the catu_drvdata: it is stored in the parent of the csdev */
#define csdev_to_catu_drvdata(csdev)	\
	dev_get_drvdata(csdev->dev.parent)

/* Verbose output for CATU table contents */
#ifdef CATU_DEBUG
#define catu_dbg(x, ...) dev_dbg(x, __VA_ARGS__)
#else
#define catu_dbg(x, ...) do {} while (0)
#endif
30 
31 DEFINE_CORESIGHT_DEVLIST(catu_devs, "catu");
32 
/*
 * struct catu_etr_buf - Private data for an ETR buffer backed by the CATU.
 * @catu_table:	SG table used as the CATU translation table.
 * @sladdr:	DMA address of the first table page (catu_table->table_daddr),
 *		programmed into CATU_SLADDR{LO,HI} when enabling the CATU.
 */
struct catu_etr_buf {
	struct tmc_sg_table *catu_table;
	dma_addr_t sladdr;
};
37 
38 /*
39  * CATU uses a page size of 4KB for page tables as well as data pages.
40  * Each 64bit entry in the table has the following format.
41  *
42  *	63			12	1  0
43  *	------------------------------------
44  *	|	 Address [63-12] | SBZ	| V|
45  *	------------------------------------
46  *
47  * Where bit[0] V indicates if the address is valid or not.
 * Each 4K table page has up to 256 data page pointers, taking up to 2K
49  * size. There are two Link pointers, pointing to the previous and next
50  * table pages respectively at the end of the 4K page. (i.e, entry 510
51  * and 511).
52  *  E.g, a table of two pages could look like :
53  *
54  *                 Table Page 0               Table Page 1
55  * SLADDR ===> x------------------x  x--> x-----------------x
56  * INADDR    ->|  Page 0      | V |  |    | Page 256    | V | <- INADDR+1M
57  *             |------------------|  |    |-----------------|
58  * INADDR+4K ->|  Page 1      | V |  |    |                 |
59  *             |------------------|  |    |-----------------|
60  *             |  Page 2      | V |  |    |                 |
61  *             |------------------|  |    |-----------------|
62  *             |   ...        | V |  |    |    ...          |
63  *             |------------------|  |    |-----------------|
64  * INADDR+1020K|  Page 255    | V |  |    |   Page 511  | V |
65  * SLADDR+2K==>|------------------|  |    |-----------------|
66  *             |  UNUSED      |   |  |    |                 |
67  *             |------------------|  |    |                 |
68  *             |  UNUSED      |   |  |    |                 |
69  *             |------------------|  |    |                 |
70  *             |    ...       |   |  |    |                 |
71  *             |------------------|  |    |-----------------|
72  *             |   IGNORED    | 0 |  |    | Table Page 0| 1 |
73  *             |------------------|  |    |-----------------|
74  *             |  Table Page 1| 1 |--x    | IGNORED     | 0 |
75  *             x------------------x       x-----------------x
76  * SLADDR+4K==>
77  *
78  * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
79  * must be aligned to 1MB (the size addressable by a single page table).
80  * The CATU maps INADDR{LO:HI} to the first page in the table pointed
81  * to by SLADDR{LO:HI} and so on.
82  *
83  */
/* A CATU table entry: bits[63:12] page address, bit[0] valid (see above) */
typedef u64 cate_t;

#define CATU_PAGE_SHIFT		12
#define CATU_PAGE_SIZE		(1UL << CATU_PAGE_SHIFT)
/* Number of 4K CATU pages that fit in one system page */
#define CATU_PAGES_PER_SYSPAGE	(PAGE_SIZE / CATU_PAGE_SIZE)

/* Page pointers are only allocated in the first 2K half */
#define CATU_PTRS_PER_PAGE	((CATU_PAGE_SIZE >> 1) / sizeof(cate_t))
#define CATU_PTRS_PER_SYSPAGE	(CATU_PAGES_PER_SYSPAGE * CATU_PTRS_PER_PAGE)
/* Indices of the prev/next table link entries (510 and 511) */
#define CATU_LINK_PREV		((CATU_PAGE_SIZE / sizeof(cate_t)) - 2)
#define CATU_LINK_NEXT		((CATU_PAGE_SIZE / sizeof(cate_t)) - 1)

#define CATU_ADDR_SHIFT		12
#define CATU_ADDR_MASK		~(((cate_t)1 << CATU_ADDR_SHIFT) - 1)
#define CATU_ENTRY_VALID	((cate_t)0x1)
/* Build a valid table entry from a (page aligned) DMA address */
#define CATU_VALID_ENTRY(addr) \
	(((cate_t)(addr) & CATU_ADDR_MASK) | CATU_ENTRY_VALID)
/* Extract the address bits from an entry, clearing the valid bit */
#define CATU_ENTRY_ADDR(entry)	((cate_t)(entry) & ~((cate_t)CATU_ENTRY_VALID))

/* CATU expects the INADDR to be aligned to 1M. */
#define CATU_DEFAULT_INADDR	(1ULL << 20)
105 
106 /*
107  * catu_get_table : Retrieve the table pointers for the given @offset
108  * within the buffer. The buffer is wrapped around to a valid offset.
109  *
110  * Returns : The CPU virtual address for the beginning of the table
111  * containing the data page pointer for @offset. If @daddrp is not NULL,
112  * @daddrp points the DMA address of the beginning of the table.
113  */
114 static inline cate_t *catu_get_table(struct tmc_sg_table *catu_table,
115 				     unsigned long offset,
116 				     dma_addr_t *daddrp)
117 {
118 	unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
119 	unsigned int table_nr, pg_idx, pg_offset;
120 	struct tmc_pages *table_pages = &catu_table->table_pages;
121 	void *ptr;
122 
123 	/* Make sure offset is within the range */
124 	offset %= buf_size;
125 
126 	/*
127 	 * Each table can address 1MB and a single kernel page can
128 	 * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
129 	 */
130 	table_nr = offset >> 20;
131 	/* Find the table page where the table_nr lies in */
132 	pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
133 	pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
134 	if (daddrp)
135 		*daddrp = table_pages->daddrs[pg_idx] + pg_offset;
136 	ptr = page_address(table_pages->pages[pg_idx]);
137 	return (cate_t *)((unsigned long)ptr + pg_offset);
138 }
139 
#ifdef CATU_DEBUG
/*
 * catu_dump_table : Dump the CATU table contents to the debug log,
 * one sub-table (4K table page, mapping 1MB) at a time, followed by
 * its prev/next link entries. Compiled in only with CATU_DEBUG.
 */
static void catu_dump_table(struct tmc_sg_table *catu_table)
{
	int i;
	cate_t *table;
	unsigned long table_end, buf_size, offset = 0;

	buf_size = tmc_sg_table_buf_size(catu_table);
	dev_dbg(catu_table->dev,
		"Dump table %p, tdaddr: %llx\n",
		catu_table, catu_table->table_daddr);

	while (offset < buf_size) {
		/* Each table page maps at most 1MB of the buffer */
		table_end = offset + SZ_1M < buf_size ?
			    offset + SZ_1M : buf_size;
		table = catu_get_table(catu_table, offset, NULL);
		for (i = 0; offset < table_end; i++, offset += CATU_PAGE_SIZE)
			dev_dbg(catu_table->dev, "%d: %llx\n", i, table[i]);
		dev_dbg(catu_table->dev, "Prev : %llx, Next: %llx\n",
			table[CATU_LINK_PREV], table[CATU_LINK_NEXT]);
		dev_dbg(catu_table->dev, "== End of sub-table ===");
	}
	dev_dbg(catu_table->dev, "== End of Table ===");
}

#else
/* No-op stub when CATU table debugging is disabled */
static inline void catu_dump_table(struct tmc_sg_table *catu_table)
{
}
#endif
170 
171 static inline cate_t catu_make_entry(dma_addr_t addr)
172 {
173 	return addr ? CATU_VALID_ENTRY(addr) : 0;
174 }
175 
176 /*
177  * catu_populate_table : Populate the given CATU table.
178  * The table is always populated as a circular table.
179  * i.e, the "prev" link of the "first" table points to the "last"
180  * table and the "next" link of the "last" table points to the
181  * "first" table. The buffer should be made linear by calling
182  * catu_set_table().
183  */
184 static void
185 catu_populate_table(struct tmc_sg_table *catu_table)
186 {
187 	int i;
188 	int sys_pidx;	/* Index to current system data page */
189 	int catu_pidx;	/* Index of CATU page within the system data page */
190 	unsigned long offset, buf_size, table_end;
191 	dma_addr_t data_daddr;
192 	dma_addr_t prev_taddr, next_taddr, cur_taddr;
193 	cate_t *table_ptr, *next_table;
194 
195 	buf_size = tmc_sg_table_buf_size(catu_table);
196 	sys_pidx = catu_pidx = 0;
197 	offset = 0;
198 
199 	table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
200 	prev_taddr = 0;	/* Prev link for the first table */
201 
202 	while (offset < buf_size) {
203 		/*
204 		 * The @offset is always 1M aligned here and we have an
205 		 * empty table @table_ptr to fill. Each table can address
206 		 * upto 1MB data buffer. The last table may have fewer
207 		 * entries if the buffer size is not aligned.
208 		 */
209 		table_end = (offset + SZ_1M) < buf_size ?
210 			    (offset + SZ_1M) : buf_size;
211 		for (i = 0; offset < table_end;
212 		     i++, offset += CATU_PAGE_SIZE) {
213 
214 			data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
215 				     catu_pidx * CATU_PAGE_SIZE;
216 			catu_dbg(catu_table->dev,
217 				"[table %5ld:%03d] 0x%llx\n",
218 				(offset >> 20), i, data_daddr);
219 			table_ptr[i] = catu_make_entry(data_daddr);
220 			/* Move the pointers for data pages */
221 			catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
222 			if (catu_pidx == 0)
223 				sys_pidx++;
224 		}
225 
226 		/*
227 		 * If we have finished all the valid entries, fill the rest of
228 		 * the table (i.e, last table page) with invalid entries,
229 		 * to fail the lookups.
230 		 */
231 		if (offset == buf_size) {
232 			memset(&table_ptr[i], 0,
233 			       sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
234 			next_taddr = 0;
235 		} else {
236 			next_table = catu_get_table(catu_table,
237 						    offset, &next_taddr);
238 		}
239 
240 		table_ptr[CATU_LINK_PREV] = catu_make_entry(prev_taddr);
241 		table_ptr[CATU_LINK_NEXT] = catu_make_entry(next_taddr);
242 
243 		catu_dbg(catu_table->dev,
244 			"[table%5ld]: Cur: 0x%llx Prev: 0x%llx, Next: 0x%llx\n",
245 			(offset >> 20) - 1,  cur_taddr, prev_taddr, next_taddr);
246 
247 		/* Update the prev/next addresses */
248 		if (next_taddr) {
249 			prev_taddr = cur_taddr;
250 			cur_taddr = next_taddr;
251 			table_ptr = next_table;
252 		}
253 	}
254 
255 	/* Sync the table for device */
256 	tmc_sg_table_sync_table(catu_table);
257 }
258 
/*
 * catu_init_sg_table : Allocate and populate a CATU translation table
 * for a buffer of @size bytes on NUMA node @node. @pages is passed
 * through to tmc_alloc_sg_table() (presumably pre-allocated data pages
 * when non-NULL).
 *
 * Returns the populated tmc_sg_table on success, or an ERR_PTR().
 */
static struct tmc_sg_table *
catu_init_sg_table(struct device *catu_dev, int node,
		   ssize_t size, void **pages)
{
	int nr_tpages;
	struct tmc_sg_table *catu_table;

	/*
	 * Each table can address up to 1MB and we can have
	 * CATU_PAGES_PER_SYSPAGE tables in a system page.
	 */
	nr_tpages = DIV_ROUND_UP(size, SZ_1M) / CATU_PAGES_PER_SYSPAGE;
	catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
					size >> PAGE_SHIFT, pages);
	if (IS_ERR(catu_table))
		return catu_table;

	catu_populate_table(catu_table);
	dev_dbg(catu_dev,
		"Setup table %p, size %ldKB, %d table pages\n",
		catu_table, (unsigned long)size >> 10,  nr_tpages);
	catu_dump_table(catu_table);
	return catu_table;
}
283 
284 static void catu_free_etr_buf(struct etr_buf *etr_buf)
285 {
286 	struct catu_etr_buf *catu_buf;
287 
288 	if (!etr_buf || etr_buf->mode != ETR_MODE_CATU || !etr_buf->private)
289 		return;
290 
291 	catu_buf = etr_buf->private;
292 	tmc_free_sg_table(catu_buf->catu_table);
293 	kfree(catu_buf);
294 }
295 
296 static ssize_t catu_get_data_etr_buf(struct etr_buf *etr_buf, u64 offset,
297 				     size_t len, char **bufpp)
298 {
299 	struct catu_etr_buf *catu_buf = etr_buf->private;
300 
301 	return tmc_sg_table_get_data(catu_buf->catu_table, offset, len, bufpp);
302 }
303 
/*
 * catu_sync_etr_buf : Work out the valid data region from the ETR read
 * (@rrp) and write (@rwp) pointers and sync that region for CPU access.
 */
static void catu_sync_etr_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp)
{
	struct catu_etr_buf *catu_buf = etr_buf->private;
	struct tmc_sg_table *catu_table = catu_buf->catu_table;
	u64 r_offset, w_offset;

	/*
	 * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
	 * offsets within the trace buffer.
	 */
	r_offset = rrp - etr_buf->hwaddr;
	w_offset = rwp - etr_buf->hwaddr;

	if (!etr_buf->full) {
		etr_buf->len = w_offset - r_offset;
		/* Account for the write pointer having wrapped around */
		if (w_offset < r_offset)
			etr_buf->len += etr_buf->size;
	} else {
		/* Buffer is full: the entire buffer holds valid data */
		etr_buf->len = etr_buf->size;
	}

	etr_buf->offset = r_offset;
	/* Make the device-written data visible to the CPU */
	tmc_sg_table_sync_data_range(catu_table, r_offset, etr_buf->len);
}
328 
/*
 * catu_alloc_etr_buf : Allocate an ETR buffer backed by a CATU
 * translation table. On success the etr_buf is switched to
 * ETR_MODE_CATU and its hwaddr set to the fixed CATU input address.
 *
 * Returns 0 on success or a negative error code.
 */
static int catu_alloc_etr_buf(struct tmc_drvdata *tmc_drvdata,
			      struct etr_buf *etr_buf, int node, void **pages)
{
	struct coresight_device *csdev;
	struct tmc_sg_table *catu_table;
	struct catu_etr_buf *catu_buf;

	/* There must be a CATU device associated with this ETR */
	csdev = tmc_etr_get_catu_device(tmc_drvdata);
	if (!csdev)
		return -ENODEV;
	catu_buf = kzalloc(sizeof(*catu_buf), GFP_KERNEL);
	if (!catu_buf)
		return -ENOMEM;

	catu_table = catu_init_sg_table(&csdev->dev, node,
					etr_buf->size, pages);
	if (IS_ERR(catu_table)) {
		kfree(catu_buf);
		return PTR_ERR(catu_table);
	}

	etr_buf->mode = ETR_MODE_CATU;
	etr_buf->private = catu_buf;
	/* The ETR is programmed with the fixed address CATU translates */
	etr_buf->hwaddr = CATU_DEFAULT_INADDR;

	catu_buf->catu_table = catu_table;
	/* Get the table base address */
	catu_buf->sladdr = catu_table->table_daddr;

	return 0;
}
360 
/* ETR buffer operations for buffers backed by CATU translation */
static const struct etr_buf_operations etr_catu_buf_ops = {
	.alloc = catu_alloc_etr_buf,
	.free = catu_free_etr_buf,
	.sync = catu_sync_etr_buf,
	.get_data = catu_get_data_etr_buf,
};
367 
/* "mgmt" sysfs attributes exposing the raw CATU management registers */
static struct attribute *catu_mgmt_attrs[] = {
	coresight_simple_reg32(devid, CORESIGHT_DEVID),
	coresight_simple_reg32(control, CATU_CONTROL),
	coresight_simple_reg32(status, CATU_STATUS),
	coresight_simple_reg32(mode, CATU_MODE),
	coresight_simple_reg32(axictrl, CATU_AXICTRL),
	coresight_simple_reg32(irqen, CATU_IRQEN),
	coresight_simple_reg64(sladdr, CATU_SLADDRLO, CATU_SLADDRHI),
	coresight_simple_reg64(inaddr, CATU_INADDRLO, CATU_INADDRHI),
	NULL,
};

static const struct attribute_group catu_mgmt_group = {
	.attrs = catu_mgmt_attrs,
	.name = "mgmt",
};

/* Attribute groups published on the coresight device at registration */
static const struct attribute_group *catu_groups[] = {
	&catu_mgmt_group,
	NULL,
};
389 
390 
/*
 * catu_wait_for_ready : Wait for the READY bit in CATU_STATUS to be set.
 * Returns 0 when ready, non-zero on timeout (see callers).
 */
static inline int catu_wait_for_ready(struct catu_drvdata *drvdata)
{
	struct csdev_access *csa = &drvdata->csdev->access;

	return coresight_timeout(csa, CATU_STATUS, CATU_STATUS_READY, 1);
}
397 
/*
 * catu_enable_hw : Program and enable the CATU. If the session uses a
 * CATU backed ETR buffer (ETR_MODE_CATU), set up translate mode with
 * the table (SLADDR) and input (INADDR) addresses; otherwise enable
 * pass-through mode.
 *
 * Returns 0 on success, -EBUSY if the CATU is already enabled, or the
 * error from claiming the device.
 */
static int catu_enable_hw(struct catu_drvdata *drvdata, void *data)
{
	int rc;
	u32 control, mode;
	struct etr_buf *etr_buf = data;
	struct device *dev = &drvdata->csdev->dev;
	struct coresight_device *csdev = drvdata->csdev;

	/* The CATU should be ready before it is reprogrammed */
	if (catu_wait_for_ready(drvdata))
		dev_warn(dev, "Timeout while waiting for READY\n");

	control = catu_read_control(drvdata);
	if (control & BIT(CATU_CONTROL_ENABLE)) {
		dev_warn(dev, "CATU is already enabled\n");
		return -EBUSY;
	}

	/* Claim the device before touching the hardware */
	rc = coresight_claim_device_unlocked(csdev);
	if (rc)
		return rc;

	control |= BIT(CATU_CONTROL_ENABLE);

	if (etr_buf && etr_buf->mode == ETR_MODE_CATU) {
		struct catu_etr_buf *catu_buf = etr_buf->private;

		/* Translate mode: point the CATU at the SG table */
		mode = CATU_MODE_TRANSLATE;
		catu_write_axictrl(drvdata, CATU_OS_AXICTRL);
		catu_write_sladdr(drvdata, catu_buf->sladdr);
		catu_write_inaddr(drvdata, CATU_DEFAULT_INADDR);
	} else {
		/* No CATU buffer: let the trace pass straight through */
		mode = CATU_MODE_PASS_THROUGH;
		catu_write_sladdr(drvdata, 0);
		catu_write_inaddr(drvdata, 0);
	}

	/* Program IRQs (disabled) and mode before setting the enable bit */
	catu_write_irqen(drvdata, 0);
	catu_write_mode(drvdata, mode);
	catu_write_control(drvdata, control);
	dev_dbg(dev, "Enabled in %s mode\n",
		(mode == CATU_MODE_PASS_THROUGH) ?
		"Pass through" :
		"Translate");
	return 0;
}
443 
444 static int catu_enable(struct coresight_device *csdev, void *data)
445 {
446 	int rc;
447 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
448 
449 	CS_UNLOCK(catu_drvdata->base);
450 	rc = catu_enable_hw(catu_drvdata, data);
451 	CS_LOCK(catu_drvdata->base);
452 	return rc;
453 }
454 
/*
 * catu_disable_hw : Disable the CATU and release the device claim.
 * Returns 0 on success, or -EAGAIN if the CATU did not report READY
 * after being disabled.
 */
static int catu_disable_hw(struct catu_drvdata *drvdata)
{
	int rc = 0;
	struct device *dev = &drvdata->csdev->dev;
	struct coresight_device *csdev = drvdata->csdev;

	catu_write_control(drvdata, 0);
	coresight_disclaim_device_unlocked(csdev);
	/* Wait for the CATU to become READY again after disabling */
	if (catu_wait_for_ready(drvdata)) {
		dev_info(dev, "Timeout while waiting for READY\n");
		rc = -EAGAIN;
	}

	dev_dbg(dev, "Disabled\n");
	return rc;
}
471 
472 static int catu_disable(struct coresight_device *csdev, void *__unused)
473 {
474 	int rc;
475 	struct catu_drvdata *catu_drvdata = csdev_to_catu_drvdata(csdev);
476 
477 	CS_UNLOCK(catu_drvdata->base);
478 	rc = catu_disable_hw(catu_drvdata);
479 	CS_LOCK(catu_drvdata->base);
480 	return rc;
481 }
482 
/* CATU is a helper device, enabled/disabled on behalf of the ETR */
static const struct coresight_ops_helper catu_helper_ops = {
	.enable = catu_enable,
	.disable = catu_disable,
};

static const struct coresight_ops catu_ops = {
	.helper_ops = &catu_helper_ops,
};
491 
/*
 * catu_probe : AMBA probe for a CATU instance. Maps the register
 * resource, derives the DMA mask from the DEVID register and registers
 * the device with the coresight core as a helper device.
 */
static int catu_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret = 0;
	u32 dma_mask;
	struct catu_drvdata *drvdata;
	struct coresight_desc catu_desc;
	struct coresight_platform_data *pdata = NULL;
	struct device *dev = &adev->dev;
	void __iomem *base;

	catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
	if (!catu_desc.name)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata) {
		ret = -ENOMEM;
		goto out;
	}

	dev_set_drvdata(dev, drvdata);
	base = devm_ioremap_resource(dev, &adev->res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	/* Setup dma mask for the device: DEVID[5:0] advertises the width */
	dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
	case 56:
	case 64:
		break;
	default:
		/* Default to the 40bits as supported by TMC-ETR */
		dma_mask = 40;
	}
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
	if (ret)
		goto out;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	dev->platform_data = pdata;

	drvdata->base = base;
	catu_desc.access = CSDEV_ACCESS_IOMEM(base);
	catu_desc.pdata = pdata;
	catu_desc.dev = dev;
	catu_desc.groups = catu_groups;
	catu_desc.type = CORESIGHT_DEV_TYPE_HELPER;
	catu_desc.subtype.helper_subtype = CORESIGHT_DEV_SUBTYPE_HELPER_CATU;
	catu_desc.ops = &catu_ops;

	drvdata->csdev = coresight_register(&catu_desc);
	if (IS_ERR(drvdata->csdev))
		ret = PTR_ERR(drvdata->csdev);
	else
		/* Done probing; drop the runtime PM reference */
		pm_runtime_put(&adev->dev);
out:
	return ret;
}
562 
563 static void catu_remove(struct amba_device *adev)
564 {
565 	struct catu_drvdata *drvdata = dev_get_drvdata(&adev->dev);
566 
567 	coresight_unregister(drvdata->csdev);
568 }
569 
570 static struct amba_id catu_ids[] = {
571 	CS_AMBA_ID(0x000bb9ee),
572 	{},
573 };
574 
575 MODULE_DEVICE_TABLE(amba, catu_ids);
576 
/* AMBA driver glue; manual bind/unbind via sysfs is suppressed */
static struct amba_driver catu_driver = {
	.drv = {
		.name			= "coresight-catu",
		.owner			= THIS_MODULE,
		.suppress_bind_attrs	= true,
	},
	.probe				= catu_probe,
	.remove				= catu_remove,
	.id_table			= catu_ids,
};
587 
588 static int __init catu_init(void)
589 {
590 	int ret;
591 
592 	ret = amba_driver_register(&catu_driver);
593 	if (ret)
594 		pr_info("Error registering catu driver\n");
595 	tmc_etr_set_catu_ops(&etr_catu_buf_ops);
596 	return ret;
597 }
598 
static void __exit catu_exit(void)
{
	/* Remove the ETR ops hook before the driver goes away */
	tmc_etr_remove_catu_ops();
	amba_driver_unregister(&catu_driver);
}
604 
605 module_init(catu_init);
606 module_exit(catu_exit);
607 
608 MODULE_AUTHOR("Suzuki K Poulose <suzuki.poulose@arm.com>");
609 MODULE_DESCRIPTION("Arm CoreSight Address Translation Unit (CATU) Driver");
610 MODULE_LICENSE("GPL v2");
611