/*
 * Copyright (C) 2018-2021 Intel Corporation
 *
 * SPDX-License-Identifier: MIT
 *
 */

#pragma once
#include "shared/source/helpers/aligned_memory.h"
#include "shared/source/helpers/constants.h"
#include "shared/source/helpers/debug_helpers.h"
#include "shared/source/memory_manager/memory_banks.h"

#include <atomic>
#include <mutex>

namespace NEO {

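// Monotonic bump allocator for simulated physical page addresses: reservations are carved from a
// single system-memory pool starting at initialPageAddress and are never freed or reused.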
class PhysicalAddressAllocator {
  public:
    PhysicalAddressAllocator() {
        mainAllocator.store(initialPageAddress);
    }

    virtual ~PhysicalAddressAllocator() = default;

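    // Convenience wrappers: reserve a single 4 KB or 64 KB page, aligned to its own size.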
    uint64_t reserve4kPage(uint32_t memoryBank) {
        return reservePage(memoryBank, MemoryConstants::pageSize, MemoryConstants::pageSize);
    }

    uint64_t reserve64kPage(uint32_t memoryBank) {
        return reservePage(memoryBank, MemoryConstants::pageSize64k, MemoryConstants::pageSize64k);
    }

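    // Reserves pageSize bytes of simulated physical memory: the allocator is first bumped up to the
    // requested alignment, then advanced by pageSize; the aligned start address is returned.
    // The base implementation only serves MemoryBanks::MainBank.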
    virtual uint64_t reservePage(uint32_t memoryBank, size_t pageSize, size_t alignment) {
        UNRECOVERABLE_IF(memoryBank != MemoryBanks::MainBank);

        std::unique_lock<std::mutex> lock(pageReserveMutex);

        auto currentAddress = mainAllocator.load();
        auto alignmentSize = alignUp(currentAddress, alignment) - currentAddress;
        mainAllocator += alignmentSize;
        return mainAllocator.fetch_add(pageSize);
    }

  protected:
    std::atomic<uint64_t> mainAllocator;
    std::mutex pageReserveMutex;
    const uint64_t initialPageAddress = 0x1000;
};

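// Per-bank variant: bank 0 starts at initialPageAddress, and every other local-memory bank owns a
// contiguous memoryBankSize-sized range beginning at bankIndex * memoryBankSize.
//
// Illustrative usage sketch (FamilyType stands in for a concrete GfxFamily; the two 2 GB banks are
// example values only):
//   PhysicalAddressAllocatorHw<FamilyType> allocator(2 * MemoryConstants::gigaByte, 2u);
//   auto systemPage = allocator.reserve4kPage(MemoryBanks::MainBank);
//   auto localPage = allocator.reserve64kPage(MemoryBanks::getBankForLocalMemory(0));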
template <typename GfxFamily>
class PhysicalAddressAllocatorHw : public PhysicalAddressAllocator {

  public:
    PhysicalAddressAllocatorHw(uint64_t bankSize, uint32_t numOfBanks) : memoryBankSize(bankSize), numberOfBanks(numOfBanks) {
        if (numberOfBanks > 0) {
            bankAllocators = new std::atomic<uint64_t>[numberOfBanks];
            bankAllocators[0].store(initialPageAddress);

            for (uint32_t i = 1; i < numberOfBanks; i++) {
                bankAllocators[i].store(i * memoryBankSize);
            }
        }
    }

    ~PhysicalAddressAllocatorHw() override {
        if (bankAllocators) {
            delete[] bankAllocators; // array form: bankAllocators was allocated with new[]
        }
    }

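    // MemoryBanks::MainBank (or a configuration with no local-memory banks) falls back to the base
    // system-memory allocator; every local-memory bank advances its own per-bank allocator and must
    // stay within its memoryBankSize-sized range.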
    uint64_t reservePage(uint32_t memoryBank, size_t pageSize, size_t alignment) override {
        std::unique_lock<std::mutex> lock(pageReserveMutex);

        if (memoryBank == MemoryBanks::MainBank || numberOfBanks == 0) {
            auto currentAddress = mainAllocator.load();
            auto alignmentSize = alignUp(currentAddress, alignment) - currentAddress;
            mainAllocator += alignmentSize;
            return mainAllocator.fetch_add(pageSize);
        }
        UNRECOVERABLE_IF(memoryBank > numberOfBanks);

        auto index = memoryBank - MemoryBanks::getBankForLocalMemory(0);

        auto currentAddress = bankAllocators[index].load();
        auto alignmentSize = alignUp(currentAddress, alignment) - currentAddress;
        bankAllocators[index] += alignmentSize;

        auto address = bankAllocators[index].fetch_add(pageSize);

        UNRECOVERABLE_IF(address > ((index + 1) * memoryBankSize));

        return address;
    }


    uint64_t getBankSize() { return memoryBankSize; }
    uint32_t getNumberOfBanks() { return numberOfBanks; }

  protected:
    std::atomic<uint64_t> *bankAllocators = nullptr;
    uint64_t memoryBankSize = 0;
    uint32_t numberOfBanks = 0;
};

} // namespace NEO