module gfx.graal.memory;

import gfx.core.rc;
import gfx.graal.device;

/// Properties of memory allocated by the device
enum MemProps {
    none = 0x00,
    /// Memory resides on the device.
    deviceLocal = 0x01,
    /// Memory is visible from the CPU and therefore mappable.
    hostVisible = 0x02,
    /// Memory seen from the CPU is coherent with device memory.
    hostCoherent = 0x04,
    /// Memory is cached on the host. Reads and writes are very fast, but a flush
    /// operation is necessary after writing.
    hostCached = 0x08,
    /// Memory is lazily allocated by the device.
    lazilyAllocated = 0x10,
}

/// Structure representing all heaps and types of memory from a device.
/// A device can have different heaps, each supporting different types.
struct MemoryProperties {
    MemoryHeap[] heaps;
    MemoryType[] types;
}

/// A memory heap exposed by a device.
struct MemoryHeap {
    /// size of the heap in bytes
    size_t size;
    /// whether the heap resides on the device
    bool deviceLocal;
}

/// A memory type supported by a device.
struct MemoryType {
    /// properties of memory allocated from this type
    MemProps props;
    /// index of the backing heap in MemoryProperties.heaps
    uint heapIndex;
}

/// Memory requirements of a resource.
struct MemoryRequirements {
    /// minimal size required for the allocation
    size_t size;
    /// alignment required when binding the resource to a memory with offset
    size_t alignment;
    /// mask where each bit is set if the corresponding memory type is supported.
    /// For example if the resource supports types 0 and 2 from MemoryProperties,
    /// memTypeMask will be 0b00000101
    uint memTypeMask;
}

/// Holds a memory mapping to host visible memory.
/// Memory is unmapped when the object goes out of scope.
/// It also acts as a void[] and allows getting a typed slice view of the data.
struct MemoryMap
{
    import std.traits : isDynamicArray;

    private DeviceMemory dm;
    private size_t offset;
    private void[] data;
    private void delegate() unmap;

    package(gfx) this(DeviceMemory dm, in size_t offset, void[] data, void delegate() unmap)
    {
        this.dm = dm;
        this.offset = offset;
        this.data = data;
        this.unmap = unmap;
    }

    @disable this(this);

    ~this()
    {
        // must also handle destruction of MemoryMap.init, whose unmap delegate is null
        if (unmap) unmap();
    }

    /// Add this mapping to the given MappedMemorySet.
    void addToSet(ref MappedMemorySet set)
    {
        set.addMM(MappedMemorySet.MM(dm, offset, data.length));
    }

    /// Get a typed view on the memory map that supports slice and indexing operations.
    /// Params:
    ///     offset = the offset to the requested memory in bytes
    ///     count = the number of elements (of T's element type) in the view
    /// Warning: offset and count are not in the same units.
    /// This is necessary in order to allow a memory block to hold several arrays
    /// of different element types.
    auto view(T)(in size_t offset=0, in size_t count=size_t.max)
    if (isDynamicArray!T)
    {
        alias Elem = typeof(T.init[0]);
        const len = count == size_t.max ? data.length-offset : count*Elem.sizeof;
        return MemoryMapArrayView!Elem(cast(T)(data[offset .. offset+len]));
    }

    size_t opDollar() {
        return data.length;
    }

    size_t[2] opSlice(size_t beg, size_t end) {
        return [beg, end];
    }

    void[] opIndex() {
        return data;
    }
    void[] opIndex(in size_t[2] slice) {
        return data[ slice[0] .. slice[1] ];
    }

    void opIndexAssign(in void[] vals) {
        data[] = vals;
    }
    void opIndexAssign(in void[] vals, size_t[2] slice) {
        data[slice[0] .. slice[1]] = vals;
    }
}
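// Illustrative sketch (not part of the API): mapping a host-visible DeviceMemory
// and writing to it through a typed view. `mem` is assumed to be a DeviceMemory
// allocated with MemProps.hostVisible; how it is obtained is not shown in this module.
//
//     {
//         auto mm = mem.map();                    // map the whole memory block
//         auto floats = mm.view!(float[])(0, 16); // typed view on the first 16 floats
//         floats[0] = 1f;                         // element access
//         floats[1 .. $] = 0f;                    // slice assignment
//     } // mm goes out of scope here and the memory is unmapped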
private struct MemoryMapArrayView(T)
{
    private T[] data;

    @property size_t opDollar(size_t dim : 0)() {
        return data.length;
    }

    @property size_t[2] opSlice(size_t dim : 0)(size_t beg, size_t end) {
        return [beg, end];
    }

    T[] opIndex() {
        return data;
    }
    T[] opIndex(in size_t[2] slice) {
        return data[ slice[0] .. slice[1] ];
    }

    void opIndexAssign(in T[] vals) {
        data[] = vals;
    }
    void opIndexAssign(in T[] vals, size_t[2] slice) {
        data[slice[0] .. slice[1]] = vals;
    }

    static if (!is(T == void)) {
        T opIndex(size_t index) {
            return data[index];
        }
        void opIndexAssign(in T val, size_t ind) {
            data[ind] = val;
        }
        void opIndexAssign(in T val, size_t[2] slice) {
            data[slice[0] .. slice[1]] = val;
        }
    }
}


interface DeviceMemory : IAtomicRefCounted
{
    /// Get the parent device
    @property Device device();

    /// Index of the memory type (in MemoryProperties.types) this memory was allocated from.
    @property uint typeIndex();
    /// Properties of this memory.
    @property MemProps props();
    /// Size of this memory block in bytes.
    @property size_t size();

    /// Map device memory to host visible memory.
    /// Params:
    ///     offset = the offset to the requested memory in bytes
    ///     size = the size of the mapping in bytes.
    void* mapRaw(in size_t offset, in size_t size);
    void unmapRaw();

    /// Produce a scoped memory map.
    /// The memory will be unmapped when the object goes out of scope.
    /// The result is an untyped memory holder. In order to access the memory,
    /// call view with the right type parameter.
    /// Params:
    ///     offset = the offset to the requested memory in bytes
    ///     sz = the size of the mapping in bytes (the whole memory by default)
    final auto map(in size_t offset=0, in size_t sz=size_t.max)
    {
        const size = sz==size_t.max ? this.size : sz;
        auto data = mapRaw(offset, size)[0 .. size];
        return MemoryMap(this, offset, data, &unmapRaw);
    }
}


/// cast a typed slice into a blob of bytes
/// (same representation; no copy is made)
void[] untypeSlice(T)(T[] slice) if(!is(T == const))
{
    if (slice.length == 0) return [];
    auto loc = cast(void*)slice.ptr;
    return loc[0 .. slice.length*T.sizeof];
}

/// ditto
const(void)[] untypeSlice(T)(const(T)[] slice)
{
    if (slice.length == 0) return [];
    auto loc = cast(const(void)*)slice.ptr;
    return loc[0 .. slice.length*T.sizeof];
}

/// cast a blob of bytes into a typed slice
T[] retypeSlice(T)(void[] slice) if (!is(T == const))
in {
    assert (!slice.length || (slice.length % T.sizeof) == 0);
}
body {
    if(slice.length == 0) return [];
    auto loc = cast(T*)slice.ptr;
    return loc[0 .. slice.length / T.sizeof];
}

/// ditto
const(T)[] retypeSlice(T)(const(void)[] slice)
in {
    assert (!slice.length || (slice.length % T.sizeof) == 0);
}
body {
    if(slice.length == 0) return [];
    auto loc = cast(const(T)*)slice.ptr;
    return loc[0 .. slice.length / T.sizeof];
}
unittest {
    int[] slice = [1, 2, 3, 4];
    auto bytes = cast(ubyte[])untypeSlice(slice);
    auto ints = retypeSlice!int(bytes);
    assert(bytes.length == 16);
    version(LittleEndian) {
        assert(bytes == [
            1, 0, 0, 0,
            2, 0, 0, 0,
            3, 0, 0, 0,
            4, 0, 0, 0,
        ]);
    }
    else {
        assert(bytes == [
            0, 0, 0, 1,
            0, 0, 0, 2,
            0, 0, 0, 3,
            0, 0, 0, 4,
        ]);
    }
    assert(ints.length == 4);
    assert(ints == slice);
    assert(ints.ptr == slice.ptr);
}

/// cast an array of typed slices into an array of blobs of bytes.
/// An allocation is performed for the top container (the array of arrays),
/// but the underlying data is referenced without copy.
void[][] untypeSlices(T)(T[][] slices) if (!is(T == const)) {
    void[][] res = new void[][slices.length];
    foreach(i, s; slices) {
        res[i] = untypeSlice(s);
    }
    return res;
}

/// ditto
const(void)[][] untypeSlices(T)(const(T)[][] slices) {
    const(void)[][] res = new const(void)[][slices.length];
    foreach(i, s; slices) {
        res[i] = untypeSlice(s);
    }
    return res;
}

unittest {
    int[][] texels = [ [1, 2], [3, 4] ];
    auto bytes = cast(ubyte[][])untypeSlices(texels);
    assert(bytes.length == 2);
    assert(bytes[0].length == 8);
    assert(bytes[1].length == 8);
    version(LittleEndian) {
        assert(bytes == [
            [ 1, 0, 0, 0,
              2, 0, 0, 0, ],
            [ 3, 0, 0, 0,
              4, 0, 0, 0, ],
        ]);
    }
    else {
        assert(bytes == [
            [ 0, 0, 0, 1,
              0, 0, 0, 2, ],
            [ 0, 0, 0, 3,
              0, 0, 0, 4, ],
        ]);
    }
}
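// Illustrative sketch (not part of the API): uploading a typed array through a
// memory map and registering the mapping for a later flush. `mem` is assumed to
// be a host-visible DeviceMemory; the device call that would consume the
// MappedMemorySet (e.g. a flush) is defined outside of this module.
//
//     {
//         float[] vertices = [ 0f, 1f, 2f, 3f ];
//         auto mm = mem.map(0, vertices.length * float.sizeof);
//         mm[] = untypeSlice(vertices);   // byte-wise copy into the mapped range
//
//         MappedMemorySet set;
//         mm.addToSet(set);               // collect the mapping, e.g. to flush it
//                                         // when the memory is not hostCoherent
//     } // unmapped when mm goes out of scope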