@@ -1,39 +1,15 @@
 package mem
 
-/*
-The Rollback Stack Allocator was designed for the test runner to be fast,
-able to grow, and respect the Tracking Allocator's requirement for
-individual frees. It is not overly concerned with fragmentation, however.
-
-It has support for expansion when configured with a block allocator and
-limited support for out-of-order frees.
-
-Allocation has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks.
-
-Allocation follows a first-fit strategy when there are multiple memory
-blocks.
-
-Freeing has constant-time best and usual case performance.
-At worst, it is linear according to the number of memory blocks and number
-of freed items preceding the last item in a block.
-
-Resizing has constant-time performance, if it's the last item in a block, or
-the new size is smaller. Naturally, this becomes linear-time if there are
-multiple blocks to search for the pointer's owning block. Otherwise, the
-allocator defaults to a combined alloc & free operation internally.
-
-Out-of-order freeing is accomplished by collapsing a run of freed items
-from the last allocation backwards.
-
-Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
-the requested alignment.
-*/
 import "base:runtime"
 
+/*
+Rollback stack default block size.
+*/
 ROLLBACK_STACK_DEFAULT_BLOCK_SIZE :: 4 * Megabyte
 
 /*
+Rollback stack max head block size.
+
 This limitation is due to the size of `prev_ptr`, but it is only for the
 head block; any allocation in excess of the allocator's `block_size` is
 valid, so long as the block allocator can handle it.
@@ -43,12 +19,18 @@ within is freed; they are immediately returned to the block allocator.
 */
 ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE :: 2 * Gigabyte
 
+/*
+Allocation header of the rollback stack allocator.
+*/
 Rollback_Stack_Header :: bit_field u64 {
 	prev_offset: uintptr | 32,
 	is_free: bool | 1,
 	prev_ptr: uintptr | 31,
 }
 
+/*
+Block header of the rollback stack allocator.
+*/
 Rollback_Stack_Block :: struct {
 	next_block: ^Rollback_Stack_Block,
 	last_alloc: rawptr,
@@ -56,6 +38,9 @@ Rollback_Stack_Block :: struct {
 	buffer: []byte,
 }
 
+/*
+Rollback stack allocator data.
+*/
 Rollback_Stack :: struct {
 	head: ^Rollback_Stack_Block,
 	block_size: int,
@@ -111,6 +96,9 @@ rb_rollback_block :: proc(block: ^Rollback_Stack_Block, header: ^Rollback_Stack_
 	}
 }
 
+/*
+Free memory to a rollback stack allocator.
+*/
 @(private="file", require_results)
 rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 	parent, block, header := rb_find_ptr(stack, ptr) or_return
@@ -129,6 +117,9 @@ rb_free :: proc(stack: ^Rollback_Stack, ptr: rawptr) -> Allocator_Error {
 	return nil
 }
 
+/*
+Free all memory owned by the rollback stack allocator.
+*/
 @(private="file")
 rb_free_all :: proc(stack: ^Rollback_Stack) {
 	for block := stack.head.next_block; block != nil; /**/ {
@@ -142,14 +133,16 @@ rb_free_all :: proc(stack: ^Rollback_Stack) {
 	stack.head.offset = 0
 }
 
+/*
+Resize an allocation made on a rollback stack allocator.
+*/
 @(private="file", require_results)
-rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_resize_non_zeroed :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
 	if ptr != nil {
 		if block, _, ok := rb_find_last_alloc(stack, ptr); ok {
 			// `block.offset` should never underflow because it is contingent
 			// on `old_size` in the first place, assuming sane arguments.
 			assert(block.offset >= cast(uintptr)old_size, "Rollback Stack Allocator received invalid `old_size`.")
-
 			if block.offset + cast(uintptr)size - cast(uintptr)old_size < cast(uintptr)len(block.buffer) {
 				// Prevent singleton allocations from fragmenting by forbidding
 				// them to shrink, removing the possibility of overflow bugs.
@@ -160,27 +153,26 @@ rb_resize :: proc(stack: ^Rollback_Stack, ptr: rawptr, old_size, size, alignment
 			}
 		}
 	}
-
-	result = rb_alloc(stack, size, alignment) or_return
+	result = rb_alloc_non_zeroed(stack, size, alignment) or_return
 	runtime.mem_copy_non_overlapping(raw_data(result), ptr, old_size)
 	err = rb_free(stack, ptr)
-
 	return
 }
 
+/*
+Allocate memory using the rollback stack allocator.
+*/
 @(private="file", require_results)
-rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
+rb_alloc_non_zeroed :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byte, err: Allocator_Error) {
 	parent: ^Rollback_Stack_Block
 	for block := stack.head; /**/; block = block.next_block {
 		when !ODIN_DISABLE_ASSERT {
 			allocated_new_block: bool
 		}
-
 		if block == nil {
 			if stack.block_allocator.procedure == nil {
 				return nil, .Out_Of_Memory
 			}
-
 			minimum_size_required := size_of(Rollback_Stack_Header) + size + alignment - 1
 			new_block_size := max(minimum_size_required, stack.block_size)
 			block = rb_make_block(new_block_size, stack.block_allocator) or_return
@@ -189,10 +181,8 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
 				allocated_new_block = true
 			}
 		}
-
 		start := raw_data(block.buffer)[block.offset:]
 		padding := cast(uintptr)calc_padding_with_header(cast(uintptr)start, cast(uintptr)alignment, size_of(Rollback_Stack_Header))
-
 		if block.offset + padding + cast(uintptr)size > cast(uintptr)len(block.buffer) {
 			when !ODIN_DISABLE_ASSERT {
 				if allocated_new_block {
@@ -202,54 +192,50 @@ rb_alloc :: proc(stack: ^Rollback_Stack, size, alignment: int) -> (result: []byt
 			parent = block
 			continue
 		}
-
 		header := cast(^Rollback_Stack_Header)(start[padding - size_of(Rollback_Stack_Header):])
 		ptr := start[padding:]
-
 		header^ = {
 			prev_offset = block.offset,
 			prev_ptr = uintptr(0) if block.last_alloc == nil else cast(uintptr)block.last_alloc - cast(uintptr)raw_data(block.buffer),
 			is_free = false,
 		}
-
 		block.last_alloc = ptr
 		block.offset += padding + cast(uintptr)size
-
 		if len(block.buffer) > stack.block_size {
 			// This block exceeds the allocator's standard block size and is considered a singleton.
 			// Prevent any further allocations on it.
 			block.offset = cast(uintptr)len(block.buffer)
 		}
-
 		#no_bounds_check return ptr[:size], nil
 	}
-
 	return nil, .Out_Of_Memory
 }
 
 @(private="file", require_results)
 rb_make_block :: proc(size: int, allocator: Allocator) -> (block: ^Rollback_Stack_Block, err: Allocator_Error) {
 	buffer := runtime.mem_alloc(size_of(Rollback_Stack_Block) + size, align_of(Rollback_Stack_Block), allocator) or_return
-
 	block = cast(^Rollback_Stack_Block)raw_data(buffer)
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
 	return
 }
 
-
+/*
+Initialize the rollback stack allocator using a fixed backing buffer.
+*/
 rollback_stack_init_buffered :: proc(stack: ^Rollback_Stack, buffer: []byte, location := #caller_location) {
 	MIN_SIZE :: size_of(Rollback_Stack_Block) + size_of(Rollback_Stack_Header) + size_of(rawptr)
 	assert(len(buffer) >= MIN_SIZE, "User-provided buffer to Rollback Stack Allocator is too small.", location)
-
 	block := cast(^Rollback_Stack_Block)raw_data(buffer)
 	block^ = {}
 	#no_bounds_check block.buffer = buffer[size_of(Rollback_Stack_Block):]
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = len(block.buffer)
 }
 
+/*
+Initialize the rollback stack allocator using a backing block allocator.
+*/
 rollback_stack_init_dynamic :: proc(
 	stack: ^Rollback_Stack,
 	block_size : int = ROLLBACK_STACK_DEFAULT_BLOCK_SIZE,
@@ -262,22 +248,25 @@ rollback_stack_init_dynamic :: proc(
 		// size is insufficient; check only on platforms with big enough ints.
 		assert(block_size <= ROLLBACK_STACK_MAX_HEAD_BLOCK_SIZE, "Rollback Stack Allocators cannot support head blocks larger than 2 gigabytes.", location)
 	}
-
 	block := rb_make_block(block_size, block_allocator) or_return
-
 	stack^ = {}
 	stack.head = block
 	stack.block_size = block_size
 	stack.block_allocator = block_allocator
-
 	return nil
 }
 
+/*
+Initialize the rollback stack.
+*/
 rollback_stack_init :: proc {
 	rollback_stack_init_buffered,
 	rollback_stack_init_dynamic,
 }
 
+/*
+Destroy a rollback stack.
+*/
 rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
 	if stack.block_allocator.procedure != nil {
 		rb_free_all(stack)
@@ -286,6 +275,37 @@ rollback_stack_destroy :: proc(stack: ^Rollback_Stack) {
 	stack^ = {}
 }
 
+/*
+Rollback stack allocator.
+
+The Rollback Stack Allocator was designed for the test runner to be fast,
+able to grow, and respect the Tracking Allocator's requirement for
+individual frees. It is not overly concerned with fragmentation, however.
+
+It has support for expansion when configured with a block allocator and
+limited support for out-of-order frees.
+
+Allocation has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks.
+
+Allocation follows a first-fit strategy when there are multiple memory
+blocks.
+
+Freeing has constant-time best and usual case performance.
+At worst, it is linear according to the number of memory blocks and number
+of freed items preceding the last item in a block.
+
+Resizing has constant-time performance, if it's the last item in a block, or
+the new size is smaller. Naturally, this becomes linear-time if there are
+multiple blocks to search for the pointer's owning block. Otherwise, the
+allocator defaults to a combined alloc & free operation internally.
+
+Out-of-order freeing is accomplished by collapsing a run of freed items
+from the last allocation backwards.
+
+Each allocation has an overhead of 8 bytes and any extra bytes to satisfy
+the requested alignment.
+*/
 @(require_results)
 rollback_stack_allocator :: proc(stack: ^Rollback_Stack) -> Allocator {
 	return Allocator {
@@ -309,38 +329,31 @@ rollback_stack_allocator_proc :: proc(
 	case .Alloc, .Alloc_Non_Zeroed:
 		assert(size >= 0, "Size must be positive or zero.", location)
 		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
-		result = rb_alloc(stack, size, alignment) or_return
-
+		result = rb_alloc_non_zeroed(stack, size, alignment) or_return
 		if mode == .Alloc {
 			zero_slice(result)
 		}
-
 	case .Free:
 		err = rb_free(stack, old_memory)
 
 	case .Free_All:
 		rb_free_all(stack)
-
 	case .Resize, .Resize_Non_Zeroed:
 		assert(size >= 0, "Size must be positive or zero.", location)
 		assert(old_size >= 0, "Old size must be positive or zero.", location)
 		assert(is_power_of_two(cast(uintptr)alignment), "Alignment must be a power of two.", location)
-		result = rb_resize(stack, old_memory, old_size, size, alignment) or_return
-
+		result = rb_resize_non_zeroed(stack, old_memory, old_size, size, alignment) or_return
 		#no_bounds_check if mode == .Resize && size > old_size {
 			zero_slice(result[old_size:])
 		}
-
 	case .Query_Features:
 		set := (^Allocator_Mode_Set)(old_memory)
 		if set != nil {
			set^ = {.Alloc, .Alloc_Non_Zeroed, .Free, .Free_All, .Resize, .Resize_Non_Zeroed}
 		}
 		return nil, nil
-
 	case .Query_Info:
 		return nil, .Mode_Not_Implemented
 	}
-
 	return
 }
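
Usage sketch (reviewer note, not part of the patch): a minimal, illustrative example of the API this patch documents and renames. It assumes the slice-based helpers `mem.alloc_bytes`, `mem.resize_bytes`, and `mem.free_bytes` from `core:mem`; the package name, the 4 KiB block size, and `context.allocator` as the backing block allocator are arbitrary choices for this sketch.

package rollback_example

import "core:mem"

main :: proc() {
	// Dynamic variant: blocks come from a backing block allocator
	// (context.allocator here), so the stack can grow beyond one block.
	stack: mem.Rollback_Stack
	err := mem.rollback_stack_init_dynamic(&stack, 4 * mem.Kilobyte, context.allocator)
	assert(err == nil)
	defer mem.rollback_stack_destroy(&stack)

	allocator := mem.rollback_stack_allocator(&stack)

	// Every allocation pays the 8-byte Rollback_Stack_Header plus any
	// padding required to satisfy the requested alignment.
	data, alloc_err := mem.alloc_bytes(64, 16, allocator)
	assert(alloc_err == nil)

	// Resizing the most recent allocation grows it in place when its
	// block has room; otherwise the allocator falls back to the internal
	// alloc-copy-free path described in the doc comment above.
	data, alloc_err = mem.resize_bytes(data, 128, 16, allocator)
	assert(alloc_err == nil)

	// Frees may arrive out of order: items are marked `is_free` and
	// collapsed in a run from the last allocation backwards.
	alloc_err = mem.free_bytes(data, allocator)
	assert(alloc_err == nil)
}

Because the allocator honors individual frees, it remains compatible with the Tracking Allocator mentioned in the doc comment, even though the backing store behaves like a stack.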