Skip to content

Commit

Permalink
[vm/compiler] Move setRange bounds checking entirely into Dart.
Browse files Browse the repository at this point in the history
The bounds checking was previously implemented in Dart. This change removes
_checkSetRangeArguments by inlining it into _TypedListBase.setRange,
renames _checkBoundsAndMemcpyN to _memMoveN since that helper no longer
performs bounds checking, and removes the now-unneeded bounds checking
from the native function TypedData_setRange.

TEST=co19{,_2}/LibTest/typed_data lib{,_2}/typed_data
     corelib{,_2}/list_test

Issue: #42072
Cq-Include-Trybots: luci.dart.try:vm-aot-linux-debug-simarm_x64-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-kernel-linux-debug-x64-try,vm-kernel-precomp-linux-debug-x64-try,vm-linux-debug-x64-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-aot-linux-release-simarm_x64-try,vm-aot-linux-release-x64-try,vm-aot-mac-release-arm64-try,vm-linux-release-x64-try,vm-mac-release-arm64-try,vm-kernel-precomp-linux-release-x64-try
Change-Id: I85ec751708f603f68729f4109d7339dd8407ae77
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/324102
Reviewed-by: Alexander Markov <[email protected]>
Commit-Queue: Tess Strickland <[email protected]>
  • Loading branch information
sstrickl authored and Commit Queue committed Sep 5, 2023
1 parent 2a669c5 commit b94d3f7
Show file tree
Hide file tree
Showing 7 changed files with 311 additions and 262 deletions.
61 changes: 32 additions & 29 deletions runtime/lib/typed_data.cc
Original file line number Diff line number Diff line change
Expand Up @@ -89,11 +89,13 @@ static bool IsUint8(intptr_t cid) {
}

DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {
// This is called after bounds checking, so the numeric inputs are
// guaranteed to be Smis, and the length is guaranteed to be non-zero.
const TypedDataBase& dst =
TypedDataBase::CheckedHandle(zone, arguments->NativeArgAt(0));
const Smi& dst_start_smi =
Smi::CheckedHandle(zone, arguments->NativeArgAt(1));
const Smi& dst_end_smi = Smi::CheckedHandle(zone, arguments->NativeArgAt(2));
const Smi& length_smi = Smi::CheckedHandle(zone, arguments->NativeArgAt(2));
const TypedDataBase& src =
TypedDataBase::CheckedHandle(zone, arguments->NativeArgAt(3));
const Smi& src_start_smi =
Expand All @@ -104,16 +106,29 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {

const intptr_t dst_start_in_bytes =
dst_start_smi.Value() * element_size_in_bytes;
const intptr_t dst_end_in_bytes = dst_end_smi.Value() * element_size_in_bytes;
const intptr_t length_in_bytes = length_smi.Value() * element_size_in_bytes;
const intptr_t src_start_in_bytes =
src_start_smi.Value() * element_size_in_bytes;

const intptr_t length_in_bytes = dst_end_in_bytes - dst_start_in_bytes;
#if defined(DEBUG)
// Verify bounds checks weren't needed.
ASSERT(dst_start_in_bytes >= 0);
ASSERT(src_start_in_bytes >= 0);
// The callers of this native function never call it for a zero-sized copy.
ASSERT(length_in_bytes > 0);

const intptr_t dst_length_in_bytes = dst.LengthInBytes();
// Since the length is non-zero, the start can't be the same as the end.
ASSERT(dst_start_in_bytes < dst_length_in_bytes);
ASSERT(length_in_bytes <= dst_length_in_bytes - dst_start_in_bytes);

const intptr_t src_length_in_bytes = src.LengthInBytes();
// Since the length is non-zero, the start can't be the same as the end.
ASSERT(src_start_in_bytes < src_length_in_bytes);
ASSERT(length_in_bytes <= src_length_in_bytes - src_start_in_bytes);
#endif

if (!IsClamped(dst.ptr()->GetClassId()) || IsUint8(src.ptr()->GetClassId())) {
// We've already performed range checking in _boundsCheckAndMemcpyN prior
// to the call to _nativeSetRange, so just perform the memmove.
//
// TODO(dartbug.com/42072): We do this when the copy length gets large
// enough that a native call to invoke memmove is faster than the generated
// code from MemoryCopy. Replace the static call to _nativeSetRange with
Expand All @@ -125,31 +140,19 @@ DEFINE_NATIVE_ENTRY(TypedDataBase_setRange, 0, 5) {
return Object::null();
}

// This is called on the fast path prior to bounds checking, so perform
// the bounds check even if the length is 0.
const intptr_t dst_length_in_bytes = dst.LengthInBytes();
RangeCheck(dst_start_in_bytes, length_in_bytes, dst_length_in_bytes,
element_size_in_bytes);

const intptr_t src_length_in_bytes = src.LengthInBytes();
RangeCheck(src_start_in_bytes, length_in_bytes, src_length_in_bytes,
element_size_in_bytes);

ASSERT_EQUAL(element_size_in_bytes, 1);

if (length_in_bytes > 0) {
NoSafepointScope no_safepoint;
uint8_t* dst_data =
reinterpret_cast<uint8_t*>(dst.DataAddr(dst_start_in_bytes));
int8_t* src_data =
reinterpret_cast<int8_t*>(src.DataAddr(src_start_in_bytes));
for (intptr_t ix = 0; ix < length_in_bytes; ix++) {
int8_t v = *src_data;
if (v < 0) v = 0;
*dst_data = v;
src_data++;
dst_data++;
}
NoSafepointScope no_safepoint;
uint8_t* dst_data =
reinterpret_cast<uint8_t*>(dst.DataAddr(dst_start_in_bytes));
int8_t* src_data =
reinterpret_cast<int8_t*>(src.DataAddr(src_start_in_bytes));
for (intptr_t ix = 0; ix < length_in_bytes; ix++) {
int8_t v = *src_data;
if (v < 0) v = 0;
*dst_data = v;
src_data++;
dst_data++;
}

return Object::null();
Expand Down
4 changes: 2 additions & 2 deletions runtime/vm/compiler/backend/memory_copy_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -420,8 +420,8 @@ static void RunMemoryCopyInstrTest(intptr_t src_start,
#define MEMORY_MOVE_TEST_BOXED(src_start, dest_start, length, elem_size) \
ISOLATE_UNIT_TEST_CASE( \
IRTest_MemoryMove_##src_start##_##dest_start##_##length##_##elem_size) { \
RunMemoryCopyInstrTest(src_start, dest_start, length, elem_size, true, \
false); \
RunMemoryCopyInstrTest(src_start, dest_start, length, elem_size, false, \
true); \
}

#define MEMORY_MOVE_TEST_UNBOXED(src_start, dest_start, length, el_si) \
Expand Down
67 changes: 23 additions & 44 deletions runtime/vm/compiler/frontend/kernel_to_il.cc
Original file line number Diff line number Diff line change
Expand Up @@ -925,11 +925,11 @@ bool FlowGraphBuilder::IsRecognizedMethodForFlowGraph(
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kSuspendState_clone:
case MethodRecognizer::kSuspendState_resume:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
case MethodRecognizer::kTypedData_ByteDataView_factory:
case MethodRecognizer::kTypedData_Int8ArrayView_factory:
case MethodRecognizer::kTypedData_Uint8ArrayView_factory:
Expand Down Expand Up @@ -1138,26 +1138,21 @@ FlowGraph* FlowGraphBuilder::BuildGraphOfRecognizedMethod(
body += TailCall(resume_stub);
break;
}
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_memMove1:
// Pick an appropriate typed data cid based on the element size.
body +=
BuildTypedDataCheckBoundsAndMemcpy(function, kTypedDataUint8ArrayCid);
body += BuildTypedDataMemMove(function, kTypedDataUint8ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint16ArrayCid);
case MethodRecognizer::kTypedData_memMove2:
body += BuildTypedDataMemMove(function, kTypedDataUint16ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint32ArrayCid);
case MethodRecognizer::kTypedData_memMove4:
body += BuildTypedDataMemMove(function, kTypedDataUint32ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataUint64ArrayCid);
case MethodRecognizer::kTypedData_memMove8:
body += BuildTypedDataMemMove(function, kTypedDataUint64ArrayCid);
break;
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
body += BuildTypedDataCheckBoundsAndMemcpy(function,
kTypedDataInt32x4ArrayCid);
case MethodRecognizer::kTypedData_memMove16:
body += BuildTypedDataMemMove(function, kTypedDataInt32x4ArrayCid);
break;
#define CASE(name) \
case MethodRecognizer::kTypedData_##name##_factory: \
Expand Down Expand Up @@ -1758,33 +1753,16 @@ Fragment FlowGraphBuilder::BuildTypedDataViewFactoryConstructor(
return body;
}

Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
const Function& function,
intptr_t cid) {
Fragment FlowGraphBuilder::BuildTypedDataMemMove(const Function& function,
intptr_t cid) {
ASSERT_EQUAL(parsed_function_->function().NumParameters(), 5);
LocalVariable* arg_to = parsed_function_->RawParameterVariable(0);
LocalVariable* arg_to_start = parsed_function_->RawParameterVariable(1);
LocalVariable* arg_to_end = parsed_function_->RawParameterVariable(2);
LocalVariable* arg_count = parsed_function_->RawParameterVariable(2);
LocalVariable* arg_from = parsed_function_->RawParameterVariable(3);
LocalVariable* arg_from_start = parsed_function_->RawParameterVariable(4);

const Library& lib = Library::Handle(Z, Library::TypedDataLibrary());
ASSERT(!lib.IsNull());
const Function& check_set_range_args = Function::ZoneHandle(
Z, lib.LookupFunctionAllowPrivate(Symbols::_checkSetRangeArguments()));
ASSERT(!check_set_range_args.IsNull());

Fragment body;
body += LoadLocal(arg_to);
body += LoadLocal(arg_to_start);
body += LoadLocal(arg_to_end);
body += LoadLocal(arg_from);
body += LoadLocal(arg_from_start);
body += StaticCall(TokenPosition::kNoSource, check_set_range_args, 5,
ICData::kStatic);
// The length is guaranteed to be a Smi if bounds checking is successful.
LocalVariable* length_to_copy = MakeTemporary("length");

// If we're copying at least this many elements, calling _nativeSetRange,
// which calls memmove via a native call, is faster than using the code
// currently emitted by the MemoryCopy instruction.
Expand All @@ -1806,7 +1784,7 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(

JoinEntryInstr* done = BuildJoinEntry();
TargetEntryInstr *is_small_enough, *is_too_large;
body += LoadLocal(length_to_copy);
body += LoadLocal(arg_count);
body += IntConstant(kCopyLengthForNativeCall);
body += SmiRelationalOp(Token::kLT);
body += BranchIfTrue(&is_small_enough, &is_too_large);
Expand All @@ -1816,13 +1794,15 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
use_instruction += LoadLocal(arg_to);
use_instruction += LoadLocal(arg_from_start);
use_instruction += LoadLocal(arg_to_start);
use_instruction += LoadLocal(length_to_copy);
use_instruction += LoadLocal(arg_count);
use_instruction += MemoryCopy(cid, cid,
/*unboxed_inputs=*/false, /*can_overlap=*/true);
use_instruction += Goto(done);

// TODO(dartbug.com/42072): Instead of doing a static call to a native
// method, make a leaf runtime entry for memmove and use CCall.
const Library& lib = Library::Handle(Z, Library::TypedDataLibrary());
ASSERT(!lib.IsNull());
const Class& typed_list_base =
Class::Handle(Z, lib.LookupClassAllowPrivate(Symbols::_TypedListBase()));
ASSERT(!typed_list_base.IsNull());
Expand All @@ -1836,7 +1816,7 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
Fragment call_native(is_too_large);
call_native += LoadLocal(arg_to);
call_native += LoadLocal(arg_to_start);
call_native += LoadLocal(arg_to_end);
call_native += LoadLocal(arg_count);
call_native += LoadLocal(arg_from);
call_native += LoadLocal(arg_from_start);
call_native += StaticCall(TokenPosition::kNoSource, native_set_range, 5,
Expand All @@ -1845,7 +1825,6 @@ Fragment FlowGraphBuilder::BuildTypedDataCheckBoundsAndMemcpy(
call_native += Goto(done);

body.current = done;
body += DropTemporary(&length_to_copy);
body += NullConstant();

return body;
Expand Down
3 changes: 1 addition & 2 deletions runtime/vm/compiler/frontend/kernel_to_il.h
Original file line number Diff line number Diff line change
Expand Up @@ -146,8 +146,7 @@ class FlowGraphBuilder : public BaseFlowGraphBuilder {

FlowGraph* BuildGraphOfRecognizedMethod(const Function& function);

Fragment BuildTypedDataCheckBoundsAndMemcpy(const Function& function,
intptr_t cid);
Fragment BuildTypedDataMemMove(const Function& function, intptr_t cid);
Fragment BuildTypedDataViewFactoryConstructor(const Function& function,
classid_t cid);
Fragment BuildTypedDataFactoryConstructor(const Function& function,
Expand Down
15 changes: 5 additions & 10 deletions runtime/vm/compiler/recognized_methods_list.h
Original file line number Diff line number Diff line change
Expand Up @@ -114,16 +114,11 @@ namespace dart {
V(Float32x4List, ., TypedData_Float32x4Array_factory, 0x0a6eefa8) \
V(Int32x4List, ., TypedData_Int32x4Array_factory, 0x5a09288e) \
V(Float64x2List, ., TypedData_Float64x2Array_factory, 0xecbc738a) \
V(_TypedListBase, _checkBoundsAndMemcpy1, \
TypedData_checkBoundsAndMemcpy1, 0xf9d326bd) \
V(_TypedListBase, _checkBoundsAndMemcpy2, \
TypedData_checkBoundsAndMemcpy2, 0xf0756646) \
V(_TypedListBase, _checkBoundsAndMemcpy4, \
TypedData_checkBoundsAndMemcpy4, 0xe8cfd800) \
V(_TypedListBase, _checkBoundsAndMemcpy8, \
TypedData_checkBoundsAndMemcpy8, 0xe945188e) \
V(_TypedListBase, _checkBoundsAndMemcpy16, \
TypedData_checkBoundsAndMemcpy16, 0xebd06cb3) \
V(_TypedListBase, _memMove1, TypedData_memMove1, 0xd2767fb0) \
V(_TypedListBase, _memMove2, TypedData_memMove2, 0xed382bb6) \
V(_TypedListBase, _memMove4, TypedData_memMove4, 0xcfe37726) \
V(_TypedListBase, _memMove8, TypedData_memMove8, 0xd1d8e325) \
V(_TypedListBase, _memMove16, TypedData_memMove16, 0x07861cd5) \
V(::, _toClampedUint8, ConvertIntToClampedUint8, 0xd0e522d0) \
V(::, copyRangeFromUint8ListToOneByteString, \
CopyRangeFromUint8ListToOneByteString, 0xcc42cce1) \
Expand Down
10 changes: 5 additions & 5 deletions runtime/vm/object.cc
Original file line number Diff line number Diff line change
Expand Up @@ -9102,11 +9102,11 @@ bool Function::RecognizedKindForceOptimize() const {
case MethodRecognizer::kRecord_numFields:
case MethodRecognizer::kUtf8DecoderScan:
case MethodRecognizer::kDouble_hashCode:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy1:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy2:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy4:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy8:
case MethodRecognizer::kTypedData_checkBoundsAndMemcpy16:
case MethodRecognizer::kTypedData_memMove1:
case MethodRecognizer::kTypedData_memMove2:
case MethodRecognizer::kTypedData_memMove4:
case MethodRecognizer::kTypedData_memMove8:
case MethodRecognizer::kTypedData_memMove16:
// Prevent the GC from running so that the operation is atomic from
// a GC point of view. Always double check implementation in
// kernel_to_il.cc that no GC can happen in between the relevant IL
Expand Down
Loading

0 comments on commit b94d3f7

Please sign in to comment.