Skip to content

Commit

Permalink
Fix "set but not used" warnings/errors (halide#6683)
Browse files Browse the repository at this point in the history
* Fix "set but not used" warnings/errors

Apparently Xcode 13.3 has smarter warnings about unused code and emits warnings/errors for these, so let's clean them up.

* Also replace nonstandard `ssize_t` with the portable `Py_ssize_t` in the Python bindings
  • Loading branch information
steven-johnson authored and ardier committed Mar 3, 2024
1 parent 93d0e84 commit bedf0be
Show file tree
Hide file tree
Showing 4 changed files with 3 additions and 13 deletions.
6 changes: 3 additions & 3 deletions python_bindings/src/PyBuffer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -313,10 +313,10 @@ void define_buffer(py::module &m) {

const int d = b.dimensions();
const int bytes = b.type().bytes();
std::vector<ssize_t> shape, strides;
std::vector<Py_ssize_t> shape, strides;
for (int i = 0; i < d; i++) {
shape.push_back((ssize_t)b.raw_buffer()->dim[i].extent);
strides.push_back((ssize_t)(b.raw_buffer()->dim[i].stride * bytes));
shape.push_back((Py_ssize_t)b.raw_buffer()->dim[i].extent);
strides.push_back((Py_ssize_t)(b.raw_buffer()->dim[i].stride * bytes));
}

return py::buffer_info(
Expand Down
4 changes: 0 additions & 4 deletions src/autoschedulers/adams2019/LoopNest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -729,7 +729,6 @@ void LoopNest::compute_features(const FunctionDAG &dag,
int64_t footprint = e->producer->bytes_per_point;
int64_t compute_footprint = footprint;
int64_t store_footprint = footprint;
int64_t task_footprint = footprint;
int64_t line_footprint = 1;
int64_t compute_line_footprint = 1;
int64_t store_line_footprint = 1;
Expand Down Expand Up @@ -890,7 +889,6 @@ void LoopNest::compute_features(const FunctionDAG &dag,
footprint *= extent;
compute_footprint *= compute_extent;
store_footprint *= store_extent;
task_footprint *= task_extent;

bool dense = ((e->producer->is_input && i == 0) ||
(site.produce != nullptr && i == site.produce->vector_dim));
Expand Down Expand Up @@ -1295,13 +1293,11 @@ void LoopNest::compute_here(const FunctionDAG::Node *f, bool tileable, int v) {
size_t loop_dim = f->stages[s].loop.size();
node->size.resize(loop_dim);

int64_t total_extent = 1;
int64_t vector_size = 1;
for (size_t i = 0; i < loop_dim; i++) {
const auto &l = bounds->loops(s, i);
// Initialize the loop nest
node->size[i] = l.extent();
total_extent *= node->size[i];

// Use the first loop iteration to represent the inner
// loop. We'll shift it to a later one once we decide
Expand Down
2 changes: 0 additions & 2 deletions src/autoschedulers/adams2019/State.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -594,7 +594,6 @@ void State::apply_schedule(const FunctionDAG &dag, const MachineParams &params)
// Do all the reorders and pick which vars to
// parallelize.
vector<VarOrRVar> vars;
int64_t parallel_tasks = 1;
vector<VarOrRVar> parallel_vars;
bool any_parallel_vars = false, any_parallel_rvars = false;
for (auto it = p.second->vars.rbegin(); it != p.second->vars.rend(); it++) {
Expand All @@ -606,7 +605,6 @@ void State::apply_schedule(const FunctionDAG &dag, const MachineParams &params)
}
any_parallel_rvars |= it->var.is_rvar;
any_parallel_vars |= !it->var.is_rvar;
parallel_tasks *= it->extent;
parallel_vars.push_back(it->var);
}

Expand Down
4 changes: 0 additions & 4 deletions src/autoschedulers/li2018/GradientAutoscheduler.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -606,10 +606,6 @@ void apply_schedule(const MachineParams &params,
for (const ReductionVariable &r : reduction_vars) {
rvars.emplace_back(r.var);
}
int rdomain_size = 1;
for (int b : rvar_bounds) {
rdomain_size *= b;
}
// Define the thresholds for the pure domain.
// For CPU we want at least params.parallelism number of elements
// to launch threads. For GPU we want to launch at least 64 GPU blocks.
Expand Down

0 comments on commit bedf0be

Please sign in to comment.