bpo-36030: Improve performance of some tuple operations (GH-12052)
sir-sigurd authored and vstinner committed Aug 14, 2019
1 parent b0c8369 commit 4fa10dd
Showing 1 changed file with 71 additions and 32 deletions.
103 changes: 71 additions & 32 deletions Objects/tupleobject.c
@@ -59,6 +59,15 @@ show_track(void)
}
#endif

static inline void
tuple_gc_track(PyTupleObject *op)
{
#ifdef SHOW_TRACK_COUNT
count_tracked++;
#endif
_PyObject_GC_TRACK(op);
}

/* Print summary info about the state of the optimized allocator */
void
_PyTuple_DebugMallocStats(FILE *out)
@@ -76,25 +85,25 @@ _PyTuple_DebugMallocStats(FILE *out)
#endif
}

PyObject *
PyTuple_New(Py_ssize_t size)
/* Allocate an uninitialized tuple object. Before making it public, the
following steps must be done:
- initialize its items
- call tuple_gc_track() on it
Because the empty tuple is always reused and is already tracked by the GC,
this function must not be called with size == 0 (unless from PyTuple_New(),
which wraps this function).
*/
static PyTupleObject *
tuple_alloc(Py_ssize_t size)
{
PyTupleObject *op;
Py_ssize_t i;
if (size < 0) {
PyErr_BadInternalCall();
return NULL;
}
#if PyTuple_MAXSAVESIZE > 0
if (size == 0 && free_list[0]) {
op = free_list[0];
Py_INCREF(op);
#ifdef COUNT_ALLOCS
_Py_tuple_zero_allocs++;
#endif
return (PyObject *) op;
}
if (size < PyTuple_MAXSAVESIZE && (op = free_list[size]) != NULL) {
assert(size != 0);
free_list[size] = (PyTupleObject *) op->ob_item[0];
numfree[size]--;
#ifdef COUNT_ALLOCS
@@ -113,25 +122,41 @@ PyTuple_New(Py_ssize_t size)
/* Check for overflow */
if ((size_t)size > ((size_t)PY_SSIZE_T_MAX - sizeof(PyTupleObject) -
sizeof(PyObject *)) / sizeof(PyObject *)) {
return PyErr_NoMemory();
return (PyTupleObject *)PyErr_NoMemory();
}
op = PyObject_GC_NewVar(PyTupleObject, &PyTuple_Type, size);
if (op == NULL)
return NULL;
}
for (i=0; i < size; i++)
return op;
}

PyObject *
PyTuple_New(Py_ssize_t size)
{
PyTupleObject *op;
#if PyTuple_MAXSAVESIZE > 0
if (size == 0 && free_list[0]) {
op = free_list[0];
Py_INCREF(op);
#ifdef COUNT_ALLOCS
_Py_tuple_zero_allocs++;
#endif
return (PyObject *) op;
}
#endif
op = tuple_alloc(size);
for (Py_ssize_t i = 0; i < size; i++) {
op->ob_item[i] = NULL;
}
#if PyTuple_MAXSAVESIZE > 0
if (size == 0) {
free_list[0] = op;
++numfree[0];
Py_INCREF(op); /* extra INCREF so that this is never freed */
}
#endif
#ifdef SHOW_TRACK_COUNT
count_tracked++;
#endif
_PyObject_GC_TRACK(op);
tuple_gc_track(op);
return (PyObject *) op;
}
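
This split is the heart of the change: tuple_alloc() returns an uninitialized tuple that is not yet GC-tracked, the caller fills every slot itself, and only then calls tuple_gc_track(). Internal callers can therefore skip PyTuple_New()'s NULL-initialization loop while half-built tuples stay invisible to the collector. Below is a minimal sketch of that pattern, assuming it sits inside Objects/tupleobject.c where the static helpers are in scope; the pair_from_items() helper is hypothetical and not part of this commit:

static PyObject *
pair_from_items(PyObject *first, PyObject *second)
{
    /* Uninitialized and untracked; size must be nonzero per the comment above. */
    PyTupleObject *op = tuple_alloc(2);
    if (op == NULL) {
        return NULL;
    }
    /* Fill every slot before the tuple becomes visible to the GC. */
    Py_INCREF(first);
    op->ob_item[0] = first;
    Py_INCREF(second);
    op->ob_item[1] = second;
    /* Track only the fully initialized object. */
    tuple_gc_track(op);
    return (PyObject *)op;
}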

@@ -211,24 +236,28 @@ PyTuple_Pack(Py_ssize_t n, ...)
{
Py_ssize_t i;
PyObject *o;
PyObject *result;
PyObject **items;
va_list vargs;

if (n == 0) {
return PyTuple_New(0);
}

va_start(vargs, n);
result = PyTuple_New(n);
PyTupleObject *result = tuple_alloc(n);
if (result == NULL) {
va_end(vargs);
return NULL;
}
items = ((PyTupleObject *)result)->ob_item;
items = result->ob_item;
for (i = 0; i < n; i++) {
o = va_arg(vargs, PyObject *);
Py_INCREF(o);
items[i] = o;
}
va_end(vargs);
return result;
tuple_gc_track(result);
return (PyObject *)result;
}


@@ -421,7 +450,11 @@ tupleitem(PyTupleObject *a, Py_ssize_t i)
PyObject *
_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n)
{
PyTupleObject *tuple = (PyTupleObject *)PyTuple_New(n);
if (n == 0) {
return PyTuple_New(0);
}

PyTupleObject *tuple = tuple_alloc(n);
if (tuple == NULL) {
return NULL;
}
@@ -431,6 +464,7 @@ _PyTuple_FromArray(PyObject *const *src, Py_ssize_t n)
Py_INCREF(item);
dst[i] = item;
}
tuple_gc_track(tuple);
return (PyObject *)tuple;
}

@@ -486,7 +520,11 @@ tupleconcat(PyTupleObject *a, PyObject *bb)
if (Py_SIZE(a) > PY_SSIZE_T_MAX - Py_SIZE(b))
return PyErr_NoMemory();
size = Py_SIZE(a) + Py_SIZE(b);
np = (PyTupleObject *) PyTuple_New(size);
if (size == 0) {
return PyTuple_New(0);
}

np = tuple_alloc(size);
if (np == NULL) {
return NULL;
}
@@ -504,6 +542,7 @@ tupleconcat(PyTupleObject *a, PyObject *bb)
Py_INCREF(v);
dest[i] = v;
}
tuple_gc_track(np);
return (PyObject *)np;
#undef b
}
Expand All @@ -515,22 +554,21 @@ tuplerepeat(PyTupleObject *a, Py_ssize_t n)
Py_ssize_t size;
PyTupleObject *np;
PyObject **p, **items;
if (n < 0)
n = 0;
if (Py_SIZE(a) == 0 || n == 1) {
if (PyTuple_CheckExact(a)) {
/* Since tuples are immutable, we can return a shared
copy in this case */
Py_INCREF(a);
return (PyObject *)a;
}
if (Py_SIZE(a) == 0)
return PyTuple_New(0);
}
if (Py_SIZE(a) == 0 || n <= 0) {
return PyTuple_New(0);
}
if (n > PY_SSIZE_T_MAX / Py_SIZE(a))
return PyErr_NoMemory();
size = Py_SIZE(a) * n;
np = (PyTupleObject *) PyTuple_New(size);
np = tuple_alloc(size);
if (np == NULL)
return NULL;
p = np->ob_item;
@@ -542,6 +580,7 @@ tuplerepeat(PyTupleObject *a, Py_ssize_t n)
p++;
}
}
tuple_gc_track(np);
return (PyObject *) np;
}

@@ -754,7 +793,6 @@ tuplesubscript(PyTupleObject* self, PyObject* item)
else if (PySlice_Check(item)) {
Py_ssize_t start, stop, step, slicelength, i;
size_t cur;
PyObject* result;
PyObject* it;
PyObject **src, **dest;

Expand All @@ -774,19 +812,20 @@ tuplesubscript(PyTupleObject* self, PyObject* item)
return (PyObject *)self;
}
else {
result = PyTuple_New(slicelength);
PyTupleObject* result = tuple_alloc(slicelength);
if (!result) return NULL;

src = self->ob_item;
dest = ((PyTupleObject *)result)->ob_item;
dest = result->ob_item;
for (cur = start, i = 0; i < slicelength;
cur += step, i++) {
it = src[cur];
Py_INCREF(it);
dest[i] = it;
}

return result;
tuple_gc_track(result);
return (PyObject *)result;
}
}
else {
