# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
    raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
    # imported as a submodule of a package: use a relative import
    from . import _swigfaiss
else:
    # imported as a plain top-level module (e.g. straight from the build dir)
    import _swigfaiss
try:
    # Python 3: the builtins module
    import builtins as __builtin__
except ImportError:
    # Python 2 fallback
    import __builtin__
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
    """Meta class to enforce nondynamic attributes (no new attributes) for a class"""
    # Route class-attribute assignment through the guarded setter so that
    # adding brand-new class attributes raises AttributeError.
    __setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
    """Abstract proxy over SWIG's internal C++ container iterators.

    Instances are produced by the wrapped containers; the class cannot be
    instantiated directly.  ``__iter__``/``__next__`` make it usable with
    the Python iterator protocol.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract class: construction always fails.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _swigfaiss.delete_SwigPyIterator
    def value(self):
        return _swigfaiss.SwigPyIterator_value(self)
    def incr(self, n=1):
        return _swigfaiss.SwigPyIterator_incr(self, n)
    def decr(self, n=1):
        return _swigfaiss.SwigPyIterator_decr(self, n)
    def distance(self, x):
        return _swigfaiss.SwigPyIterator_distance(self, x)
    def equal(self, x):
        return _swigfaiss.SwigPyIterator_equal(self, x)
    def copy(self):
        return _swigfaiss.SwigPyIterator_copy(self)
    def next(self):
        # Python 2 style iteration method, kept alongside __next__.
        return _swigfaiss.SwigPyIterator_next(self)
    def __next__(self):
        return _swigfaiss.SwigPyIterator___next__(self)
    def previous(self):
        return _swigfaiss.SwigPyIterator_previous(self)
    def advance(self, n):
        return _swigfaiss.SwigPyIterator_advance(self, n)
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable in Python 3; this is standard SWIG output — confirm before
    # relying on iterator hashing.
    def __eq__(self, x):
        return _swigfaiss.SwigPyIterator___eq__(self, x)
    def __ne__(self, x):
        return _swigfaiss.SwigPyIterator___ne__(self, x)
    def __iadd__(self, n):
        return _swigfaiss.SwigPyIterator___iadd__(self, n)
    def __isub__(self, n):
        return _swigfaiss.SwigPyIterator___isub__(self, n)
    def __add__(self, n):
        return _swigfaiss.SwigPyIterator___add__(self, n)
    def __sub__(self, *args):
        return _swigfaiss.SwigPyIterator___sub__(self, *args)
    def __iter__(self):
        return self

# Register SwigPyIterator in _swigfaiss:
_swigfaiss.SwigPyIterator_swigregister(SwigPyIterator)
# Constant re-exported from the compiled core module.
SHARED_PTR_DISOWN = _swigfaiss.SHARED_PTR_DISOWN
class Float32Vector(object):
    """SWIG proxy for the C++ ``Float32Vector`` container.

    Exposes a ``std::vector``-style interface (push_back/clear/data/size/
    at/resize/swap); every method delegates to the compiled ``_swigfaiss``
    module.
    """
    # thisown tracks whether the Python proxy owns the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Float32Vector_swiginit(self, _swigfaiss.new_Float32Vector())
    def push_back(self, arg2):
        return _swigfaiss.Float32Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Float32Vector_clear(self)
    def data(self):
        # presumably returns a pointer/view of the first element — lifetime
        # is tied to this object; TODO confirm against the SWIG typemap
        return _swigfaiss.Float32Vector_data(self)
    def size(self):
        return _swigfaiss.Float32Vector_size(self)
    def at(self, n):
        return _swigfaiss.Float32Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Float32Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Float32Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Float32Vector

# Register Float32Vector in _swigfaiss:
_swigfaiss.Float32Vector_swigregister(Float32Vector)
class Float64Vector(object):
    """SWIG proxy for the C++ ``Float64Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Float64Vector_swiginit(self, _swigfaiss.new_Float64Vector())
    def push_back(self, arg2):
        return _swigfaiss.Float64Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Float64Vector_clear(self)
    def data(self):
        return _swigfaiss.Float64Vector_data(self)
    def size(self):
        return _swigfaiss.Float64Vector_size(self)
    def at(self, n):
        return _swigfaiss.Float64Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Float64Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Float64Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Float64Vector

# Register Float64Vector in _swigfaiss:
_swigfaiss.Float64Vector_swigregister(Float64Vector)
class Int8Vector(object):
    """SWIG proxy for the C++ ``Int8Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int8Vector_swiginit(self, _swigfaiss.new_Int8Vector())
    def push_back(self, arg2):
        return _swigfaiss.Int8Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int8Vector_clear(self)
    def data(self):
        return _swigfaiss.Int8Vector_data(self)
    def size(self):
        return _swigfaiss.Int8Vector_size(self)
    def at(self, n):
        return _swigfaiss.Int8Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int8Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int8Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int8Vector

# Register Int8Vector in _swigfaiss:
_swigfaiss.Int8Vector_swigregister(Int8Vector)
class Int16Vector(object):
    """SWIG proxy for the C++ ``Int16Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int16Vector_swiginit(self, _swigfaiss.new_Int16Vector())
    def push_back(self, arg2):
        return _swigfaiss.Int16Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int16Vector_clear(self)
    def data(self):
        return _swigfaiss.Int16Vector_data(self)
    def size(self):
        return _swigfaiss.Int16Vector_size(self)
    def at(self, n):
        return _swigfaiss.Int16Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int16Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int16Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int16Vector

# Register Int16Vector in _swigfaiss:
_swigfaiss.Int16Vector_swigregister(Int16Vector)
class Int32Vector(object):
    """SWIG proxy for the C++ ``Int32Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int32Vector_swiginit(self, _swigfaiss.new_Int32Vector())
    def push_back(self, arg2):
        return _swigfaiss.Int32Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int32Vector_clear(self)
    def data(self):
        return _swigfaiss.Int32Vector_data(self)
    def size(self):
        return _swigfaiss.Int32Vector_size(self)
    def at(self, n):
        return _swigfaiss.Int32Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int32Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int32Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int32Vector

# Register Int32Vector in _swigfaiss:
_swigfaiss.Int32Vector_swigregister(Int32Vector)
class Int64Vector(object):
    """SWIG proxy for the C++ ``Int64Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int64Vector_swiginit(self, _swigfaiss.new_Int64Vector())
    def push_back(self, arg2):
        return _swigfaiss.Int64Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int64Vector_clear(self)
    def data(self):
        return _swigfaiss.Int64Vector_data(self)
    def size(self):
        return _swigfaiss.Int64Vector_size(self)
    def at(self, n):
        return _swigfaiss.Int64Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int64Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int64Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int64Vector

# Register Int64Vector in _swigfaiss:
_swigfaiss.Int64Vector_swigregister(Int64Vector)
class UInt8Vector(object):
    """SWIG proxy for the C++ ``UInt8Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.UInt8Vector_swiginit(self, _swigfaiss.new_UInt8Vector())
    def push_back(self, arg2):
        return _swigfaiss.UInt8Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.UInt8Vector_clear(self)
    def data(self):
        return _swigfaiss.UInt8Vector_data(self)
    def size(self):
        return _swigfaiss.UInt8Vector_size(self)
    def at(self, n):
        return _swigfaiss.UInt8Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.UInt8Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.UInt8Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_UInt8Vector

# Register UInt8Vector in _swigfaiss:
_swigfaiss.UInt8Vector_swigregister(UInt8Vector)
class UInt16Vector(object):
    """SWIG proxy for the C++ ``UInt16Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.UInt16Vector_swiginit(self, _swigfaiss.new_UInt16Vector())
    def push_back(self, arg2):
        return _swigfaiss.UInt16Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.UInt16Vector_clear(self)
    def data(self):
        return _swigfaiss.UInt16Vector_data(self)
    def size(self):
        return _swigfaiss.UInt16Vector_size(self)
    def at(self, n):
        return _swigfaiss.UInt16Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.UInt16Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.UInt16Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_UInt16Vector

# Register UInt16Vector in _swigfaiss:
_swigfaiss.UInt16Vector_swigregister(UInt16Vector)
class UInt32Vector(object):
    """SWIG proxy for the C++ ``UInt32Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.UInt32Vector_swiginit(self, _swigfaiss.new_UInt32Vector())
    def push_back(self, arg2):
        return _swigfaiss.UInt32Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.UInt32Vector_clear(self)
    def data(self):
        return _swigfaiss.UInt32Vector_data(self)
    def size(self):
        return _swigfaiss.UInt32Vector_size(self)
    def at(self, n):
        return _swigfaiss.UInt32Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.UInt32Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.UInt32Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_UInt32Vector

# Register UInt32Vector in _swigfaiss:
_swigfaiss.UInt32Vector_swigregister(UInt32Vector)
class UInt64Vector(object):
    """SWIG proxy for the C++ ``UInt64Vector`` container; same ``std::vector``-style interface as the other ``*Vector`` proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.UInt64Vector_swiginit(self, _swigfaiss.new_UInt64Vector())
    def push_back(self, arg2):
        return _swigfaiss.UInt64Vector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.UInt64Vector_clear(self)
    def data(self):
        return _swigfaiss.UInt64Vector_data(self)
    def size(self):
        return _swigfaiss.UInt64Vector_size(self)
    def at(self, n):
        return _swigfaiss.UInt64Vector_at(self, n)
    def resize(self, n):
        return _swigfaiss.UInt64Vector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.UInt64Vector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_UInt64Vector

# Register UInt64Vector in _swigfaiss:
_swigfaiss.UInt64Vector_swigregister(UInt64Vector)
class Float32VectorVector(object):
    """SWIG proxy for the C++ ``Float32VectorVector`` (vector-of-vectors) container; same ``std::vector``-style interface as the other proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Float32VectorVector_swiginit(self, _swigfaiss.new_Float32VectorVector())
    def push_back(self, arg2):
        return _swigfaiss.Float32VectorVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Float32VectorVector_clear(self)
    def data(self):
        return _swigfaiss.Float32VectorVector_data(self)
    def size(self):
        return _swigfaiss.Float32VectorVector_size(self)
    def at(self, n):
        return _swigfaiss.Float32VectorVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Float32VectorVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Float32VectorVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Float32VectorVector

# Register Float32VectorVector in _swigfaiss:
_swigfaiss.Float32VectorVector_swigregister(Float32VectorVector)
class UInt8VectorVector(object):
    """SWIG proxy for the C++ ``UInt8VectorVector`` (vector-of-vectors) container; same ``std::vector``-style interface as the other proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.UInt8VectorVector_swiginit(self, _swigfaiss.new_UInt8VectorVector())
    def push_back(self, arg2):
        return _swigfaiss.UInt8VectorVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.UInt8VectorVector_clear(self)
    def data(self):
        return _swigfaiss.UInt8VectorVector_data(self)
    def size(self):
        return _swigfaiss.UInt8VectorVector_size(self)
    def at(self, n):
        return _swigfaiss.UInt8VectorVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.UInt8VectorVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.UInt8VectorVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_UInt8VectorVector

# Register UInt8VectorVector in _swigfaiss:
_swigfaiss.UInt8VectorVector_swigregister(UInt8VectorVector)
class Int32VectorVector(object):
    """SWIG proxy for the C++ ``Int32VectorVector`` (vector-of-vectors) container; same ``std::vector``-style interface as the other proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int32VectorVector_swiginit(self, _swigfaiss.new_Int32VectorVector())
    def push_back(self, arg2):
        return _swigfaiss.Int32VectorVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int32VectorVector_clear(self)
    def data(self):
        return _swigfaiss.Int32VectorVector_data(self)
    def size(self):
        return _swigfaiss.Int32VectorVector_size(self)
    def at(self, n):
        return _swigfaiss.Int32VectorVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int32VectorVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int32VectorVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int32VectorVector

# Register Int32VectorVector in _swigfaiss:
_swigfaiss.Int32VectorVector_swigregister(Int32VectorVector)
class Int64VectorVector(object):
    """SWIG proxy for the C++ ``Int64VectorVector`` (vector-of-vectors) container; same ``std::vector``-style interface as the other proxies in this module."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.Int64VectorVector_swiginit(self, _swigfaiss.new_Int64VectorVector())
    def push_back(self, arg2):
        return _swigfaiss.Int64VectorVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.Int64VectorVector_clear(self)
    def data(self):
        return _swigfaiss.Int64VectorVector_data(self)
    def size(self):
        return _swigfaiss.Int64VectorVector_size(self)
    def at(self, n):
        return _swigfaiss.Int64VectorVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.Int64VectorVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.Int64VectorVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_Int64VectorVector

# Register Int64VectorVector in _swigfaiss:
_swigfaiss.Int64VectorVector_swigregister(Int64VectorVector)
class VectorTransformVector(object):
    """SWIG proxy for the C++ ``VectorTransformVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.VectorTransformVector_swiginit(self, _swigfaiss.new_VectorTransformVector())
    def push_back(self, arg2):
        return _swigfaiss.VectorTransformVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.VectorTransformVector_clear(self)
    def data(self):
        return _swigfaiss.VectorTransformVector_data(self)
    def size(self):
        return _swigfaiss.VectorTransformVector_size(self)
    def at(self, n):
        return _swigfaiss.VectorTransformVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.VectorTransformVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.VectorTransformVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_VectorTransformVector

# Register VectorTransformVector in _swigfaiss:
_swigfaiss.VectorTransformVector_swigregister(VectorTransformVector)
class OperatingPointVector(object):
    """SWIG proxy for the C++ ``OperatingPointVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.OperatingPointVector_swiginit(self, _swigfaiss.new_OperatingPointVector())
    def push_back(self, arg2):
        return _swigfaiss.OperatingPointVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.OperatingPointVector_clear(self)
    def data(self):
        return _swigfaiss.OperatingPointVector_data(self)
    def size(self):
        return _swigfaiss.OperatingPointVector_size(self)
    def at(self, n):
        return _swigfaiss.OperatingPointVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.OperatingPointVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.OperatingPointVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_OperatingPointVector

# Register OperatingPointVector in _swigfaiss:
_swigfaiss.OperatingPointVector_swigregister(OperatingPointVector)
class InvertedListsPtrVector(object):
    """SWIG proxy for the C++ ``InvertedListsPtrVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.InvertedListsPtrVector_swiginit(self, _swigfaiss.new_InvertedListsPtrVector())
    def push_back(self, arg2):
        return _swigfaiss.InvertedListsPtrVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.InvertedListsPtrVector_clear(self)
    def data(self):
        return _swigfaiss.InvertedListsPtrVector_data(self)
    def size(self):
        return _swigfaiss.InvertedListsPtrVector_size(self)
    def at(self, n):
        return _swigfaiss.InvertedListsPtrVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.InvertedListsPtrVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.InvertedListsPtrVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_InvertedListsPtrVector

# Register InvertedListsPtrVector in _swigfaiss:
_swigfaiss.InvertedListsPtrVector_swigregister(InvertedListsPtrVector)
class RepeatVector(object):
    """SWIG proxy for the C++ ``RepeatVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.RepeatVector_swiginit(self, _swigfaiss.new_RepeatVector())
    def push_back(self, arg2):
        return _swigfaiss.RepeatVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.RepeatVector_clear(self)
    def data(self):
        return _swigfaiss.RepeatVector_data(self)
    def size(self):
        return _swigfaiss.RepeatVector_size(self)
    def at(self, n):
        return _swigfaiss.RepeatVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.RepeatVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.RepeatVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_RepeatVector

# Register RepeatVector in _swigfaiss:
_swigfaiss.RepeatVector_swigregister(RepeatVector)
class ClusteringIterationStatsVector(object):
    """SWIG proxy for the C++ ``ClusteringIterationStatsVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.ClusteringIterationStatsVector_swiginit(self, _swigfaiss.new_ClusteringIterationStatsVector())
    def push_back(self, arg2):
        return _swigfaiss.ClusteringIterationStatsVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.ClusteringIterationStatsVector_clear(self)
    def data(self):
        return _swigfaiss.ClusteringIterationStatsVector_data(self)
    def size(self):
        return _swigfaiss.ClusteringIterationStatsVector_size(self)
    def at(self, n):
        return _swigfaiss.ClusteringIterationStatsVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.ClusteringIterationStatsVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.ClusteringIterationStatsVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_ClusteringIterationStatsVector

# Register ClusteringIterationStatsVector in _swigfaiss:
_swigfaiss.ClusteringIterationStatsVector_swigregister(ClusteringIterationStatsVector)
class OnDiskOneListVector(object):
    """SWIG proxy for the C++ ``OnDiskOneListVector`` container (elements declared in the SWIG interface); same ``std::vector``-style interface as the other proxies here."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self):
        _swigfaiss.OnDiskOneListVector_swiginit(self, _swigfaiss.new_OnDiskOneListVector())
    def push_back(self, arg2):
        return _swigfaiss.OnDiskOneListVector_push_back(self, arg2)
    def clear(self):
        return _swigfaiss.OnDiskOneListVector_clear(self)
    def data(self):
        return _swigfaiss.OnDiskOneListVector_data(self)
    def size(self):
        return _swigfaiss.OnDiskOneListVector_size(self)
    def at(self, n):
        return _swigfaiss.OnDiskOneListVector_at(self, n)
    def resize(self, n):
        return _swigfaiss.OnDiskOneListVector_resize(self, n)
    def swap(self, other):
        return _swigfaiss.OnDiskOneListVector_swap(self, other)
    __swig_destroy__ = _swigfaiss.delete_OnDiskOneListVector

# Register OnDiskOneListVector in _swigfaiss:
_swigfaiss.OnDiskOneListVector_swigregister(OnDiskOneListVector)
def simd_histogram_8(data, n, min, shift, hist):
    r"""
    low level SIMD histogramming functions 8-bin histogram of (x - min) >> shift
    values outside the range are ignored.
    the data table should be aligned on 32 bytes
    """
    # NOTE: ``min`` shadows the builtin of the same name; the signature is
    # generated by SWIG and must stay as-is.
    return _swigfaiss.simd_histogram_8(data, n, min, shift, hist)

def simd_histogram_16(data, n, min, shift, hist):
    r""" same for 16-bin histogram"""
    return _swigfaiss.simd_histogram_16(data, n, min, shift, hist)
class PartitionStats(object):
    """SWIG proxy for the C++ ``PartitionStats`` struct — presumably cycle counters for the partitioning code; see the C++ header for details."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Counter fields (names kept exactly as generated, including the
    # 'bissect' spelling, since they mirror the C++ member names).
    bissect_cycles = property(_swigfaiss.PartitionStats_bissect_cycles_get, _swigfaiss.PartitionStats_bissect_cycles_set)
    compress_cycles = property(_swigfaiss.PartitionStats_compress_cycles_get, _swigfaiss.PartitionStats_compress_cycles_set)
    def __init__(self):
        _swigfaiss.PartitionStats_swiginit(self, _swigfaiss.new_PartitionStats())
    def reset(self):
        return _swigfaiss.PartitionStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_PartitionStats

# Register PartitionStats in _swigfaiss:
_swigfaiss.PartitionStats_swigregister(PartitionStats)
# Binary <-> float vector conversion helpers (implemented in the C++ core).
def bitvec_print(b, d):
    return _swigfaiss.bitvec_print(b, d)

def fvecs2bitvecs(x, b, d, n):
    return _swigfaiss.fvecs2bitvecs(x, b, d, n)

def bitvecs2fvecs(b, x, d, n):
    return _swigfaiss.bitvecs2fvecs(b, x, d, n)

def fvec2bitvec(x, b, d):
    return _swigfaiss.fvec2bitvec(x, b, d)

def bitvec_shuffle(n, da, db, order, a, b):
    r""" Shuffle the bits from b(i, j) := a(i, order[j])"""
    return _swigfaiss.bitvec_shuffle(n, da, db, order, a, b)
class BitstringWriter(object):
    """SWIG proxy for the C++ bit-string writer; ``write(x, nbit)`` packs ``nbit`` bits of ``x`` into the code buffer at the current position."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # code: target byte buffer; code_size: its size; i: current bit offset.
    code = property(_swigfaiss.BitstringWriter_code_get, _swigfaiss.BitstringWriter_code_set)
    code_size = property(_swigfaiss.BitstringWriter_code_size_get, _swigfaiss.BitstringWriter_code_size_set)
    i = property(_swigfaiss.BitstringWriter_i_get, _swigfaiss.BitstringWriter_i_set)
    def __init__(self, code, code_size):
        _swigfaiss.BitstringWriter_swiginit(self, _swigfaiss.new_BitstringWriter(code, code_size))
    def write(self, x, nbit):
        return _swigfaiss.BitstringWriter_write(self, x, nbit)
    __swig_destroy__ = _swigfaiss.delete_BitstringWriter

# Register BitstringWriter in _swigfaiss:
_swigfaiss.BitstringWriter_swigregister(BitstringWriter)
# SWIG's container object for the wrapped module's global variables.
cvar = _swigfaiss.cvar
class BitstringReader(object):
    """SWIG proxy for the C++ bit-string reader; ``read(nbit)`` extracts the next ``nbit`` bits from the code buffer."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # code: source byte buffer; code_size: its size; i: current bit offset.
    code = property(_swigfaiss.BitstringReader_code_get, _swigfaiss.BitstringReader_code_set)
    code_size = property(_swigfaiss.BitstringReader_code_size_get, _swigfaiss.BitstringReader_code_size_set)
    i = property(_swigfaiss.BitstringReader_i_get, _swigfaiss.BitstringReader_i_set)
    def __init__(self, code, code_size):
        _swigfaiss.BitstringReader_swiginit(self, _swigfaiss.new_BitstringReader(code, code_size))
    def read(self, nbit):
        return _swigfaiss.BitstringReader_read(self, nbit)
    __swig_destroy__ = _swigfaiss.delete_BitstringReader

# Register BitstringReader in _swigfaiss:
_swigfaiss.BitstringReader_swigregister(BitstringReader)
# Hamming-distance primitives (implemented in the C++ core).
def popcount64(x):
    return _swigfaiss.popcount64(x)

def hammings(a, b, na, nb, nbytespercode, dis):
    r"""
    Compute a set of Hamming distances between na and nb binary vectors
    :type a: uint8_t
    :param a: size na * nbytespercode
    :type b: uint8_t
    :param b: size nb * nbytespercode
    :type nbytespercode: int
    :param nbytespercode: should be multiple of 8
    :type dis: int
    :param dis: output distances, size na * nb
    """
    return _swigfaiss.hammings(a, b, na, nb, nbytespercode, dis)

def hammings_knn_hc(ha, a, b, nb, ncodes, ordered):
    r"""
    Return the k smallest Hamming distances for a set of binary query vectors,
    using a max heap.
    :type a: uint8_t
    :param a: queries, size ha->nh * ncodes
    :type b: uint8_t
    :param b: database, size nb * ncodes
    :type nb: int
    :param nb: number of database vectors
    :type ncodes: int
    :param ncodes: size of the binary codes (bytes)
    :type ordered: int
    :param ordered: if != 0: order the results by decreasing distance
    (may be bottleneck for k/n > 0.01)
    """
    return _swigfaiss.hammings_knn_hc(ha, a, b, nb, ncodes, ordered)

def hammings_knn(ha, a, b, nb, ncodes, ordered):
    return _swigfaiss.hammings_knn(ha, a, b, nb, ncodes, ordered)

def hammings_knn_mc(a, b, na, nb, k, ncodes, distances, labels):
    r"""
    Return the k smallest Hamming distances for a set of binary query vectors,
    using counting max.
    :type a: uint8_t
    :param a: queries, size na * ncodes
    :type b: uint8_t
    :param b: database, size nb * ncodes
    :type na: int
    :param na: number of query vectors
    :type nb: int
    :param nb: number of database vectors
    :type k: int
    :param k: number of vectors/distances to return
    :type ncodes: int
    :param ncodes: size of the binary codes (bytes)
    :type distances: int
    :param distances: output distances from each query vector to its k nearest
    neighbors
    :type labels: int
    :param labels: output ids of the k nearest neighbors to each query vector
    """
    return _swigfaiss.hammings_knn_mc(a, b, na, nb, k, ncodes, distances, labels)

def hamming_range_search(a, b, na, nb, radius, ncodes, result):
    r""" same as hammings_knn except we are doing a range search with radius"""
    return _swigfaiss.hamming_range_search(a, b, na, nb, radius, ncodes, result)

def hamming_count_thres(bs1, bs2, n1, n2, ht, ncodes, nptr):
    return _swigfaiss.hamming_count_thres(bs1, bs2, n1, n2, ht, ncodes, nptr)

def match_hamming_thres(bs1, bs2, n1, n2, ht, ncodes, idx, dis):
    return _swigfaiss.match_hamming_thres(bs1, bs2, n1, n2, ht, ncodes, idx, dis)

def crosshamming_count_thres(dbs, n, ht, ncodes, nptr):
    return _swigfaiss.crosshamming_count_thres(dbs, n, ht, ncodes, nptr)
# GPU and runtime introspection helpers (thin delegations to the compiled core).
def get_num_gpus():
    return _swigfaiss.get_num_gpus()

def gpu_profiler_start():
    return _swigfaiss.gpu_profiler_start()

def gpu_profiler_stop():
    return _swigfaiss.gpu_profiler_stop()

def gpu_sync_all_devices():
    return _swigfaiss.gpu_sync_all_devices()

def get_compile_options():
    r""" get compile options"""
    return _swigfaiss.get_compile_options()

def getmillisecs():
    r""" ms elapsed since some arbitrary epoch"""
    return _swigfaiss.getmillisecs()

def get_mem_usage_kb():
    r""" get current RSS usage in kB"""
    return _swigfaiss.get_mem_usage_kb()

def get_cycles():
    return _swigfaiss.get_cycles()
def fvec_madd(n, a, bf, b, c):
    r"""
    compute c := a + bf * b for a, b and c tables
    :type n: int
    :param n: size of the tables
    :type a: float
    :param a: size n
    :type b: float
    :param b: size n
    :type c: float
    :param c: result table, size n
    """
    return _swigfaiss.fvec_madd(n, a, bf, b, c)

def fvec_madd_and_argmin(n, a, bf, b, c):
    r"""
    same as fvec_madd, also return index of the min of the result table
    :rtype: int
    :return: index of the min of table c
    """
    return _swigfaiss.fvec_madd_and_argmin(n, a, bf, b, c)

def reflection(u, x, n, d, nu):
    return _swigfaiss.reflection(u, x, n, d, nu)

def matrix_qr(m, n, a):
    r"""
    compute the Q of the QR decomposition for m > n
    :type a: float
    :param a: size n * m: input matrix and output Q
    """
    return _swigfaiss.matrix_qr(m, n, a)

def ranklist_handle_ties(k, idx, dis):
    r""" distances are supposed to be sorted. Sorts indices with same distance"""
    return _swigfaiss.ranklist_handle_ties(k, idx, dis)

def ranklist_intersection_size(k1, v1, k2, v2):
    r"""
    count the number of common elements between v1 and v2
    algorithm = sorting + bisection to avoid double-counting duplicates
    """
    return _swigfaiss.ranklist_intersection_size(k1, v1, k2, v2)
def merge_result_table_with(n, k, I0, D0, I1, D1, keep_min=True, translation=0):
    r"""
    merge a result table into another one
    :type I0: int
    :param I0:, D0 first result table, size (n, k)
    :type I1: int
    :param I1:, D1 second result table, size (n, k)
    :type keep_min: boolean, optional
    :param keep_min: if true, keep min values, otherwise keep max
    :type translation: int, optional
    :param translation: add this value to all I1's indexes
    :rtype: int
    :return: nb of values that were taken from the second table
    """
    return _swigfaiss.merge_result_table_with(n, k, I0, D0, I1, D1, keep_min, translation)

def imbalance_factor(*args):
    r"""
    *Overload 1:*
    a balanced assignment has a IF of 1
    |
    *Overload 2:*
    same, takes a histogram as input
    """
    return _swigfaiss.imbalance_factor(*args)

def fvec_argsort(n, vals, perm):
    # fills ``perm`` with an argsort permutation of ``vals`` — presumably
    # ascending; see the C++ declaration to confirm
    return _swigfaiss.fvec_argsort(n, vals, perm)

def fvec_argsort_parallel(n, vals, perm):
    # parallel variant of fvec_argsort
    return _swigfaiss.fvec_argsort_parallel(n, vals, perm)

def ivec_hist(n, v, vmax, hist):
    r""" compute histogram on v"""
    return _swigfaiss.ivec_hist(n, v, vmax, hist)

def bincode_hist(n, nbits, codes, hist):
    r"""
    Compute histogram of bits on a code array
    :type codes: uint8_t
    :param codes: size(n, nbits / 8)
    :type hist: int
    :param hist: size(nbits): nb of 1s in the array of codes
    """
    return _swigfaiss.bincode_hist(n, nbits, codes, hist)

def ivec_checksum(n, a):
    r""" compute a checksum on a table."""
    return _swigfaiss.ivec_checksum(n, a)
def fvecs_maybe_subsample(d, n, nmax, x, verbose=False, seed=1234):
    r"""
    random subsamples a set of vectors if there are too many of them
    :type d: int
    :param d: dimension of the vectors
    :type n: int
    :param n: on input: nb of input vectors, output: nb of output vectors
    :type nmax: int
    :param nmax: max nb of vectors to keep
    :type x: float
    :param x: input array, size *n-by-d
    :type seed: int, optional
    :param seed: random seed to use for sampling
    :rtype: float
    :return: x or an array allocated with new [] with *n vectors
    """
    return _swigfaiss.fvecs_maybe_subsample(d, n, nmax, x, verbose, seed)

def binary_to_real(d, x_in, x_out):
    r"""
    Convert binary vector to +1/-1 valued float vector.
    :type d: int
    :param d: dimension of the vector (multiple of 8)
    :type x_in: uint8_t
    :param x_in: input binary vector (uint8_t table of size d / 8)
    :type x_out: float
    :param x_out: output float vector (float table of size d)
    """
    return _swigfaiss.binary_to_real(d, x_in, x_out)

def real_to_binary(d, x_in, x_out):
    r"""
    Convert float vector to binary vector. Components > 0 are converted to 1,
    others to 0.
    :type d: int
    :param d: dimension of the vector (multiple of 8)
    :type x_in: float
    :param x_in: input float vector (float table of size d)
    :type x_out: uint8_t
    :param x_out: output binary vector (uint8_t table of size d / 8)
    """
    return _swigfaiss.real_to_binary(d, x_in, x_out)

def hash_bytes(bytes, n):
    r""" A reasonable hashing function"""
    # NOTE: ``bytes`` shadows the builtin type; the parameter name is
    # generated by SWIG and must stay as-is.
    return _swigfaiss.hash_bytes(bytes, n)

def check_openmp():
    r""" Whether OpenMP annotations were respected."""
    return _swigfaiss.check_openmp()
def fvec_L2sqr(x, y, d):
r""" Squared L2 distance between two vectors"""
return _swigfaiss.fvec_L2sqr(x, y, d)
def fvec_inner_product(x, y, d):
r""" inner product"""
return _swigfaiss.fvec_inner_product(x, y, d)
def fvec_L1(x, y, d):
r""" L1 distance"""
return _swigfaiss.fvec_L1(x, y, d)
def fvec_Linf(x, y, d):
r""" infinity distance"""
return _swigfaiss.fvec_Linf(x, y, d)
def pairwise_L2sqr(d, nq, xq, nb, xb, dis, ldq=-1, ldb=-1, ldd=-1):
    r"""
    Compute pairwise squared L2 distances between sets of vectors.

    :type d: int
    :param d: dimension of the vectors
    :type nq: int
    :param nq: nb of query vectors
    :type nb: int
    :param nb: nb of database vectors
    :type xq: float
    :param xq: query vectors (size nq * d)
    :type xb: float
    :param xb: database vectors (size nb * d)
    :type dis: float
    :param dis: output distances (size nq * nb)
    :param ldq,ldb,ldd: strides for the matrices (-1: use default, i.e. the
        row length of the corresponding matrix)
    """
    return _swigfaiss.pairwise_L2sqr(d, nq, xq, nb, xb, dis, ldq, ldb, ldd)
# The following wrappers have no extracted doxygen docstring; parameter
# semantics are defined by the corresponding C++ functions in
# faiss/utils/distances.h (see that header for details).
def fvec_inner_products_ny(ip, x, y, d, ny):
    # NOTE(review): presumably inner products of x against ny vectors y,
    # written to ip — confirm against faiss/utils/distances.h.
    return _swigfaiss.fvec_inner_products_ny(ip, x, y, d, ny)
def fvec_L2sqr_ny(dis, x, y, d, ny):
    return _swigfaiss.fvec_L2sqr_ny(dis, x, y, d, ny)
def fvec_L2sqr_ny_nearest(distances_tmp_buffer, x, y, d, ny):
    return _swigfaiss.fvec_L2sqr_ny_nearest(distances_tmp_buffer, x, y, d, ny)
def fvec_L2sqr_ny_nearest_y_transposed(distances_tmp_buffer, x, y, y_sqlen, d, d_offset, ny):
    return _swigfaiss.fvec_L2sqr_ny_nearest_y_transposed(distances_tmp_buffer, x, y, y_sqlen, d, d_offset, ny)
def fvec_norm_L2sqr(x, d):
    r""" Squared norm of a vector"""
    return _swigfaiss.fvec_norm_L2sqr(x, d)
def fvec_norms_L2(norms, x, d, nx):
    r"""
    Compute the L2 norms for a set of vectors.

    :type norms: float
    :param norms: output norms, size nx
    :type x: float
    :param x: set of vectors, size nx * d
    """
    return _swigfaiss.fvec_norms_L2(norms, x, d, nx)
def fvec_norms_L2sqr(norms, x, d, nx):
    r""" Same as fvec_norms_L2, but computes squared norms"""
    return _swigfaiss.fvec_norms_L2sqr(norms, x, d, nx)
def fvec_renorm_L2(d, nx, x):
    return _swigfaiss.fvec_renorm_L2(d, nx, x)
def inner_product_to_L2sqr(dis, nr1, nr2, n1, n2):
    return _swigfaiss.inner_product_to_L2sqr(dis, nr1, nr2, n1, n2)
def fvec_add(*args):
    r"""
    *Overload 1:*
    compute c := a + b for vectors
    c and a can overlap, c and b can overlap

    :type a: float
    :param a: size d
    :type b: float
    :param b: size d
    :type c: float
    :param c: size d
    |
    *Overload 2:*
    compute c := a + b for a, c vectors and b a scalar
    c and a can overlap

    :type a: float
    :param a: size d
    :type c: float
    :param c: size d
    """
    # Overload resolution is performed by the SWIG dispatch in _swigfaiss.
    return _swigfaiss.fvec_add(*args)
def fvec_sub(d, a, b, c):
    r"""
    compute c := a - b for vectors
    c and a can overlap, c and b can overlap

    :type a: float
    :param a: size d
    :type b: float
    :param b: size d
    :type c: float
    :param c: size d
    """
    return _swigfaiss.fvec_sub(d, a, b, c)
# Indexed (gather) variants: distances are computed against vectors selected
# by an id table rather than a contiguous range.
def fvec_inner_products_by_idx(ip, x, y, ids, d, nx, ny):
    return _swigfaiss.fvec_inner_products_by_idx(ip, x, y, ids, d, nx, ny)
def fvec_L2sqr_by_idx(dis, x, y, ids, d, nx, ny):
    return _swigfaiss.fvec_L2sqr_by_idx(dis, x, y, ids, d, nx, ny)
def pairwise_indexed_L2sqr(d, n, x, ix, y, iy, dis):
    r"""
    compute dis[j] = L2sqr(x[ix[j]], y[iy[j]]) forall j=0..n-1

    :type x: float
    :param x: size (max(ix) + 1, d)
    :type y: float
    :param y: size (max(iy) + 1, d)
    :type ix: int
    :param ix: size n
    :type iy: int
    :param iy: size n
    :type dis: float
    :param dis: size n
    """
    return _swigfaiss.pairwise_indexed_L2sqr(d, n, x, ix, y, iy, dis)
def pairwise_indexed_inner_product(d, n, x, ix, y, iy, dis):
    # Same indexing scheme as pairwise_indexed_L2sqr, with inner product.
    return _swigfaiss.pairwise_indexed_inner_product(d, n, x, ix, y, iy, dis)
def knn_inner_product(*args):
    r"""
    *Overload 1:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, w.r.t to max inner product.

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type res: :py:class:`float_minheap_array_t`
    :param res: result heap structure, which also provides k. Sorted on output
    |
    *Overload 2:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the inner product metric.

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type distances: float
    :param distances: output distances, size nq * k
    :type indexes: int
    :param indexes: output vector ids, size nq * k
    |
    *Overload 3:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the inner product metric.

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type distances: float
    :param distances: output distances, size nq * k
    :type indexes: int
    :param indexes: output vector ids, size nq * k
    """
    # Overload resolution is performed by the SWIG dispatch in _swigfaiss.
    return _swigfaiss.knn_inner_product(*args)
def knn_L2sqr(*args):
    r"""
    *Overload 1:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the L2 distance

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type res: :py:class:`float_maxheap_array_t`
    :param res: result heap structure, which also provides k. Sorted on output
    :type y_norm2: float, optional
    :param y_norm2: (optional) norms for the y vectors (nullptr or size ny)
    :type sel: :py:class:`IDSelector`, optional
    :param sel: search in this subset of vectors
    |
    *Overload 2:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the L2 distance

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type distances: float
    :param distances: output distances, size nq * k
    :type indexes: int
    :param indexes: output vector ids, size nq * k
    :type y_norm2: float, optional
    :param y_norm2: (optional) norms for the y vectors (nullptr or size ny)
    :type sel: :py:class:`IDSelector`, optional
    :param sel: search in this subset of vectors
    |
    *Overload 3:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the L2 distance

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type distances: float
    :param distances: output distances, size nq * k
    :type indexes: int
    :param indexes: output vector ids, size nq * k
    :type y_norm2: float, optional
    :param y_norm2: (optional) norms for the y vectors (nullptr or size ny)
    :param sel: search in this subset of vectors
    |
    *Overload 4:*
    Return the k nearest neighbors of each of the nx vectors x among the ny
    vector y, for the L2 distance

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type distances: float
    :param distances: output distances, size nq * k
    :type indexes: int
    :param indexes: output vector ids, size nq * k
    :param y_norm2: (optional) norms for the y vectors (nullptr or size ny)
    :param sel: search in this subset of vectors
    """
    # Overload resolution is performed by the SWIG dispatch in _swigfaiss.
    return _swigfaiss.knn_L2sqr(*args)
def knn_inner_products_by_idx(x, y, subset, d, nx, nsubset, k, vals, ids, ld_ids=-1):
    r"""
    Find the max inner product neighbors for nx queries in a set of ny vectors
    indexed by ids. May be useful for re-ranking a pre-selected vector list

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size (max(ids) + 1) * d
    :type ids: int
    :param ids: subset of database vectors to consider, size (nx, nsubset)
    :param res: result structure
    :type ld_ids: int, optional
    :param ld_ids: stride for the ids array. -1: use nsubset, 0: all queries
        process the same subset
    """
    return _swigfaiss.knn_inner_products_by_idx(x, y, subset, d, nx, nsubset, k, vals, ids, ld_ids)
def knn_L2sqr_by_idx(x, y, subset, d, nx, nsubset, k, vals, ids, ld_subset=-1):
    r"""
    Find the nearest neighbors for nx queries in a set of ny vectors
    indexed by ids. May be useful for re-ranking a pre-selected vector list

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size (max(ids) + 1) * d
    :type subset: int
    :param subset: subset of database vectors to consider, size (nx, nsubset)
    :param res: result structure
    :type ld_subset: int, optional
    :param ld_subset: stride for the subset array. -1: use nsubset, 0: all queries
        process the same subset
    """
    return _swigfaiss.knn_L2sqr_by_idx(x, y, subset, d, nx, nsubset, k, vals, ids, ld_subset)
def range_search_L2sqr(x, y, d, nx, ny, radius, result, sel=None):
    r"""
    Return all neighbors of the nx vectors x among the ny vectors y
    that lie within `radius`, for the L2 distance.

    :type x: float
    :param x: query vectors, size nx * d
    :type y: float
    :param y: database vectors, size ny * d
    :type radius: float
    :param radius: search radius around the x vectors
    :type result: :py:class:`RangeSearchResult`
    :param result: result structure
    """
    return _swigfaiss.range_search_L2sqr(x, y, d, nx, ny, radius, result, sel)
def range_search_inner_product(x, y, d, nx, ny, radius, result, sel=None):
    r""" Same as range_search_L2sqr for the inner product similarity"""
    return _swigfaiss.range_search_inner_product(x, y, d, nx, ny, radius, result, sel)
def compute_PQ_dis_tables_dsub2(d, ksub, centroids, nx, x, is_inner_product, dis_tables):
    r""" Specialized function for PQ2 (product quantizer distance tables)"""
    return _swigfaiss.compute_PQ_dis_tables_dsub2(d, ksub, centroids, nx, x, is_inner_product, dis_tables)
class RandomGenerator(object):
    r""" Random generator that can be used in multithreaded contexts.

    SWIG proxy for the faiss C++ RandomGenerator class.
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # underlying mersenne twister state
    mt = property(_swigfaiss.RandomGenerator_mt_get, _swigfaiss.RandomGenerator_mt_set)
    def rand_int64(self):
        r""" random int64_t"""
        return _swigfaiss.RandomGenerator_rand_int64(self)
    def rand_int(self, *args):
        r"""
        *Overload 1:*
        random positive integer
        |
        *Overload 2:*
        generate random integer between 0 and max-1
        """
        return _swigfaiss.RandomGenerator_rand_int(self, *args)
    def rand_float(self):
        r""" random float between 0 and 1"""
        return _swigfaiss.RandomGenerator_rand_float(self)
    def rand_double(self):
        # random double; no doxygen comment was extracted for this method
        return _swigfaiss.RandomGenerator_rand_double(self)
    def __init__(self, seed=1234):
        _swigfaiss.RandomGenerator_swiginit(self, _swigfaiss.new_RandomGenerator(seed))
    __swig_destroy__ = _swigfaiss.delete_RandomGenerator
# Register RandomGenerator in _swigfaiss:
_swigfaiss.RandomGenerator_swigregister(RandomGenerator)
# Bulk random-fill helpers; all take a seed so results are reproducible.
# Parameter semantics are defined in faiss/utils/random.h.
def float_rand(x, n, seed):
    return _swigfaiss.float_rand(x, n, seed)
def float_randn(x, n, seed):
    # normal-distributed variant (see faiss C++ declaration)
    return _swigfaiss.float_randn(x, n, seed)
def int64_rand(x, n, seed):
    return _swigfaiss.int64_rand(x, n, seed)
def byte_rand(x, n, seed):
    return _swigfaiss.byte_rand(x, n, seed)
def int64_rand_max(x, n, max, seed):
    # NOTE: `max` shadows the builtin; name is fixed by the generated binding.
    return _swigfaiss.int64_rand_max(x, n, max, seed)
def rand_perm(perm, n, seed):
    return _swigfaiss.rand_perm(perm, n, seed)
def rand_smooth_vectors(n, d, x, seed):
    return _swigfaiss.rand_smooth_vectors(n, d, x, seed)
# Metric types mirrored from the faiss C++ MetricType enum. The bare r"""…"""
# strings below are doxygen comments carried over verbatim by the SWIG
# generator; some of them are attached to the wrong constant (extraction
# artifact), so rely on the # comments here instead.
METRIC_INNER_PRODUCT = _swigfaiss.METRIC_INNER_PRODUCT  # maximum inner product search
r""" maximum inner product search"""
METRIC_L2 = _swigfaiss.METRIC_L2  # squared L2 search
r""" squared L2 search"""
METRIC_L1 = _swigfaiss.METRIC_L1  # L1 (aka cityblock)
r""" L1 (aka cityblock)"""
METRIC_Linf = _swigfaiss.METRIC_Linf  # infinity distance
r""" infinity distance"""
METRIC_Lp = _swigfaiss.METRIC_Lp  # L_p distance, p is given by a faiss::Index
r""" L_p distance, p is given by a faiss::Index"""
# Additional metrics defined in scipy.spatial.distance.
METRIC_Canberra = _swigfaiss.METRIC_Canberra
r"""
metric_arg
some additional metrics defined in scipy.spatial.distance
"""
METRIC_BrayCurtis = _swigfaiss.METRIC_BrayCurtis
METRIC_JensenShannon = _swigfaiss.METRIC_JensenShannon
# Library version numbers exported from the C++ headers. The copyright
# string below is a doxygen extraction artifact, not documentation of
# FAISS_VERSION_MAJOR.
FAISS_VERSION_MAJOR = _swigfaiss.FAISS_VERSION_MAJOR
r"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
FAISS_VERSION_MINOR = _swigfaiss.FAISS_VERSION_MINOR
FAISS_VERSION_PATCH = _swigfaiss.FAISS_VERSION_PATCH
class SearchParameters(object):
    r"""
    Parent class for the optional search parameters.
    Sub-classes with additional search parameters should inherit this class.
    Ownership of the object fields is always to the caller.
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    sel = property(_swigfaiss.SearchParameters_sel_get, _swigfaiss.SearchParameters_sel_set, doc=r""" if non-null, only these IDs will be considered during search.""")
    __swig_destroy__ = _swigfaiss.delete_SearchParameters
    def __init__(self):
        _swigfaiss.SearchParameters_swiginit(self, _swigfaiss.new_SearchParameters())
# Register SearchParameters in _swigfaiss:
_swigfaiss.SearchParameters_swigregister(SearchParameters)
class Index(object):
    r"""
    Abstract structure for an index, supports adding vectors and searching them.

    All vectors provided at add or search time are 32-bit float arrays,
    although the internal representation may vary.
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    d = property(_swigfaiss.Index_d_get, _swigfaiss.Index_d_set, doc=r""" vector dimension""")
    ntotal = property(_swigfaiss.Index_ntotal_get, _swigfaiss.Index_ntotal_set, doc=r""" total nb of indexed vectors""")
    verbose = property(_swigfaiss.Index_verbose_get, _swigfaiss.Index_verbose_set, doc=r""" verbosity level""")
    is_trained = property(_swigfaiss.Index_is_trained_get, _swigfaiss.Index_is_trained_set, doc=r"""
    set if the Index does not require training, or if training is
    done already
    """)
    metric_type = property(_swigfaiss.Index_metric_type_get, _swigfaiss.Index_metric_type_set, doc=r""" type of metric this index uses for search""")
    metric_arg = property(_swigfaiss.Index_metric_arg_get, _swigfaiss.Index_metric_arg_set, doc=r""" argument of the metric type""")
    __swig_destroy__ = _swigfaiss.delete_Index
    def train(self, n, x):
        r"""
        Perform training on a representative set of vectors

        :type n: int
        :param n: nb of training vectors
        :type x: float
        :param x: training vectors, size n * d
        """
        return _swigfaiss.Index_train(self, n, x)
    def add(self, n, x):
        r"""
        Add n vectors of dimension d to the index.

        Vectors are implicitly assigned labels ntotal .. ntotal + n - 1
        This function slices the input vectors in chunks smaller than
        blocksize_add and calls add_core.

        :type x: float
        :param x: input matrix, size n * d
        """
        return _swigfaiss.Index_add(self, n, x)
    def add_with_ids(self, n, x, xids):
        r"""
        Same as add, but stores xids instead of sequential ids.

        The default implementation fails with an assertion, as it is
        not supported by all indexes.

        :type xids: int
        :param xids: if non-null, ids to store for the vectors (size n)
        """
        return _swigfaiss.Index_add_with_ids(self, n, x, xids)
    def search(self, n, x, k, distances, labels, params=None):
        r"""
        query n vectors of dimension d to the index.

        return at most k vectors. If there are not enough results for a
        query, the result array is padded with -1s.

        :type x: float
        :param x: input vectors to search, size n * d
        :type labels: int
        :param labels: output labels of the NNs, size n*k
        :type distances: float
        :param distances: output pairwise distances, size n*k
        """
        return _swigfaiss.Index_search(self, n, x, k, distances, labels, params)
    def range_search(self, n, x, radius, result, params=None):
        r"""
        query n vectors of dimension d to the index.

        return all vectors with distance < radius. Note that many
        indexes do not implement the range_search (only the k-NN search
        is mandatory).

        :type x: float
        :param x: input vectors to search, size n * d
        :type radius: float
        :param radius: search radius
        :type result: :py:class:`RangeSearchResult`
        :param result: result table
        """
        return _swigfaiss.Index_range_search(self, n, x, radius, result, params)
    def assign(self, n, x, labels, k=1):
        r"""
        return the indexes of the k vectors closest to the query x.

        This function is identical as search but only return labels of neighbors.

        :type x: float
        :param x: input vectors to search, size n * d
        :type labels: int
        :param labels: output labels of the NNs, size n*k
        """
        return _swigfaiss.Index_assign(self, n, x, labels, k)
    def reset(self):
        r""" removes all elements from the database."""
        return _swigfaiss.Index_reset(self)
    def remove_ids(self, sel):
        r"""
        removes IDs from the index. Not supported by all
        indexes. Returns the number of elements removed.
        """
        return _swigfaiss.Index_remove_ids(self, sel)
    def reconstruct(self, key, recons):
        r"""
        Reconstruct a stored vector (or an approximation if lossy coding)

        this function may not be defined for some indexes

        :type key: int
        :param key: id of the vector to reconstruct
        :type recons: float
        :param recons: reconstructed vector (size d)
        """
        return _swigfaiss.Index_reconstruct(self, key, recons)
    def reconstruct_batch(self, n, keys, recons):
        r"""
        Reconstruct several stored vectors (or an approximation if lossy coding)

        this function may not be defined for some indexes

        :type n: int
        :param n: number of vectors to reconstruct
        :type keys: int
        :param keys: ids of the vectors to reconstruct (size n)
        :type recons: float
        :param recons: reconstructed vector (size n * d)
        """
        return _swigfaiss.Index_reconstruct_batch(self, n, keys, recons)
    def reconstruct_n(self, i0, ni, recons):
        r"""
        Reconstruct vectors i0 to i0 + ni - 1

        this function may not be defined for some indexes

        :type recons: float
        :param recons: reconstructed vector (size ni * d)
        """
        return _swigfaiss.Index_reconstruct_n(self, i0, ni, recons)
    def search_and_reconstruct(self, n, x, k, distances, labels, recons, params=None):
        r"""
        Similar to search, but also reconstructs the stored vectors (or an
        approximation in the case of lossy coding) for the search results.

        If there are not enough results for a query, the resulting arrays
        is padded with -1s.

        :type recons: float
        :param recons: reconstructed vectors size (n, k, d)
        """
        return _swigfaiss.Index_search_and_reconstruct(self, n, x, k, distances, labels, recons, params)
    def compute_residual(self, x, residual, key):
        r"""
        Computes a residual vector after indexing encoding.

        The residual vector is the difference between a vector and the
        reconstruction that can be decoded from its representation in
        the index. The residual can be used for multiple-stage indexing
        methods, like IndexIVF's methods.

        :type x: float
        :param x: input vector, size d
        :type residual: float
        :param residual: output residual vector, size d
        :type key: int
        :param key: encoded index, as returned by search and assign
        """
        return _swigfaiss.Index_compute_residual(self, x, residual, key)
    def compute_residual_n(self, n, xs, residuals, keys):
        r"""
        Computes a residual vector after indexing encoding (batch form).
        Equivalent to calling compute_residual for each vector.

        The residual vector is the difference between a vector and the
        reconstruction that can be decoded from its representation in
        the index. The residual can be used for multiple-stage indexing
        methods, like IndexIVF's methods.

        :type n: int
        :param n: number of vectors
        :type xs: float
        :param xs: input vectors, size (n x d)
        :type residuals: float
        :param residuals: output residual vectors, size (n x d)
        :type keys: int
        :param keys: encoded index, as returned by search and assign
        """
        return _swigfaiss.Index_compute_residual_n(self, n, xs, residuals, keys)
    def get_distance_computer(self):
        r"""
        Get a DistanceComputer (defined in AuxIndexStructures) object
        for this kind of index.

        DistanceComputer is implemented for indexes that support random
        access of their vectors.
        """
        return _swigfaiss.Index_get_distance_computer(self)
    def sa_code_size(self):
        r""" size of the produced codes in bytes"""
        return _swigfaiss.Index_sa_code_size(self)
    def sa_encode(self, n, x, bytes):
        r"""
        encode a set of vectors

        :type n: int
        :param n: number of vectors
        :type x: float
        :param x: input vectors, size n * d
        :type bytes: uint8_t
        :param bytes: output encoded vectors, size n * sa_code_size()
        """
        return _swigfaiss.Index_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        r"""
        decode a set of vectors

        :type n: int
        :param n: number of vectors
        :type bytes: uint8_t
        :param bytes: input encoded vectors, size n * sa_code_size()
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.Index_sa_decode(self, n, bytes, x)
    def merge_from(self, otherIndex, add_id=0):
        r"""
        moves the entries from another dataset to self.

        On output, other is empty.
        add_id is added to all moved ids
        (for sequential ids, this would be this->ntotal)
        """
        return _swigfaiss.Index_merge_from(self, otherIndex, add_id)
    def check_compatible_for_merge(self, otherIndex):
        r"""
        check that the two indexes are compatible (ie, they are
        trained in the same way and have the same
        parameters). Otherwise throw.
        """
        return _swigfaiss.Index_check_compatible_for_merge(self, otherIndex)
# Register Index in _swigfaiss:
_swigfaiss.Index_swigregister(Index)
class DistanceComputer(object):
    r"""
    Abstract distance computer: computes distances from a fixed query
    vector to stored vectors of an index.

    (The original generated docstring here was a misplaced copyright
    notice extracted by SWIG from the C++ file header.)
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    def set_query(self, x):
        r"""
        called before computing distances. Pointer x should remain valid
        while operator () is called
        """
        return _swigfaiss.DistanceComputer_set_query(self, x)
    def __call__(self, i):
        r""" compute distance of vector i to current query"""
        return _swigfaiss.DistanceComputer___call__(self, i)
    def symmetric_dis(self, i, j):
        r""" compute distance between two stored vectors"""
        return _swigfaiss.DistanceComputer_symmetric_dis(self, i, j)
    __swig_destroy__ = _swigfaiss.delete_DistanceComputer
# Register DistanceComputer in _swigfaiss:
_swigfaiss.DistanceComputer_swigregister(DistanceComputer)
class FlatCodesDistanceComputer(DistanceComputer):
    r""" DistanceComputer specialization that can compute distances directly
    against encoded vectors (flat codes)."""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    codes = property(_swigfaiss.FlatCodesDistanceComputer_codes_get, _swigfaiss.FlatCodesDistanceComputer_codes_set)
    code_size = property(_swigfaiss.FlatCodesDistanceComputer_code_size_get, _swigfaiss.FlatCodesDistanceComputer_code_size_set)
    def __call__(self, i):
        return _swigfaiss.FlatCodesDistanceComputer___call__(self, i)
    def distance_to_code(self, code):
        r""" compute distance of current query to an encoded vector"""
        return _swigfaiss.FlatCodesDistanceComputer_distance_to_code(self, code)
    __swig_destroy__ = _swigfaiss.delete_FlatCodesDistanceComputer
# Register FlatCodesDistanceComputer in _swigfaiss:
_swigfaiss.FlatCodesDistanceComputer_swigregister(FlatCodesDistanceComputer)
class IndexFlatCodes(Index):
    r"""
    Index that encodes all vectors as fixed-size codes (size code_size). Storage
    is in the codes vector
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    code_size = property(_swigfaiss.IndexFlatCodes_code_size_get, _swigfaiss.IndexFlatCodes_code_size_set)
    codes = property(_swigfaiss.IndexFlatCodes_codes_get, _swigfaiss.IndexFlatCodes_codes_set, doc=r""" encoded dataset, size ntotal * code_size""")
    def add(self, n, x):
        r""" default add uses sa_encode"""
        return _swigfaiss.IndexFlatCodes_add(self, n, x)
    def reset(self):
        return _swigfaiss.IndexFlatCodes_reset(self)
    def reconstruct_n(self, i0, ni, recons):
        r""" reconstruction using the codec interface"""
        return _swigfaiss.IndexFlatCodes_reconstruct_n(self, i0, ni, recons)
    def reconstruct(self, key, recons):
        return _swigfaiss.IndexFlatCodes_reconstruct(self, key, recons)
    def sa_code_size(self):
        return _swigfaiss.IndexFlatCodes_sa_code_size(self)
    def remove_ids(self, sel):
        r"""
        remove some ids. NB that because of the structure of the
        indexing structure, the semantics of this operation are
        different from the usual ones: the new ids are shifted
        """
        return _swigfaiss.IndexFlatCodes_remove_ids(self, sel)
    def get_FlatCodesDistanceComputer(self):
        r""" a FlatCodesDistanceComputer offers a distance_to_code method"""
        return _swigfaiss.IndexFlatCodes_get_FlatCodesDistanceComputer(self)
    def get_distance_computer(self):
        return _swigfaiss.IndexFlatCodes_get_distance_computer(self)
    def check_compatible_for_merge(self, otherIndex):
        return _swigfaiss.IndexFlatCodes_check_compatible_for_merge(self, otherIndex)
    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexFlatCodes_merge_from(self, otherIndex, add_id)
    __swig_destroy__ = _swigfaiss.delete_IndexFlatCodes
# Register IndexFlatCodes in _swigfaiss:
_swigfaiss.IndexFlatCodes_swigregister(IndexFlatCodes)
class IndexFlat(IndexFlatCodes):
    r""" Index that stores the full vectors and performs exhaustive search"""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexFlat_search(self, n, x, k, distances, labels, params)
    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexFlat_range_search(self, n, x, radius, result, params)
    def reconstruct(self, key, recons):
        return _swigfaiss.IndexFlat_reconstruct(self, key, recons)
    def compute_distance_subset(self, n, x, k, distances, labels):
        r"""
        compute distance with a subset of vectors

        :type x: float
        :param x: query vectors, size n * d
        :type labels: int
        :param labels: indices of the vectors that should be compared
            for each query vector, size n * k
        :type distances: float
        :param distances: corresponding output distances, size n * k
        """
        return _swigfaiss.IndexFlat_compute_distance_subset(self, n, x, k, distances, labels)
    def get_xb(self, *args):
        # accessor for the raw stored vectors
        return _swigfaiss.IndexFlat_get_xb(self, *args)
    def __init__(self, *args):
        _swigfaiss.IndexFlat_swiginit(self, _swigfaiss.new_IndexFlat(*args))
    def get_FlatCodesDistanceComputer(self):
        return _swigfaiss.IndexFlat_get_FlatCodesDistanceComputer(self)
    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexFlat_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexFlat_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexFlat
# Register IndexFlat in _swigfaiss:
_swigfaiss.IndexFlat_swigregister(IndexFlat)
class IndexFlatIP(IndexFlat):
    r""" IndexFlat specialized for the inner product metric."""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        _swigfaiss.IndexFlatIP_swiginit(self, _swigfaiss.new_IndexFlatIP(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexFlatIP
# Register IndexFlatIP in _swigfaiss:
_swigfaiss.IndexFlatIP_swigregister(IndexFlatIP)
class IndexFlatL2(IndexFlat):
    r""" IndexFlat specialized for the (squared) L2 metric."""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        _swigfaiss.IndexFlatL2_swiginit(self, _swigfaiss.new_IndexFlatL2(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexFlatL2
# Register IndexFlatL2 in _swigfaiss:
_swigfaiss.IndexFlatL2_swigregister(IndexFlatL2)
class IndexFlat1D(IndexFlatL2):
    r""" optimized version for 1D "vectors"."""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    continuous_update = property(_swigfaiss.IndexFlat1D_continuous_update_get, _swigfaiss.IndexFlat1D_continuous_update_set, doc=r""" is the permutation updated continuously?""")
    perm = property(_swigfaiss.IndexFlat1D_perm_get, _swigfaiss.IndexFlat1D_perm_set, doc=r""" sorted database indices""")
    def __init__(self, continuous_update=True):
        _swigfaiss.IndexFlat1D_swiginit(self, _swigfaiss.new_IndexFlat1D(continuous_update))
    def update_permutation(self):
        r"""
        if not continuous_update, call this between the last add and
        the first search
        """
        return _swigfaiss.IndexFlat1D_update_permutation(self)
    def add(self, n, x):
        return _swigfaiss.IndexFlat1D_add(self, n, x)
    def reset(self):
        return _swigfaiss.IndexFlat1D_reset(self)
    def search(self, n, x, k, distances, labels, params=None):
        r""" Warning: the distances returned are L1 not L2"""
        return _swigfaiss.IndexFlat1D_search(self, n, x, k, distances, labels, params)
    __swig_destroy__ = _swigfaiss.delete_IndexFlat1D
# Register IndexFlat1D in _swigfaiss:
_swigfaiss.IndexFlat1D_swigregister(IndexFlat1D)
class ClusteringParameters(object):
    r"""
    Class for the clustering parameters. Can be passed to the
    constructor of the Clustering object.
    """
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    niter = property(_swigfaiss.ClusteringParameters_niter_get, _swigfaiss.ClusteringParameters_niter_set, doc=r""" clustering iterations""")
    nredo = property(_swigfaiss.ClusteringParameters_nredo_get, _swigfaiss.ClusteringParameters_nredo_set, doc=r""" redo clustering this many times and keep best""")
    verbose = property(_swigfaiss.ClusteringParameters_verbose_get, _swigfaiss.ClusteringParameters_verbose_set)
    spherical = property(_swigfaiss.ClusteringParameters_spherical_get, _swigfaiss.ClusteringParameters_spherical_set, doc=r""" do we want normalized centroids?""")
    int_centroids = property(_swigfaiss.ClusteringParameters_int_centroids_get, _swigfaiss.ClusteringParameters_int_centroids_set, doc=r""" round centroids coordinates to integer""")
    update_index = property(_swigfaiss.ClusteringParameters_update_index_get, _swigfaiss.ClusteringParameters_update_index_set, doc=r""" re-train index after each iteration?""")
    frozen_centroids = property(_swigfaiss.ClusteringParameters_frozen_centroids_get, _swigfaiss.ClusteringParameters_frozen_centroids_set, doc=r"""
    use the centroids provided as input and do not
    change them during iterations
    """)
    min_points_per_centroid = property(_swigfaiss.ClusteringParameters_min_points_per_centroid_get, _swigfaiss.ClusteringParameters_min_points_per_centroid_set, doc=r""" below this number of training points per centroid, you get a warning""")
    max_points_per_centroid = property(_swigfaiss.ClusteringParameters_max_points_per_centroid_get, _swigfaiss.ClusteringParameters_max_points_per_centroid_set, doc=r""" to limit size of dataset""")
    seed = property(_swigfaiss.ClusteringParameters_seed_get, _swigfaiss.ClusteringParameters_seed_set, doc=r""" seed for the random number generator""")
    decode_block_size = property(_swigfaiss.ClusteringParameters_decode_block_size_get, _swigfaiss.ClusteringParameters_decode_block_size_set, doc=r""" how many vectors at a time to decode""")
    def __init__(self):
        r""" sets reasonable defaults"""
        _swigfaiss.ClusteringParameters_swiginit(self, _swigfaiss.new_ClusteringParameters())
    __swig_destroy__ = _swigfaiss.delete_ClusteringParameters
# Register ClusteringParameters in _swigfaiss:
_swigfaiss.ClusteringParameters_swigregister(ClusteringParameters)
class ClusteringIterationStats(object):
    r""" Statistics recorded for one k-means iteration (see Clustering)."""
    # Standard SWIG proxy plumbing: `thisown` tracks C++ object ownership.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    obj = property(_swigfaiss.ClusteringIterationStats_obj_get, _swigfaiss.ClusteringIterationStats_obj_set, doc=r""" objective values (sum of distances reported by index)""")
    time = property(_swigfaiss.ClusteringIterationStats_time_get, _swigfaiss.ClusteringIterationStats_time_set, doc=r""" seconds for iteration""")
    time_search = property(_swigfaiss.ClusteringIterationStats_time_search_get, _swigfaiss.ClusteringIterationStats_time_search_set, doc=r""" seconds for just search""")
    imbalance_factor = property(_swigfaiss.ClusteringIterationStats_imbalance_factor_get, _swigfaiss.ClusteringIterationStats_imbalance_factor_set, doc=r""" imbalance factor of iteration""")
    nsplit = property(_swigfaiss.ClusteringIterationStats_nsplit_get, _swigfaiss.ClusteringIterationStats_nsplit_set, doc=r""" number of cluster splits""")
    def __init__(self):
        _swigfaiss.ClusteringIterationStats_swiginit(self, _swigfaiss.new_ClusteringIterationStats())
    __swig_destroy__ = _swigfaiss.delete_ClusteringIterationStats
# Register ClusteringIterationStats in _swigfaiss:
_swigfaiss.ClusteringIterationStats_swigregister(ClusteringIterationStats)
class Clustering(ClusteringParameters):
    r"""
    K-means clustering based on assignment - centroid update iterations
    The clustering is based on an Index object that assigns training
    points to the centroids. Therefore, at each iteration the centroids
    are added to the index.
    On output, the centroids table is set to the latest version
    of the centroids and they are also added to the index. If the
    centroids table is not empty on input, it is also used for
    initialization.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    d = property(_swigfaiss.Clustering_d_get, _swigfaiss.Clustering_d_set, doc=r""" dimension of the vectors""")
    k = property(_swigfaiss.Clustering_k_get, _swigfaiss.Clustering_k_set, doc=r""" nb of centroids""")
    centroids = property(_swigfaiss.Clustering_centroids_get, _swigfaiss.Clustering_centroids_set, doc=r"""
    centroids (k * d)
    if centroids are set on input to train, they will be used as
    initialization
    """)
    iteration_stats = property(_swigfaiss.Clustering_iteration_stats_get, _swigfaiss.Clustering_iteration_stats_set, doc=r""" stats at every iteration of clustering""")

    def __init__(self, *args):
        # Forwards to the C++ overloaded constructors.
        _swigfaiss.Clustering_swiginit(self, _swigfaiss.new_Clustering(*args))

    def train(self, n, x, index, x_weights=None):
        r"""
        run k-means training
        :type n: int
        :param n: nb of training vectors
        :type x: float
        :param x: training vectors, size n * d
        :type index: :py:class:`Index`
        :param index: index used for assignment
        :type x_weights: float, optional
        :param x_weights: weight associated to each vector: NULL or size n
        """
        return _swigfaiss.Clustering_train(self, n, x, index, x_weights)

    def train_encoded(self, nx, x_in, codec, index, weights=None):
        r"""
        run with encoded vectors
        in addition to train()'s parameters takes a codec as parameter
        to decode the input vectors.
        :type codec: :py:class:`Index`
        :param codec: codec used to decode the vectors (nullptr =
                      vectors are in fact floats)
        """
        return _swigfaiss.Clustering_train_encoded(self, nx, x_in, codec, index, weights)

    def post_process_centroids(self):
        r"""
        Post-process the centroids after each centroid update.
        includes optional L2 normalization and nearest integer rounding
        """
        return _swigfaiss.Clustering_post_process_centroids(self)
    __swig_destroy__ = _swigfaiss.delete_Clustering

# Register Clustering in _swigfaiss:
_swigfaiss.Clustering_swigregister(Clustering)
class Clustering1D(Clustering):
    r"""
    Exact 1D clustering algorithm
    Since it does not use an index, it does not overload the train() function
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        # Forwards to the C++ overloaded constructors.
        _swigfaiss.Clustering1D_swiginit(self, _swigfaiss.new_Clustering1D(*args))

    def train_exact(self, n, x):
        r""" run the exact 1D clustering on n scalar values in x (no index involved)"""
        return _swigfaiss.Clustering1D_train_exact(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_Clustering1D

# Register Clustering1D in _swigfaiss:
_swigfaiss.Clustering1D_swigregister(Clustering1D)
class ProgressiveDimClusteringParameters(ClusteringParameters):
    r""" Parameters for ProgressiveDimClustering, extending the base ClusteringParameters."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    progressive_dim_steps = property(_swigfaiss.ProgressiveDimClusteringParameters_progressive_dim_steps_get, _swigfaiss.ProgressiveDimClusteringParameters_progressive_dim_steps_set, doc=r""" number of incremental steps""")
    apply_pca = property(_swigfaiss.ProgressiveDimClusteringParameters_apply_pca_get, _swigfaiss.ProgressiveDimClusteringParameters_apply_pca_set, doc=r""" apply PCA on input""")

    def __init__(self):
        # Delegates to the C++ default constructor (which sets the defaults).
        _swigfaiss.ProgressiveDimClusteringParameters_swiginit(self, _swigfaiss.new_ProgressiveDimClusteringParameters())
    __swig_destroy__ = _swigfaiss.delete_ProgressiveDimClusteringParameters

# Register ProgressiveDimClusteringParameters in _swigfaiss:
_swigfaiss.ProgressiveDimClusteringParameters_swigregister(ProgressiveDimClusteringParameters)
class ProgressiveDimIndexFactory(object):
    r""" generates an index suitable for clustering when called"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __call__(self, dim):
        r""" build and return an index for vectors of dimension dim; ownership transferred to caller"""
        return _swigfaiss.ProgressiveDimIndexFactory___call__(self, dim)
    __swig_destroy__ = _swigfaiss.delete_ProgressiveDimIndexFactory

    def __init__(self):
        # Delegates to the C++ default constructor.
        _swigfaiss.ProgressiveDimIndexFactory_swiginit(self, _swigfaiss.new_ProgressiveDimIndexFactory())

# Register ProgressiveDimIndexFactory in _swigfaiss:
_swigfaiss.ProgressiveDimIndexFactory_swigregister(ProgressiveDimIndexFactory)
class ProgressiveDimClustering(ProgressiveDimClusteringParameters):
    r"""
    K-means clustering with progressive dimensions used
    The clustering first happens in dim 1, then with exponentially increasing
    dimension until d (I steps). This is typically applied after a PCA
    transformation (optional). Reference:
    "Improved Residual Vector Quantization for High-dimensional Approximate
    Nearest Neighbor Search"
    Shicong Liu, Hongtao Lu, Junru Shao, AAAI'15
    https://arxiv.org/abs/1509.05195
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    d = property(_swigfaiss.ProgressiveDimClustering_d_get, _swigfaiss.ProgressiveDimClustering_d_set, doc=r""" dimension of the vectors""")
    k = property(_swigfaiss.ProgressiveDimClustering_k_get, _swigfaiss.ProgressiveDimClustering_k_set, doc=r""" nb of centroids""")
    centroids = property(_swigfaiss.ProgressiveDimClustering_centroids_get, _swigfaiss.ProgressiveDimClustering_centroids_set, doc=r""" centroids (k * d)""")
    iteration_stats = property(_swigfaiss.ProgressiveDimClustering_iteration_stats_get, _swigfaiss.ProgressiveDimClustering_iteration_stats_set, doc=r""" stats at every iteration of clustering""")

    def __init__(self, *args):
        # Forwards to the C++ overloaded constructors.
        _swigfaiss.ProgressiveDimClustering_swiginit(self, _swigfaiss.new_ProgressiveDimClustering(*args))

    def train(self, n, x, factory):
        r"""
        run the training on n vectors x; factory is a
        ProgressiveDimIndexFactory used to build the assignment index
        (presumably once per progressive-dim step - confirm against the
        C++ Clustering.h header)
        """
        return _swigfaiss.ProgressiveDimClustering_train(self, n, x, factory)
    __swig_destroy__ = _swigfaiss.delete_ProgressiveDimClustering

# Register ProgressiveDimClustering in _swigfaiss:
_swigfaiss.ProgressiveDimClustering_swigregister(ProgressiveDimClustering)
def kmeans_clustering(d, n, k, x, centroids):
    r"""
    simplified interface
    (for full control over the parameters, use the Clustering class above)
    :type d: int
    :param d: dimension of the data
    :type n: int
    :param n: nb of training vectors
    :type k: int
    :param k: nb of output centroids
    :type x: float
    :param x: training set (size n * d)
    :type centroids: float
    :param centroids: output centroids (size k * d)
    :rtype: float
    :return: final quantization error
    """
    return _swigfaiss.kmeans_clustering(d, n, k, x, centroids)
def pairwise_extra_distances(d, nq, xq, nb, xb, mt, metric_arg, dis, ldq=-1, ldb=-1, ldd=-1):
    r"""
    Compute all pairwise distances between the nq query vectors xq and the
    nb database vectors xb (dimension d) for metric type mt with parameter
    metric_arg, writing results into dis.
    NOTE(review): ldq/ldb/ldd appear to be the leading dimensions of the
    respective matrices, with -1 selecting the compact default - confirm
    against the C++ extra_distances.h header.
    """
    return _swigfaiss.pairwise_extra_distances(d, nq, xq, nb, xb, mt, metric_arg, dis, ldq, ldb, ldd)
def knn_extra_metrics(x, y, d, nx, ny, mt, metric_arg, res):
    r"""
    k-nearest-neighbor search between the nx vectors x and the ny vectors y
    (dimension d) for metric type mt with parameter metric_arg; results are
    written into the heap array res.
    NOTE(review): semantics inferred from parameter names - confirm against
    the C++ extra_distances.h header.
    """
    return _swigfaiss.knn_extra_metrics(x, y, d, nx, ny, mt, metric_arg, res)
def get_extra_distance_computer(d, mt, metric_arg, nb, xb):
    r"""
    get a DistanceComputer that refers to this type of distance and
    indexes a flat array of size nb
    :param d: dimension of the vectors
    :param mt: metric type
    :param metric_arg: parameter of the metric
    :param nb: nb of database vectors
    :param xb: database vectors, size nb * d
    """
    return _swigfaiss.get_extra_distance_computer(d, mt, metric_arg, nb, xb)
class Quantizer(object):
    r"""
    Abstract base class for quantizers (the constructor raises; use a
    concrete subclass such as ProductQuantizer or AdditiveQuantizer).
    A quantizer maps d-dimensional float vectors to codes of code_size
    bytes and back.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    d = property(_swigfaiss.Quantizer_d_get, _swigfaiss.Quantizer_d_set, doc=r""" size of the input vectors""")
    code_size = property(_swigfaiss.Quantizer_code_size_get, _swigfaiss.Quantizer_code_size_set, doc=r""" bytes per indexed vector""")

    def train(self, n, x):
        r"""
        Train the quantizer
        :type x: float
        :param x: training vectors, size n * d
        """
        return _swigfaiss.Quantizer_train(self, n, x)

    def compute_codes(self, x, codes, n):
        r"""
        Quantize a set of vectors
        :type x: float
        :param x: input vectors, size n * d
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        """
        return _swigfaiss.Quantizer_compute_codes(self, x, codes, n)

    def decode(self, code, x, n):
        r"""
        Decode a set of vectors
        :type code: uint8_t
        :param code: input codes, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.Quantizer_decode(self, code, x, n)
    __swig_destroy__ = _swigfaiss.delete_Quantizer

# Register Quantizer in _swigfaiss:
_swigfaiss.Quantizer_swigregister(Quantizer)
class ProductQuantizer(Quantizer):
    r""" Product Quantizer. Implemented only for METRIC_L2"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    M = property(_swigfaiss.ProductQuantizer_M_get, _swigfaiss.ProductQuantizer_M_set, doc=r""" number of subquantizers""")
    nbits = property(_swigfaiss.ProductQuantizer_nbits_get, _swigfaiss.ProductQuantizer_nbits_set, doc=r""" number of bits per quantization index""")
    dsub = property(_swigfaiss.ProductQuantizer_dsub_get, _swigfaiss.ProductQuantizer_dsub_set, doc=r""" dimensionality of each subvector""")
    ksub = property(_swigfaiss.ProductQuantizer_ksub_get, _swigfaiss.ProductQuantizer_ksub_set, doc=r""" number of centroids for each subquantizer""")
    verbose = property(_swigfaiss.ProductQuantizer_verbose_get, _swigfaiss.ProductQuantizer_verbose_set, doc=r""" verbose during training?""")
    # Train_* constants: initialization modes for train(), stored in train_type.
    Train_default = _swigfaiss.ProductQuantizer_Train_default
    Train_hot_start = _swigfaiss.ProductQuantizer_Train_hot_start
    r""" the centroids are already initialized"""
    Train_shared = _swigfaiss.ProductQuantizer_Train_shared
    r""" share dictionary across PQ segments"""
    Train_hypercube = _swigfaiss.ProductQuantizer_Train_hypercube
    r""" initialize centroids with nbits-D hypercube"""
    Train_hypercube_pca = _swigfaiss.ProductQuantizer_Train_hypercube_pca
    r""" initialize centroids with nbits-D hypercube"""
    train_type = property(_swigfaiss.ProductQuantizer_train_type_get, _swigfaiss.ProductQuantizer_train_type_set)
    cp = property(_swigfaiss.ProductQuantizer_cp_get, _swigfaiss.ProductQuantizer_cp_set, doc=r""" parameters used during clustering""")
    assign_index = property(_swigfaiss.ProductQuantizer_assign_index_get, _swigfaiss.ProductQuantizer_assign_index_set, doc=r"""
    if non-NULL, use this index for assignment (should be of size
    d / M)
    """)
    centroids = property(_swigfaiss.ProductQuantizer_centroids_get, _swigfaiss.ProductQuantizer_centroids_set, doc=r"""
    Centroid table, size M * ksub * dsub.
    Layout: (M, ksub, dsub)
    """)
    transposed_centroids = property(_swigfaiss.ProductQuantizer_transposed_centroids_get, _swigfaiss.ProductQuantizer_transposed_centroids_set, doc=r"""
    Transposed centroid table, size M * ksub * dsub.
    Layout: (dsub, M, ksub)
    """)
    centroids_sq_lengths = property(_swigfaiss.ProductQuantizer_centroids_sq_lengths_get, _swigfaiss.ProductQuantizer_centroids_sq_lengths_set, doc=r"""
    Squared lengths of centroids, size M * ksub
    Layout: (M, ksub)
    """)

    def get_centroids(self, m, i):
        r""" return the centroids associated with subvector m"""
        return _swigfaiss.ProductQuantizer_get_centroids(self, m, i)

    def train(self, n, x):
        r""" train the M subquantizers on n vectors x (behavior controlled by train_type)"""
        return _swigfaiss.ProductQuantizer_train(self, n, x)

    def __init__(self, *args):
        # Forwards to the C++ overloaded constructors.
        _swigfaiss.ProductQuantizer_swiginit(self, _swigfaiss.new_ProductQuantizer(*args))

    def set_derived_values(self):
        r""" compute derived values when d, M and nbits have been set"""
        return _swigfaiss.ProductQuantizer_set_derived_values(self)

    def set_params(self, centroids, m):
        r""" Define the centroids for subquantizer m"""
        return _swigfaiss.ProductQuantizer_set_params(self, centroids, m)

    def compute_code(self, x, code):
        r""" Quantize one vector with the product quantizer"""
        return _swigfaiss.ProductQuantizer_compute_code(self, x, code)

    def compute_codes(self, x, codes, n):
        r""" same as compute_code for several vectors"""
        return _swigfaiss.ProductQuantizer_compute_codes(self, x, codes, n)

    def compute_codes_with_assign_index(self, x, codes, n):
        r"""
        speed up code assignment using assign_index
        (non-const because the index is changed)
        """
        return _swigfaiss.ProductQuantizer_compute_codes_with_assign_index(self, x, codes, n)

    def decode(self, *args):
        r""" decode one vector or n vectors from codes (overloaded in C++)"""
        return _swigfaiss.ProductQuantizer_decode(self, *args)

    def compute_code_from_distance_table(self, tab, code):
        r"""
        If we happen to have the distance tables precomputed, this is
        more efficient to compute the codes.
        """
        return _swigfaiss.ProductQuantizer_compute_code_from_distance_table(self, tab, code)

    def compute_distance_table(self, x, dis_table):
        r"""
        Compute distance table for one vector.
        The distance table for x = [x_0 x_1 .. x_(M-1)] is a M * ksub
        matrix that contains
        dis_table (m, j) = || x_m - c_(m, j)||^2
        for m = 0..M-1 and j = 0 .. ksub - 1
        where c_(m, j) is the centroid no j of sub-quantizer m.
        :type x: float
        :param x: input vector size d
        :type dis_table: float
        :param dis_table: output table, size M * ksub
        """
        return _swigfaiss.ProductQuantizer_compute_distance_table(self, x, dis_table)

    def compute_inner_prod_table(self, x, dis_table):
        r""" like compute_distance_table, but fills the table with inner products (cf. search vs. search_ip)"""
        return _swigfaiss.ProductQuantizer_compute_inner_prod_table(self, x, dis_table)

    def compute_distance_tables(self, nx, x, dis_tables):
        r"""
        compute distance table for several vectors
        :type nx: int
        :param nx: nb of input vectors
        :type x: float
        :param x: input vector size nx * d
        :param dis_tables: output table, size nx * M * ksub
        """
        return _swigfaiss.ProductQuantizer_compute_distance_tables(self, nx, x, dis_tables)

    def compute_inner_prod_tables(self, nx, x, dis_tables):
        r""" like compute_distance_tables, but with inner products"""
        return _swigfaiss.ProductQuantizer_compute_inner_prod_tables(self, nx, x, dis_tables)

    def search(self, x, nx, codes, ncodes, res, init_finalize_heap=True):
        r"""
        perform a search (L2 distance)
        :type x: float
        :param x: query vectors, size nx * d
        :type nx: int
        :param nx: nb of queries
        :type codes: uint8_t
        :param codes: database codes, size ncodes * code_size
        :type ncodes: int
        :param ncodes: nb of database vectors
        :type res: :py:class:`float_maxheap_array_t`
        :param res: heap array to store results (nh == nx)
        :type init_finalize_heap: boolean, optional
        :param init_finalize_heap: initialize heap (input) and sort (output)?
        """
        return _swigfaiss.ProductQuantizer_search(self, x, nx, codes, ncodes, res, init_finalize_heap)

    def search_ip(self, x, nx, codes, ncodes, res, init_finalize_heap=True):
        r""" same as search, but with inner product similarity"""
        return _swigfaiss.ProductQuantizer_search_ip(self, x, nx, codes, ncodes, res, init_finalize_heap)
    sdc_table = property(_swigfaiss.ProductQuantizer_sdc_table_get, _swigfaiss.ProductQuantizer_sdc_table_set, doc=r""" Symmetric Distance Table""")

    def compute_sdc_table(self):
        r""" precompute sdc_table, the Symmetric Distance Table"""
        return _swigfaiss.ProductQuantizer_compute_sdc_table(self)

    def search_sdc(self, qcodes, nq, bcodes, ncodes, res, init_finalize_heap=True):
        r"""
        search with symmetric distances, comparing the nq query codes qcodes
        directly against the ncodes database codes bcodes
        (presumably requires compute_sdc_table to have been called - confirm
        against the C++ ProductQuantizer.h header)
        """
        return _swigfaiss.ProductQuantizer_search_sdc(self, qcodes, nq, bcodes, ncodes, res, init_finalize_heap)

    def sync_transposed_centroids(self):
        r"""
        Sync transposed centroids with regular centroids. This call
        is needed if centroids were edited directly.
        """
        return _swigfaiss.ProductQuantizer_sync_transposed_centroids(self)

    def clear_transposed_centroids(self):
        r""" Clear transposed centroids table so ones are no longer used."""
        return _swigfaiss.ProductQuantizer_clear_transposed_centroids(self)
    __swig_destroy__ = _swigfaiss.delete_ProductQuantizer

# Register ProductQuantizer in _swigfaiss:
_swigfaiss.ProductQuantizer_swigregister(ProductQuantizer)
class PQEncoderGeneric(object):
    r"""
    Encoder that writes PQ code entries with a generic number of bits per
    subquantizer index (see nbits); PQEncoder8 / PQEncoder16 are the
    fixed-width variants.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    code = property(_swigfaiss.PQEncoderGeneric_code_get, _swigfaiss.PQEncoderGeneric_code_set, doc=r""" code for this vector""")
    offset = property(_swigfaiss.PQEncoderGeneric_offset_get, _swigfaiss.PQEncoderGeneric_offset_set)
    nbits = property(_swigfaiss.PQEncoderGeneric_nbits_get, doc=r""" number of bits per subquantizer index""")
    reg = property(_swigfaiss.PQEncoderGeneric_reg_get, _swigfaiss.PQEncoderGeneric_reg_set)

    def __init__(self, code, nbits, offset=0):
        # offset is the bit offset at which writing starts.
        _swigfaiss.PQEncoderGeneric_swiginit(self, _swigfaiss.new_PQEncoderGeneric(code, nbits, offset))

    def encode(self, x):
        r""" append the value x to the code stream"""
        return _swigfaiss.PQEncoderGeneric_encode(self, x)
    __swig_destroy__ = _swigfaiss.delete_PQEncoderGeneric

# Register PQEncoderGeneric in _swigfaiss:
_swigfaiss.PQEncoderGeneric_swigregister(PQEncoderGeneric)
class PQEncoder8(object):
    r""" Specialized PQ code encoder for nbits == 8 (one byte per index)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    code = property(_swigfaiss.PQEncoder8_code_get, _swigfaiss.PQEncoder8_code_set)

    def __init__(self, code, nbits):
        _swigfaiss.PQEncoder8_swiginit(self, _swigfaiss.new_PQEncoder8(code, nbits))

    def encode(self, x):
        r""" append the value x to the code stream"""
        return _swigfaiss.PQEncoder8_encode(self, x)
    __swig_destroy__ = _swigfaiss.delete_PQEncoder8

# Register PQEncoder8 in _swigfaiss:
_swigfaiss.PQEncoder8_swigregister(PQEncoder8)
class PQEncoder16(object):
    r""" Specialized PQ code encoder for nbits == 16 (two bytes per index)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    code = property(_swigfaiss.PQEncoder16_code_get, _swigfaiss.PQEncoder16_code_set)

    def __init__(self, code, nbits):
        _swigfaiss.PQEncoder16_swiginit(self, _swigfaiss.new_PQEncoder16(code, nbits))

    def encode(self, x):
        r""" append the value x to the code stream"""
        return _swigfaiss.PQEncoder16_encode(self, x)
    __swig_destroy__ = _swigfaiss.delete_PQEncoder16

# Register PQEncoder16 in _swigfaiss:
_swigfaiss.PQEncoder16_swigregister(PQEncoder16)
class PQDecoderGeneric(object):
    r"""
    Decoder that reads PQ code entries with a generic number of bits per
    subquantizer index; PQDecoder8 / PQDecoder16 are the fixed-width
    variants.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    code = property(_swigfaiss.PQDecoderGeneric_code_get, _swigfaiss.PQDecoderGeneric_code_set)
    offset = property(_swigfaiss.PQDecoderGeneric_offset_get, _swigfaiss.PQDecoderGeneric_offset_set)
    nbits = property(_swigfaiss.PQDecoderGeneric_nbits_get)
    mask = property(_swigfaiss.PQDecoderGeneric_mask_get)
    reg = property(_swigfaiss.PQDecoderGeneric_reg_get, _swigfaiss.PQDecoderGeneric_reg_set)

    def __init__(self, code, nbits):
        _swigfaiss.PQDecoderGeneric_swiginit(self, _swigfaiss.new_PQDecoderGeneric(code, nbits))

    def decode(self):
        r""" read and return the next value from the code stream"""
        return _swigfaiss.PQDecoderGeneric_decode(self)
    __swig_destroy__ = _swigfaiss.delete_PQDecoderGeneric

# Register PQDecoderGeneric in _swigfaiss:
_swigfaiss.PQDecoderGeneric_swigregister(PQDecoderGeneric)
class PQDecoder8(object):
    r""" Specialized PQ code decoder for nbits == 8 (one byte per index)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # fixed code width exposed as a class constant by the C++ class
    nbits = _swigfaiss.PQDecoder8_nbits
    code = property(_swigfaiss.PQDecoder8_code_get, _swigfaiss.PQDecoder8_code_set)

    def __init__(self, code, nbits):
        _swigfaiss.PQDecoder8_swiginit(self, _swigfaiss.new_PQDecoder8(code, nbits))

    def decode(self):
        r""" read and return the next value from the code stream"""
        return _swigfaiss.PQDecoder8_decode(self)
    __swig_destroy__ = _swigfaiss.delete_PQDecoder8

# Register PQDecoder8 in _swigfaiss:
_swigfaiss.PQDecoder8_swigregister(PQDecoder8)
class PQDecoder16(object):
    r""" Specialized PQ code decoder for nbits == 16 (two bytes per index)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # fixed code width exposed as a class constant by the C++ class
    nbits = _swigfaiss.PQDecoder16_nbits
    code = property(_swigfaiss.PQDecoder16_code_get, _swigfaiss.PQDecoder16_code_set)

    def __init__(self, code, nbits):
        _swigfaiss.PQDecoder16_swiginit(self, _swigfaiss.new_PQDecoder16(code, nbits))

    def decode(self):
        r""" read and return the next value from the code stream"""
        return _swigfaiss.PQDecoder16_decode(self)
    __swig_destroy__ = _swigfaiss.delete_PQDecoder16

# Register PQDecoder16 in _swigfaiss:
_swigfaiss.PQDecoder16_swigregister(PQDecoder16)
class AdditiveQuantizer(Quantizer):
    r"""
    Abstract structure for additive quantizers
    Different from the product quantizer in which the decoded vector is the
    concatenation of M sub-vectors, additive quantizers sum M sub-vectors
    to get the decoded vector.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    M = property(_swigfaiss.AdditiveQuantizer_M_get, _swigfaiss.AdditiveQuantizer_M_set, doc=r""" number of codebooks""")
    nbits = property(_swigfaiss.AdditiveQuantizer_nbits_get, _swigfaiss.AdditiveQuantizer_nbits_set, doc=r""" bits for each step""")
    codebooks = property(_swigfaiss.AdditiveQuantizer_codebooks_get, _swigfaiss.AdditiveQuantizer_codebooks_set, doc=r""" codebooks""")
    codebook_offsets = property(_swigfaiss.AdditiveQuantizer_codebook_offsets_get, _swigfaiss.AdditiveQuantizer_codebook_offsets_set)
    tot_bits = property(_swigfaiss.AdditiveQuantizer_tot_bits_get, _swigfaiss.AdditiveQuantizer_tot_bits_set, doc=r""" total number of bits (indexes + norms)""")
    norm_bits = property(_swigfaiss.AdditiveQuantizer_norm_bits_get, _swigfaiss.AdditiveQuantizer_norm_bits_set, doc=r""" bits allocated for the norms""")
    total_codebook_size = property(_swigfaiss.AdditiveQuantizer_total_codebook_size_get, _swigfaiss.AdditiveQuantizer_total_codebook_size_set, doc=r""" size of the codebook in vectors""")
    only_8bit = property(_swigfaiss.AdditiveQuantizer_only_8bit_get, _swigfaiss.AdditiveQuantizer_only_8bit_set, doc=r""" are all nbits = 8 (use faster decoder)""")
    verbose = property(_swigfaiss.AdditiveQuantizer_verbose_get, _swigfaiss.AdditiveQuantizer_verbose_set, doc=r""" verbose during training?""")
    is_trained = property(_swigfaiss.AdditiveQuantizer_is_trained_get, _swigfaiss.AdditiveQuantizer_is_trained_set, doc=r""" is trained or not""")
    qnorm = property(_swigfaiss.AdditiveQuantizer_qnorm_get, _swigfaiss.AdditiveQuantizer_qnorm_set, doc=r""" store and search norms""")
    norm_tabs = property(_swigfaiss.AdditiveQuantizer_norm_tabs_get, _swigfaiss.AdditiveQuantizer_norm_tabs_set, doc=r"""
    store norms of codebook entries for 4-bit
    fastscan search
    """)
    max_mem_distances = property(_swigfaiss.AdditiveQuantizer_max_mem_distances_get, _swigfaiss.AdditiveQuantizer_max_mem_distances_set, doc=r"""
    norms and distance matrixes with beam search can get large, so use this
    to control for the amount of memory that can be allocated
    """)

    def encode_norm(self, norm):
        r""" encode a norm into norm_bits bits"""
        return _swigfaiss.AdditiveQuantizer_encode_norm(self, norm)

    def encode_qcint(self, x):
        r""" encode norm by non-uniform scalar quantization"""
        return _swigfaiss.AdditiveQuantizer_encode_qcint(self, x)

    def decode_qcint(self, c):
        r""" decode norm by non-uniform scalar quantization"""
        return _swigfaiss.AdditiveQuantizer_decode_qcint(self, c)
    # ST_* constants: possible values of search_type (what the codes contain
    # and how search is performed).
    ST_decompress = _swigfaiss.AdditiveQuantizer_ST_decompress
    r""" decompress database vector"""
    ST_LUT_nonorm = _swigfaiss.AdditiveQuantizer_ST_LUT_nonorm
    r"""
    use a LUT, don't include norms (OK for IP or
    normalized vectors)
    """
    ST_norm_from_LUT = _swigfaiss.AdditiveQuantizer_ST_norm_from_LUT
    r"""
    compute the norms from the look-up tables (cost
    is in O(M^2))
    """
    ST_norm_float = _swigfaiss.AdditiveQuantizer_ST_norm_float
    r""" use a LUT, and store float32 norm with the vectors"""
    ST_norm_qint8 = _swigfaiss.AdditiveQuantizer_ST_norm_qint8
    r""" use a LUT, and store 8bit-quantized norm"""
    ST_norm_qint4 = _swigfaiss.AdditiveQuantizer_ST_norm_qint4
    ST_norm_cqint8 = _swigfaiss.AdditiveQuantizer_ST_norm_cqint8
    r""" use a LUT, and store non-uniform quantized norm"""
    ST_norm_cqint4 = _swigfaiss.AdditiveQuantizer_ST_norm_cqint4
    ST_norm_lsq2x4 = _swigfaiss.AdditiveQuantizer_ST_norm_lsq2x4
    r"""
    use a 2x4 bits lsq as norm quantizer (for fast
    scan)
    """
    ST_norm_rq2x4 = _swigfaiss.AdditiveQuantizer_ST_norm_rq2x4
    r""" use a 2x4 bits rq as norm quantizer (for fast scan)"""

    def set_derived_values(self):
        r""" Train the norm quantizer"""
        # NOTE(review): this SWIG-extracted doc looks misplaced - "Train the
        # norm quantizer" appears to describe train_norm below; confirm
        # against the C++ AdditiveQuantizer.h header.
        return _swigfaiss.AdditiveQuantizer_set_derived_values(self)

    def train_norm(self, n, norms):
        r""" train the quantizer used for the n norms in norms"""
        return _swigfaiss.AdditiveQuantizer_train_norm(self, n, norms)

    def compute_codes(self, x, codes, n):
        r"""
        Encode a set of vectors (overrides Quantizer.compute_codes); see
        compute_codes_add_centroids for the parameter documentation.
        """
        return _swigfaiss.AdditiveQuantizer_compute_codes(self, x, codes, n)

    def compute_codes_add_centroids(self, x, codes, n, centroids=None):
        r"""
        Encode a set of vectors
        :type x: float
        :param x: vectors to encode, size n * d
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        :type centroids: float, optional
        :param centroids: centroids to be added to x, size n * d
        """
        return _swigfaiss.AdditiveQuantizer_compute_codes_add_centroids(self, x, codes, n, centroids)

    def pack_codes(self, n, codes, packed_codes, ld_codes=-1, norms=None, centroids=None):
        r"""
        pack a series of code to bit-compact format
        :type codes: int
        :param codes: codes to be packed, size n * code_size
        :type packed_codes: uint8_t
        :param packed_codes: output bit-compact codes
        :type ld_codes: int, optional
        :param ld_codes: leading dimension of codes
        :type norms: float, optional
        :param norms: norms of the vectors (size n). Will be computed if
                      needed but not provided
        :type centroids: float, optional
        :param centroids: centroids to be added to x, size n * d
        """
        return _swigfaiss.AdditiveQuantizer_pack_codes(self, n, codes, packed_codes, ld_codes, norms, centroids)

    def decode(self, codes, x, n):
        r"""
        Decode a set of vectors
        :type codes: uint8_t
        :param codes: codes to decode, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.AdditiveQuantizer_decode(self, codes, x, n)

    def decode_unpacked(self, codes, x, n, ld_codes=-1):
        r"""
        Decode a set of vectors in non-packed format
        :type codes: int
        :param codes: codes to decode, size n * ld_codes
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.AdditiveQuantizer_decode_unpacked(self, codes, x, n, ld_codes)
    search_type = property(_swigfaiss.AdditiveQuantizer_search_type_get, _swigfaiss.AdditiveQuantizer_search_type_set, doc=r""" Also determines what's in the codes""")
    norm_min = property(_swigfaiss.AdditiveQuantizer_norm_min_get, _swigfaiss.AdditiveQuantizer_norm_min_set, doc=r""" min/max for quantization of norms""")
    norm_max = property(_swigfaiss.AdditiveQuantizer_norm_max_get, _swigfaiss.AdditiveQuantizer_norm_max_set)

    def decode_64bit(self, n, x):
        r""" decoding function for a code in a 64-bit word"""
        return _swigfaiss.AdditiveQuantizer_decode_64bit(self, n, x)

    def compute_LUT(self, n, xq, LUT, alpha=1.0, ld_lut=-1):
        r"""
        Compute inner-product look-up tables. Used in the centroid search
        functions.
        :type xq: float
        :param xq: query vector, size (n, d)
        :type LUT: float
        :param LUT: look-up table, size (n, total_codebook_size)
        :type alpha: float, optional
        :param alpha: compute alpha * inner-product
        :type ld_lut: int, optional
        :param ld_lut: leading dimension of LUT
        """
        return _swigfaiss.AdditiveQuantizer_compute_LUT(self, n, xq, LUT, alpha, ld_lut)

    def knn_centroids_inner_product(self, n, xq, k, distances, labels):
        r""" exact IP search"""
        return _swigfaiss.AdditiveQuantizer_knn_centroids_inner_product(self, n, xq, k, distances, labels)

    def compute_centroid_norms(self, norms):
        r"""
        For L2 search we need the L2 norms of the centroids
        :type norms: float
        :param norms: output norms table, size total_codebook_size
        """
        return _swigfaiss.AdditiveQuantizer_compute_centroid_norms(self, norms)

    def knn_centroids_L2(self, n, xq, k, distances, labels, centroid_norms):
        r""" Exact L2 search, with precomputed norms"""
        return _swigfaiss.AdditiveQuantizer_knn_centroids_L2(self, n, xq, k, distances, labels, centroid_norms)
    __swig_destroy__ = _swigfaiss.delete_AdditiveQuantizer

# Register AdditiveQuantizer in _swigfaiss:
_swigfaiss.AdditiveQuantizer_swigregister(AdditiveQuantizer)
class ResidualQuantizer(AdditiveQuantizer):
r"""
Residual quantizer with variable number of bits per sub-quantizer
The residual centroids are stored in a big cumulative centroid table.
The codes are represented either as a non-compact table of size (n, M) or
as the compact output (n, code_size).
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
train_type = property(_swigfaiss.ResidualQuantizer_train_type_get, _swigfaiss.ResidualQuantizer_train_type_set, doc=r""" Binary or of the Train_* flags below""")
Train_default = _swigfaiss.ResidualQuantizer_Train_default
r""" regular k-means (minimal amount of computation)"""
Train_progressive_dim = _swigfaiss.ResidualQuantizer_Train_progressive_dim
r""" progressive dim clustering (set by default)"""
Train_refine_codebook = _swigfaiss.ResidualQuantizer_Train_refine_codebook
r""" do a few iterations of codebook refinement after first level estimation"""
niter_codebook_refine = property(_swigfaiss.ResidualQuantizer_niter_codebook_refine_get, _swigfaiss.ResidualQuantizer_niter_codebook_refine_set, doc=r""" number of iterations for codebook refinement.""")
Train_top_beam = _swigfaiss.ResidualQuantizer_Train_top_beam
r"""
set this bit on train_type if beam is to be trained only on the
first element of the beam (faster but less accurate)
"""
Skip_codebook_tables = _swigfaiss.ResidualQuantizer_Skip_codebook_tables
r"""
set this bit to *not* autmatically compute the codebook tables
after training
"""
max_beam_size = property(_swigfaiss.ResidualQuantizer_max_beam_size_get, _swigfaiss.ResidualQuantizer_max_beam_size_set, doc=r""" beam size used for training and for encoding""")
use_beam_LUT = property(_swigfaiss.ResidualQuantizer_use_beam_LUT_get, _swigfaiss.ResidualQuantizer_use_beam_LUT_set, doc=r""" use LUT for beam search""")
cp = property(_swigfaiss.ResidualQuantizer_cp_get, _swigfaiss.ResidualQuantizer_cp_set, doc=r""" clustering parameters""")
assign_index_factory = property(_swigfaiss.ResidualQuantizer_assign_index_factory_get, _swigfaiss.ResidualQuantizer_assign_index_factory_set, doc=r""" if non-NULL, use this index for assignment""")
def __init__(self, *args):
_swigfaiss.ResidualQuantizer_swiginit(self, _swigfaiss.new_ResidualQuantizer(*args))
def train(self, n, x):
r""" Train the residual quantizer"""
return _swigfaiss.ResidualQuantizer_train(self, n, x)
def initialize_from(self, other, skip_M=0):
r""" Copy the M codebook levels from other, starting from skip_M"""
return _swigfaiss.ResidualQuantizer_initialize_from(self, other, skip_M)
def retrain_AQ_codebook(self, n, x):
r"""
Encode the vectors and compute codebook that minimizes the quantization
error on these codes
:type x: float
:param x: training vectors, size n * d
:type n: int
:param n: nb of training vectors, n >= total_codebook_size
:rtype: float
:return: returns quantization error for the new codebook with old
codes
"""
return _swigfaiss.ResidualQuantizer_retrain_AQ_codebook(self, n, x)
def compute_codes_add_centroids(self, x, codes, n, centroids=None):
r"""
Encode a set of vectors
:type x: float
:param x: vectors to encode, size n * d
:type codes: uint8_t
:param codes: output codes, size n * code_size
:type centroids: float, optional
:param centroids: centroids to be added to x, size n * d
"""
return _swigfaiss.ResidualQuantizer_compute_codes_add_centroids(self, x, codes, n, centroids)
def refine_beam(self, n, beam_size, residuals, new_beam_size, new_codes, new_residuals=None, new_distances=None):
r"""
lower-level encode function
:type n: int
:param n: number of vectors to hanlde
:type residuals: float
:param residuals: vectors to encode, size (n, beam_size, d)
:type beam_size: int
:param beam_size: input beam size
:type new_beam_size: int
:param new_beam_size: output beam size (should be <= K * beam_size)
:type new_codes: int
:param new_codes: output codes, size (n, new_beam_size, m + 1)
:type new_residuals: float, optional
:param new_residuals: output residuals, size (n, new_beam_size, d)
:type new_distances: float, optional
:param new_distances: output distances, size (n, new_beam_size)
"""
return _swigfaiss.ResidualQuantizer_refine_beam(self, n, beam_size, residuals, new_beam_size, new_codes, new_residuals, new_distances)
def refine_beam_LUT(self, n, query_norms, query_cp, new_beam_size, new_codes, new_distances=None):
return _swigfaiss.ResidualQuantizer_refine_beam_LUT(self, n, query_norms, query_cp, new_beam_size, new_codes, new_distances)
def memory_per_point(self, beam_size=-1):
r"""
Beam search can consume a lot of memory. This function estimates the
amount of mem used by refine_beam to adjust the batch size
:type beam_size: int, optional
:param beam_size: if != -1, override the beam size
"""
return _swigfaiss.ResidualQuantizer_memory_per_point(self, beam_size)
def compute_codebook_tables(self):
    r"""
    Cross products used in codebook tables
    These are used to keep track of norms of centroids.
    """
    return _swigfaiss.ResidualQuantizer_compute_codebook_tables(self)
# Tables below are filled in by compute_codebook_tables():
codebook_cross_products = property(_swigfaiss.ResidualQuantizer_codebook_cross_products_get, _swigfaiss.ResidualQuantizer_codebook_cross_products_set, doc=r"""
dot products of all codebook vectors with each other
size total_codebook_size * total_codebook_size
""")
cent_norms = property(_swigfaiss.ResidualQuantizer_cent_norms_get, _swigfaiss.ResidualQuantizer_cent_norms_set, doc=r""" norms of all vectors""")
__swig_destroy__ = _swigfaiss.delete_ResidualQuantizer
# Register ResidualQuantizer in _swigfaiss:
_swigfaiss.ResidualQuantizer_swigregister(ResidualQuantizer)
def beam_search_encode_step(d, K, cent, n, beam_size, residuals, m, codes, new_beam_size, new_codes, new_residuals, new_distances, assign_index=None):
    r"""
    Encode a residual by sampling from a centroid table.
    This is a single encoding step of the residual quantizer.
    It allows low-level access to the encoding function, exposed mainly for unit
    tests.
    :type n: int
    :param n: number of vectors to handle
    :type residuals: float
    :param residuals: vectors to encode, size (n, beam_size, d)
    :type cent: float
    :param cent: centroids, size (K, d)
    :type beam_size: int
    :param beam_size: input beam size
    :type m: int
    :param m: size of the codes for the previous encoding steps
    :type codes: int
    :param codes: code array for the previous steps of the beam (n,
    beam_size, m)
    :type new_beam_size: int
    :param new_beam_size: output beam size (should be <= K * beam_size)
    :type new_codes: int
    :param new_codes: output codes, size (n, new_beam_size, m + 1)
    :type new_residuals: float
    :param new_residuals: output residuals, size (n, new_beam_size, d)
    :type new_distances: float
    :param new_distances: output distances, size (n, new_beam_size)
    :type assign_index: :py:class:`Index`, optional
    :param assign_index: if non-NULL, will be used to perform assignment
    """
    return _swigfaiss.beam_search_encode_step(d, K, cent, n, beam_size, residuals, m, codes, new_beam_size, new_codes, new_residuals, new_distances, assign_index)
def beam_search_encode_step_tab(K, n, beam_size, codebook_cross_norms, ldc, codebook_offsets, query_cp, ldqc, cent_norms_i, m, codes, distances, new_beam_size, new_codes, new_distances):
    r""" Encode a set of vectors using their dot products with the codebooks"""
    # NOTE(review): table-based variant of beam_search_encode_step; ldc/ldqc
    # appear to be leading dimensions of the cross-norm / query-dot-product
    # tables — confirm against the C++ prototype.
    return _swigfaiss.beam_search_encode_step_tab(K, n, beam_size, codebook_cross_norms, ldc, codebook_offsets, query_cp, ldqc, cent_norms_i, m, codes, distances, new_beam_size, new_codes, new_distances)
class LocalSearchQuantizer(AdditiveQuantizer):
    r"""
    Implementation of LSQ/LSQ++ described in the following two papers:
    Revisiting additive quantization
    Julieta Martinez, et al. ECCV 2016
    LSQ++: Lower running time and higher recall in multi-codebook quantization
    Julieta Martinez, et al. ECCV 2018
    This implementation is mostly translated from the Julia implementations
    by Julieta Martinez:
    (https://github.com/una-dinosauria/local-search-quantization,
    https://github.com/una-dinosauria/Rayuela.jl)
    The trained codes are stored in `codebooks` which is called
    `centroids` in PQ and RQ.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    K = property(_swigfaiss.LocalSearchQuantizer_K_get, _swigfaiss.LocalSearchQuantizer_K_set, doc=r""" number of codes per codebook""")
    train_iters = property(_swigfaiss.LocalSearchQuantizer_train_iters_get, _swigfaiss.LocalSearchQuantizer_train_iters_set, doc=r""" number of iterations in training""")
    encode_ils_iters = property(_swigfaiss.LocalSearchQuantizer_encode_ils_iters_get, _swigfaiss.LocalSearchQuantizer_encode_ils_iters_set, doc=r""" iterations of local search in encoding""")
    train_ils_iters = property(_swigfaiss.LocalSearchQuantizer_train_ils_iters_get, _swigfaiss.LocalSearchQuantizer_train_ils_iters_set, doc=r""" iterations of local search in training""")
    icm_iters = property(_swigfaiss.LocalSearchQuantizer_icm_iters_get, _swigfaiss.LocalSearchQuantizer_icm_iters_set, doc=r""" number of iterations in icm""")
    p = property(_swigfaiss.LocalSearchQuantizer_p_get, _swigfaiss.LocalSearchQuantizer_p_set, doc=r""" temperature factor""")
    lambd = property(_swigfaiss.LocalSearchQuantizer_lambd_get, _swigfaiss.LocalSearchQuantizer_lambd_set, doc=r""" regularization factor""")
    chunk_size = property(_swigfaiss.LocalSearchQuantizer_chunk_size_get, _swigfaiss.LocalSearchQuantizer_chunk_size_set, doc=r""" nb of vectors to encode at a time""")
    random_seed = property(_swigfaiss.LocalSearchQuantizer_random_seed_get, _swigfaiss.LocalSearchQuantizer_random_seed_set, doc=r""" seed for random generator""")
    # FIX(review): the generated doc for `nperts` had absorbed the comment
    # belonging to `icm_encoder_factory`; the two docs are realigned below.
    nperts = property(_swigfaiss.LocalSearchQuantizer_nperts_get, _swigfaiss.LocalSearchQuantizer_nperts_set, doc=r""" number of perturbations in each code""")
    icm_encoder_factory = property(_swigfaiss.LocalSearchQuantizer_icm_encoder_factory_get, _swigfaiss.LocalSearchQuantizer_icm_encoder_factory_set, doc=r""" if non-NULL, use this encoder to encode""")
    update_codebooks_with_double = property(_swigfaiss.LocalSearchQuantizer_update_codebooks_with_double_get, _swigfaiss.LocalSearchQuantizer_update_codebooks_with_double_set)
    def __init__(self, *args):
        _swigfaiss.LocalSearchQuantizer_swiginit(self, _swigfaiss.new_LocalSearchQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_LocalSearchQuantizer
    def train(self, n, x):
        # Train the LSQ on n vectors x (delegates to the C++ implementation).
        return _swigfaiss.LocalSearchQuantizer_train(self, n, x)
    def compute_codes_add_centroids(self, x, codes, n, centroids=None):
        r"""
        Encode a set of vectors
        :type x: float
        :param x: vectors to encode, size n * d
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        :type n: int
        :param n: number of vectors
        :type centroids: float, optional
        :param centroids: centroids to be added to x, size n * d
        """
        return _swigfaiss.LocalSearchQuantizer_compute_codes_add_centroids(self, x, codes, n, centroids)
    def update_codebooks(self, x, codes, n):
        r"""
        Update codebooks given encodings
        :type x: float
        :param x: training vectors, size n * d
        :type codes: int
        :param codes: encoded training vectors, size n * M
        :type n: int
        :param n: number of vectors
        """
        return _swigfaiss.LocalSearchQuantizer_update_codebooks(self, x, codes, n)
    def icm_encode(self, codes, x, n, ils_iters, gen):
        r"""
        Encode vectors given codebooks using iterative conditional mode (icm).
        :type codes: int
        :param codes: output codes, size n * M
        :type x: float
        :param x: vectors to encode, size n * d
        :type n: int
        :param n: number of vectors
        :type ils_iters: int
        :param ils_iters: number of iterations of iterative local search
        """
        return _swigfaiss.LocalSearchQuantizer_icm_encode(self, codes, x, n, ils_iters, gen)
    def icm_encode_impl(self, codes, x, unaries, gen, n, ils_iters, verbose):
        # Low-level worker behind icm_encode (unary terms precomputed).
        return _swigfaiss.LocalSearchQuantizer_icm_encode_impl(self, codes, x, unaries, gen, n, ils_iters, verbose)
    def icm_encode_step(self, codes, unaries, binaries, n, n_iters):
        # Single ICM pass over the codes given unary/binary terms.
        return _swigfaiss.LocalSearchQuantizer_icm_encode_step(self, codes, unaries, binaries, n, n_iters)
    def perturb_codes(self, codes, n, gen):
        r"""
        Add some perturbation to codes
        :type codes: int
        :param codes: codes to be perturbed, size n * M
        :type n: int
        :param n: number of vectors
        """
        return _swigfaiss.LocalSearchQuantizer_perturb_codes(self, codes, n, gen)
    def perturb_codebooks(self, T, stddev, gen):
        r"""
        Add some perturbation to codebooks
        :type T: float
        :param T: temperature of simulated annealing
        :type stddev: std::vector< float >
        :param stddev: standard deviations of each dimension in training data
        """
        return _swigfaiss.LocalSearchQuantizer_perturb_codebooks(self, T, stddev, gen)
    def compute_binary_terms(self, binaries):
        r"""
        Compute binary terms
        :type binaries: float
        :param binaries: binary terms, size M * M * K * K
        """
        return _swigfaiss.LocalSearchQuantizer_compute_binary_terms(self, binaries)
    def compute_unary_terms(self, x, unaries, n):
        r"""
        Compute unary terms
        :type n: int
        :param n: number of vectors
        :type x: float
        :param x: vectors to encode, size n * d
        :type unaries: float
        :param unaries: unary terms, size n * M * K
        """
        return _swigfaiss.LocalSearchQuantizer_compute_unary_terms(self, x, unaries, n)
    def evaluate(self, codes, x, n, objs=None):
        r"""
        Helper function to compute reconstruction error
        :type codes: int
        :param codes: encoded codes, size n * M
        :type x: float
        :param x: vectors to encode, size n * d
        :type n: int
        :param n: number of vectors
        :type objs: float, optional
        :param objs: if it is not null, store reconstruction
        error of each vector into it, size n
        """
        return _swigfaiss.LocalSearchQuantizer_evaluate(self, codes, x, n, objs)
# Register LocalSearchQuantizer in _swigfaiss:
_swigfaiss.LocalSearchQuantizer_swigregister(LocalSearchQuantizer)
class IcmEncoder(object):
    # SWIG proxy for the C++ IcmEncoder used by LocalSearchQuantizer
    # (see LocalSearchQuantizer.icm_encoder_factory).
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # binaries: presumably the precomputed binary terms filled by
    # set_binary_term() — confirm in the C++ header.
    binaries = property(_swigfaiss.IcmEncoder_binaries_get, _swigfaiss.IcmEncoder_binaries_set)
    verbose = property(_swigfaiss.IcmEncoder_verbose_get, _swigfaiss.IcmEncoder_verbose_set)
    # lsq: the LocalSearchQuantizer this encoder works for.
    lsq = property(_swigfaiss.IcmEncoder_lsq_get, _swigfaiss.IcmEncoder_lsq_set)
    def __init__(self, lsq):
        _swigfaiss.IcmEncoder_swiginit(self, _swigfaiss.new_IcmEncoder(lsq))
    __swig_destroy__ = _swigfaiss.delete_IcmEncoder
    def set_binary_term(self):
        # Precompute the binary terms used by encode() (C++ side).
        return _swigfaiss.IcmEncoder_set_binary_term(self)
    def encode(self, codes, x, gen, n, ils_iters):
        r"""
        Encode vectors given codebooks
        :type codes: int
        :param codes: output codes, size n * M
        :type x: float
        :param x: vectors to encode, size n * d
        :type gen: std::mt19937
        :param gen: random generator
        :type n: int
        :param n: number of vectors
        :type ils_iters: int
        :param ils_iters: number of iterations of iterative local search
        """
        return _swigfaiss.IcmEncoder_encode(self, codes, x, gen, n, ils_iters)
# Register IcmEncoder in _swigfaiss:
_swigfaiss.IcmEncoder_swigregister(IcmEncoder)
class IcmEncoderFactory(object):
    # Factory producing IcmEncoder instances for a given LSQ; an instance
    # can be assigned to LocalSearchQuantizer.icm_encoder_factory.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def get(self, lsq):
        # Build an encoder bound to `lsq` (C++ side).
        return _swigfaiss.IcmEncoderFactory_get(self, lsq)
    __swig_destroy__ = _swigfaiss.delete_IcmEncoderFactory
    def __init__(self):
        _swigfaiss.IcmEncoderFactory_swiginit(self, _swigfaiss.new_IcmEncoderFactory())
# Register IcmEncoderFactory in _swigfaiss:
_swigfaiss.IcmEncoderFactory_swigregister(IcmEncoderFactory)
class LSQTimer(object):
    r"""
    A helper struct to count consuming time during training.
    It is NOT thread-safe.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # t: accumulated timings — presumably keyed by section name, given the
    # `name` argument of get()/add(); confirm in the C++ header.
    t = property(_swigfaiss.LSQTimer_t_get, _swigfaiss.LSQTimer_t_set)
    def __init__(self):
        _swigfaiss.LSQTimer_swiginit(self, _swigfaiss.new_LSQTimer())
    def get(self, name):
        # Return the time accumulated under `name`.
        return _swigfaiss.LSQTimer_get(self, name)
    def add(self, name, delta):
        # Add `delta` to the counter registered under `name`.
        return _swigfaiss.LSQTimer_add(self, name, delta)
    def reset(self):
        # Clear all accumulated counters.
        return _swigfaiss.LSQTimer_reset(self)
    __swig_destroy__ = _swigfaiss.delete_LSQTimer
# Register LSQTimer in _swigfaiss:
_swigfaiss.LSQTimer_swigregister(LSQTimer)
class LSQTimerScope(object):
    r"""
    Scope helper that charges elapsed time to an LSQTimer entry.
    NOTE(review): semantics inferred from the field names (t0 at
    construction, finish() closes the measurement under `name`) — confirm
    against the C++ implementation.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # t0: start timestamp of the scope.
    t0 = property(_swigfaiss.LSQTimerScope_t0_get, _swigfaiss.LSQTimerScope_t0_set)
    # timer: the LSQTimer that receives the measured time.
    timer = property(_swigfaiss.LSQTimerScope_timer_get, _swigfaiss.LSQTimerScope_timer_set)
    # name: key under which the time is accumulated.
    name = property(_swigfaiss.LSQTimerScope_name_get, _swigfaiss.LSQTimerScope_name_set)
    # finished: guards against double-counting once finish() has run.
    finished = property(_swigfaiss.LSQTimerScope_finished_get, _swigfaiss.LSQTimerScope_finished_set)
    def __init__(self, timer, name):
        _swigfaiss.LSQTimerScope_swiginit(self, _swigfaiss.new_LSQTimerScope(timer, name))
    def finish(self):
        # Close the scope and record the elapsed time (C++ side).
        return _swigfaiss.LSQTimerScope_finish(self)
    __swig_destroy__ = _swigfaiss.delete_LSQTimerScope
# Register LSQTimerScope in _swigfaiss:
_swigfaiss.LSQTimerScope_swigregister(LSQTimerScope)
class ProductAdditiveQuantizer(AdditiveQuantizer):
    r"""
    Product Additive Quantizers
    The product additive quantizer is a variant of AQ and PQ.
    It first splits the vector space into multiple orthogonal sub-spaces
    just like PQ does. And then it quantizes each sub-space by an independent
    additive quantizer.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nsplits = property(_swigfaiss.ProductAdditiveQuantizer_nsplits_get, _swigfaiss.ProductAdditiveQuantizer_nsplits_set, doc=r""" number of sub-vectors we split a vector into""")
    quantizers = property(_swigfaiss.ProductAdditiveQuantizer_quantizers_get, _swigfaiss.ProductAdditiveQuantizer_quantizers_set)
    def __init__(self, *args):
        _swigfaiss.ProductAdditiveQuantizer_swiginit(self, _swigfaiss.new_ProductAdditiveQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_ProductAdditiveQuantizer
    def init(self, d, aqs, search_type):
        # Initialize from the total dimension, sub-quantizer list and search
        # type (delegates to C++ ProductAdditiveQuantizer::init).
        return _swigfaiss.ProductAdditiveQuantizer_init(self, d, aqs, search_type)
    def subquantizer(self, m):
        r""" Return the m-th sub-quantizer."""
        # NOTE(review): the generated docstring here ("Train the product
        # additive quantizer") belonged to train() below; it was moved there.
        return _swigfaiss.ProductAdditiveQuantizer_subquantizer(self, m)
    def train(self, n, x):
        r""" Train the product additive quantizer"""
        return _swigfaiss.ProductAdditiveQuantizer_train(self, n, x)
    def compute_codes_add_centroids(self, x, codes, n, centroids=None):
        r"""
        Encode a set of vectors
        :type x: float
        :param x: vectors to encode, size n * d
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        :type centroids: float, optional
        :param centroids: centroids to be added to x, size n * d
        """
        return _swigfaiss.ProductAdditiveQuantizer_compute_codes_add_centroids(self, x, codes, n, centroids)
    def compute_unpacked_codes(self, x, codes, n, centroids=None):
        # NOTE(review): presumably like compute_codes_add_centroids but
        # leaving the codes in non-packed form (cf. decode_unpacked) —
        # confirm in the C++ header.
        return _swigfaiss.ProductAdditiveQuantizer_compute_unpacked_codes(self, x, codes, n, centroids)
    def decode_unpacked(self, codes, x, n, ld_codes=-1):
        r"""
        Decode a set of vectors in non-packed format
        :type codes: int
        :param codes: codes to decode, size n * ld_codes
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.ProductAdditiveQuantizer_decode_unpacked(self, codes, x, n, ld_codes)
    def decode(self, codes, x, n):
        r"""
        Decode a set of vectors
        :type codes: uint8_t
        :param codes: codes to decode, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.ProductAdditiveQuantizer_decode(self, codes, x, n)
    def compute_LUT(self, n, xq, LUT, alpha=1.0, ld_lut=-1):
        r"""
        Compute inner-product look-up tables. Used in the search functions.
        :type xq: float
        :param xq: query vector, size (n, d)
        :type LUT: float
        :param LUT: look-up table, size (n, total_codebook_size)
        :type alpha: float, optional
        :param alpha: compute alpha * inner-product
        :type ld_lut: int, optional
        :param ld_lut: leading dimension of LUT
        """
        return _swigfaiss.ProductAdditiveQuantizer_compute_LUT(self, n, xq, LUT, alpha, ld_lut)
# Register ProductAdditiveQuantizer in _swigfaiss:
_swigfaiss.ProductAdditiveQuantizer_swigregister(ProductAdditiveQuantizer)
class ProductLocalSearchQuantizer(ProductAdditiveQuantizer):
    r""" Product Local Search Quantizer"""
    # PLSQ: each orthogonal sub-space is quantized by a LocalSearchQuantizer.
    # Constructor overloads are defined on the C++ side.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        _swigfaiss.ProductLocalSearchQuantizer_swiginit(self, _swigfaiss.new_ProductLocalSearchQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_ProductLocalSearchQuantizer
# Register ProductLocalSearchQuantizer in _swigfaiss:
_swigfaiss.ProductLocalSearchQuantizer_swigregister(ProductLocalSearchQuantizer)
class ProductResidualQuantizer(ProductAdditiveQuantizer):
    r""" Product Residual Quantizer"""
    # PRQ: each orthogonal sub-space is quantized by a ResidualQuantizer.
    # Constructor overloads are defined on the C++ side.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        _swigfaiss.ProductResidualQuantizer_swiginit(self, _swigfaiss.new_ProductResidualQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_ProductResidualQuantizer
# Register ProductResidualQuantizer in _swigfaiss:
_swigfaiss.ProductResidualQuantizer_swigregister(ProductResidualQuantizer)
class VectorTransform(object):
    r""" Any transformation applied on a set of vectors"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # FIX(review): the generated field docs were shifted by one — "input
    # dimension" was attached to d_out; realigned per the C++ header.
    d_in = property(_swigfaiss.VectorTransform_d_in_get, _swigfaiss.VectorTransform_d_in_set, doc=r"""input dimension""")
    d_out = property(_swigfaiss.VectorTransform_d_out_get, _swigfaiss.VectorTransform_d_out_set, doc=r"""output dimension""")
    is_trained = property(_swigfaiss.VectorTransform_is_trained_get, _swigfaiss.VectorTransform_is_trained_set, doc=r"""
    set if the VectorTransform does not require training, or if
    training is done already
    """)
    def train(self, n, x):
        r"""
        Perform training on a representative set of vectors. Does
        nothing by default.
        :type n: int
        :param n: nb of training vectors
        :type x: float
        :param x: training vectors, size n * d
        """
        return _swigfaiss.VectorTransform_train(self, n, x)
    def apply(self, n, x):
        r"""
        apply the transformation and return the result in an allocated pointer
        :type n: int
        :param n: number of vectors to transform
        :type x: float
        :param x: input vectors, size n * d_in
        :rtype: float
        :return: output vectors, size n * d_out
        """
        return _swigfaiss.VectorTransform_apply(self, n, x)
    def apply_noalloc(self, n, x, xt):
        r"""
        apply the transformation and return the result in a provided matrix
        :type n: int
        :param n: number of vectors to transform
        :type x: float
        :param x: input vectors, size n * d_in
        :type xt: float
        :param xt: output vectors, size n * d_out
        """
        return _swigfaiss.VectorTransform_apply_noalloc(self, n, x, xt)
    def reverse_transform(self, n, xt, x):
        r"""
        reverse transformation. May not be implemented or may return
        approximate result
        """
        return _swigfaiss.VectorTransform_reverse_transform(self, n, xt, x)
    def check_identical(self, other):
        # Compare this transform with `other` (C++ side).
        return _swigfaiss.VectorTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_VectorTransform
# Register VectorTransform in _swigfaiss:
_swigfaiss.VectorTransform_swigregister(VectorTransform)
class LinearTransform(VectorTransform):
    r"""
    Generic linear transformation, with bias term applied on output
    y = A * x + b
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # FIX(review): "whether to use the bias term" had been absorbed into the
    # is_orthonormal doc; the two field docs are realigned below.
    have_bias = property(_swigfaiss.LinearTransform_have_bias_get, _swigfaiss.LinearTransform_have_bias_set, doc=r""" whether to use the bias term""")
    is_orthonormal = property(_swigfaiss.LinearTransform_is_orthonormal_get, _swigfaiss.LinearTransform_is_orthonormal_set, doc=r""" check if matrix A is orthonormal (enables reverse_transform)""")
    A = property(_swigfaiss.LinearTransform_A_get, _swigfaiss.LinearTransform_A_set, doc=r""" Transformation matrix, size d_out * d_in""")
    b = property(_swigfaiss.LinearTransform_b_get, _swigfaiss.LinearTransform_b_set, doc=r""" bias vector, size d_out""")
    def __init__(self, d_in=0, d_out=0, have_bias=False):
        r""" both d_in > d_out and d_out < d_in are supported"""
        _swigfaiss.LinearTransform_swiginit(self, _swigfaiss.new_LinearTransform(d_in, d_out, have_bias))
    def apply_noalloc(self, n, x, xt):
        r""" same as apply, but result is pre-allocated"""
        return _swigfaiss.LinearTransform_apply_noalloc(self, n, x, xt)
    def transform_transpose(self, n, y, x):
        r"""
        compute x = A^T * (x - b)
        is reverse transform if A has orthonormal lines
        """
        return _swigfaiss.LinearTransform_transform_transpose(self, n, y, x)
    def reverse_transform(self, n, xt, x):
        r""" works only if is_orthonormal"""
        return _swigfaiss.LinearTransform_reverse_transform(self, n, xt, x)
    def set_is_orthonormal(self):
        r""" compute A^T * A to set the is_orthonormal flag"""
        return _swigfaiss.LinearTransform_set_is_orthonormal(self)
    verbose = property(_swigfaiss.LinearTransform_verbose_get, _swigfaiss.LinearTransform_verbose_set)
    def print_if_verbose(self, name, mat, n, d):
        # Debug helper: print matrix `mat` (n x d) when verbose is set.
        return _swigfaiss.LinearTransform_print_if_verbose(self, name, mat, n, d)
    def check_identical(self, other):
        return _swigfaiss.LinearTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_LinearTransform
# Register LinearTransform in _swigfaiss:
_swigfaiss.LinearTransform_swigregister(LinearTransform)
class RandomRotationMatrix(LinearTransform):
    r""" Randomly rotate a set of vectors"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def init(self, seed):
        r""" must be called before the transform is used"""
        return _swigfaiss.RandomRotationMatrix_init(self, seed)
    def train(self, n, x):
        # Training only draws the random rotation; x is a representative set
        # of vectors (inherited contract from VectorTransform.train).
        return _swigfaiss.RandomRotationMatrix_train(self, n, x)
    def __init__(self, *args):
        _swigfaiss.RandomRotationMatrix_swiginit(self, _swigfaiss.new_RandomRotationMatrix(*args))
    __swig_destroy__ = _swigfaiss.delete_RandomRotationMatrix
# Register RandomRotationMatrix in _swigfaiss:
_swigfaiss.RandomRotationMatrix_swigregister(RandomRotationMatrix)
class PCAMatrix(LinearTransform):
    r"""
    Applies a principal component analysis on a set of vectors,
    with optionally whitening and random rotation.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    eigen_power = property(_swigfaiss.PCAMatrix_eigen_power_get, _swigfaiss.PCAMatrix_eigen_power_set, doc=r"""
    after transformation the components are multiplied by
    eigenvalues^eigen_power
    =0: no whitening
    =-0.5: full whitening
    """)
    epsilon = property(_swigfaiss.PCAMatrix_epsilon_get, _swigfaiss.PCAMatrix_epsilon_set, doc=r""" value added to eigenvalues to avoid division by 0 when whitening""")
    random_rotation = property(_swigfaiss.PCAMatrix_random_rotation_get, _swigfaiss.PCAMatrix_random_rotation_set, doc=r""" random rotation after PCA""")
    max_points_per_d = property(_swigfaiss.PCAMatrix_max_points_per_d_get, _swigfaiss.PCAMatrix_max_points_per_d_set, doc=r""" ratio between # training vectors and dimension""")
    balanced_bins = property(_swigfaiss.PCAMatrix_balanced_bins_get, _swigfaiss.PCAMatrix_balanced_bins_set, doc=r""" try to distribute output eigenvectors in this many bins""")
    mean = property(_swigfaiss.PCAMatrix_mean_get, _swigfaiss.PCAMatrix_mean_set, doc=r""" Mean, size d_in""")
    eigenvalues = property(_swigfaiss.PCAMatrix_eigenvalues_get, _swigfaiss.PCAMatrix_eigenvalues_set, doc=r""" eigenvalues of covariance matrix (= squared singular values)""")
    PCAMat = property(_swigfaiss.PCAMatrix_PCAMat_get, _swigfaiss.PCAMatrix_PCAMat_set, doc=r""" PCA matrix, size d_in * d_in""")
    def __init__(self, d_in=0, d_out=0, eigen_power=0, random_rotation=False):
        _swigfaiss.PCAMatrix_swiginit(self, _swigfaiss.new_PCAMatrix(d_in, d_out, eigen_power, random_rotation))
    def train(self, n, x):
        r"""
        train on n vectors. If n < d_in then the eigenvector matrix
        will be completed with 0s
        """
        return _swigfaiss.PCAMatrix_train(self, n, x)
    def copy_from(self, other):
        r""" copy pre-trained PCA matrix"""
        return _swigfaiss.PCAMatrix_copy_from(self, other)
    def prepare_Ab(self):
        r""" called after mean, PCAMat and eigenvalues are computed"""
        # Builds the effective A / b of the underlying LinearTransform.
        return _swigfaiss.PCAMatrix_prepare_Ab(self)
    __swig_destroy__ = _swigfaiss.delete_PCAMatrix
# Register PCAMatrix in _swigfaiss:
_swigfaiss.PCAMatrix_swigregister(PCAMatrix)
class ITQMatrix(LinearTransform):
    r"""
    ITQ implementation from
    Iterative quantization: A procrustean approach to learning binary codes
    for large-scale image retrieval,
    Yunchao Gong, Svetlana Lazebnik, Albert Gordo, Florent Perronnin,
    PAMI'12.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # max_iter / seed / init_rotation: undocumented in the generated wrapper;
    # presumably the ITQ iteration cap, RNG seed and initial rotation —
    # confirm in the C++ header.
    max_iter = property(_swigfaiss.ITQMatrix_max_iter_get, _swigfaiss.ITQMatrix_max_iter_set)
    seed = property(_swigfaiss.ITQMatrix_seed_get, _swigfaiss.ITQMatrix_seed_set)
    init_rotation = property(_swigfaiss.ITQMatrix_init_rotation_get, _swigfaiss.ITQMatrix_init_rotation_set)
    def __init__(self, d=0):
        _swigfaiss.ITQMatrix_swiginit(self, _swigfaiss.new_ITQMatrix(d))
    def train(self, n, x):
        return _swigfaiss.ITQMatrix_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_ITQMatrix
# Register ITQMatrix in _swigfaiss:
_swigfaiss.ITQMatrix_swigregister(ITQMatrix)
class ITQTransform(VectorTransform):
    r""" The full ITQ transform, including normalizations and PCA transformation"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    mean = property(_swigfaiss.ITQTransform_mean_get, _swigfaiss.ITQTransform_mean_set)
    do_pca = property(_swigfaiss.ITQTransform_do_pca_get, _swigfaiss.ITQTransform_do_pca_set)
    # itq: the inner ITQMatrix rotation.
    itq = property(_swigfaiss.ITQTransform_itq_get, _swigfaiss.ITQTransform_itq_set)
    max_train_per_dim = property(_swigfaiss.ITQTransform_max_train_per_dim_get, _swigfaiss.ITQTransform_max_train_per_dim_set, doc=r""" max training points per dimension""")
    # pca_then_itq: undocumented in the generated wrapper; presumably the
    # combined PCA+ITQ linear transform — confirm in the C++ header.
    pca_then_itq = property(_swigfaiss.ITQTransform_pca_then_itq_get, _swigfaiss.ITQTransform_pca_then_itq_set)
    def __init__(self, d_in=0, d_out=0, do_pca=False):
        _swigfaiss.ITQTransform_swiginit(self, _swigfaiss.new_ITQTransform(d_in, d_out, do_pca))
    def train(self, n, x):
        return _swigfaiss.ITQTransform_train(self, n, x)
    def apply_noalloc(self, n, x, xt):
        return _swigfaiss.ITQTransform_apply_noalloc(self, n, x, xt)
    def check_identical(self, other):
        return _swigfaiss.ITQTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_ITQTransform
# Register ITQTransform in _swigfaiss:
_swigfaiss.ITQTransform_swigregister(ITQTransform)
class OPQMatrix(LinearTransform):
    r"""
    Applies a rotation to align the dimensions with a PQ to minimize
    the reconstruction error. Can be used before an IndexPQ or an
    IndexIVFPQ. The method is the non-parametric version described in:
    "Optimized Product Quantization for Approximate Nearest Neighbor Search"
    Tiezheng Ge, Kaiming He, Qifa Ke, Jian Sun, CVPR'13
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    M = property(_swigfaiss.OPQMatrix_M_get, _swigfaiss.OPQMatrix_M_set, doc=r""" nb of subquantizers""")
    niter = property(_swigfaiss.OPQMatrix_niter_get, _swigfaiss.OPQMatrix_niter_set, doc=r""" Number of outer training iterations""")
    niter_pq = property(_swigfaiss.OPQMatrix_niter_pq_get, _swigfaiss.OPQMatrix_niter_pq_set, doc=r""" Number of training iterations for the PQ""")
    niter_pq_0 = property(_swigfaiss.OPQMatrix_niter_pq_0_get, _swigfaiss.OPQMatrix_niter_pq_0_set, doc=r""" same, for the first outer iteration""")
    max_train_points = property(_swigfaiss.OPQMatrix_max_train_points_get, _swigfaiss.OPQMatrix_max_train_points_set, doc=r""" if there are too many training points, resample""")
    verbose = property(_swigfaiss.OPQMatrix_verbose_get, _swigfaiss.OPQMatrix_verbose_set)
    pq = property(_swigfaiss.OPQMatrix_pq_get, _swigfaiss.OPQMatrix_pq_set, doc=r"""
    if non-NULL, use this product quantizer for training
    should be constructed with (d_out, M, _)
    """)
    def __init__(self, d=0, M=1, d2=-1):
        r""" if d2 != -1, output vectors of this dimension"""
        _swigfaiss.OPQMatrix_swiginit(self, _swigfaiss.new_OPQMatrix(d, M, d2))
    def train(self, n, x):
        return _swigfaiss.OPQMatrix_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_OPQMatrix
# Register OPQMatrix in _swigfaiss:
_swigfaiss.OPQMatrix_swigregister(OPQMatrix)
class RemapDimensionsTransform(VectorTransform):
    r"""
    remap dimensions for input vectors, possibly inserting 0s
    strictly speaking this is also a linear transform but we don't want
    to compute it with matrix multiplies
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    map = property(_swigfaiss.RemapDimensionsTransform_map_get, _swigfaiss.RemapDimensionsTransform_map_set, doc=r"""
    map from output dimension to input, size d_out
    -1 -> set output to 0
    """)
    def apply_noalloc(self, n, x, xt):
        return _swigfaiss.RemapDimensionsTransform_apply_noalloc(self, n, x, xt)
    def reverse_transform(self, n, xt, x):
        r""" reverse transform correct only when the mapping is a permutation"""
        return _swigfaiss.RemapDimensionsTransform_reverse_transform(self, n, xt, x)
    def __init__(self, *args):
        _swigfaiss.RemapDimensionsTransform_swiginit(self, _swigfaiss.new_RemapDimensionsTransform(*args))
    def check_identical(self, other):
        return _swigfaiss.RemapDimensionsTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_RemapDimensionsTransform
# Register RemapDimensionsTransform in _swigfaiss:
_swigfaiss.RemapDimensionsTransform_swigregister(RemapDimensionsTransform)
class NormalizationTransform(VectorTransform):
    r""" per-vector normalization"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # norm: undocumented in the generated wrapper; presumably the order of
    # the norm used for normalization — confirm in the C++ header.
    norm = property(_swigfaiss.NormalizationTransform_norm_get, _swigfaiss.NormalizationTransform_norm_set)
    def __init__(self, *args):
        _swigfaiss.NormalizationTransform_swiginit(self, _swigfaiss.new_NormalizationTransform(*args))
    def apply_noalloc(self, n, x, xt):
        return _swigfaiss.NormalizationTransform_apply_noalloc(self, n, x, xt)
    def reverse_transform(self, n, xt, x):
        r""" Identity transform since norm is not revertible"""
        return _swigfaiss.NormalizationTransform_reverse_transform(self, n, xt, x)
    def check_identical(self, other):
        return _swigfaiss.NormalizationTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_NormalizationTransform
# Register NormalizationTransform in _swigfaiss:
_swigfaiss.NormalizationTransform_swigregister(NormalizationTransform)
class CenteringTransform(VectorTransform):
    r""" Subtract the mean of each component from the vectors."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    mean = property(_swigfaiss.CenteringTransform_mean_get, _swigfaiss.CenteringTransform_mean_set, doc=r""" Mean, size d_in = d_out""")
    def __init__(self, d=0):
        _swigfaiss.CenteringTransform_swiginit(self, _swigfaiss.new_CenteringTransform(d))
    def train(self, n, x):
        r""" train on n vectors."""
        return _swigfaiss.CenteringTransform_train(self, n, x)
    def apply_noalloc(self, n, x, xt):
        r""" subtract the mean"""
        return _swigfaiss.CenteringTransform_apply_noalloc(self, n, x, xt)
    def reverse_transform(self, n, xt, x):
        r""" add the mean"""
        return _swigfaiss.CenteringTransform_reverse_transform(self, n, xt, x)
    def check_identical(self, other):
        return _swigfaiss.CenteringTransform_check_identical(self, other)
    __swig_destroy__ = _swigfaiss.delete_CenteringTransform
# Register CenteringTransform in _swigfaiss:
_swigfaiss.CenteringTransform_swigregister(CenteringTransform)
class SearchParametersPreTransform(SearchParameters):
    r"""
    Copyright (c) Facebook, Inc. and its affiliates.
    This source code is licensed under the MIT license found in the
    LICENSE file in the root directory of this source tree.
    """
    # NOTE(review): the docstring above is the file license header picked up
    # by the SWIG doc generator, not a description of this class. The class
    # wraps search parameters for IndexPreTransform; `index_params` are
    # forwarded to the sub-index.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    index_params = property(_swigfaiss.SearchParametersPreTransform_index_params_get, _swigfaiss.SearchParametersPreTransform_index_params_set)
    def __init__(self):
        _swigfaiss.SearchParametersPreTransform_swiginit(self, _swigfaiss.new_SearchParametersPreTransform())
    __swig_destroy__ = _swigfaiss.delete_SearchParametersPreTransform
# Register SearchParametersPreTransform in _swigfaiss:
_swigfaiss.SearchParametersPreTransform_swigregister(SearchParametersPreTransform)
class IndexPreTransform(Index):
    r"""
    Index that applies a LinearTransform transform on vectors before
    handing them over to a sub-index
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # FIX(review): the generated field docs were shifted by one ("chain of
    # tranforms" sat on `index`, "the sub-index" on `own_fields`); realigned
    # below, with the typo fixed.
    chain = property(_swigfaiss.IndexPreTransform_chain_get, _swigfaiss.IndexPreTransform_chain_set, doc=r"""chain of transforms""")
    index = property(_swigfaiss.IndexPreTransform_index_get, _swigfaiss.IndexPreTransform_index_set, doc=r"""the sub-index""")
    own_fields = property(_swigfaiss.IndexPreTransform_own_fields_get, _swigfaiss.IndexPreTransform_own_fields_set, doc=r"""whether pointers are deleted in destructor""")
def __init__(self, *args):
r"""
*Overload 1:*
whether pointers are deleted in destructor
|
*Overload 2:*
ltrans is the last transform before the index
"""
_swigfaiss.IndexPreTransform_swiginit(self, _swigfaiss.new_IndexPreTransform(*args))
def prepend_transform(self, ltrans):
return _swigfaiss.IndexPreTransform_prepend_transform(self, ltrans)
def train(self, n, x):
return _swigfaiss.IndexPreTransform_train(self, n, x)
def add(self, n, x):
return _swigfaiss.IndexPreTransform_add(self, n, x)
def add_with_ids(self, n, x, xids):
return _swigfaiss.IndexPreTransform_add_with_ids(self, n, x, xids)
def reset(self):
return _swigfaiss.IndexPreTransform_reset(self)
def remove_ids(self, sel):
r""" removes IDs from the index. Not supported by all indexes."""
return _swigfaiss.IndexPreTransform_remove_ids(self, sel)
def search(self, n, x, k, distances, labels, params=None):
return _swigfaiss.IndexPreTransform_search(self, n, x, k, distances, labels, params)
def range_search(self, n, x, radius, result, params=None):
return _swigfaiss.IndexPreTransform_range_search(self, n, x, radius, result, params)
def reconstruct(self, key, recons):
return _swigfaiss.IndexPreTransform_reconstruct(self, key, recons)
def reconstruct_n(self, i0, ni, recons):
return _swigfaiss.IndexPreTransform_reconstruct_n(self, i0, ni, recons)
def search_and_reconstruct(self, n, x, k, distances, labels, recons, params=None):
return _swigfaiss.IndexPreTransform_search_and_reconstruct(self, n, x, k, distances, labels, recons, params)
def apply_chain(self, n, x):
r"""
apply the transforms in the chain. The returned float * may be
equal to x, otherwise it should be deallocated.
"""
return _swigfaiss.IndexPreTransform_apply_chain(self, n, x)
def reverse_chain(self, n, xt, x):
r"""
Reverse the transforms in the chain. May not be implemented for
all transforms in the chain or may return approximate results.
"""
return _swigfaiss.IndexPreTransform_reverse_chain(self, n, xt, x)
def get_distance_computer(self):
return _swigfaiss.IndexPreTransform_get_distance_computer(self)
def sa_code_size(self):
return _swigfaiss.IndexPreTransform_sa_code_size(self)
def sa_encode(self, n, x, bytes):
return _swigfaiss.IndexPreTransform_sa_encode(self, n, x, bytes)
def sa_decode(self, n, bytes, x):
return _swigfaiss.IndexPreTransform_sa_decode(self, n, bytes, x)
def merge_from(self, otherIndex, add_id=0):
return _swigfaiss.IndexPreTransform_merge_from(self, otherIndex, add_id)
def check_compatible_for_merge(self, otherIndex):
return _swigfaiss.IndexPreTransform_check_compatible_for_merge(self, otherIndex)
__swig_destroy__ = _swigfaiss.delete_IndexPreTransform
# Register IndexPreTransform in _swigfaiss:
_swigfaiss.IndexPreTransform_swigregister(IndexPreTransform)
# SWIG-generated proxy for faiss::IndexRefine: coarse search on base_index,
# re-ranking of the candidates on refine_index. All methods delegate to C++.
class IndexRefine(Index):
    r"""
    Index that queries in a base_index (a fast one) and refines the
    results with an exact search, hopefully improving the results.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    base_index = property(_swigfaiss.IndexRefine_base_index_get, _swigfaiss.IndexRefine_base_index_set, doc=r""" faster index to pre-select the vectors that should be filtered""")
    refine_index = property(_swigfaiss.IndexRefine_refine_index_get, _swigfaiss.IndexRefine_refine_index_set, doc=r""" refinement index""")
    own_fields = property(_swigfaiss.IndexRefine_own_fields_get, _swigfaiss.IndexRefine_own_fields_set, doc=r""" should the base index be deallocated?""")
    own_refine_index = property(_swigfaiss.IndexRefine_own_refine_index_get, _swigfaiss.IndexRefine_own_refine_index_set, doc=r""" same with the refinement index""")
    k_factor = property(_swigfaiss.IndexRefine_k_factor_get, _swigfaiss.IndexRefine_k_factor_set, doc=r"""
    factor between k requested in search and the k requested from
    the base_index (should be >= 1)
    """)
    def __init__(self, *args):
        _swigfaiss.IndexRefine_swiginit(self, _swigfaiss.new_IndexRefine(*args))
    def train(self, n, x):
        return _swigfaiss.IndexRefine_train(self, n, x)
    def add(self, n, x):
        return _swigfaiss.IndexRefine_add(self, n, x)
    def reset(self):
        return _swigfaiss.IndexRefine_reset(self)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexRefine_search(self, n, x, k, distances, labels, params)
    def reconstruct(self, key, recons):
        return _swigfaiss.IndexRefine_reconstruct(self, key, recons)
    def sa_code_size(self):
        return _swigfaiss.IndexRefine_sa_code_size(self)
    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexRefine_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        r"""
        The sa_decode decodes from the index_refine, which is assumed to be more
        accurate
        """
        return _swigfaiss.IndexRefine_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexRefine

# Register IndexRefine in _swigfaiss:
_swigfaiss.IndexRefine_swigregister(IndexRefine)
# SWIG-generated proxy for faiss::IndexRefineFlat (IndexRefine specialization
# whose refinement index is an IndexFlat).
class IndexRefineFlat(IndexRefine):
    r"""
    Version where the refinement index is an IndexFlat. It has one additional
    constructor that takes a table of elements to add to the flat refinement
    index
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, *args):
        _swigfaiss.IndexRefineFlat_swiginit(self, _swigfaiss.new_IndexRefineFlat(*args))
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexRefineFlat_search(self, n, x, k, distances, labels, params)
    __swig_destroy__ = _swigfaiss.delete_IndexRefineFlat

# Register IndexRefineFlat in _swigfaiss:
_swigfaiss.IndexRefineFlat_swigregister(IndexRefineFlat)
# SWIG-generated proxy for faiss::IndexLSH (binary signatures from component
# signs, with optional random rotation and trained thresholds).
class IndexLSH(IndexFlatCodes):
    r""" The sign of each vector component is put in a binary signature"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nbits = property(_swigfaiss.IndexLSH_nbits_get, _swigfaiss.IndexLSH_nbits_set, doc=r""" nb of bits per vector""")
    rotate_data = property(_swigfaiss.IndexLSH_rotate_data_get, _swigfaiss.IndexLSH_rotate_data_set, doc=r""" whether to apply a random rotation to input""")
    train_thresholds = property(_swigfaiss.IndexLSH_train_thresholds_get, _swigfaiss.IndexLSH_train_thresholds_set, doc=r""" whether we train thresholds or use 0""")
    rrot = property(_swigfaiss.IndexLSH_rrot_get, _swigfaiss.IndexLSH_rrot_set, doc=r""" optional random rotation""")
    thresholds = property(_swigfaiss.IndexLSH_thresholds_get, _swigfaiss.IndexLSH_thresholds_set, doc=r""" thresholds to compare with""")
    def apply_preprocess(self, n, x):
        r"""
        Preprocesses and resizes the input to the size required to
        binarize the data
        :type x: float
        :param x: input vectors, size n * d
        :rtype: float
        :return: output vectors, size n * bits. May be the same pointer
        as x, otherwise it should be deleted by the caller
        """
        return _swigfaiss.IndexLSH_apply_preprocess(self, n, x)
    def train(self, n, x):
        return _swigfaiss.IndexLSH_train(self, n, x)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexLSH_search(self, n, x, k, distances, labels, params)
    def transfer_thresholds(self, vt):
        r"""
        transfer the thresholds to a pre-processing stage (and unset
        train_thresholds)
        """
        return _swigfaiss.IndexLSH_transfer_thresholds(self, vt)
    __swig_destroy__ = _swigfaiss.delete_IndexLSH
    def __init__(self, *args):
        _swigfaiss.IndexLSH_swiginit(self, _swigfaiss.new_IndexLSH(*args))
    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexLSH_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexLSH_sa_decode(self, n, bytes, x)

# Register IndexLSH in _swigfaiss:
_swigfaiss.IndexLSH_swigregister(IndexLSH)
# SWIG-generated proxy for faiss::SimulatedAnnealingParameters — a plain
# parameter struct; all fields are exposed as C-backed properties.
class SimulatedAnnealingParameters(object):
    r""" parameters used for the simulated annealing method"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    init_temperature = property(_swigfaiss.SimulatedAnnealingParameters_init_temperature_get, _swigfaiss.SimulatedAnnealingParameters_init_temperature_set)
    temperature_decay = property(_swigfaiss.SimulatedAnnealingParameters_temperature_decay_get, _swigfaiss.SimulatedAnnealingParameters_temperature_decay_set)
    n_iter = property(_swigfaiss.SimulatedAnnealingParameters_n_iter_get, _swigfaiss.SimulatedAnnealingParameters_n_iter_set)
    n_redo = property(_swigfaiss.SimulatedAnnealingParameters_n_redo_get, _swigfaiss.SimulatedAnnealingParameters_n_redo_set)
    seed = property(_swigfaiss.SimulatedAnnealingParameters_seed_get, _swigfaiss.SimulatedAnnealingParameters_seed_set)
    verbose = property(_swigfaiss.SimulatedAnnealingParameters_verbose_get, _swigfaiss.SimulatedAnnealingParameters_verbose_set)
    only_bit_flips = property(_swigfaiss.SimulatedAnnealingParameters_only_bit_flips_get, _swigfaiss.SimulatedAnnealingParameters_only_bit_flips_set)
    init_random = property(_swigfaiss.SimulatedAnnealingParameters_init_random_get, _swigfaiss.SimulatedAnnealingParameters_init_random_set)
    def __init__(self):
        _swigfaiss.SimulatedAnnealingParameters_swiginit(self, _swigfaiss.new_SimulatedAnnealingParameters())
    __swig_destroy__ = _swigfaiss.delete_SimulatedAnnealingParameters

# Register SimulatedAnnealingParameters in _swigfaiss:
_swigfaiss.SimulatedAnnealingParameters_swigregister(SimulatedAnnealingParameters)
# SWIG-generated proxy for the abstract C++ faiss::PermutationObjective.
# The constructor is blocked because the C++ class is abstract.
class PermutationObjective(object):
    r""" abstract class for the loss function"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    n = property(_swigfaiss.PermutationObjective_n_get, _swigfaiss.PermutationObjective_n_set)
    def compute_cost(self, perm):
        return _swigfaiss.PermutationObjective_compute_cost(self, perm)
    def cost_update(self, perm, iw, jw):
        return _swigfaiss.PermutationObjective_cost_update(self, perm, iw, jw)
    __swig_destroy__ = _swigfaiss.delete_PermutationObjective

# Register PermutationObjective in _swigfaiss:
_swigfaiss.PermutationObjective_swigregister(PermutationObjective)
# SWIG-generated proxy for faiss::ReproduceDistancesObjective — a concrete
# PermutationObjective whose cost compares source vs. target distance tables.
class ReproduceDistancesObjective(PermutationObjective):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    dis_weight_factor = property(_swigfaiss.ReproduceDistancesObjective_dis_weight_factor_get, _swigfaiss.ReproduceDistancesObjective_dis_weight_factor_set)

    @staticmethod
    def sqr(x):
        return _swigfaiss.ReproduceDistancesObjective_sqr(x)

    def dis_weight(self, x):
        return _swigfaiss.ReproduceDistancesObjective_dis_weight(self, x)
    source_dis = property(_swigfaiss.ReproduceDistancesObjective_source_dis_get, _swigfaiss.ReproduceDistancesObjective_source_dis_set, doc=r""" "real" corrected distances (size n^2)""")
    target_dis = property(_swigfaiss.ReproduceDistancesObjective_target_dis_get, _swigfaiss.ReproduceDistancesObjective_target_dis_set, doc=r""" wanted distances (size n^2)""")
    weights = property(_swigfaiss.ReproduceDistancesObjective_weights_get, _swigfaiss.ReproduceDistancesObjective_weights_set, doc=r""" weights for each distance (size n^2)""")
    def get_source_dis(self, i, j):
        return _swigfaiss.ReproduceDistancesObjective_get_source_dis(self, i, j)
    def compute_cost(self, perm):
        return _swigfaiss.ReproduceDistancesObjective_compute_cost(self, perm)
    def cost_update(self, perm, iw, jw):
        return _swigfaiss.ReproduceDistancesObjective_cost_update(self, perm, iw, jw)
    def __init__(self, n, source_dis_in, target_dis_in, dis_weight_factor):
        _swigfaiss.ReproduceDistancesObjective_swiginit(self, _swigfaiss.new_ReproduceDistancesObjective(n, source_dis_in, target_dis_in, dis_weight_factor))

    @staticmethod
    def compute_mean_stdev(tab, n2, mean_out, stddev_out):
        return _swigfaiss.ReproduceDistancesObjective_compute_mean_stdev(tab, n2, mean_out, stddev_out)

    def set_affine_target_dis(self, source_dis_in):
        return _swigfaiss.ReproduceDistancesObjective_set_affine_target_dis(self, source_dis_in)
    __swig_destroy__ = _swigfaiss.delete_ReproduceDistancesObjective

# Register ReproduceDistancesObjective in _swigfaiss:
_swigfaiss.ReproduceDistancesObjective_swigregister(ReproduceDistancesObjective)
# Module-level alias SWIG emits for the static method
# ReproduceDistancesObjective.sqr (kept for backward compatibility).
def ReproduceDistancesObjective_sqr(x):
    return _swigfaiss.ReproduceDistancesObjective_sqr(x)
# Module-level alias SWIG emits for the static method
# ReproduceDistancesObjective.compute_mean_stdev (kept for backward compatibility).
def ReproduceDistancesObjective_compute_mean_stdev(tab, n2, mean_out, stddev_out):
    return _swigfaiss.ReproduceDistancesObjective_compute_mean_stdev(tab, n2, mean_out, stddev_out)
# SWIG-generated proxy for faiss::SimulatedAnnealingOptimizer.
class SimulatedAnnealingOptimizer(SimulatedAnnealingParameters):
    r""" Simulated annealing optimization algorithm for permutations."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    obj = property(_swigfaiss.SimulatedAnnealingOptimizer_obj_get, _swigfaiss.SimulatedAnnealingOptimizer_obj_set)
    n = property(_swigfaiss.SimulatedAnnealingOptimizer_n_get, _swigfaiss.SimulatedAnnealingOptimizer_n_set, doc=r""" size of the permutation""")
    logfile = property(_swigfaiss.SimulatedAnnealingOptimizer_logfile_get, _swigfaiss.SimulatedAnnealingOptimizer_logfile_set)
    def __init__(self, obj, p):
        # NOTE(review): this docstring ("logs values of the cost function")
        # appears to belong to the `logfile` property above — a SWIG doc
        # misplacement; confirm against the C++ header.
        r""" logs values of the cost function"""
        _swigfaiss.SimulatedAnnealingOptimizer_swiginit(self, _swigfaiss.new_SimulatedAnnealingOptimizer(obj, p))
    rnd = property(_swigfaiss.SimulatedAnnealingOptimizer_rnd_get, _swigfaiss.SimulatedAnnealingOptimizer_rnd_set)
    init_cost = property(_swigfaiss.SimulatedAnnealingOptimizer_init_cost_get, _swigfaiss.SimulatedAnnealingOptimizer_init_cost_set, doc=r""" remember initial cost of optimization""")
    def optimize(self, perm):
        return _swigfaiss.SimulatedAnnealingOptimizer_optimize(self, perm)
    def run_optimization(self, best_perm):
        return _swigfaiss.SimulatedAnnealingOptimizer_run_optimization(self, best_perm)
    __swig_destroy__ = _swigfaiss.delete_SimulatedAnnealingOptimizer

# Register SimulatedAnnealingOptimizer in _swigfaiss:
_swigfaiss.SimulatedAnnealingOptimizer_swigregister(SimulatedAnnealingOptimizer)
# SWIG-generated proxy for faiss::PolysemousTraining.
class PolysemousTraining(SimulatedAnnealingParameters):
    r""" optimizes the order of indices in a ProductQuantizer"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Optimization-type enum values mirrored from C++. The bare r-strings
    # after the constants are no-op statements SWIG emits as pseudo-docs
    # for the preceding constant.
    OT_None = _swigfaiss.PolysemousTraining_OT_None
    OT_ReproduceDistances_affine = _swigfaiss.PolysemousTraining_OT_ReproduceDistances_affine
    r""" default"""
    OT_Ranking_weighted_diff = _swigfaiss.PolysemousTraining_OT_Ranking_weighted_diff
    r"""
    same as _2, but use rank of y+ - rank of
    y-
    """
    optimization_type = property(_swigfaiss.PolysemousTraining_optimization_type_get, _swigfaiss.PolysemousTraining_optimization_type_set)
    ntrain_permutation = property(_swigfaiss.PolysemousTraining_ntrain_permutation_get, _swigfaiss.PolysemousTraining_ntrain_permutation_set, doc=r"""
    use 1/4 of the training points for the optimization, with
    max. ntrain_permutation. If ntrain_permutation == 0: train on
    centroids
    """)
    dis_weight_factor = property(_swigfaiss.PolysemousTraining_dis_weight_factor_get, _swigfaiss.PolysemousTraining_dis_weight_factor_set, doc=r""" decay of exp that weights distance loss""")
    max_memory = property(_swigfaiss.PolysemousTraining_max_memory_get, _swigfaiss.PolysemousTraining_max_memory_set, doc=r""" refuse to train if it would require more than that amount of RAM""")
    log_pattern = property(_swigfaiss.PolysemousTraining_log_pattern_get, _swigfaiss.PolysemousTraining_log_pattern_set)
    def __init__(self):
        _swigfaiss.PolysemousTraining_swiginit(self, _swigfaiss.new_PolysemousTraining())
    def optimize_pq_for_hamming(self, pq, n, x):
        r"""
        reorder the centroids so that the Hamming distance becomes a
        good approximation of the SDC distance (called by train)
        """
        return _swigfaiss.PolysemousTraining_optimize_pq_for_hamming(self, pq, n, x)
    def optimize_ranking(self, pq, n, x):
        r""" called by optimize_pq_for_hamming"""
        return _swigfaiss.PolysemousTraining_optimize_ranking(self, pq, n, x)
    def optimize_reproduce_distances(self, pq):
        r""" called by optimize_pq_for_hamming"""
        return _swigfaiss.PolysemousTraining_optimize_reproduce_distances(self, pq)
    def memory_usage_per_thread(self, pq):
        r""" make sure we don't blow up the memory"""
        return _swigfaiss.PolysemousTraining_memory_usage_per_thread(self, pq)
    __swig_destroy__ = _swigfaiss.delete_PolysemousTraining

# Register PolysemousTraining in _swigfaiss:
_swigfaiss.PolysemousTraining_swigregister(PolysemousTraining)
# SWIG-generated proxy for faiss::IndexPQ.
class IndexPQ(IndexFlatCodes):
    r"""
    Index based on a product quantizer. Stored vectors are
    approximated by PQ codes.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pq = property(_swigfaiss.IndexPQ_pq_get, _swigfaiss.IndexPQ_pq_set, doc=r""" The product quantizer used to encode the vectors""")
    def __init__(self, *args):
        _swigfaiss.IndexPQ_swiginit(self, _swigfaiss.new_IndexPQ(*args))
    def train(self, n, x):
        return _swigfaiss.IndexPQ_train(self, n, x)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexPQ_search(self, n, x, k, distances, labels, params)
    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexPQ_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexPQ_sa_decode(self, n, bytes, x)
    def get_FlatCodesDistanceComputer(self):
        return _swigfaiss.IndexPQ_get_FlatCodesDistanceComputer(self)
    do_polysemous_training = property(_swigfaiss.IndexPQ_do_polysemous_training_get, _swigfaiss.IndexPQ_do_polysemous_training_set, doc=r""" false = standard PQ""")
    polysemous_training = property(_swigfaiss.IndexPQ_polysemous_training_get, _swigfaiss.IndexPQ_polysemous_training_set, doc=r""" parameters used for the polysemous training""")
    # Search-type enum values mirrored from C++; the bare r-strings are
    # no-op pseudo-doc statements SWIG emits for the preceding constant.
    ST_PQ = _swigfaiss.IndexPQ_ST_PQ
    r""" asymmetric product quantizer (default)"""
    ST_HE = _swigfaiss.IndexPQ_ST_HE
    r""" Hamming distance on codes"""
    ST_generalized_HE = _swigfaiss.IndexPQ_ST_generalized_HE
    r""" nb of same codes"""
    ST_SDC = _swigfaiss.IndexPQ_ST_SDC
    r""" symmetric product quantizer (SDC)"""
    ST_polysemous = _swigfaiss.IndexPQ_ST_polysemous
    r""" HE filter (using ht) + PQ combination"""
    ST_polysemous_generalize = _swigfaiss.IndexPQ_ST_polysemous_generalize
    r""" Filter on generalized Hamming"""
    search_type = property(_swigfaiss.IndexPQ_search_type_get, _swigfaiss.IndexPQ_search_type_set)
    encode_signs = property(_swigfaiss.IndexPQ_encode_signs_get, _swigfaiss.IndexPQ_encode_signs_set)
    polysemous_ht = property(_swigfaiss.IndexPQ_polysemous_ht_get, _swigfaiss.IndexPQ_polysemous_ht_set, doc=r""" Hamming threshold used for polysemy""")
    def search_core_polysemous(self, n, x, k, distances, labels, polysemous_ht, generalized_hamming):
        return _swigfaiss.IndexPQ_search_core_polysemous(self, n, x, k, distances, labels, polysemous_ht, generalized_hamming)
    def hamming_distance_histogram(self, n, x, nb, xb, dist_histogram):
        r"""
        prepare query for a polysemous search, but instead of
        computing the result, just get the histogram of Hamming
        distances. May be computed on a provided dataset if xb != NULL
        :type dist_histogram: int
        :param dist_histogram: (M * nbits + 1)
        """
        return _swigfaiss.IndexPQ_hamming_distance_histogram(self, n, x, nb, xb, dist_histogram)
    def hamming_distance_table(self, n, x, dis):
        r"""
        compute pairwise distances between queries and database
        :type n: int
        :param n: nb of query vectors
        :type x: float
        :param x: query vector, size n * d
        :type dis: int
        :param dis: output distances, size n * ntotal
        """
        return _swigfaiss.IndexPQ_hamming_distance_table(self, n, x, dis)
    __swig_destroy__ = _swigfaiss.delete_IndexPQ

# Register IndexPQ in _swigfaiss:
_swigfaiss.IndexPQ_swigregister(IndexPQ)
# SWIG-generated proxy for faiss::SearchParametersPQ (per-query overrides
# of IndexPQ's search_type / polysemous_ht).
class SearchParametersPQ(SearchParameters):
    r""" override search parameters from the class"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    search_type = property(_swigfaiss.SearchParametersPQ_search_type_get, _swigfaiss.SearchParametersPQ_search_type_set)
    polysemous_ht = property(_swigfaiss.SearchParametersPQ_polysemous_ht_get, _swigfaiss.SearchParametersPQ_polysemous_ht_set)
    def __init__(self):
        _swigfaiss.SearchParametersPQ_swiginit(self, _swigfaiss.new_SearchParametersPQ())
    __swig_destroy__ = _swigfaiss.delete_SearchParametersPQ

# Register SearchParametersPQ in _swigfaiss:
_swigfaiss.SearchParametersPQ_swigregister(SearchParametersPQ)
# SWIG-generated proxy for the faiss::IndexPQStats counter struct.
class IndexPQStats(object):
    r"""
    statistics are robust to internal threading, but not if
    IndexPQ::search is called by multiple threads
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nq = property(_swigfaiss.IndexPQStats_nq_get, _swigfaiss.IndexPQStats_nq_set)
    ncode = property(_swigfaiss.IndexPQStats_ncode_get, _swigfaiss.IndexPQStats_ncode_set)
    n_hamming_pass = property(_swigfaiss.IndexPQStats_n_hamming_pass_get, _swigfaiss.IndexPQStats_n_hamming_pass_set)
    def __init__(self):
        _swigfaiss.IndexPQStats_swiginit(self, _swigfaiss.new_IndexPQStats())
    def reset(self):
        return _swigfaiss.IndexPQStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IndexPQStats

# Register IndexPQStats in _swigfaiss:
_swigfaiss.IndexPQStats_swigregister(IndexPQStats)
# SWIG-generated proxy for faiss::MultiIndexQuantizer.
class MultiIndexQuantizer(Index):
    r"""
    Quantizer where centroids are virtual: they are the Cartesian
    product of sub-centroids.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pq = property(_swigfaiss.MultiIndexQuantizer_pq_get, _swigfaiss.MultiIndexQuantizer_pq_set)
    def train(self, n, x):
        return _swigfaiss.MultiIndexQuantizer_train(self, n, x)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.MultiIndexQuantizer_search(self, n, x, k, distances, labels, params)
    def add(self, n, x):
        r""" add and reset will crash at runtime"""
        return _swigfaiss.MultiIndexQuantizer_add(self, n, x)
    def reset(self):
        return _swigfaiss.MultiIndexQuantizer_reset(self)
    def __init__(self, *args):
        _swigfaiss.MultiIndexQuantizer_swiginit(self, _swigfaiss.new_MultiIndexQuantizer(*args))
    def reconstruct(self, key, recons):
        return _swigfaiss.MultiIndexQuantizer_reconstruct(self, key, recons)
    __swig_destroy__ = _swigfaiss.delete_MultiIndexQuantizer

# Register MultiIndexQuantizer in _swigfaiss:
_swigfaiss.MultiIndexQuantizer_swigregister(MultiIndexQuantizer)
# SWIG-generated proxy for faiss::MultiIndexQuantizer2.
class MultiIndexQuantizer2(MultiIndexQuantizer):
    r""" MultiIndexQuantizer where the PQ assignmnet is performed by sub-indexes"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    assign_indexes = property(_swigfaiss.MultiIndexQuantizer2_assign_indexes_get, _swigfaiss.MultiIndexQuantizer2_assign_indexes_set, doc=r""" M Indexes on d / M dimensions""")
    own_fields = property(_swigfaiss.MultiIndexQuantizer2_own_fields_get, _swigfaiss.MultiIndexQuantizer2_own_fields_set)
    def __init__(self, *args):
        _swigfaiss.MultiIndexQuantizer2_swiginit(self, _swigfaiss.new_MultiIndexQuantizer2(*args))
    def train(self, n, x):
        return _swigfaiss.MultiIndexQuantizer2_train(self, n, x)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.MultiIndexQuantizer2_search(self, n, x, k, distances, labels, params)
    __swig_destroy__ = _swigfaiss.delete_MultiIndexQuantizer2

# Register MultiIndexQuantizer2 in _swigfaiss:
_swigfaiss.MultiIndexQuantizer2_swigregister(MultiIndexQuantizer2)
# SWIG-generated proxy for faiss::IndexAdditiveQuantizer, the common base of
# the residual/local-search quantizer indexes below.
class IndexAdditiveQuantizer(IndexFlatCodes):
    r""" Abstract class for additive quantizers. The search functions are in common."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    aq = property(_swigfaiss.IndexAdditiveQuantizer_aq_get, _swigfaiss.IndexAdditiveQuantizer_aq_set)
    def __init__(self, *args):
        _swigfaiss.IndexAdditiveQuantizer_swiginit(self, _swigfaiss.new_IndexAdditiveQuantizer(*args))
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexAdditiveQuantizer_search(self, n, x, k, distances, labels, params)
    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexAdditiveQuantizer_sa_encode(self, n, x, bytes)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexAdditiveQuantizer_sa_decode(self, n, bytes, x)
    def get_FlatCodesDistanceComputer(self):
        return _swigfaiss.IndexAdditiveQuantizer_get_FlatCodesDistanceComputer(self)
    __swig_destroy__ = _swigfaiss.delete_IndexAdditiveQuantizer

# Register IndexAdditiveQuantizer in _swigfaiss:
_swigfaiss.IndexAdditiveQuantizer_swigregister(IndexAdditiveQuantizer)
# SWIG-generated proxy for faiss::IndexResidualQuantizer.
class IndexResidualQuantizer(IndexAdditiveQuantizer):
    r"""
    Index based on a residual quantizer. Stored vectors are
    approximated by residual quantization codes.
    Can also be used as a codec
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    rq = property(_swigfaiss.IndexResidualQuantizer_rq_get, _swigfaiss.IndexResidualQuantizer_rq_set, doc=r""" The residual quantizer used to encode the vectors""")
    def __init__(self, *args):
        _swigfaiss.IndexResidualQuantizer_swiginit(self, _swigfaiss.new_IndexResidualQuantizer(*args))
    def train(self, n, x):
        return _swigfaiss.IndexResidualQuantizer_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_IndexResidualQuantizer

# Register IndexResidualQuantizer in _swigfaiss:
_swigfaiss.IndexResidualQuantizer_swigregister(IndexResidualQuantizer)
# SWIG-generated proxy for faiss::IndexLocalSearchQuantizer.
class IndexLocalSearchQuantizer(IndexAdditiveQuantizer):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    lsq = property(_swigfaiss.IndexLocalSearchQuantizer_lsq_get, _swigfaiss.IndexLocalSearchQuantizer_lsq_set)
    def __init__(self, *args):
        _swigfaiss.IndexLocalSearchQuantizer_swiginit(self, _swigfaiss.new_IndexLocalSearchQuantizer(*args))
    def train(self, n, x):
        return _swigfaiss.IndexLocalSearchQuantizer_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_IndexLocalSearchQuantizer

# Register IndexLocalSearchQuantizer in _swigfaiss:
_swigfaiss.IndexLocalSearchQuantizer_swigregister(IndexLocalSearchQuantizer)
# SWIG-generated proxy for faiss::IndexProductResidualQuantizer.
class IndexProductResidualQuantizer(IndexAdditiveQuantizer):
    r""" Index based on a product residual quantizer."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    prq = property(_swigfaiss.IndexProductResidualQuantizer_prq_get, _swigfaiss.IndexProductResidualQuantizer_prq_set, doc=r""" The product residual quantizer used to encode the vectors""")
    def __init__(self, *args):
        _swigfaiss.IndexProductResidualQuantizer_swiginit(self, _swigfaiss.new_IndexProductResidualQuantizer(*args))
    def train(self, n, x):
        return _swigfaiss.IndexProductResidualQuantizer_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_IndexProductResidualQuantizer

# Register IndexProductResidualQuantizer in _swigfaiss:
_swigfaiss.IndexProductResidualQuantizer_swigregister(IndexProductResidualQuantizer)
# SWIG-generated proxy for faiss::IndexProductLocalSearchQuantizer.
class IndexProductLocalSearchQuantizer(IndexAdditiveQuantizer):
    r""" Index based on a product local search quantizer."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    plsq = property(_swigfaiss.IndexProductLocalSearchQuantizer_plsq_get, _swigfaiss.IndexProductLocalSearchQuantizer_plsq_set, doc=r""" The product local search quantizer used to encode the vectors""")
    def __init__(self, *args):
        _swigfaiss.IndexProductLocalSearchQuantizer_swiginit(self, _swigfaiss.new_IndexProductLocalSearchQuantizer(*args))
    def train(self, n, x):
        return _swigfaiss.IndexProductLocalSearchQuantizer_train(self, n, x)
    __swig_destroy__ = _swigfaiss.delete_IndexProductLocalSearchQuantizer

# Register IndexProductLocalSearchQuantizer in _swigfaiss:
_swigfaiss.IndexProductLocalSearchQuantizer_swigregister(IndexProductLocalSearchQuantizer)
# SWIG-generated proxy for faiss::AdditiveCoarseQuantizer.
class AdditiveCoarseQuantizer(Index):
    r"""
    A "virtual" index where the elements are the residual quantizer centroids.
    Intended for use as a coarse quantizer in an IndexIVF.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    aq = property(_swigfaiss.AdditiveCoarseQuantizer_aq_get, _swigfaiss.AdditiveCoarseQuantizer_aq_set)
    def __init__(self, *args):
        _swigfaiss.AdditiveCoarseQuantizer_swiginit(self, _swigfaiss.new_AdditiveCoarseQuantizer(*args))
    centroid_norms = property(_swigfaiss.AdditiveCoarseQuantizer_centroid_norms_get, _swigfaiss.AdditiveCoarseQuantizer_centroid_norms_set, doc=r""" norms of centroids, useful for knn-search""")
    def add(self, n, x):
        # "N/A": centroids are virtual, so adding vectors is not applicable.
        r""" N/A"""
        return _swigfaiss.AdditiveCoarseQuantizer_add(self, n, x)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.AdditiveCoarseQuantizer_search(self, n, x, k, distances, labels, params)
    def reconstruct(self, key, recons):
        return _swigfaiss.AdditiveCoarseQuantizer_reconstruct(self, key, recons)
    def train(self, n, x):
        return _swigfaiss.AdditiveCoarseQuantizer_train(self, n, x)
    def reset(self):
        r""" N/A"""
        return _swigfaiss.AdditiveCoarseQuantizer_reset(self)
    __swig_destroy__ = _swigfaiss.delete_AdditiveCoarseQuantizer

# Register AdditiveCoarseQuantizer in _swigfaiss:
_swigfaiss.AdditiveCoarseQuantizer_swigregister(AdditiveCoarseQuantizer)
# SWIG-generated proxy for faiss::ResidualCoarseQuantizer.
class ResidualCoarseQuantizer(AdditiveCoarseQuantizer):
    r"""
    The ResidualCoarseQuantizer is a bit specialized compared to the
    default AdditiveCoarseQuantizer because it can use a beam search
    at search time (slow but may be useful for very large vocabularies)
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    rq = property(_swigfaiss.ResidualCoarseQuantizer_rq_get, _swigfaiss.ResidualCoarseQuantizer_rq_set, doc=r""" The residual quantizer used to encode the vectors""")
    beam_factor = property(_swigfaiss.ResidualCoarseQuantizer_beam_factor_get, _swigfaiss.ResidualCoarseQuantizer_beam_factor_set, doc=r"""
    factor between the beam size and the search k
    if negative, use exact search-to-centroid
    """)
    def set_beam_factor(self, new_beam_factor):
        r""" computes centroid norms if required"""
        return _swigfaiss.ResidualCoarseQuantizer_set_beam_factor(self, new_beam_factor)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.ResidualCoarseQuantizer_search(self, n, x, k, distances, labels, params)
    def __init__(self, *args):
        _swigfaiss.ResidualCoarseQuantizer_swiginit(self, _swigfaiss.new_ResidualCoarseQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_ResidualCoarseQuantizer

# Register ResidualCoarseQuantizer in _swigfaiss:
_swigfaiss.ResidualCoarseQuantizer_swigregister(ResidualCoarseQuantizer)
# SWIG-generated proxy for faiss::LocalSearchCoarseQuantizer.
# NOTE(review): the doc string on `lsq` says "residual quantizer" — likely a
# copy-paste in the upstream C++ comment; the field is the local search quantizer.
class LocalSearchCoarseQuantizer(AdditiveCoarseQuantizer):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    lsq = property(_swigfaiss.LocalSearchCoarseQuantizer_lsq_get, _swigfaiss.LocalSearchCoarseQuantizer_lsq_set, doc=r""" The residual quantizer used to encode the vectors""")
    def __init__(self, *args):
        _swigfaiss.LocalSearchCoarseQuantizer_swiginit(self, _swigfaiss.new_LocalSearchCoarseQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_LocalSearchCoarseQuantizer

# Register LocalSearchCoarseQuantizer in _swigfaiss:
_swigfaiss.LocalSearchCoarseQuantizer_swigregister(LocalSearchCoarseQuantizer)
# SWIG-generated proxy for the abstract C++ faiss::IOReader callback
# interface; __call__(ptr, size, nitems) mirrors fread-style semantics.
class IOReader(object):
    r"""
    Copyright (c) Facebook, Inc. and its affiliates.
    This source code is licensed under the MIT license found in the
    LICENSE file in the root directory of this source tree.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    name = property(_swigfaiss.IOReader_name_get, _swigfaiss.IOReader_name_set)
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.IOReader___call__(self, ptr, size, nitems)
    def fileno(self):
        return _swigfaiss.IOReader_fileno(self)
    __swig_destroy__ = _swigfaiss.delete_IOReader

# Register IOReader in _swigfaiss:
_swigfaiss.IOReader_swigregister(IOReader)
# SWIG-generated proxy for the abstract C++ faiss::IOWriter callback
# interface; __call__(ptr, size, nitems) mirrors fwrite-style semantics.
class IOWriter(object):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    name = property(_swigfaiss.IOWriter_name_get, _swigfaiss.IOWriter_name_set)
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.IOWriter___call__(self, ptr, size, nitems)
    def fileno(self):
        return _swigfaiss.IOWriter_fileno(self)
    __swig_destroy__ = _swigfaiss.delete_IOWriter

# Register IOWriter in _swigfaiss:
_swigfaiss.IOWriter_swigregister(IOWriter)
class VectorIOReader(IOReader):
    r"""IOReader that reads from an in-memory byte vector (``data``),
    tracking the current read position in ``rp``. SWIG proxy of the
    C++ faiss::VectorIOReader."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    data = property(_swigfaiss.VectorIOReader_data_get, _swigfaiss.VectorIOReader_data_set)
    rp = property(_swigfaiss.VectorIOReader_rp_get, _swigfaiss.VectorIOReader_rp_set)
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.VectorIOReader___call__(self, ptr, size, nitems)
    def __init__(self):
        _swigfaiss.VectorIOReader_swiginit(self, _swigfaiss.new_VectorIOReader())
    __swig_destroy__ = _swigfaiss.delete_VectorIOReader
# Register VectorIOReader in _swigfaiss:
_swigfaiss.VectorIOReader_swigregister(VectorIOReader)
class VectorIOWriter(IOWriter):
    r"""IOWriter that appends to an in-memory byte vector (``data``).
    SWIG proxy of the C++ faiss::VectorIOWriter."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    data = property(_swigfaiss.VectorIOWriter_data_get, _swigfaiss.VectorIOWriter_data_set)
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.VectorIOWriter___call__(self, ptr, size, nitems)
    def __init__(self):
        _swigfaiss.VectorIOWriter_swiginit(self, _swigfaiss.new_VectorIOWriter())
    __swig_destroy__ = _swigfaiss.delete_VectorIOWriter
# Register VectorIOWriter in _swigfaiss:
_swigfaiss.VectorIOWriter_swigregister(VectorIOWriter)
class FileIOReader(IOReader):
    r"""IOReader backed by a C ``FILE*`` handle (``f``).  ``need_close``
    records whether the proxy owns the handle and must close it on
    destruction. SWIG proxy of the C++ faiss::FileIOReader."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    f = property(_swigfaiss.FileIOReader_f_get, _swigfaiss.FileIOReader_f_set)
    need_close = property(_swigfaiss.FileIOReader_need_close_get, _swigfaiss.FileIOReader_need_close_set)
    def __init__(self, *args):
        # Accepts either an open FILE* or a filename (overloads resolved in C++).
        _swigfaiss.FileIOReader_swiginit(self, _swigfaiss.new_FileIOReader(*args))
    __swig_destroy__ = _swigfaiss.delete_FileIOReader
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.FileIOReader___call__(self, ptr, size, nitems)
    def fileno(self):
        return _swigfaiss.FileIOReader_fileno(self)
# Register FileIOReader in _swigfaiss:
_swigfaiss.FileIOReader_swigregister(FileIOReader)
class FileIOWriter(IOWriter):
    r"""IOWriter backed by a C ``FILE*`` handle (``f``).  ``need_close``
    records whether the proxy owns the handle and must close it on
    destruction. SWIG proxy of the C++ faiss::FileIOWriter."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    f = property(_swigfaiss.FileIOWriter_f_get, _swigfaiss.FileIOWriter_f_set)
    need_close = property(_swigfaiss.FileIOWriter_need_close_get, _swigfaiss.FileIOWriter_need_close_set)
    def __init__(self, *args):
        # Accepts either an open FILE* or a filename (overloads resolved in C++).
        _swigfaiss.FileIOWriter_swiginit(self, _swigfaiss.new_FileIOWriter(*args))
    __swig_destroy__ = _swigfaiss.delete_FileIOWriter
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.FileIOWriter___call__(self, ptr, size, nitems)
    def fileno(self):
        return _swigfaiss.FileIOWriter_fileno(self)
# Register FileIOWriter in _swigfaiss:
_swigfaiss.FileIOWriter_swigregister(FileIOWriter)
class BufferedIOReader(IOReader):
    r""" wraps an ioreader to make buffered reads to avoid too small reads"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    reader = property(_swigfaiss.BufferedIOReader_reader_get, _swigfaiss.BufferedIOReader_reader_set)
    bsz = property(_swigfaiss.BufferedIOReader_bsz_get, _swigfaiss.BufferedIOReader_bsz_set)
    ofs = property(_swigfaiss.BufferedIOReader_ofs_get, _swigfaiss.BufferedIOReader_ofs_set, doc=r""" offset in input stream""")
    ofs2 = property(_swigfaiss.BufferedIOReader_ofs2_get, _swigfaiss.BufferedIOReader_ofs2_set, doc=r""" number of bytes returned to caller""")
    b0 = property(_swigfaiss.BufferedIOReader_b0_get, _swigfaiss.BufferedIOReader_b0_set, doc=r""" range of available bytes in the buffer""")
    b1 = property(_swigfaiss.BufferedIOReader_b1_get, _swigfaiss.BufferedIOReader_b1_set)
    buffer = property(_swigfaiss.BufferedIOReader_buffer_get, _swigfaiss.BufferedIOReader_buffer_set)
    def __init__(self, *args):
        r"""
        Wrap an existing IOReader with a read buffer.

        :type bsz: int, optional
        :param bsz: buffer size (bytes). Reads will be done by batched of
            this size
        """
        _swigfaiss.BufferedIOReader_swiginit(self, _swigfaiss.new_BufferedIOReader(*args))
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.BufferedIOReader___call__(self, ptr, size, nitems)
    __swig_destroy__ = _swigfaiss.delete_BufferedIOReader
# Register BufferedIOReader in _swigfaiss:
_swigfaiss.BufferedIOReader_swigregister(BufferedIOReader)
class BufferedIOWriter(IOWriter):
    r"""Wraps an IOWriter to batch writes through a buffer of ``bsz``
    bytes, avoiding many small writes (counterpart of BufferedIOReader)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    writer = property(_swigfaiss.BufferedIOWriter_writer_get, _swigfaiss.BufferedIOWriter_writer_set)
    bsz = property(_swigfaiss.BufferedIOWriter_bsz_get, _swigfaiss.BufferedIOWriter_bsz_set)
    ofs = property(_swigfaiss.BufferedIOWriter_ofs_get, _swigfaiss.BufferedIOWriter_ofs_set)
    ofs2 = property(_swigfaiss.BufferedIOWriter_ofs2_get, _swigfaiss.BufferedIOWriter_ofs2_set, doc=r""" number of bytes received from caller""")
    b0 = property(_swigfaiss.BufferedIOWriter_b0_get, _swigfaiss.BufferedIOWriter_b0_set, doc=r""" amount of data in buffer""")
    buffer = property(_swigfaiss.BufferedIOWriter_buffer_get, _swigfaiss.BufferedIOWriter_buffer_set)
    def __init__(self, *args):
        _swigfaiss.BufferedIOWriter_swiginit(self, _swigfaiss.new_BufferedIOWriter(*args))
    def __call__(self, ptr, size, nitems):
        return _swigfaiss.BufferedIOWriter___call__(self, ptr, size, nitems)
    __swig_destroy__ = _swigfaiss.delete_BufferedIOWriter
# Register BufferedIOWriter in _swigfaiss:
_swigfaiss.BufferedIOWriter_swigregister(BufferedIOWriter)
def fourcc(*args):
    r"""Build a 32-bit four-character code from its argument(s); overload
    resolution happens in the C++ layer."""
    return _swigfaiss.fourcc(*args)
def fourcc_inv(*args):
    r"""Inverse of :func:`fourcc`: decompose a 32-bit code back into
    characters (delegated to the C++ layer)."""
    return _swigfaiss.fourcc_inv(*args)
def fourcc_inv_printable(x):
    r"""Decode fourcc ``x`` into a printable string representation
    (delegated to the C++ layer)."""
    return _swigfaiss.fourcc_inv_printable(x)
class InvertedLists(object):
    r"""
    Table of inverted lists
    multithreading rules:
    - concurrent read accesses are allowed
    - concurrent update accesses are allowed
    - for resize and add_entries, only concurrent access to different lists
      are allowed

    Abstract base (no Python-side constructor); concrete storage is
    provided by subclasses such as ArrayInvertedLists.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract base class: only concrete subclasses can be instantiated.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    nlist = property(_swigfaiss.InvertedLists_nlist_get, _swigfaiss.InvertedLists_nlist_set, doc=r""" number of possible key values""")
    code_size = property(_swigfaiss.InvertedLists_code_size_get, _swigfaiss.InvertedLists_code_size_set, doc=r""" code size per vector in bytes""")
    INVALID_CODE_SIZE = _swigfaiss.InvertedLists_INVALID_CODE_SIZE
    r"""
    used for BlockInvertedLists, where the codes are packed into groups
    and the individual code size is meaningless
    """
    def list_size(self, list_no):
        r""" get the size of a list"""
        return _swigfaiss.InvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        r"""
        get the codes for an inverted list
        must be released by release_codes
        :rtype: uint8_t
        :return: codes size list_size * code_size
        """
        return _swigfaiss.InvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        r"""
        get the ids for an inverted list
        must be released by release_ids
        :rtype: int
        :return: ids size list_size
        """
        return _swigfaiss.InvertedLists_get_ids(self, list_no)
    def release_codes(self, list_no, codes):
        r""" release codes returned by get_codes (default implementation is a no-op)"""
        return _swigfaiss.InvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        r""" release ids returned by get_ids"""
        return _swigfaiss.InvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        r"""
        :rtype: int
        :return: a single id in an inverted list
        """
        return _swigfaiss.InvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        r"""
        :rtype: uint8_t
        :return: a single code in an inverted list
            (should be deallocated with release_codes)
        """
        return _swigfaiss.InvertedLists_get_single_code(self, list_no, offset)
    def prefetch_lists(self, list_nos, nlist):
        r"""
        prepare the following lists (default does nothing)
        a list can be -1 hence the signed long
        """
        return _swigfaiss.InvertedLists_prefetch_lists(self, list_nos, nlist)
    def add_entry(self, list_no, theid, code):
        r""" add one entry to an inverted list"""
        return _swigfaiss.InvertedLists_add_entry(self, list_no, theid, code)
    def add_entries(self, list_no, n_entry, ids, code):
        # Batch version of add_entry.
        return _swigfaiss.InvertedLists_add_entries(self, list_no, n_entry, ids, code)
    def update_entry(self, list_no, offset, id, code):
        # Overwrite a single entry in place.
        return _swigfaiss.InvertedLists_update_entry(self, list_no, offset, id, code)
    def update_entries(self, list_no, offset, n_entry, ids, code):
        # Batch version of update_entry.
        return _swigfaiss.InvertedLists_update_entries(self, list_no, offset, n_entry, ids, code)
    def resize(self, list_no, new_size):
        return _swigfaiss.InvertedLists_resize(self, list_no, new_size)
    def reset(self):
        return _swigfaiss.InvertedLists_reset(self)
    def merge_from(self, oivf, add_id):
        r""" move all entries from oivf (empty on output)"""
        return _swigfaiss.InvertedLists_merge_from(self, oivf, add_id)
    __swig_destroy__ = _swigfaiss.delete_InvertedLists
    def imbalance_factor(self):
        r""" 1= perfectly balanced, >1: imbalanced"""
        return _swigfaiss.InvertedLists_imbalance_factor(self)
    def print_stats(self):
        r""" display some stats about the inverted lists"""
        return _swigfaiss.InvertedLists_print_stats(self)
    def compute_ntotal(self):
        r""" sum up list sizes"""
        return _swigfaiss.InvertedLists_compute_ntotal(self)
# Register InvertedLists in _swigfaiss:
_swigfaiss.InvertedLists_swigregister(InvertedLists)
class ArrayInvertedLists(InvertedLists):
    r""" simple (default) implementation as an array of inverted lists"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    codes = property(_swigfaiss.ArrayInvertedLists_codes_get, _swigfaiss.ArrayInvertedLists_codes_set)
    ids = property(_swigfaiss.ArrayInvertedLists_ids_get, _swigfaiss.ArrayInvertedLists_ids_set, doc=r""" Inverted lists for indexes""")
    def __init__(self, nlist, code_size):
        # nlist: number of inverted lists; code_size: bytes per stored code.
        _swigfaiss.ArrayInvertedLists_swiginit(self, _swigfaiss.new_ArrayInvertedLists(nlist, code_size))
    def list_size(self, list_no):
        return _swigfaiss.ArrayInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.ArrayInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.ArrayInvertedLists_get_ids(self, list_no)
    def add_entries(self, list_no, n_entry, ids, code):
        return _swigfaiss.ArrayInvertedLists_add_entries(self, list_no, n_entry, ids, code)
    def update_entries(self, list_no, offset, n_entry, ids, code):
        return _swigfaiss.ArrayInvertedLists_update_entries(self, list_no, offset, n_entry, ids, code)
    def resize(self, list_no, new_size):
        return _swigfaiss.ArrayInvertedLists_resize(self, list_no, new_size)
    __swig_destroy__ = _swigfaiss.delete_ArrayInvertedLists
# Register ArrayInvertedLists in _swigfaiss:
_swigfaiss.ArrayInvertedLists_swigregister(ArrayInvertedLists)
class ReadOnlyInvertedLists(InvertedLists):
    r""" invlists that fail for all write functions"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract base class: only concrete subclasses can be instantiated.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # The write operations below delegate to C++ implementations that fail
    # (see class docstring); they exist so subclasses inherit the behavior.
    def add_entries(self, list_no, n_entry, ids, code):
        return _swigfaiss.ReadOnlyInvertedLists_add_entries(self, list_no, n_entry, ids, code)
    def update_entries(self, list_no, offset, n_entry, ids, code):
        return _swigfaiss.ReadOnlyInvertedLists_update_entries(self, list_no, offset, n_entry, ids, code)
    def resize(self, list_no, new_size):
        return _swigfaiss.ReadOnlyInvertedLists_resize(self, list_no, new_size)
    __swig_destroy__ = _swigfaiss.delete_ReadOnlyInvertedLists
# Register ReadOnlyInvertedLists in _swigfaiss:
_swigfaiss.ReadOnlyInvertedLists_swigregister(ReadOnlyInvertedLists)
class HStackInvertedLists(ReadOnlyInvertedLists):
    r""" Horizontal stack of inverted lists"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    ils = property(_swigfaiss.HStackInvertedLists_ils_get, _swigfaiss.HStackInvertedLists_ils_set)
    def __init__(self, nil, ils):
        r""" build InvertedLists by concatenating nil of them"""
        _swigfaiss.HStackInvertedLists_swiginit(self, _swigfaiss.new_HStackInvertedLists(nil, ils))
    def list_size(self, list_no):
        return _swigfaiss.HStackInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.HStackInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.HStackInvertedLists_get_ids(self, list_no)
    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.HStackInvertedLists_prefetch_lists(self, list_nos, nlist)
    def release_codes(self, list_no, codes):
        return _swigfaiss.HStackInvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        return _swigfaiss.HStackInvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        return _swigfaiss.HStackInvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        return _swigfaiss.HStackInvertedLists_get_single_code(self, list_no, offset)
    __swig_destroy__ = _swigfaiss.delete_HStackInvertedLists
# Register HStackInvertedLists in _swigfaiss:
_swigfaiss.HStackInvertedLists_swigregister(HStackInvertedLists)
class SliceInvertedLists(ReadOnlyInvertedLists):
    r""" vertical slice of indexes in another InvertedLists"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    il = property(_swigfaiss.SliceInvertedLists_il_get, _swigfaiss.SliceInvertedLists_il_set)
    i0 = property(_swigfaiss.SliceInvertedLists_i0_get, _swigfaiss.SliceInvertedLists_i0_set)
    i1 = property(_swigfaiss.SliceInvertedLists_i1_get, _swigfaiss.SliceInvertedLists_i1_set)
    def __init__(self, il, i0, i1):
        # View of lists [i0, i1) of the underlying InvertedLists `il`
        # (presumably half-open — confirm against C++ SliceInvertedLists).
        _swigfaiss.SliceInvertedLists_swiginit(self, _swigfaiss.new_SliceInvertedLists(il, i0, i1))
    def list_size(self, list_no):
        return _swigfaiss.SliceInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.SliceInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.SliceInvertedLists_get_ids(self, list_no)
    def release_codes(self, list_no, codes):
        return _swigfaiss.SliceInvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        return _swigfaiss.SliceInvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        return _swigfaiss.SliceInvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        return _swigfaiss.SliceInvertedLists_get_single_code(self, list_no, offset)
    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.SliceInvertedLists_prefetch_lists(self, list_nos, nlist)
    __swig_destroy__ = _swigfaiss.delete_SliceInvertedLists
# Register SliceInvertedLists in _swigfaiss:
_swigfaiss.SliceInvertedLists_swigregister(SliceInvertedLists)
class VStackInvertedLists(ReadOnlyInvertedLists):
    r"""Vertical stack of inverted lists: entries of list i come from the
    stacked sub-invlists, with ``cumsz`` holding cumulative sizes."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    ils = property(_swigfaiss.VStackInvertedLists_ils_get, _swigfaiss.VStackInvertedLists_ils_set)
    cumsz = property(_swigfaiss.VStackInvertedLists_cumsz_get, _swigfaiss.VStackInvertedLists_cumsz_set)
    def __init__(self, nil, ils):
        r""" build InvertedLists by concatenating nil of them"""
        _swigfaiss.VStackInvertedLists_swiginit(self, _swigfaiss.new_VStackInvertedLists(nil, ils))
    def list_size(self, list_no):
        return _swigfaiss.VStackInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.VStackInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.VStackInvertedLists_get_ids(self, list_no)
    def release_codes(self, list_no, codes):
        return _swigfaiss.VStackInvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        return _swigfaiss.VStackInvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        return _swigfaiss.VStackInvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        return _swigfaiss.VStackInvertedLists_get_single_code(self, list_no, offset)
    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.VStackInvertedLists_prefetch_lists(self, list_nos, nlist)
    __swig_destroy__ = _swigfaiss.delete_VStackInvertedLists
# Register VStackInvertedLists in _swigfaiss:
_swigfaiss.VStackInvertedLists_swigregister(VStackInvertedLists)
class MaskedInvertedLists(ReadOnlyInvertedLists):
    r"""
    use the first inverted lists if they are non-empty otherwise use the second
    This is useful if il1 has a few inverted lists that are too long,
    and il0 has replacement lists for those, with empty lists for
    the others.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    il0 = property(_swigfaiss.MaskedInvertedLists_il0_get, _swigfaiss.MaskedInvertedLists_il0_set)
    il1 = property(_swigfaiss.MaskedInvertedLists_il1_get, _swigfaiss.MaskedInvertedLists_il1_set)
    def __init__(self, il0, il1):
        # il0 takes priority; il1 is the fallback (see class docstring).
        _swigfaiss.MaskedInvertedLists_swiginit(self, _swigfaiss.new_MaskedInvertedLists(il0, il1))
    def list_size(self, list_no):
        return _swigfaiss.MaskedInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.MaskedInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.MaskedInvertedLists_get_ids(self, list_no)
    def release_codes(self, list_no, codes):
        return _swigfaiss.MaskedInvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        return _swigfaiss.MaskedInvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        return _swigfaiss.MaskedInvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        return _swigfaiss.MaskedInvertedLists_get_single_code(self, list_no, offset)
    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.MaskedInvertedLists_prefetch_lists(self, list_nos, nlist)
    __swig_destroy__ = _swigfaiss.delete_MaskedInvertedLists
# Register MaskedInvertedLists in _swigfaiss:
_swigfaiss.MaskedInvertedLists_swigregister(MaskedInvertedLists)
class StopWordsInvertedLists(ReadOnlyInvertedLists):
    r"""
    if the inverted list in il is smaller than maxsize then return it,
    otherwise return an empty invlist
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    il0 = property(_swigfaiss.StopWordsInvertedLists_il0_get, _swigfaiss.StopWordsInvertedLists_il0_set)
    maxsize = property(_swigfaiss.StopWordsInvertedLists_maxsize_get, _swigfaiss.StopWordsInvertedLists_maxsize_set)
    def __init__(self, il, maxsize):
        # Lists of size >= maxsize are masked out as "stop words".
        _swigfaiss.StopWordsInvertedLists_swiginit(self, _swigfaiss.new_StopWordsInvertedLists(il, maxsize))
    def list_size(self, list_no):
        return _swigfaiss.StopWordsInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.StopWordsInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.StopWordsInvertedLists_get_ids(self, list_no)
    def release_codes(self, list_no, codes):
        return _swigfaiss.StopWordsInvertedLists_release_codes(self, list_no, codes)
    def release_ids(self, list_no, ids):
        return _swigfaiss.StopWordsInvertedLists_release_ids(self, list_no, ids)
    def get_single_id(self, list_no, offset):
        return _swigfaiss.StopWordsInvertedLists_get_single_id(self, list_no, offset)
    def get_single_code(self, list_no, offset):
        return _swigfaiss.StopWordsInvertedLists_get_single_code(self, list_no, offset)
    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.StopWordsInvertedLists_prefetch_lists(self, list_nos, nlist)
    __swig_destroy__ = _swigfaiss.delete_StopWordsInvertedLists
# Register StopWordsInvertedLists in _swigfaiss:
_swigfaiss.StopWordsInvertedLists_swigregister(StopWordsInvertedLists)
class InvertedListsIOHook(object):
    r"""
    Callbacks to handle other types of InvertedList objects.
    The callbacks should be registered with add_callback before calling
    read_index or read_InvertedLists. The callbacks for
    OnDiskInvertedLists are registered by default. The invlist type is
    identified by:
    - the key (a fourcc) at read time
    - the class name (as given by typeid.name) at write time
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract base class: only concrete subclasses can be instantiated.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    key = property(_swigfaiss.InvertedListsIOHook_key_get, doc=r""" string version of the fourcc""")
    classname = property(_swigfaiss.InvertedListsIOHook_classname_get, doc=r""" typeid.name""")
    def write(self, ils, f):
        r""" write the index to the IOWriter (including the fourcc)"""
        return _swigfaiss.InvertedListsIOHook_write(self, ils, f)
    def read(self, f, io_flags):
        r""" called when the fourcc matches this class's fourcc"""
        return _swigfaiss.InvertedListsIOHook_read(self, f, io_flags)
    def read_ArrayInvertedLists(self, f, io_flags, nlist, code_size, sizes):
        r"""
        read from a ArrayInvertedLists into this invertedlist type.
        For this to work, the callback has to be enabled and the io_flag has to
        be set to IO_FLAG_SKIP_IVF_DATA | (16 upper bits of the fourcc)
        (default implementation fails)
        """
        return _swigfaiss.InvertedListsIOHook_read_ArrayInvertedLists(self, f, io_flags, nlist, code_size, sizes)
    __swig_destroy__ = _swigfaiss.delete_InvertedListsIOHook
    @staticmethod
    def add_callback(arg1):
        # Register a hook instance in the global callback table.
        return _swigfaiss.InvertedListsIOHook_add_callback(arg1)
    @staticmethod
    def print_callbacks():
        return _swigfaiss.InvertedListsIOHook_print_callbacks()
    @staticmethod
    def lookup(h):
        # Look up a registered hook by its fourcc key.
        return _swigfaiss.InvertedListsIOHook_lookup(h)
    @staticmethod
    def lookup_classname(classname):
        # Look up a registered hook by C++ class name (typeid.name).
        return _swigfaiss.InvertedListsIOHook_lookup_classname(classname)
# Register InvertedListsIOHook in _swigfaiss:
_swigfaiss.InvertedListsIOHook_swigregister(InvertedListsIOHook)
# SWIG also emits module-level aliases of the static methods above.
def InvertedListsIOHook_add_callback(arg1):
    r"""Module-level alias of InvertedListsIOHook.add_callback."""
    return _swigfaiss.InvertedListsIOHook_add_callback(arg1)
def InvertedListsIOHook_print_callbacks():
    r"""Module-level alias of InvertedListsIOHook.print_callbacks."""
    return _swigfaiss.InvertedListsIOHook_print_callbacks()
def InvertedListsIOHook_lookup(h):
    r"""Module-level alias of InvertedListsIOHook.lookup."""
    return _swigfaiss.InvertedListsIOHook_lookup(h)
def InvertedListsIOHook_lookup_classname(classname):
    r"""Module-level alias of InvertedListsIOHook.lookup_classname."""
    return _swigfaiss.InvertedListsIOHook_lookup_classname(classname)
class BlockInvertedLists(InvertedLists):
    r"""
    Inverted Lists that are organized by blocks.
    Different from the regular inverted lists, the codes are organized by blocks
    of size block_size bytes that represent a set of n_per_block. Therefore, code
    allocations are always rounded up to block_size bytes. The codes are also
    aligned on 32-byte boundaries for use with SIMD.
    To avoid misinterpretations, the code_size is set to (size_t)(-1), even if
    arguably the amount of memory consumed by code is block_size / n_per_block.
    The writing functions add_entries and update_entries operate on block-aligned
    data.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    n_per_block = property(_swigfaiss.BlockInvertedLists_n_per_block_get, _swigfaiss.BlockInvertedLists_n_per_block_set)
    block_size = property(_swigfaiss.BlockInvertedLists_block_size_get, _swigfaiss.BlockInvertedLists_block_size_set)
    codes = property(_swigfaiss.BlockInvertedLists_codes_get, _swigfaiss.BlockInvertedLists_codes_set)
    ids = property(_swigfaiss.BlockInvertedLists_ids_get, _swigfaiss.BlockInvertedLists_ids_set)
    def __init__(self, *args):
        _swigfaiss.BlockInvertedLists_swiginit(self, _swigfaiss.new_BlockInvertedLists(*args))
    def list_size(self, list_no):
        return _swigfaiss.BlockInvertedLists_list_size(self, list_no)
    def get_codes(self, list_no):
        return _swigfaiss.BlockInvertedLists_get_codes(self, list_no)
    def get_ids(self, list_no):
        return _swigfaiss.BlockInvertedLists_get_ids(self, list_no)
    def add_entries(self, list_no, n_entry, ids, code):
        return _swigfaiss.BlockInvertedLists_add_entries(self, list_no, n_entry, ids, code)
    def update_entries(self, list_no, offset, n_entry, ids, code):
        r""" not implemented"""
        return _swigfaiss.BlockInvertedLists_update_entries(self, list_no, offset, n_entry, ids, code)
    def resize(self, list_no, new_size):
        return _swigfaiss.BlockInvertedLists_resize(self, list_no, new_size)
    __swig_destroy__ = _swigfaiss.delete_BlockInvertedLists
# Register BlockInvertedLists in _swigfaiss:
_swigfaiss.BlockInvertedLists_swigregister(BlockInvertedLists)
def lo_build(list_id, offset):
    r"""
    Copyright (c) Facebook, Inc. and its affiliates.
    This source code is licensed under the MIT license found in the
    LICENSE file in the root directory of this source tree.

    Pack a (list_id, offset) pair into a single LO-encoded value as used
    by DirectMap (presumably a bit-packed 64-bit integer — confirm
    against the C++ lo_build).
    """
    return _swigfaiss.lo_build(list_id, offset)
def lo_listno(lo):
    r"""Extract the inverted-list number from an LO-encoded value."""
    return _swigfaiss.lo_listno(lo)
def lo_offset(lo):
    r"""Extract the within-list offset from an LO-encoded value."""
    return _swigfaiss.lo_offset(lo)
class DirectMap(object):
    r"""Direct map: a way to map back from ids to inverted lists"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Map type constants: NoMap disables the map, Array/Hashtable select
    # the backing container (see the `type` field).
    NoMap = _swigfaiss.DirectMap_NoMap
    Array = _swigfaiss.DirectMap_Array
    Hashtable = _swigfaiss.DirectMap_Hashtable
    type = property(_swigfaiss.DirectMap_type_get, _swigfaiss.DirectMap_type_set)
    array = property(_swigfaiss.DirectMap_array_get, _swigfaiss.DirectMap_array_set, doc=r""" map for direct access to the elements. Map ids to LO-encoded entries.""")
    hashtable = property(_swigfaiss.DirectMap_hashtable_get, _swigfaiss.DirectMap_hashtable_set)
    def __init__(self):
        _swigfaiss.DirectMap_swiginit(self, _swigfaiss.new_DirectMap())
    def set_type(self, new_type, invlists, ntotal):
        r""" set type and initialize"""
        return _swigfaiss.DirectMap_set_type(self, new_type, invlists, ntotal)
    def get(self, id):
        r""" get an entry"""
        return _swigfaiss.DirectMap_get(self, id)
    def no(self):
        r""" for quick checks"""
        return _swigfaiss.DirectMap_no(self)
    def check_can_add(self, ids):
        r"""
        update the direct_map
        throw if Array and ids is not NULL
        """
        return _swigfaiss.DirectMap_check_can_add(self, ids)
    def add_single_id(self, id, list_no, offset):
        r""" non thread-safe version"""
        return _swigfaiss.DirectMap_add_single_id(self, id, list_no, offset)
    def clear(self):
        r""" remove all entries"""
        return _swigfaiss.DirectMap_clear(self)
    def remove_ids(self, sel, invlists):
        r"""
        operations on inverted lists that require translation with a DirectMap
        remove ids from the InvertedLists, possibly using the direct map
        """
        return _swigfaiss.DirectMap_remove_ids(self, sel, invlists)
    def update_codes(self, invlists, n, ids, list_nos, codes):
        r""" update entries, using the direct map"""
        return _swigfaiss.DirectMap_update_codes(self, invlists, n, ids, list_nos, codes)
    __swig_destroy__ = _swigfaiss.delete_DirectMap
# Register DirectMap in _swigfaiss:
_swigfaiss.DirectMap_swigregister(DirectMap)
class DirectMapAdd(object):
    r""" Thread-safe way of updating the direct_map"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    direct_map = property(_swigfaiss.DirectMapAdd_direct_map_get, _swigfaiss.DirectMapAdd_direct_map_set)
    type = property(_swigfaiss.DirectMapAdd_type_get, _swigfaiss.DirectMapAdd_type_set)
    ntotal = property(_swigfaiss.DirectMapAdd_ntotal_get, _swigfaiss.DirectMapAdd_ntotal_set)
    n = property(_swigfaiss.DirectMapAdd_n_get, _swigfaiss.DirectMapAdd_n_set)
    xids = property(_swigfaiss.DirectMapAdd_xids_get, _swigfaiss.DirectMapAdd_xids_set)
    all_ofs = property(_swigfaiss.DirectMapAdd_all_ofs_get, _swigfaiss.DirectMapAdd_all_ofs_set)
    def __init__(self, direct_map, n, xids):
        # Prepare the addition of n vectors with ids `xids` to `direct_map`.
        _swigfaiss.DirectMapAdd_swiginit(self, _swigfaiss.new_DirectMapAdd(direct_map, n, xids))
    def add(self, i, list_no, offset):
        r""" add vector i (with id xids[i]) at list_no and offset"""
        return _swigfaiss.DirectMapAdd_add(self, i, list_no, offset)
    __swig_destroy__ = _swigfaiss.delete_DirectMapAdd
# Register DirectMapAdd in _swigfaiss:
_swigfaiss.DirectMapAdd_swigregister(DirectMapAdd)
class Level1Quantizer(object):
    r"""
    Encapsulates a quantizer object for the IndexIVF
    The class isolates the fields that are independent of the storage
    of the lists (especially training)
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    quantizer = property(_swigfaiss.Level1Quantizer_quantizer_get, _swigfaiss.Level1Quantizer_quantizer_set, doc=r""" quantizer that maps vectors to inverted lists""")
    nlist = property(_swigfaiss.Level1Quantizer_nlist_get, _swigfaiss.Level1Quantizer_nlist_set, doc=r""" number of possible key values""")
    quantizer_trains_alone = property(_swigfaiss.Level1Quantizer_quantizer_trains_alone_get, _swigfaiss.Level1Quantizer_quantizer_trains_alone_set, doc=r"""
    = 0: use the quantizer as index in a kmeans training
    = 1: just pass on the training set to the train() of the quantizer
    = 2: kmeans training on a flat index + add the centroids to the quantizer
    """)
    own_fields = property(_swigfaiss.Level1Quantizer_own_fields_get, _swigfaiss.Level1Quantizer_own_fields_set, doc=r""" whether object owns the quantizer (false by default)""")
    cp = property(_swigfaiss.Level1Quantizer_cp_get, _swigfaiss.Level1Quantizer_cp_set, doc=r""" to override default clustering params""")
    clustering_index = property(_swigfaiss.Level1Quantizer_clustering_index_get, _swigfaiss.Level1Quantizer_clustering_index_set, doc=r""" to override index used during clustering""")
    def train_q1(self, n, x, verbose, metric_type):
        r""" Trains the quantizer and calls train_residual to train sub-quantizers"""
        return _swigfaiss.Level1Quantizer_train_q1(self, n, x, verbose, metric_type)
    def coarse_code_size(self):
        r""" compute the number of bytes required to store list ids"""
        return _swigfaiss.Level1Quantizer_coarse_code_size(self)
    def encode_listno(self, list_no, code):
        # Serialize a list id into `code` (inverse of decode_listno).
        return _swigfaiss.Level1Quantizer_encode_listno(self, list_no, code)
    def decode_listno(self, code):
        # Recover a list id from a serialized code.
        return _swigfaiss.Level1Quantizer_decode_listno(self, code)
    def __init__(self, *args):
        _swigfaiss.Level1Quantizer_swiginit(self, _swigfaiss.new_Level1Quantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_Level1Quantizer
# Register Level1Quantizer in _swigfaiss:
_swigfaiss.Level1Quantizer_swigregister(Level1Quantizer)
class SearchParametersIVF(SearchParameters):
    r"""Per-query search parameters for IVF indexes (nprobe, max_codes,
    and nested parameters for the coarse quantizer)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nprobe = property(_swigfaiss.SearchParametersIVF_nprobe_get, _swigfaiss.SearchParametersIVF_nprobe_set, doc=r""" number of probes at query time""")
    max_codes = property(_swigfaiss.SearchParametersIVF_max_codes_get, _swigfaiss.SearchParametersIVF_max_codes_set, doc=r""" max nb of codes to visit to do a query""")
    quantizer_params = property(_swigfaiss.SearchParametersIVF_quantizer_params_get, _swigfaiss.SearchParametersIVF_quantizer_params_set)
    def __init__(self):
        _swigfaiss.SearchParametersIVF_swiginit(self, _swigfaiss.new_SearchParametersIVF())
    __swig_destroy__ = _swigfaiss.delete_SearchParametersIVF
# Register SearchParametersIVF in _swigfaiss:
_swigfaiss.SearchParametersIVF_swigregister(SearchParametersIVF)
class IndexIVF(Index, Level1Quantizer):
    r"""
    Index based on an inverted file (IVF)
    In the inverted file, the quantizer (an Index instance) provides a
    quantization index for each vector to be added. The quantization
    index maps to a list (aka inverted list or posting list), where the
    id of the vector is stored.
    The inverted list object is required only after training. If none is
    set externally, an ArrayInvertedLists is used automatically.
    At search time, the vector to be searched is also quantized, and
    only the list corresponding to the quantization index is
    searched. This speeds up the search by making it
    non-exhaustive. This can be relaxed using multi-probe search: a few
    (nprobe) quantization indices are selected and several inverted
    lists are visited.
    Sub-classes implement a post-filtering of the index that refines
    the distance estimation from the query to database vectors.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    invlists = property(_swigfaiss.IndexIVF_invlists_get, _swigfaiss.IndexIVF_invlists_set, doc=r""" Access to the actual data""")
    own_invlists = property(_swigfaiss.IndexIVF_own_invlists_get, _swigfaiss.IndexIVF_own_invlists_set)
    code_size = property(_swigfaiss.IndexIVF_code_size_get, _swigfaiss.IndexIVF_code_size_set, doc=r""" code size per vector in bytes""")
    nprobe = property(_swigfaiss.IndexIVF_nprobe_get, _swigfaiss.IndexIVF_nprobe_set, doc=r""" number of probes at query time""")
    max_codes = property(_swigfaiss.IndexIVF_max_codes_get, _swigfaiss.IndexIVF_max_codes_set, doc=r""" max nb of codes to visit to do a query""")
    parallel_mode = property(_swigfaiss.IndexIVF_parallel_mode_get, _swigfaiss.IndexIVF_parallel_mode_set, doc=r"""
    Parallel mode determines how queries are parallelized with OpenMP
    0 (default): split over queries
    1: parallelize over inverted lists
    2: parallelize over both
    3: split over queries with a finer granularity
    PARALLEL_MODE_NO_HEAP_INIT: binary or with the previous to
    prevent the heap to be initialized and finalized
    """)
    PARALLEL_MODE_NO_HEAP_INIT = property(_swigfaiss.IndexIVF_PARALLEL_MODE_NO_HEAP_INIT_get)
    direct_map = property(_swigfaiss.IndexIVF_direct_map_get, _swigfaiss.IndexIVF_direct_map_set, doc=r"""
    optional map that maps back ids to invlist entries. This
    enables reconstruct()
    """)

    def reset(self):
        return _swigfaiss.IndexIVF_reset(self)

    def train(self, n, x):
        r""" Trains the quantizer and calls train_residual to train sub-quantizers"""
        return _swigfaiss.IndexIVF_train(self, n, x)

    def add(self, n, x):
        r""" Calls add_with_ids with NULL ids"""
        return _swigfaiss.IndexIVF_add(self, n, x)

    def add_with_ids(self, n, x, xids):
        r""" default implementation that calls encode_vectors"""
        return _swigfaiss.IndexIVF_add_with_ids(self, n, x, xids)

    def add_core(self, n, x, xids, precomputed_idx):
        r"""
        Implementation of vector addition where the vector assignments are
        predefined. The default implementation hands over the code extraction to
        encode_vectors.

        :type precomputed_idx: int
        :param precomputed_idx: quantization indices for the input vectors
            (size n)
        """
        return _swigfaiss.IndexIVF_add_core(self, n, x, xids, precomputed_idx)

    def encode_vectors(self, n, x, list_nos, codes, include_listno=False):
        r"""
        Encodes a set of vectors as they would appear in the inverted lists

        :type list_nos: int
        :param list_nos: inverted list ids as returned by the
            quantizer (size n). -1s are ignored.
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        :type include_listno: boolean, optional
        :param include_listno:
            include the list ids in the code (in this case add
            ceil(log8(nlist)) to the code size)
        """
        return _swigfaiss.IndexIVF_encode_vectors(self, n, x, list_nos, codes, include_listno)

    def add_sa_codes(self, n, codes, xids):
        r"""
        Add vectors that are computed with the standalone codec

        :type codes: uint8_t
        :param codes: codes to add size n * sa_code_size()
        :type xids: int
        :param xids: corresponding ids, size n
        """
        return _swigfaiss.IndexIVF_add_sa_codes(self, n, codes, xids)

    def train_residual(self, n, x):
        r"""
        Sub-classes that encode the residuals can train their encoders here
        does nothing by default
        """
        return _swigfaiss.IndexIVF_train_residual(self, n, x)

    def search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params=None, stats=None):
        r"""
        search a set of vectors, that are pre-quantized by the IVF
        quantizer. Fill in the corresponding heaps with the query
        results. The default implementation uses InvertedListScanners
        to do the search.

        :type n: int
        :param n: nb of vectors to query
        :type x: float
        :param x: query vectors, size nx * d
        :type assign: int
        :param assign: coarse quantization indices, size nx * nprobe
        :type centroid_dis: float
        :param centroid_dis:
            distances to coarse centroids, size nx * nprobe
        :type distances: float
        :param distances:
            output distances, size n * k
        :type labels: int
        :param labels: output labels, size n * k
        :type store_pairs: boolean
        :param store_pairs: store inv list index + inv list offset
            instead in upper/lower 32 bit of result,
            instead of ids (used for reranking).
        :type params: :py:class:`IVFSearchParameters`, optional
        :param params: used to override the object's search parameters
        :type stats: :py:class:`IndexIVFStats`, optional
        :param stats: search stats to be updated (can be null)
        """
        return _swigfaiss.IndexIVF_search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params, stats)

    def search(self, n, x, k, distances, labels, params=None):
        r""" assign the vectors, then call search_preassigned"""
        return _swigfaiss.IndexIVF_search(self, n, x, k, distances, labels, params)

    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexIVF_range_search(self, n, x, radius, result, params)

    def range_search_preassigned(self, nx, x, radius, keys, coarse_dis, result, store_pairs=False, params=None, stats=None):
        return _swigfaiss.IndexIVF_range_search_preassigned(self, nx, x, radius, keys, coarse_dis, result, store_pairs, params, stats)

    def get_InvertedListScanner(self, store_pairs=False, sel=None):
        r"""
        Get a scanner for this index (store_pairs means ignore labels)
        The default search implementation uses this to compute the distances
        """
        return _swigfaiss.IndexIVF_get_InvertedListScanner(self, store_pairs, sel)

    def reconstruct(self, key, recons):
        r""" reconstruct a vector. Works only if maintain_direct_map is set to 1 or 2"""
        return _swigfaiss.IndexIVF_reconstruct(self, key, recons)

    def update_vectors(self, nv, idx, v):
        r"""
        Update a subset of vectors.
        The index must have a direct_map

        :type nv: int
        :param nv: nb of vectors to update
        :type idx: int
        :param idx: vector indices to update, size nv
        :type v: float
        :param v: vectors of new values, size nv*d
        """
        return _swigfaiss.IndexIVF_update_vectors(self, nv, idx, v)

    def reconstruct_n(self, i0, ni, recons):
        r"""
        Reconstruct a subset of the indexed vectors.
        Overrides default implementation to bypass reconstruct() which requires
        direct_map to be maintained.

        :type i0: int
        :param i0: first vector to reconstruct
        :type ni: int
        :param ni: nb of vectors to reconstruct
        :type recons: float
        :param recons: output array of reconstructed vectors, size ni * d
        """
        return _swigfaiss.IndexIVF_reconstruct_n(self, i0, ni, recons)

    def search_and_reconstruct(self, n, x, k, distances, labels, recons, params=None):
        r"""
        Similar to search, but also reconstructs the stored vectors (or an
        approximation in the case of lossy coding) for the search results.
        Overrides default implementation to avoid having to maintain direct_map
        and instead fetch the code offsets through the `store_pairs` flag in
        search_preassigned().

        :type recons: float
        :param recons: reconstructed vectors size (n, k, d)
        """
        return _swigfaiss.IndexIVF_search_and_reconstruct(self, n, x, k, distances, labels, recons, params)

    def reconstruct_from_offset(self, list_no, offset, recons):
        r"""
        Reconstruct a vector given the location in terms of (inv list index +
        inv list offset) instead of the id.
        Useful for reconstructing when the direct_map is not maintained and
        the inv list offset is computed by search_preassigned() with
        `store_pairs` set.
        """
        return _swigfaiss.IndexIVF_reconstruct_from_offset(self, list_no, offset, recons)

    def remove_ids(self, sel):
        r""" Dataset manipulation functions"""
        return _swigfaiss.IndexIVF_remove_ids(self, sel)

    def check_compatible_for_merge(self, otherIndex):
        return _swigfaiss.IndexIVF_check_compatible_for_merge(self, otherIndex)

    def merge_from(self, otherIndex, add_id):
        return _swigfaiss.IndexIVF_merge_from(self, otherIndex, add_id)

    def copy_subset_to(self, other, subset_type, a1, a2):
        r"""
        copy a subset of the entries index to the other index
        if subset_type == 0: copies ids in [a1, a2)
        if subset_type == 1: copies ids if id % a1 == a2
        if subset_type == 2: copies inverted lists such that a1
        elements are left before and a2 elements are after
        """
        return _swigfaiss.IndexIVF_copy_subset_to(self, other, subset_type, a1, a2)
    __swig_destroy__ = _swigfaiss.delete_IndexIVF

    def get_list_size(self, list_no):
        return _swigfaiss.IndexIVF_get_list_size(self, list_no)

    def check_ids_sorted(self):
        r""" are the ids sorted?"""
        return _swigfaiss.IndexIVF_check_ids_sorted(self)

    def make_direct_map(self, new_maintain_direct_map=True):
        r"""
        initialize a direct map

        :type new_maintain_direct_map: boolean, optional
        :param new_maintain_direct_map: if true, create a direct map,
            else clear it
        """
        return _swigfaiss.IndexIVF_make_direct_map(self, new_maintain_direct_map)

    def set_direct_map_type(self, type):
        return _swigfaiss.IndexIVF_set_direct_map_type(self, type)

    def replace_invlists(self, il, own=False):
        r""" replace the inverted lists, old one is deallocated if own_invlists"""
        return _swigfaiss.IndexIVF_replace_invlists(self, il, own)

    def sa_code_size(self):
        return _swigfaiss.IndexIVF_sa_code_size(self)

    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexIVF_sa_encode(self, n, x, bytes)

# Register IndexIVF in _swigfaiss:
_swigfaiss.IndexIVF_swigregister(IndexIVF)
class InvertedListScanner(object):
    r"""
    Object that handles a query. The inverted lists to scan are
    provided externally. The object has a lot of state, but
    distance_to_code and scan_codes can be called in multiple
    threads
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    list_no = property(_swigfaiss.InvertedListScanner_list_no_get, _swigfaiss.InvertedListScanner_list_no_set, doc=r""" remember current list""")
    keep_max = property(_swigfaiss.InvertedListScanner_keep_max_get, _swigfaiss.InvertedListScanner_keep_max_set, doc=r""" keep maximum instead of minimum""")
    store_pairs = property(_swigfaiss.InvertedListScanner_store_pairs_get, _swigfaiss.InvertedListScanner_store_pairs_set, doc=r""" store positions in invlists rather than labels""")
    sel = property(_swigfaiss.InvertedListScanner_sel_get, _swigfaiss.InvertedListScanner_sel_set, doc=r""" search in this subset of ids""")
    code_size = property(_swigfaiss.InvertedListScanner_code_size_get, _swigfaiss.InvertedListScanner_code_size_set, doc=r""" used in default implementation of scan_codes""")

    def set_query(self, query_vector):
        r""" from now on we handle this query."""
        return _swigfaiss.InvertedListScanner_set_query(self, query_vector)

    def set_list(self, list_no, coarse_dis):
        r""" following codes come from this inverted list"""
        return _swigfaiss.InvertedListScanner_set_list(self, list_no, coarse_dis)

    def distance_to_code(self, code):
        r""" compute a single query-to-code distance"""
        return _swigfaiss.InvertedListScanner_distance_to_code(self, code)

    def scan_codes(self, n, codes, ids, distances, labels, k):
        r"""
        scan a set of codes, compute distances to current query and
        update heap of results if necessary. Default implementation
        calls distance_to_code.

        :type n: int
        :param n: number of codes to scan
        :type codes: uint8_t
        :param codes: codes to scan (n * code_size)
        :type ids: int
        :param ids: corresponding ids (ignored if store_pairs)
        :type distances: float
        :param distances: heap distances (size k)
        :type labels: int
        :param labels: heap labels (size k)
        :type k: int
        :param k: heap size
        :rtype: int
        :return: number of heap updates performed
        """
        return _swigfaiss.InvertedListScanner_scan_codes(self, n, codes, ids, distances, labels, k)

    def scan_codes_range(self, n, codes, ids, radius, result):
        r"""
        scan a set of codes, compute distances to current query and
        update results if distances are below radius
        (default implementation fails)
        """
        return _swigfaiss.InvertedListScanner_scan_codes_range(self, n, codes, ids, radius, result)
    __swig_destroy__ = _swigfaiss.delete_InvertedListScanner

# Register InvertedListScanner in _swigfaiss:
_swigfaiss.InvertedListScanner_swigregister(InvertedListScanner)
class IndexIVFStats(object):
    r"""
    Counters and timings accumulated during IVF searches (nb of queries,
    lists visited, distances computed, heap updates, quantization and
    search times). Can be reset or merged with add().
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nq = property(_swigfaiss.IndexIVFStats_nq_get, _swigfaiss.IndexIVFStats_nq_set)
    nlist = property(_swigfaiss.IndexIVFStats_nlist_get, _swigfaiss.IndexIVFStats_nlist_set)
    ndis = property(_swigfaiss.IndexIVFStats_ndis_get, _swigfaiss.IndexIVFStats_ndis_set)
    nheap_updates = property(_swigfaiss.IndexIVFStats_nheap_updates_get, _swigfaiss.IndexIVFStats_nheap_updates_set)
    quantization_time = property(_swigfaiss.IndexIVFStats_quantization_time_get, _swigfaiss.IndexIVFStats_quantization_time_set)
    search_time = property(_swigfaiss.IndexIVFStats_search_time_get, _swigfaiss.IndexIVFStats_search_time_set)

    def __init__(self):
        _swigfaiss.IndexIVFStats_swiginit(self, _swigfaiss.new_IndexIVFStats())

    def reset(self):
        return _swigfaiss.IndexIVFStats_reset(self)

    def add(self, other):
        return _swigfaiss.IndexIVFStats_add(self, other)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFStats

# Register IndexIVFStats in _swigfaiss:
_swigfaiss.IndexIVFStats_swigregister(IndexIVFStats)
def check_compatible_for_merge(index1, index2):
    r"""
    check if two indexes have the same parameters and are trained in
    the same way, otherwise throw an exception.
    """
    return _swigfaiss.check_compatible_for_merge(index1, index2)
def extract_index_ivf(*args):
    r""" Forwarding wrapper to the overloaded C++ extract_index_ivf."""
    return _swigfaiss.extract_index_ivf(*args)
def try_extract_index_ivf(*args):
    r""" Forwarding wrapper to the overloaded C++ try_extract_index_ivf."""
    return _swigfaiss.try_extract_index_ivf(*args)
def merge_into(index0, index1, shift_ids):
    r"""
    Merge index1 into index0. Works on IndexIVF's and IndexIVF's
    embedded in an IndexPreTransform. On output, the index1 is empty.

    :type shift_ids: boolean
    :param shift_ids: translate the ids from index1 to index0->prev_ntotal
    """
    return _swigfaiss.merge_into(index0, index1, shift_ids)
def search_centroid(index, x, n, centroid_ids):
    r""" Forwarding wrapper to the C++ search_centroid."""
    return _swigfaiss.search_centroid(index, x, n, centroid_ids)
def search_and_return_centroids(index, n, xin, k, distances, labels, query_centroid_ids, result_centroid_ids):
    r""" Forwarding wrapper to the C++ search_and_return_centroids."""
    return _swigfaiss.search_and_return_centroids(index, n, xin, k, distances, labels, query_centroid_ids, result_centroid_ids)
class SlidingIndexWindow(object):
    r"""
    A set of IndexIVFs concatenated together in a FIFO fashion.
    At each "step", the oldest index slice is removed and a new index is added.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    index = property(_swigfaiss.SlidingIndexWindow_index_get, _swigfaiss.SlidingIndexWindow_index_set, doc=r""" common index that contains the sliding window""")
    ils = property(_swigfaiss.SlidingIndexWindow_ils_get, _swigfaiss.SlidingIndexWindow_ils_set, doc=r""" InvertedLists of index""")
    n_slice = property(_swigfaiss.SlidingIndexWindow_n_slice_get, _swigfaiss.SlidingIndexWindow_n_slice_set, doc=r""" number of slices currently in index""")
    nlist = property(_swigfaiss.SlidingIndexWindow_nlist_get, _swigfaiss.SlidingIndexWindow_nlist_set, doc=r""" same as index->nlist""")
    sizes = property(_swigfaiss.SlidingIndexWindow_sizes_get, _swigfaiss.SlidingIndexWindow_sizes_set, doc=r""" cumulative list sizes at each slice""")

    def __init__(self, index):
        r""" index should be initially empty and trained"""
        _swigfaiss.SlidingIndexWindow_swiginit(self, _swigfaiss.new_SlidingIndexWindow(index))

    def step(self, sub_index, remove_oldest):
        r"""
        Add one index to the current index and remove the oldest one.

        :type sub_index: :py:class:`Index`
        :param sub_index: slice to swap in (can be NULL)
        :type remove_oldest: boolean
        :param remove_oldest: if true, remove the oldest slices
        """
        return _swigfaiss.SlidingIndexWindow_step(self, sub_index, remove_oldest)
    __swig_destroy__ = _swigfaiss.delete_SlidingIndexWindow

# Register SlidingIndexWindow in _swigfaiss:
_swigfaiss.SlidingIndexWindow_swigregister(SlidingIndexWindow)
def get_invlist_range(index, i0, i1):
    r""" Get a subset of inverted lists, indices [i0, i1)"""
    return _swigfaiss.get_invlist_range(index, i0, i1)
def set_invlist_range(index, i0, i1, src):
    r""" Set a subset of inverted lists, indices [i0, i1), from src"""
    return _swigfaiss.set_invlist_range(index, i0, i1, src)
def search_with_parameters(index, n, x, k, distances, labels, params, nb_dis=None, ms_per_stage=None):
    r"""
    search an IndexIVF, possibly embedded in an IndexPreTransform with
    given parameters. This is a way to set the nprobe and get
    statistics in a thread-safe way.

    Optionally returns (if non-nullptr):
    - nb_dis: number of distances computed
    - ms_per_stage: [0]: preprocessing time
                    [1]: coarse quantization,
                    [2]: list scanning
    """
    return _swigfaiss.search_with_parameters(index, n, x, k, distances, labels, params, nb_dis, ms_per_stage)
def range_search_with_parameters(index, n, x, radius, result, params, nb_dis=None, ms_per_stage=None):
    r""" Same as search_with_parameters but for range search"""
    return _swigfaiss.range_search_with_parameters(index, n, x, radius, result, params, nb_dis, ms_per_stage)
def ivf_residual_from_quantizer(arg1, nlevel):
    r"""
    Build an IndexIVFResidualQuantizer from a ResidualQuantizer, using the
    nlevel first components as coarse quantizer and the rest as codes in invlists
    """
    return _swigfaiss.ivf_residual_from_quantizer(arg1, nlevel)
def ivf_residual_add_from_flat_codes(ivfrq, ncode, codes, code_size=-1):
    r"""
    add from codes. NB that the norm component is not used, so the code_size can
    be provided.

    :type ivfrq: :py:class:`IndexIVFResidualQuantizer`
    :param ivfrq: index to populate with the codes
    :type ncode: int
    :param ncode: number of codes to add
    :type codes: uint8_t
    :param codes: codes to add, size (ncode, code_size)
    :type code_size: int, optional
    :param code_size: override the ivfrq's code_size, useful if the norm encoding
        is different
    """
    return _swigfaiss.ivf_residual_add_from_flat_codes(ivfrq, ncode, codes, code_size)
class ScalarQuantizer(Quantizer):
    r"""
    The uniform quantizer has a range [vmin, vmax]. The range can be
    the same for all dimensions (uniform) or specific per dimension
    (default).
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    QT_8bit = _swigfaiss.ScalarQuantizer_QT_8bit
    r""" 8 bits per component"""
    QT_4bit = _swigfaiss.ScalarQuantizer_QT_4bit
    r""" 4 bits per component"""
    QT_8bit_uniform = _swigfaiss.ScalarQuantizer_QT_8bit_uniform
    r""" same, shared range for all dimensions"""
    QT_4bit_uniform = _swigfaiss.ScalarQuantizer_QT_4bit_uniform
    # 4 bits per component, shared range for all dimensions
    QT_fp16 = _swigfaiss.ScalarQuantizer_QT_fp16
    # 16-bit floating-point encoding per component
    QT_8bit_direct = _swigfaiss.ScalarQuantizer_QT_8bit_direct
    r""" fast indexing of uint8s"""
    QT_6bit = _swigfaiss.ScalarQuantizer_QT_6bit
    r""" 6 bits per component"""
    qtype = property(_swigfaiss.ScalarQuantizer_qtype_get, _swigfaiss.ScalarQuantizer_qtype_set)
    RS_minmax = _swigfaiss.ScalarQuantizer_RS_minmax
    r""" [min - rs*(max-min), max + rs*(max-min)]"""
    RS_meanstd = _swigfaiss.ScalarQuantizer_RS_meanstd
    r""" [mean - std * rs, mean + std * rs]"""
    RS_quantiles = _swigfaiss.ScalarQuantizer_RS_quantiles
    r""" [Q(rs), Q(1-rs)]"""
    RS_optim = _swigfaiss.ScalarQuantizer_RS_optim
    r""" alternate optimization of reconstruction error"""
    rangestat = property(_swigfaiss.ScalarQuantizer_rangestat_get, _swigfaiss.ScalarQuantizer_rangestat_set)
    rangestat_arg = property(_swigfaiss.ScalarQuantizer_rangestat_arg_get, _swigfaiss.ScalarQuantizer_rangestat_arg_set)
    bits = property(_swigfaiss.ScalarQuantizer_bits_get, _swigfaiss.ScalarQuantizer_bits_set, doc=r""" bits per scalar code""")
    trained = property(_swigfaiss.ScalarQuantizer_trained_get, _swigfaiss.ScalarQuantizer_trained_set, doc=r""" trained values (including the range)""")

    def __init__(self, *args):
        _swigfaiss.ScalarQuantizer_swiginit(self, _swigfaiss.new_ScalarQuantizer(*args))

    def set_derived_sizes(self):
        r""" updates internal values based on qtype and d"""
        return _swigfaiss.ScalarQuantizer_set_derived_sizes(self)

    def train(self, n, x):
        return _swigfaiss.ScalarQuantizer_train(self, n, x)

    def train_residual(self, n, x, quantizer, by_residual, verbose):
        r""" Used by an IVF index to train based on the residuals"""
        return _swigfaiss.ScalarQuantizer_train_residual(self, n, x, quantizer, by_residual, verbose)

    def compute_codes(self, x, codes, n):
        r"""
        Encode a set of vectors

        :type x: float
        :param x: vectors to encode, size n * d
        :type codes: uint8_t
        :param codes: output codes, size n * code_size
        """
        return _swigfaiss.ScalarQuantizer_compute_codes(self, x, codes, n)

    def decode(self, code, x, n):
        r"""
        Decode a set of vectors

        :type code: uint8_t
        :param code: codes to decode, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.ScalarQuantizer_decode(self, code, x, n)

    def select_quantizer(self):
        return _swigfaiss.ScalarQuantizer_select_quantizer(self)

    def get_distance_computer(self, *args):
        return _swigfaiss.ScalarQuantizer_get_distance_computer(self, *args)

    def select_InvertedListScanner(self, mt, quantizer, store_pairs, sel, by_residual=False):
        return _swigfaiss.ScalarQuantizer_select_InvertedListScanner(self, mt, quantizer, store_pairs, sel, by_residual)
    __swig_destroy__ = _swigfaiss.delete_ScalarQuantizer

# Register ScalarQuantizer in _swigfaiss:
_swigfaiss.ScalarQuantizer_swigregister(ScalarQuantizer)
class IndexScalarQuantizer(IndexFlatCodes):
    r"""Flat index built on a scalar quantizer."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    sq = property(_swigfaiss.IndexScalarQuantizer_sq_get, _swigfaiss.IndexScalarQuantizer_sq_set, doc=r""" Used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexScalarQuantizer_swiginit(self, _swigfaiss.new_IndexScalarQuantizer(*args))

    def train(self, n, x):
        return _swigfaiss.IndexScalarQuantizer_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexScalarQuantizer_search(self, n, x, k, distances, labels, params)

    def get_FlatCodesDistanceComputer(self):
        return _swigfaiss.IndexScalarQuantizer_get_FlatCodesDistanceComputer(self)

    def sa_encode(self, n, x, bytes):
        r""" encode with the standalone codec"""
        return _swigfaiss.IndexScalarQuantizer_sa_encode(self, n, x, bytes)

    def sa_decode(self, n, bytes, x):
        r""" decode with the standalone codec"""
        return _swigfaiss.IndexScalarQuantizer_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexScalarQuantizer

# Register IndexScalarQuantizer in _swigfaiss:
_swigfaiss.IndexScalarQuantizer_swigregister(IndexScalarQuantizer)
class IndexIVFScalarQuantizer(IndexIVF):
    r"""
    An IVF implementation where the components of the residuals are
    encoded with a scalar quantizer. All distance computations
    are asymmetric, so the encoded vectors are decoded and approximate
    distances are computed.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    sq = property(_swigfaiss.IndexIVFScalarQuantizer_sq_get, _swigfaiss.IndexIVFScalarQuantizer_sq_set)
    by_residual = property(_swigfaiss.IndexIVFScalarQuantizer_by_residual_get, _swigfaiss.IndexIVFScalarQuantizer_by_residual_set)

    def __init__(self, *args):
        _swigfaiss.IndexIVFScalarQuantizer_swiginit(self, _swigfaiss.new_IndexIVFScalarQuantizer(*args))

    def train_residual(self, n, x):
        return _swigfaiss.IndexIVFScalarQuantizer_train_residual(self, n, x)

    def encode_vectors(self, n, x, list_nos, codes, include_listnos=False):
        return _swigfaiss.IndexIVFScalarQuantizer_encode_vectors(self, n, x, list_nos, codes, include_listnos)

    def add_core(self, n, x, xids, precomputed_idx):
        return _swigfaiss.IndexIVFScalarQuantizer_add_core(self, n, x, xids, precomputed_idx)

    def get_InvertedListScanner(self, store_pairs, sel):
        return _swigfaiss.IndexIVFScalarQuantizer_get_InvertedListScanner(self, store_pairs, sel)

    def reconstruct_from_offset(self, list_no, offset, recons):
        return _swigfaiss.IndexIVFScalarQuantizer_reconstruct_from_offset(self, list_no, offset, recons)

    def sa_decode(self, n, bytes, x):
        r""" decode with the standalone codec"""
        return _swigfaiss.IndexIVFScalarQuantizer_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFScalarQuantizer

# Register IndexIVFScalarQuantizer in _swigfaiss:
_swigfaiss.IndexIVFScalarQuantizer_swigregister(IndexIVFScalarQuantizer)
class IndexIVFSpectralHash(IndexIVF):
    r"""
    Inverted list that stores binary codes of size nbit. Before the
    binary conversion, the dimension of the vectors is transformed from
    dim d into dim nbit by vt (a random rotation by default).
    Each coordinate is subtracted from a value determined by
    threshold_type, and split into intervals of size period. Half of
    the interval is a 0 bit, the other half a 1.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    vt = property(_swigfaiss.IndexIVFSpectralHash_vt_get, _swigfaiss.IndexIVFSpectralHash_vt_set, doc=r""" transformation from d to nbit dim""")
    own_fields = property(_swigfaiss.IndexIVFSpectralHash_own_fields_get, _swigfaiss.IndexIVFSpectralHash_own_fields_set, doc=r""" own the vt""")
    nbit = property(_swigfaiss.IndexIVFSpectralHash_nbit_get, _swigfaiss.IndexIVFSpectralHash_nbit_set, doc=r""" nb of bits of the binary signature""")
    period = property(_swigfaiss.IndexIVFSpectralHash_period_get, _swigfaiss.IndexIVFSpectralHash_period_set, doc=r""" interval size for 0s and 1s""")
    Thresh_global = _swigfaiss.IndexIVFSpectralHash_Thresh_global
    r""" global threshold at 0"""
    Thresh_centroid = _swigfaiss.IndexIVFSpectralHash_Thresh_centroid
    r""" compare to centroid"""
    Thresh_centroid_half = _swigfaiss.IndexIVFSpectralHash_Thresh_centroid_half
    r""" central interval around centroid"""
    Thresh_median = _swigfaiss.IndexIVFSpectralHash_Thresh_median
    r""" median of training set"""
    threshold_type = property(_swigfaiss.IndexIVFSpectralHash_threshold_type_get, _swigfaiss.IndexIVFSpectralHash_threshold_type_set)
    trained = property(_swigfaiss.IndexIVFSpectralHash_trained_get, _swigfaiss.IndexIVFSpectralHash_trained_set, doc=r"""
    Trained threshold.
    size nlist * nbit or 0 if Thresh_global
    """)

    def __init__(self, *args):
        _swigfaiss.IndexIVFSpectralHash_swiginit(self, _swigfaiss.new_IndexIVFSpectralHash(*args))

    def train_residual(self, n, x):
        return _swigfaiss.IndexIVFSpectralHash_train_residual(self, n, x)

    def encode_vectors(self, n, x, list_nos, codes, include_listnos=False):
        return _swigfaiss.IndexIVFSpectralHash_encode_vectors(self, n, x, list_nos, codes, include_listnos)

    def get_InvertedListScanner(self, store_pairs, sel):
        return _swigfaiss.IndexIVFSpectralHash_get_InvertedListScanner(self, store_pairs, sel)

    def replace_vt(self, *args):
        r"""
        *Overload 1:*
        replace the vector transform for an empty (and possibly untrained) index

        |

        *Overload 2:*
        convenience function to get the VT from an index constructed by an
        index_factory (should end in "LSH")

        |

        *Overload 3:*
        convenience function to get the VT from an index constructed by an
        index_factory (should end in "LSH")
        """
        return _swigfaiss.IndexIVFSpectralHash_replace_vt(self, *args)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFSpectralHash

# Register IndexIVFSpectralHash in _swigfaiss:
_swigfaiss.IndexIVFSpectralHash_swigregister(IndexIVFSpectralHash)
class IndexIVFAdditiveQuantizer(IndexIVF):
    r"""
    Abstract class for IVF additive quantizers.
    The search functions are in common.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    aq = property(_swigfaiss.IndexIVFAdditiveQuantizer_aq_get, _swigfaiss.IndexIVFAdditiveQuantizer_aq_set)
    by_residual = property(_swigfaiss.IndexIVFAdditiveQuantizer_by_residual_get, _swigfaiss.IndexIVFAdditiveQuantizer_by_residual_set)
    use_precomputed_table = property(_swigfaiss.IndexIVFAdditiveQuantizer_use_precomputed_table_get, _swigfaiss.IndexIVFAdditiveQuantizer_use_precomputed_table_set)

    def __init__(self, *args):
        _swigfaiss.IndexIVFAdditiveQuantizer_swiginit(self, _swigfaiss.new_IndexIVFAdditiveQuantizer(*args))

    def train_residual(self, n, x):
        return _swigfaiss.IndexIVFAdditiveQuantizer_train_residual(self, n, x)

    def encode_vectors(self, n, x, list_nos, codes, include_listnos=False):
        return _swigfaiss.IndexIVFAdditiveQuantizer_encode_vectors(self, n, x, list_nos, codes, include_listnos)

    def get_InvertedListScanner(self, store_pairs, sel):
        return _swigfaiss.IndexIVFAdditiveQuantizer_get_InvertedListScanner(self, store_pairs, sel)

    def sa_decode(self, n, codes, x):
        r""" decode with the standalone codec"""
        return _swigfaiss.IndexIVFAdditiveQuantizer_sa_decode(self, n, codes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFAdditiveQuantizer

# Register IndexIVFAdditiveQuantizer in _swigfaiss:
_swigfaiss.IndexIVFAdditiveQuantizer_swigregister(IndexIVFAdditiveQuantizer)
class IndexIVFResidualQuantizer(IndexIVFAdditiveQuantizer):
    r"""
    IndexIVF based on a residual quantizer. Stored vectors are
    approximated by residual quantization codes.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    rq = property(_swigfaiss.IndexIVFResidualQuantizer_rq_get, _swigfaiss.IndexIVFResidualQuantizer_rq_set, doc=r""" The residual quantizer used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexIVFResidualQuantizer_swiginit(self, _swigfaiss.new_IndexIVFResidualQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFResidualQuantizer

# Register IndexIVFResidualQuantizer in _swigfaiss:
_swigfaiss.IndexIVFResidualQuantizer_swigregister(IndexIVFResidualQuantizer)
class IndexIVFLocalSearchQuantizer(IndexIVFAdditiveQuantizer):
    r"""
    IndexIVF based on a local search quantizer (LSQ). Stored vectors are
    approximated by local search quantization codes.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    lsq = property(_swigfaiss.IndexIVFLocalSearchQuantizer_lsq_get, _swigfaiss.IndexIVFLocalSearchQuantizer_lsq_set, doc=r""" The LSQ quantizer used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexIVFLocalSearchQuantizer_swiginit(self, _swigfaiss.new_IndexIVFLocalSearchQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFLocalSearchQuantizer

# Register IndexIVFLocalSearchQuantizer in _swigfaiss:
_swigfaiss.IndexIVFLocalSearchQuantizer_swigregister(IndexIVFLocalSearchQuantizer)
class IndexIVFProductResidualQuantizer(IndexIVFAdditiveQuantizer):
    r"""
    IndexIVF based on a product residual quantizer. Stored vectors are
    approximated by product residual quantization codes.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    prq = property(_swigfaiss.IndexIVFProductResidualQuantizer_prq_get, _swigfaiss.IndexIVFProductResidualQuantizer_prq_set, doc=r""" The product residual quantizer used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexIVFProductResidualQuantizer_swiginit(self, _swigfaiss.new_IndexIVFProductResidualQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFProductResidualQuantizer

# Register IndexIVFProductResidualQuantizer in _swigfaiss:
_swigfaiss.IndexIVFProductResidualQuantizer_swigregister(IndexIVFProductResidualQuantizer)
class IndexIVFProductLocalSearchQuantizer(IndexIVFAdditiveQuantizer):
    r"""
    IndexIVF based on a product local search quantizer. Stored vectors are
    approximated by product local search quantization codes.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    plsq = property(_swigfaiss.IndexIVFProductLocalSearchQuantizer_plsq_get, _swigfaiss.IndexIVFProductLocalSearchQuantizer_plsq_set, doc=r""" The product local search quantizer used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexIVFProductLocalSearchQuantizer_swiginit(self, _swigfaiss.new_IndexIVFProductLocalSearchQuantizer(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFProductLocalSearchQuantizer

# Register IndexIVFProductLocalSearchQuantizer in _swigfaiss:
_swigfaiss.IndexIVFProductLocalSearchQuantizer_swigregister(IndexIVFProductLocalSearchQuantizer)
class SearchParametersHNSW(SearchParameters):
    r"""
    Query-time parameters for HNSW indexes: search-time expansion factor
    (efSearch) and the relative-distance stopping check.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    efSearch = property(_swigfaiss.SearchParametersHNSW_efSearch_get, _swigfaiss.SearchParametersHNSW_efSearch_set)
    check_relative_distance = property(_swigfaiss.SearchParametersHNSW_check_relative_distance_get, _swigfaiss.SearchParametersHNSW_check_relative_distance_set)
    __swig_destroy__ = _swigfaiss.delete_SearchParametersHNSW

    def __init__(self):
        _swigfaiss.SearchParametersHNSW_swiginit(self, _swigfaiss.new_SearchParametersHNSW())

# Register SearchParametersHNSW in _swigfaiss:
_swigfaiss.SearchParametersHNSW_swigregister(SearchParametersHNSW)
class HNSW(object):
    r"""SWIG proxy for the C++ HNSW link structure (exposed as IndexHNSW.hnsw)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    assign_probas = property(_swigfaiss.HNSW_assign_probas_get, _swigfaiss.HNSW_assign_probas_set, doc=r""" assignment probability to each layer (sum=1)""")
    cum_nneighbor_per_level = property(_swigfaiss.HNSW_cum_nneighbor_per_level_get, _swigfaiss.HNSW_cum_nneighbor_per_level_set, doc=r"""
    number of neighbors stored per layer (cumulative), should not
    be changed after first add
    """)
    levels = property(_swigfaiss.HNSW_levels_get, _swigfaiss.HNSW_levels_set, doc=r""" level of each vector (base level = 1), size = ntotal""")
    offsets = property(_swigfaiss.HNSW_offsets_get, _swigfaiss.HNSW_offsets_set, doc=r"""
    offsets[i] is the offset in the neighbors array where vector i is stored
    size ntotal + 1
    """)
    neighbors = property(_swigfaiss.HNSW_neighbors_get, _swigfaiss.HNSW_neighbors_set, doc=r"""
    neighbors[offsets[i]:offsets[i+1]] is the list of neighbors of vector i
    for all levels. this is where all storage goes.
    """)
    entry_point = property(_swigfaiss.HNSW_entry_point_get, _swigfaiss.HNSW_entry_point_set, doc=r"""
    entry point in the search structure (one of the points with maximum
    level
    """)
    rng = property(_swigfaiss.HNSW_rng_get, _swigfaiss.HNSW_rng_set)
    max_level = property(_swigfaiss.HNSW_max_level_get, _swigfaiss.HNSW_max_level_set, doc=r""" maximum level""")
    efConstruction = property(_swigfaiss.HNSW_efConstruction_get, _swigfaiss.HNSW_efConstruction_set, doc=r""" expansion factor at construction time""")
    efSearch = property(_swigfaiss.HNSW_efSearch_get, _swigfaiss.HNSW_efSearch_set, doc=r""" expansion factor at search time""")
    check_relative_distance = property(_swigfaiss.HNSW_check_relative_distance_get, _swigfaiss.HNSW_check_relative_distance_set, doc=r"""
    during search: do we check whether the next best distance is good
    enough?
    """)
    upper_beam = property(_swigfaiss.HNSW_upper_beam_get, _swigfaiss.HNSW_upper_beam_set, doc=r""" number of entry points in levels > 0.""")
    search_bounded_queue = property(_swigfaiss.HNSW_search_bounded_queue_get, _swigfaiss.HNSW_search_bounded_queue_set, doc=r""" use bounded queue during exploration""")

    def set_default_probas(self, M, levelMult):
        r"""
        initialize the assign_probas and cum_nneighbor_per_level to
        have 2*M links on level 0 and M links on levels > 0
        """
        return _swigfaiss.HNSW_set_default_probas(self, M, levelMult)

    def set_nb_neighbors(self, level_no, n):
        r""" set nb of neighbors for this level (before adding anything)"""
        return _swigfaiss.HNSW_set_nb_neighbors(self, level_no, n)

    def nb_neighbors(self, layer_no):
        r""" nb of neighbors for this level"""
        return _swigfaiss.HNSW_nb_neighbors(self, layer_no)

    def cum_nb_neighbors(self, layer_no):
        r""" cumulative nb up to (and excluding) this level"""
        return _swigfaiss.HNSW_cum_nb_neighbors(self, layer_no)

    def neighbor_range(self, no, layer_no, begin, end):
        r""" range of entries in the neighbors table of vertex no at layer_no"""
        return _swigfaiss.HNSW_neighbor_range(self, no, layer_no, begin, end)

    def __init__(self, M=32):
        r""" only mandatory parameter: nb of neighbors"""
        _swigfaiss.HNSW_swiginit(self, _swigfaiss.new_HNSW(M))

    def random_level(self):
        r""" pick a random level for a new point"""
        return _swigfaiss.HNSW_random_level(self)

    def fill_with_random_links(self, n):
        r""" add n random levels to table (for debugging...)"""
        return _swigfaiss.HNSW_fill_with_random_links(self, n)

    def add_links_starting_from(self, ptdis, pt_id, nearest, d_nearest, level, locks, vt):
        return _swigfaiss.HNSW_add_links_starting_from(self, ptdis, pt_id, nearest, d_nearest, level, locks, vt)

    def add_with_locks(self, ptdis, pt_level, pt_id, locks, vt):
        r"""
        add point pt_id on all levels <= pt_level and build the link
        structure for them.
        """
        return _swigfaiss.HNSW_add_with_locks(self, ptdis, pt_level, pt_id, locks, vt)

    def search(self, qdis, k, I, D, vt, params=None):
        r""" search interface for 1 point, single thread"""
        return _swigfaiss.HNSW_search(self, qdis, k, I, D, vt, params)

    def search_level_0(self, qdis, k, idxi, simi, nprobe, nearest_i, nearest_d, search_type, search_stats, vt):
        r""" search only in level 0 from a given vertex"""
        return _swigfaiss.HNSW_search_level_0(self, qdis, k, idxi, simi, nprobe, nearest_i, nearest_d, search_type, search_stats, vt)

    def reset(self):
        return _swigfaiss.HNSW_reset(self)

    def clear_neighbor_tables(self, level):
        return _swigfaiss.HNSW_clear_neighbor_tables(self, level)

    def print_neighbor_stats(self, level):
        return _swigfaiss.HNSW_print_neighbor_stats(self, level)

    def prepare_level_tab(self, n, preset_levels=False):
        return _swigfaiss.HNSW_prepare_level_tab(self, n, preset_levels)

    @staticmethod
    def shrink_neighbor_list(qdis, input, output, max_size):
        # Also exposed as the module-level function HNSW_shrink_neighbor_list.
        return _swigfaiss.HNSW_shrink_neighbor_list(qdis, input, output, max_size)

    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_HNSW

# Register HNSW in _swigfaiss:
_swigfaiss.HNSW_swigregister(HNSW)
def HNSW_shrink_neighbor_list(qdis, input, output, max_size):
    # Module-level alias SWIG generates for the static method HNSW.shrink_neighbor_list.
    return _swigfaiss.HNSW_shrink_neighbor_list(qdis, input, output, max_size)
class HNSWStats(object):
    r"""SWIG proxy for the C++ HNSWStats counters (n1, n2, n3, ndis, nreorder)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    n1 = property(_swigfaiss.HNSWStats_n1_get, _swigfaiss.HNSWStats_n1_set)
    n2 = property(_swigfaiss.HNSWStats_n2_get, _swigfaiss.HNSWStats_n2_set)
    n3 = property(_swigfaiss.HNSWStats_n3_get, _swigfaiss.HNSWStats_n3_set)
    ndis = property(_swigfaiss.HNSWStats_ndis_get, _swigfaiss.HNSWStats_ndis_set)
    nreorder = property(_swigfaiss.HNSWStats_nreorder_get, _swigfaiss.HNSWStats_nreorder_set)

    def __init__(self, n1=0, n2=0, n3=0, ndis=0, nreorder=0):
        _swigfaiss.HNSWStats_swiginit(self, _swigfaiss.new_HNSWStats(n1, n2, n3, ndis, nreorder))

    def reset(self):
        # Reset the counters (native implementation).
        return _swigfaiss.HNSWStats_reset(self)

    def combine(self, other):
        # Merge counters from another HNSWStats instance (native implementation).
        return _swigfaiss.HNSWStats_combine(self, other)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_HNSWStats

# Register HNSWStats in _swigfaiss:
_swigfaiss.HNSWStats_swigregister(HNSWStats)
class ReconstructFromNeighbors(object):
    r"""SWIG proxy that reconstructs vectors from their graph neighbors (used with IndexHNSW)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # NOTE: `index` is read-only (SWIG emitted a getter only).
    index = property(_swigfaiss.ReconstructFromNeighbors_index_get)
    M = property(_swigfaiss.ReconstructFromNeighbors_M_get, _swigfaiss.ReconstructFromNeighbors_M_set)
    k = property(_swigfaiss.ReconstructFromNeighbors_k_get, _swigfaiss.ReconstructFromNeighbors_k_set)
    nsq = property(_swigfaiss.ReconstructFromNeighbors_nsq_get, _swigfaiss.ReconstructFromNeighbors_nsq_set)
    code_size = property(_swigfaiss.ReconstructFromNeighbors_code_size_get, _swigfaiss.ReconstructFromNeighbors_code_size_set)
    k_reorder = property(_swigfaiss.ReconstructFromNeighbors_k_reorder_get, _swigfaiss.ReconstructFromNeighbors_k_reorder_set)
    codebook = property(_swigfaiss.ReconstructFromNeighbors_codebook_get, _swigfaiss.ReconstructFromNeighbors_codebook_set)
    codes = property(_swigfaiss.ReconstructFromNeighbors_codes_get, _swigfaiss.ReconstructFromNeighbors_codes_set)
    ntotal = property(_swigfaiss.ReconstructFromNeighbors_ntotal_get, _swigfaiss.ReconstructFromNeighbors_ntotal_set)
    d = property(_swigfaiss.ReconstructFromNeighbors_d_get, _swigfaiss.ReconstructFromNeighbors_d_set)
    dsub = property(_swigfaiss.ReconstructFromNeighbors_dsub_get, _swigfaiss.ReconstructFromNeighbors_dsub_set)

    def __init__(self, index, k=256, nsq=1):
        _swigfaiss.ReconstructFromNeighbors_swiginit(self, _swigfaiss.new_ReconstructFromNeighbors(index, k, nsq))

    def add_codes(self, n, x):
        r"""
        codes must be added in the correct order and the IndexHNSW
        must be populated and sorted
        """
        return _swigfaiss.ReconstructFromNeighbors_add_codes(self, n, x)

    def compute_distances(self, n, shortlist, query, distances):
        return _swigfaiss.ReconstructFromNeighbors_compute_distances(self, n, shortlist, query, distances)

    def estimate_code(self, x, i, code):
        r""" called by add_codes"""
        return _swigfaiss.ReconstructFromNeighbors_estimate_code(self, x, i, code)

    def reconstruct(self, i, x, tmp):
        r""" called by compute_distances"""
        return _swigfaiss.ReconstructFromNeighbors_reconstruct(self, i, x, tmp)

    def reconstruct_n(self, n0, ni, x):
        return _swigfaiss.ReconstructFromNeighbors_reconstruct_n(self, n0, ni, x)

    def get_neighbor_table(self, i, out):
        r""" get the M+1 -by-d table for neighbor coordinates for vector i"""
        return _swigfaiss.ReconstructFromNeighbors_get_neighbor_table(self, i, out)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_ReconstructFromNeighbors

# Register ReconstructFromNeighbors in _swigfaiss:
_swigfaiss.ReconstructFromNeighbors_swigregister(ReconstructFromNeighbors)
class IndexHNSW(Index):
    r"""
    The HNSW index is a normal random-access index with a HNSW
    link structure built on top
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    hnsw = property(_swigfaiss.IndexHNSW_hnsw_get, _swigfaiss.IndexHNSW_hnsw_set)
    own_fields = property(_swigfaiss.IndexHNSW_own_fields_get, _swigfaiss.IndexHNSW_own_fields_set)
    storage = property(_swigfaiss.IndexHNSW_storage_get, _swigfaiss.IndexHNSW_storage_set)
    reconstruct_from_neighbors = property(_swigfaiss.IndexHNSW_reconstruct_from_neighbors_get, _swigfaiss.IndexHNSW_reconstruct_from_neighbors_set)

    def __init__(self, *args):
        _swigfaiss.IndexHNSW_swiginit(self, _swigfaiss.new_IndexHNSW(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexHNSW

    def add(self, n, x):
        return _swigfaiss.IndexHNSW_add(self, n, x)

    def train(self, n, x):
        r""" Trains the storage if needed"""
        return _swigfaiss.IndexHNSW_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r""" entry point for search"""
        return _swigfaiss.IndexHNSW_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexHNSW_reconstruct(self, key, recons)

    def reset(self):
        return _swigfaiss.IndexHNSW_reset(self)

    def shrink_level_0_neighbors(self, size):
        return _swigfaiss.IndexHNSW_shrink_level_0_neighbors(self, size)

    def search_level_0(self, n, x, k, nearest, nearest_d, distances, labels, nprobe=1, search_type=1):
        r"""
        Perform search only on level 0, given the starting points for
        each vertex.
        :type search_type: int, optional
        :param search_type: 1:perform one search per nprobe, 2: enqueue
        all entry points
        """
        return _swigfaiss.IndexHNSW_search_level_0(self, n, x, k, nearest, nearest_d, distances, labels, nprobe, search_type)

    def init_level_0_from_knngraph(self, k, D, I):
        r""" alternative graph building"""
        return _swigfaiss.IndexHNSW_init_level_0_from_knngraph(self, k, D, I)

    def init_level_0_from_entry_points(self, npt, points, nearests):
        r""" alternative graph building"""
        return _swigfaiss.IndexHNSW_init_level_0_from_entry_points(self, npt, points, nearests)

    def reorder_links(self):
        return _swigfaiss.IndexHNSW_reorder_links(self)

    def link_singletons(self):
        return _swigfaiss.IndexHNSW_link_singletons(self)

# Register IndexHNSW in _swigfaiss:
_swigfaiss.IndexHNSW_swigregister(IndexHNSW)
class IndexHNSWFlat(IndexHNSW):
    r"""
    Flat index topped with a HNSW structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexHNSWFlat_swiginit(self, _swigfaiss.new_IndexHNSWFlat(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexHNSWFlat

# Register IndexHNSWFlat in _swigfaiss:
_swigfaiss.IndexHNSWFlat_swigregister(IndexHNSWFlat)
class IndexHNSWPQ(IndexHNSW):
    r"""
    PQ index topped with a HNSW structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexHNSWPQ_swiginit(self, _swigfaiss.new_IndexHNSWPQ(*args))

    def train(self, n, x):
        return _swigfaiss.IndexHNSWPQ_train(self, n, x)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexHNSWPQ

# Register IndexHNSWPQ in _swigfaiss:
_swigfaiss.IndexHNSWPQ_swigregister(IndexHNSWPQ)
class IndexHNSWSQ(IndexHNSW):
    r"""
    SQ index topped with a HNSW structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexHNSWSQ_swiginit(self, _swigfaiss.new_IndexHNSWSQ(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexHNSWSQ

# Register IndexHNSWSQ in _swigfaiss:
_swigfaiss.IndexHNSWSQ_swigregister(IndexHNSWSQ)
class IndexHNSW2Level(IndexHNSW):
    r""" 2-level code structure with fast random access"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexHNSW2Level_swiginit(self, _swigfaiss.new_IndexHNSW2Level(*args))

    def flip_to_ivf(self):
        return _swigfaiss.IndexHNSW2Level_flip_to_ivf(self)

    def search(self, n, x, k, distances, labels, params=None):
        r""" entry point for search"""
        return _swigfaiss.IndexHNSW2Level_search(self, n, x, k, distances, labels, params)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexHNSW2Level

# Register IndexHNSW2Level in _swigfaiss:
_swigfaiss.IndexHNSW2Level_swigregister(IndexHNSW2Level)
def smawk(nrows, ncols, x, argmins):
    r"""
    SMAWK algorithm. Find the row minima of a monotone matrix.
    Expose this for testing.
    :type nrows: int
    :param nrows: number of rows
    :type ncols: int
    :param ncols: number of columns
    :type x: float
    :param x: input matrix, size (nrows, ncols)
    :type argmins: int
    :param argmins: argmin of each row
    """
    # Thin wrapper: the algorithm itself runs in the native library.
    return _swigfaiss.smawk(nrows, ncols, x, argmins)
def kmeans1d(x, n, nclusters, centroids):
    r"""
    Exact 1D K-Means by dynamic programming
    From "Fast Exact k-Means, k-Medians and Bregman Divergence Clustering in 1D"
    Allan Grønlund, Kasper Green Larsen, Alexander Mathiasen, Jesper Sindahl
    Nielsen, Stefan Schneider, Mingzhou Song, ArXiV'17
    Section 2.2
    https://arxiv.org/abs/1701.07204
    :type x: float
    :param x: input 1D array
    :type n: int
    :param n: input array length
    :type nclusters: int
    :param nclusters: number of clusters
    :type centroids: float
    :param centroids: output centroids, size nclusters
    :rtype: float
    :return: imbalance factor
    """
    # Thin wrapper: the algorithm itself runs in the native library.
    return _swigfaiss.kmeans1d(x, n, nclusters, centroids)
class Neighbor(object):
    r"""SWIG proxy for a neighbor record with id, distance and flag fields."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    id = property(_swigfaiss.Neighbor_id_get, _swigfaiss.Neighbor_id_set)
    distance = property(_swigfaiss.Neighbor_distance_get, _swigfaiss.Neighbor_distance_set)
    flag = property(_swigfaiss.Neighbor_flag_get, _swigfaiss.Neighbor_flag_set)

    def __init__(self, *args):
        _swigfaiss.Neighbor_swiginit(self, _swigfaiss.new_Neighbor(*args))

    def __lt__(self, other):
        # Ordering is implemented natively (presumably by distance — confirm in the C++ source).
        return _swigfaiss.Neighbor___lt__(self, other)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_Neighbor

# Register Neighbor in _swigfaiss:
_swigfaiss.Neighbor_swigregister(Neighbor)
class Nhood(object):
    r"""SWIG proxy for a neighborhood record (candidate pool plus new/old neighbor lists)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pool = property(_swigfaiss.Nhood_pool_get, _swigfaiss.Nhood_pool_set)
    M = property(_swigfaiss.Nhood_M_get, _swigfaiss.Nhood_M_set)
    nn_old = property(_swigfaiss.Nhood_nn_old_get, _swigfaiss.Nhood_nn_old_set)
    nn_new = property(_swigfaiss.Nhood_nn_new_get, _swigfaiss.Nhood_nn_new_set)
    rnn_old = property(_swigfaiss.Nhood_rnn_old_get, _swigfaiss.Nhood_rnn_old_set)
    rnn_new = property(_swigfaiss.Nhood_rnn_new_get, _swigfaiss.Nhood_rnn_new_set)

    def __init__(self, *args):
        _swigfaiss.Nhood_swiginit(self, _swigfaiss.new_Nhood(*args))

    def insert(self, id, dist):
        return _swigfaiss.Nhood_insert(self, id, dist)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_Nhood

# Register Nhood in _swigfaiss:
_swigfaiss.Nhood_swigregister(Nhood)
class NNDescent(object):
    r"""SWIG proxy for the NNDescent k-NN graph builder."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, d, K):
        _swigfaiss.NNDescent_swiginit(self, _swigfaiss.new_NNDescent(d, K))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_NNDescent

    def build(self, qdis, n, verbose):
        return _swigfaiss.NNDescent_build(self, qdis, n, verbose)

    def search(self, qdis, topk, indices, dists, vt):
        return _swigfaiss.NNDescent_search(self, qdis, topk, indices, dists, vt)

    def reset(self):
        return _swigfaiss.NNDescent_reset(self)

    def init_graph(self, qdis):
        r""" Initialize the KNN graph randomly"""
        return _swigfaiss.NNDescent_init_graph(self, qdis)

    def nndescent(self, qdis, verbose):
        r""" Perform NNDescent algorithm"""
        return _swigfaiss.NNDescent_nndescent(self, qdis, verbose)

    def join(self, qdis):
        r""" Perform local join on each node"""
        return _swigfaiss.NNDescent_join(self, qdis)

    def update(self):
        r""" Sample new neighbors for each node to perform local join later"""
        return _swigfaiss.NNDescent_update(self)

    def generate_eval_set(self, qdis, c, v, N):
        r""" Sample a small number of points to evaluate the quality of KNNG built"""
        return _swigfaiss.NNDescent_generate_eval_set(self, qdis, c, v, N)

    def eval_recall(self, ctrl_points, acc_eval_set):
        r""" Evaluate the quality of KNNG built"""
        return _swigfaiss.NNDescent_eval_recall(self, ctrl_points, acc_eval_set)
    has_built = property(_swigfaiss.NNDescent_has_built_get, _swigfaiss.NNDescent_has_built_set)
    K = property(_swigfaiss.NNDescent_K_get, _swigfaiss.NNDescent_K_set)
    S = property(_swigfaiss.NNDescent_S_get, _swigfaiss.NNDescent_S_set)
    R = property(_swigfaiss.NNDescent_R_get, _swigfaiss.NNDescent_R_set)
    L = property(_swigfaiss.NNDescent_L_get, _swigfaiss.NNDescent_L_set)
    iter = property(_swigfaiss.NNDescent_iter_get, _swigfaiss.NNDescent_iter_set)
    search_L = property(_swigfaiss.NNDescent_search_L_get, _swigfaiss.NNDescent_search_L_set)
    random_seed = property(_swigfaiss.NNDescent_random_seed_get, _swigfaiss.NNDescent_random_seed_set)
    d = property(_swigfaiss.NNDescent_d_get, _swigfaiss.NNDescent_d_set)
    ntotal = property(_swigfaiss.NNDescent_ntotal_get, _swigfaiss.NNDescent_ntotal_set)
    graph = property(_swigfaiss.NNDescent_graph_get, _swigfaiss.NNDescent_graph_set)
    final_graph = property(_swigfaiss.NNDescent_final_graph_get, _swigfaiss.NNDescent_final_graph_set)

# Register NNDescent in _swigfaiss:
_swigfaiss.NNDescent_swigregister(NNDescent)
class IndexNNDescent(Index):
    r"""
    The NNDescent index is a normal random-access index with an NNDescent
    link structure built on top
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nndescent = property(_swigfaiss.IndexNNDescent_nndescent_get, _swigfaiss.IndexNNDescent_nndescent_set)
    own_fields = property(_swigfaiss.IndexNNDescent_own_fields_get, _swigfaiss.IndexNNDescent_own_fields_set)
    storage = property(_swigfaiss.IndexNNDescent_storage_get, _swigfaiss.IndexNNDescent_storage_set)

    def __init__(self, *args):
        _swigfaiss.IndexNNDescent_swiginit(self, _swigfaiss.new_IndexNNDescent(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNNDescent

    def add(self, n, x):
        return _swigfaiss.IndexNNDescent_add(self, n, x)

    def train(self, n, x):
        r""" Trains the storage if needed"""
        return _swigfaiss.IndexNNDescent_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r""" entry point for search"""
        return _swigfaiss.IndexNNDescent_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexNNDescent_reconstruct(self, key, recons)

    def reset(self):
        return _swigfaiss.IndexNNDescent_reset(self)

# Register IndexNNDescent in _swigfaiss:
_swigfaiss.IndexNNDescent_swigregister(IndexNNDescent)
class IndexNNDescentFlat(IndexNNDescent):
    r"""
    Flat index topped with a NNDescent structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexNNDescentFlat_swiginit(self, _swigfaiss.new_IndexNNDescentFlat(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNNDescentFlat

# Register IndexNNDescentFlat in _swigfaiss:
_swigfaiss.IndexNNDescentFlat_swigregister(IndexNNDescentFlat)
class IndexIVFFlat(IndexIVF):
    r"""
    Inverted file with stored vectors. Here the inverted file
    pre-selects the vectors to be searched, but they are not otherwise
    encoded, the code array just contains the raw float entries.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def add_core(self, n, x, xids, precomputed_idx):
        return _swigfaiss.IndexIVFFlat_add_core(self, n, x, xids, precomputed_idx)

    def encode_vectors(self, n, x, list_nos, codes, include_listnos=False):
        return _swigfaiss.IndexIVFFlat_encode_vectors(self, n, x, list_nos, codes, include_listnos)

    def get_InvertedListScanner(self, store_pairs, sel):
        return _swigfaiss.IndexIVFFlat_get_InvertedListScanner(self, store_pairs, sel)

    def reconstruct_from_offset(self, list_no, offset, recons):
        return _swigfaiss.IndexIVFFlat_reconstruct_from_offset(self, list_no, offset, recons)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexIVFFlat_sa_decode(self, n, bytes, x)

    def __init__(self, *args):
        _swigfaiss.IndexIVFFlat_swiginit(self, _swigfaiss.new_IndexIVFFlat(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexIVFFlat

# Register IndexIVFFlat in _swigfaiss:
_swigfaiss.IndexIVFFlat_swigregister(IndexIVFFlat)
class IndexIVFFlatDedup(IndexIVFFlat):
    r"""IndexIVFFlat variant that deduplicates identical stored vectors."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    instances = property(_swigfaiss.IndexIVFFlatDedup_instances_get, _swigfaiss.IndexIVFFlatDedup_instances_set, doc=r"""
    Maps ids stored in the index to the ids of vectors that are
    the same. When a vector is unique, it does not appear in the
    instances map
    """)

    def train(self, n, x):
        r""" also dedups the training set"""
        return _swigfaiss.IndexIVFFlatDedup_train(self, n, x)

    def add_with_ids(self, n, x, xids):
        r""" implemented for all IndexIVF* classes"""
        return _swigfaiss.IndexIVFFlatDedup_add_with_ids(self, n, x, xids)

    def search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params=None, stats=None):
        return _swigfaiss.IndexIVFFlatDedup_search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params, stats)

    def remove_ids(self, sel):
        return _swigfaiss.IndexIVFFlatDedup_remove_ids(self, sel)

    def range_search(self, n, x, radius, result, params=None):
        r""" not implemented"""
        return _swigfaiss.IndexIVFFlatDedup_range_search(self, n, x, radius, result, params)

    def update_vectors(self, nv, idx, v):
        r""" not implemented"""
        return _swigfaiss.IndexIVFFlatDedup_update_vectors(self, nv, idx, v)

    def reconstruct_from_offset(self, list_no, offset, recons):
        r""" not implemented"""
        return _swigfaiss.IndexIVFFlatDedup_reconstruct_from_offset(self, list_no, offset, recons)

    def __init__(self, *args):
        _swigfaiss.IndexIVFFlatDedup_swiginit(self, _swigfaiss.new_IndexIVFFlatDedup(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexIVFFlatDedup

# Register IndexIVFFlatDedup in _swigfaiss:
_swigfaiss.IndexIVFFlatDedup_swigregister(IndexIVFFlatDedup)
def storage_distance_computer(storage):
    # Thin wrapper over the native helper of the same name.
    return _swigfaiss.storage_distance_computer(storage)
class NSG(object):
    r"""SWIG proxy for the C++ NSG graph structure (exposed as IndexNSG.nsg)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    ntotal = property(_swigfaiss.NSG_ntotal_get, _swigfaiss.NSG_ntotal_set, doc=r""" nb of nodes""")
    R = property(_swigfaiss.NSG_R_get, _swigfaiss.NSG_R_set, doc=r""" nb of neighbors per node""")
    L = property(_swigfaiss.NSG_L_get, _swigfaiss.NSG_L_set, doc=r""" length of the search path at construction time""")
    C = property(_swigfaiss.NSG_C_get, _swigfaiss.NSG_C_set, doc=r""" candidate pool size at construction time""")
    search_L = property(_swigfaiss.NSG_search_L_get, _swigfaiss.NSG_search_L_set, doc=r""" length of the search path""")
    enterpoint = property(_swigfaiss.NSG_enterpoint_get, _swigfaiss.NSG_enterpoint_set, doc=r""" enterpoint""")
    final_graph = property(_swigfaiss.NSG_final_graph_get, _swigfaiss.NSG_final_graph_set, doc=r""" NSG graph structure""")
    is_built = property(_swigfaiss.NSG_is_built_get, _swigfaiss.NSG_is_built_set, doc=r""" NSG is built or not""")
    rng = property(_swigfaiss.NSG_rng_get, _swigfaiss.NSG_rng_set, doc=r""" random generator""")

    def __init__(self, R=32):
        _swigfaiss.NSG_swiginit(self, _swigfaiss.new_NSG(R))

    def build(self, storage, n, knn_graph, verbose):
        return _swigfaiss.NSG_build(self, storage, n, knn_graph, verbose)

    def reset(self):
        return _swigfaiss.NSG_reset(self)

    def search(self, dis, k, I, D, vt):
        return _swigfaiss.NSG_search(self, dis, k, I, D, vt)

    def init_graph(self, storage, knn_graph):
        return _swigfaiss.NSG_init_graph(self, storage, knn_graph)

    def add_reverse_links(self, q, locks, dis, graph):
        return _swigfaiss.NSG_add_reverse_links(self, q, locks, dis, graph)

    def sync_prune(self, q, pool, dis, vt, knn_graph, graph):
        return _swigfaiss.NSG_sync_prune(self, q, pool, dis, vt, knn_graph, graph)

    def link(self, storage, knn_graph, graph, verbose):
        return _swigfaiss.NSG_link(self, storage, knn_graph, graph, verbose)

    def tree_grow(self, storage, degrees):
        return _swigfaiss.NSG_tree_grow(self, storage, degrees)

    def dfs(self, vt, root, cnt):
        return _swigfaiss.NSG_dfs(self, vt, root, cnt)

    def attach_unlinked(self, storage, vt, vt2, degrees):
        return _swigfaiss.NSG_attach_unlinked(self, storage, vt, vt2, degrees)

    def check_graph(self):
        return _swigfaiss.NSG_check_graph(self)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_NSG

# Register NSG in _swigfaiss:
_swigfaiss.NSG_swigregister(NSG)
class IndexNSG(Index):
    r"""
    The NSG index is a normal random-access index with a NSG
    link structure built on top
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nsg = property(_swigfaiss.IndexNSG_nsg_get, _swigfaiss.IndexNSG_nsg_set, doc=r""" the link strcuture""")
    own_fields = property(_swigfaiss.IndexNSG_own_fields_get, _swigfaiss.IndexNSG_own_fields_set, doc=r""" the sequential storage""")
    storage = property(_swigfaiss.IndexNSG_storage_get, _swigfaiss.IndexNSG_storage_set)
    is_built = property(_swigfaiss.IndexNSG_is_built_get, _swigfaiss.IndexNSG_is_built_set, doc=r""" the index is built or not""")
    GK = property(_swigfaiss.IndexNSG_GK_get, _swigfaiss.IndexNSG_GK_set, doc=r""" K of KNN graph for building""")
    build_type = property(_swigfaiss.IndexNSG_build_type_get, _swigfaiss.IndexNSG_build_type_set, doc=r"""
    indicate how to build a knn graph
    - 0: build NSG with brute force search
    - 1: build NSG with NNDescent
    """)
    nndescent_S = property(_swigfaiss.IndexNSG_nndescent_S_get, _swigfaiss.IndexNSG_nndescent_S_set, doc=r""" parameters for nndescent""")
    nndescent_R = property(_swigfaiss.IndexNSG_nndescent_R_get, _swigfaiss.IndexNSG_nndescent_R_set)
    nndescent_L = property(_swigfaiss.IndexNSG_nndescent_L_get, _swigfaiss.IndexNSG_nndescent_L_set)
    nndescent_iter = property(_swigfaiss.IndexNSG_nndescent_iter_get, _swigfaiss.IndexNSG_nndescent_iter_set)

    def __init__(self, *args):
        _swigfaiss.IndexNSG_swiginit(self, _swigfaiss.new_IndexNSG(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNSG

    def build(self, n, x, knn_graph, GK):
        return _swigfaiss.IndexNSG_build(self, n, x, knn_graph, GK)

    def add(self, n, x):
        return _swigfaiss.IndexNSG_add(self, n, x)

    def train(self, n, x):
        r""" Trains the storage if needed"""
        return _swigfaiss.IndexNSG_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r""" entry point for search"""
        return _swigfaiss.IndexNSG_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexNSG_reconstruct(self, key, recons)

    def reset(self):
        return _swigfaiss.IndexNSG_reset(self)

    def check_knn_graph(self, knn_graph, n, K):
        return _swigfaiss.IndexNSG_check_knn_graph(self, knn_graph, n, K)

# Register IndexNSG in _swigfaiss:
_swigfaiss.IndexNSG_swigregister(IndexNSG)
class IndexNSGFlat(IndexNSG):
    r"""
    Flat index topped with a NSG structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexNSGFlat_swiginit(self, _swigfaiss.new_IndexNSGFlat(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNSGFlat

# Register IndexNSGFlat in _swigfaiss:
_swigfaiss.IndexNSGFlat_swigregister(IndexNSGFlat)
class IndexNSGPQ(IndexNSG):
    r"""
    PQ index topped with a NSG structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexNSGPQ_swiginit(self, _swigfaiss.new_IndexNSGPQ(*args))

    def train(self, n, x):
        return _swigfaiss.IndexNSGPQ_train(self, n, x)
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNSGPQ

# Register IndexNSGPQ in _swigfaiss:
_swigfaiss.IndexNSGPQ_swigregister(IndexNSGPQ)
class IndexNSGSQ(IndexNSG):
    r"""
    SQ index topped with a NSG structure to access elements
    more efficiently.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexNSGSQ_swiginit(self, _swigfaiss.new_IndexNSGSQ(*args))
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_IndexNSGSQ

# Register IndexNSGSQ in _swigfaiss:
_swigfaiss.IndexNSGSQ_swigregister(IndexNSGSQ)
class OnDiskOneList(object):
    r"""SWIG proxy describing one on-disk inverted list (size, capacity, offset)."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    size = property(_swigfaiss.OnDiskOneList_size_get, _swigfaiss.OnDiskOneList_size_set)
    capacity = property(_swigfaiss.OnDiskOneList_capacity_get, _swigfaiss.OnDiskOneList_capacity_set)
    offset = property(_swigfaiss.OnDiskOneList_offset_get, _swigfaiss.OnDiskOneList_offset_set)

    def __init__(self):
        _swigfaiss.OnDiskOneList_swiginit(self, _swigfaiss.new_OnDiskOneList())
    # Bound C++ destructor, invoked by the SWIG proxy machinery on deletion.
    __swig_destroy__ = _swigfaiss.delete_OnDiskOneList

# Register OnDiskOneList in _swigfaiss:
_swigfaiss.OnDiskOneList_swigregister(OnDiskOneList)
# SWIG proxy for the C++ class faiss::OnDiskInvertedLists; every method
# delegates to a flat function in the native _swigfaiss extension module.
class OnDiskInvertedLists(InvertedLists):
    r"""
    On-disk storage of inverted lists.
    The data is stored in a mmapped chunk of memory (base pointer ptr,
    size totsize). Each list is a range of memory that contains (object
    List) that contains:
    - uint8_t codes[capacity * code_size]
    - followed by idx_t ids[capacity]
    in each of the arrays, the size <= capacity first elements are
    used, the rest is not initialized.
    Addition and resize are supported by:
    - rounding up the capacity of the lists to a power of two
    - maintaining a list of empty slots, sorted by size.
    - resizing the mmapped block is adjusted as needed.
    An OnDiskInvertedLists is compact if the size == capacity for all
    lists and there are no available slots.
    Addition to the invlists is slow. For incremental add it is better
    to use a default ArrayInvertedLists object and convert it to an
    OnDisk with merge_from.
    When it is known that a set of lists will be accessed, it is useful
    to call prefetch_lists, that launches a set of threads to read the
    lists in parallel.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Attribute proxies generated by SWIG: each maps to a getter/setter pair
    # in the native module operating on the wrapped C++ struct fields.
    lists = property(_swigfaiss.OnDiskInvertedLists_lists_get, _swigfaiss.OnDiskInvertedLists_lists_set)
    slots = property(_swigfaiss.OnDiskInvertedLists_slots_get, _swigfaiss.OnDiskInvertedLists_slots_set)
    filename = property(_swigfaiss.OnDiskInvertedLists_filename_get, _swigfaiss.OnDiskInvertedLists_filename_set)
    totsize = property(_swigfaiss.OnDiskInvertedLists_totsize_get, _swigfaiss.OnDiskInvertedLists_totsize_set)
    ptr = property(_swigfaiss.OnDiskInvertedLists_ptr_get, _swigfaiss.OnDiskInvertedLists_ptr_set)
    read_only = property(_swigfaiss.OnDiskInvertedLists_read_only_get, _swigfaiss.OnDiskInvertedLists_read_only_set)

    def list_size(self, list_no):
        return _swigfaiss.OnDiskInvertedLists_list_size(self, list_no)

    def get_codes(self, list_no):
        return _swigfaiss.OnDiskInvertedLists_get_codes(self, list_no)

    def get_ids(self, list_no):
        return _swigfaiss.OnDiskInvertedLists_get_ids(self, list_no)

    def add_entries(self, list_no, n_entry, ids, code):
        return _swigfaiss.OnDiskInvertedLists_add_entries(self, list_no, n_entry, ids, code)

    def update_entries(self, list_no, offset, n_entry, ids, code):
        return _swigfaiss.OnDiskInvertedLists_update_entries(self, list_no, offset, n_entry, ids, code)

    def resize(self, list_no, new_size):
        return _swigfaiss.OnDiskInvertedLists_resize(self, list_no, new_size)

    def merge_from(self, ils, n_il, verbose=False):
        return _swigfaiss.OnDiskInvertedLists_merge_from(self, ils, n_il, verbose)

    def merge_from_1(self, il, verbose=False):
        r""" same as merge_from for a single invlist"""
        return _swigfaiss.OnDiskInvertedLists_merge_from_1(self, il, verbose)

    def crop_invlists(self, l0, l1):
        r""" restrict the inverted lists to l0:l1 without touching the mmapped region"""
        return _swigfaiss.OnDiskInvertedLists_crop_invlists(self, l0, l1)

    def prefetch_lists(self, list_nos, nlist):
        return _swigfaiss.OnDiskInvertedLists_prefetch_lists(self, list_nos, nlist)
    __swig_destroy__ = _swigfaiss.delete_OnDiskInvertedLists
    # The members below are implementation details of the C++ class
    # (locking, prefetching) that SWIG exposes as well.
    locks = property(_swigfaiss.OnDiskInvertedLists_locks_get, _swigfaiss.OnDiskInvertedLists_locks_set)
    pf = property(_swigfaiss.OnDiskInvertedLists_pf_get, _swigfaiss.OnDiskInvertedLists_pf_set)
    prefetch_nthread = property(_swigfaiss.OnDiskInvertedLists_prefetch_nthread_get, _swigfaiss.OnDiskInvertedLists_prefetch_nthread_set)

    def do_mmap(self):
        return _swigfaiss.OnDiskInvertedLists_do_mmap(self)

    def update_totsize(self, new_totsize):
        return _swigfaiss.OnDiskInvertedLists_update_totsize(self, new_totsize)

    def resize_locked(self, list_no, new_size):
        return _swigfaiss.OnDiskInvertedLists_resize_locked(self, list_no, new_size)

    def allocate_slot(self, capacity):
        return _swigfaiss.OnDiskInvertedLists_allocate_slot(self, capacity)

    def free_slot(self, offset, capacity):
        return _swigfaiss.OnDiskInvertedLists_free_slot(self, offset, capacity)

    def set_all_lists_sizes(self, sizes):
        r""" override all list sizes and make a packed storage"""
        return _swigfaiss.OnDiskInvertedLists_set_all_lists_sizes(self, sizes)

    def __init__(self, *args):
        _swigfaiss.OnDiskInvertedLists_swiginit(self, _swigfaiss.new_OnDiskInvertedLists(*args))

# Register OnDiskInvertedLists in _swigfaiss:
_swigfaiss.OnDiskInvertedLists_swigregister(OnDiskInvertedLists)
# SWIG proxy for faiss::ZnSphereSearch (Zn lattice nearest-vertex search).
class ZnSphereSearch(object):
    r"""
    returns the nearest vertex in the sphere to a query. Returns only
    the coordinates, not an id.
    Algorithm: all points are derived from a one atom vector up to a
    permutation and sign changes. The search function finds the most
    appropriate atom and transformation.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    dimS = property(_swigfaiss.ZnSphereSearch_dimS_get, _swigfaiss.ZnSphereSearch_dimS_set)
    r2 = property(_swigfaiss.ZnSphereSearch_r2_get, _swigfaiss.ZnSphereSearch_r2_set)
    natom = property(_swigfaiss.ZnSphereSearch_natom_get, _swigfaiss.ZnSphereSearch_natom_set)
    voc = property(_swigfaiss.ZnSphereSearch_voc_get, _swigfaiss.ZnSphereSearch_voc_set, doc=r""" size dim * ntatom""")

    def __init__(self, dim, r2):
        _swigfaiss.ZnSphereSearch_swiginit(self, _swigfaiss.new_ZnSphereSearch(dim, r2))

    def search(self, *args):
        r"""
        *Overload 1:*
        find nearest centroid. x does not need to be normalized
        |
        *Overload 2:*
        full call. Requires externally-allocated temp space
        |
        *Overload 3:*
        full call. Requires externally-allocated temp space
        """
        # SWIG dispatches among the C++ overloads based on the arguments.
        return _swigfaiss.ZnSphereSearch_search(self, *args)

    def search_multi(self, n, x, c_out, dp_out):
        return _swigfaiss.ZnSphereSearch_search_multi(self, n, x, c_out, dp_out)
    __swig_destroy__ = _swigfaiss.delete_ZnSphereSearch

# Register ZnSphereSearch in _swigfaiss:
_swigfaiss.ZnSphereSearch_swigregister(ZnSphereSearch)
# SWIG proxy for the abstract C++ base class faiss::EnumeratedVectors:
# a codec interface over a finite, enumerable collection of vectors.
class EnumeratedVectors(object):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract in C++; SWIG blocks direct construction of the proxy.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    nv = property(_swigfaiss.EnumeratedVectors_nv_get, _swigfaiss.EnumeratedVectors_nv_set, doc=r""" size of the collection""")
    dim = property(_swigfaiss.EnumeratedVectors_dim_get, _swigfaiss.EnumeratedVectors_dim_set)

    def encode(self, x):
        r""" encode a vector from a collection"""
        return _swigfaiss.EnumeratedVectors_encode(self, x)

    def decode(self, code, c):
        r""" decode it"""
        return _swigfaiss.EnumeratedVectors_decode(self, code, c)

    def encode_multi(self, nc, c, codes):
        return _swigfaiss.EnumeratedVectors_encode_multi(self, nc, c, codes)

    def decode_multi(self, nc, codes, c):
        return _swigfaiss.EnumeratedVectors_decode_multi(self, nc, codes, c)

    def find_nn(self, n, codes, nq, xq, idx, dis):
        return _swigfaiss.EnumeratedVectors_find_nn(self, n, codes, nq, xq, idx, dis)
    __swig_destroy__ = _swigfaiss.delete_EnumeratedVectors

# Register EnumeratedVectors in _swigfaiss:
_swigfaiss.EnumeratedVectors_swigregister(EnumeratedVectors)
# SWIG proxy for the C++ struct faiss::Repeat: a (value, count) pair.
class Repeat(object):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    val = property(_swigfaiss.Repeat_val_get, _swigfaiss.Repeat_val_set)
    n = property(_swigfaiss.Repeat_n_get, _swigfaiss.Repeat_n_set)

    def __init__(self):
        _swigfaiss.Repeat_swiginit(self, _swigfaiss.new_Repeat())
    __swig_destroy__ = _swigfaiss.delete_Repeat

# Register Repeat in _swigfaiss:
_swigfaiss.Repeat_swigregister(Repeat)
# SWIG proxy for the C++ struct faiss::Repeats.
class Repeats(object):
    r"""
    Repeats: used to encode a vector that has n occurrences of
    val. Encodes the signs and permutation of the vector. Useful for
    atoms.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    dim = property(_swigfaiss.Repeats_dim_get, _swigfaiss.Repeats_dim_set)
    repeats = property(_swigfaiss.Repeats_repeats_get, _swigfaiss.Repeats_repeats_set)

    def __init__(self, dim=0, c=None):
        _swigfaiss.Repeats_swiginit(self, _swigfaiss.new_Repeats(dim, c))

    def count(self):
        return _swigfaiss.Repeats_count(self)

    def encode(self, c):
        return _swigfaiss.Repeats_encode(self, c)

    def decode(self, code, c):
        return _swigfaiss.Repeats_decode(self, code, c)
    __swig_destroy__ = _swigfaiss.delete_Repeats

# Register Repeats in _swigfaiss:
_swigfaiss.Repeats_swigregister(Repeats)
# SWIG proxy for faiss::ZnSphereCodec. Note the multiple inheritance mirrors
# the C++ class hierarchy (search capability + enumerable-codec interface).
class ZnSphereCodec(ZnSphereSearch, EnumeratedVectors):
    r"""
    codec that can return ids for the encoded vectors
    uses the ZnSphereSearch to encode the vector by encoding the
    permutation and signs. Depends on ZnSphereSearch because it uses
    the atom numbers
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    code_segments = property(_swigfaiss.ZnSphereCodec_code_segments_get, _swigfaiss.ZnSphereCodec_code_segments_set)
    nv = property(_swigfaiss.ZnSphereCodec_nv_get, _swigfaiss.ZnSphereCodec_nv_set)
    code_size = property(_swigfaiss.ZnSphereCodec_code_size_get, _swigfaiss.ZnSphereCodec_code_size_set)

    def __init__(self, dim, r2):
        _swigfaiss.ZnSphereCodec_swiginit(self, _swigfaiss.new_ZnSphereCodec(dim, r2))

    def search_and_encode(self, x):
        return _swigfaiss.ZnSphereCodec_search_and_encode(self, x)

    def decode(self, code, c):
        return _swigfaiss.ZnSphereCodec_decode(self, code, c)

    def encode(self, x):
        r""" takes vectors that do not need to be centroids"""
        return _swigfaiss.ZnSphereCodec_encode(self, x)
    __swig_destroy__ = _swigfaiss.delete_ZnSphereCodec

# Register ZnSphereCodec in _swigfaiss:
_swigfaiss.ZnSphereCodec_swigregister(ZnSphereCodec)
# SWIG proxy for faiss::ZnSphereCodecRec.
class ZnSphereCodecRec(EnumeratedVectors):
    r"""
    recursive sphere codec
    Uses a recursive decomposition on the dimensions to encode
    centroids found by the ZnSphereSearch. The codes are *not*
    compatible with the ones of ZnSphereCodec
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    r2 = property(_swigfaiss.ZnSphereCodecRec_r2_get, _swigfaiss.ZnSphereCodecRec_r2_set)
    log2_dim = property(_swigfaiss.ZnSphereCodecRec_log2_dim_get, _swigfaiss.ZnSphereCodecRec_log2_dim_set)
    code_size = property(_swigfaiss.ZnSphereCodecRec_code_size_get, _swigfaiss.ZnSphereCodecRec_code_size_set)

    def __init__(self, dim, r2):
        _swigfaiss.ZnSphereCodecRec_swiginit(self, _swigfaiss.new_ZnSphereCodecRec(dim, r2))

    def encode_centroid(self, c):
        return _swigfaiss.ZnSphereCodecRec_encode_centroid(self, c)

    def decode(self, code, c):
        return _swigfaiss.ZnSphereCodecRec_decode(self, code, c)

    def encode(self, x):
        r"""
        vectors need to be centroids (does not work on arbitrary
        vectors)
        """
        return _swigfaiss.ZnSphereCodecRec_encode(self, x)
    # Internal tables of the recursive decomposition, exposed by SWIG.
    all_nv = property(_swigfaiss.ZnSphereCodecRec_all_nv_get, _swigfaiss.ZnSphereCodecRec_all_nv_set)
    all_nv_cum = property(_swigfaiss.ZnSphereCodecRec_all_nv_cum_get, _swigfaiss.ZnSphereCodecRec_all_nv_cum_set)
    decode_cache_ld = property(_swigfaiss.ZnSphereCodecRec_decode_cache_ld_get, _swigfaiss.ZnSphereCodecRec_decode_cache_ld_set)
    decode_cache = property(_swigfaiss.ZnSphereCodecRec_decode_cache_get, _swigfaiss.ZnSphereCodecRec_decode_cache_set)

    def get_nv(self, ld, r2a):
        return _swigfaiss.ZnSphereCodecRec_get_nv(self, ld, r2a)

    def get_nv_cum(self, ld, r2t, r2a):
        return _swigfaiss.ZnSphereCodecRec_get_nv_cum(self, ld, r2t, r2a)

    def set_nv_cum(self, ld, r2t, r2a, v):
        return _swigfaiss.ZnSphereCodecRec_set_nv_cum(self, ld, r2t, r2a, v)
    __swig_destroy__ = _swigfaiss.delete_ZnSphereCodecRec

# Register ZnSphereCodecRec in _swigfaiss:
_swigfaiss.ZnSphereCodecRec_swigregister(ZnSphereCodecRec)
# SWIG proxy for faiss::ZnSphereCodecAlt.
class ZnSphereCodecAlt(ZnSphereCodec):
    r"""
    Codec that uses the recursive codec if dim is a power of 2 and
    the regular one otherwise
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    use_rec = property(_swigfaiss.ZnSphereCodecAlt_use_rec_get, _swigfaiss.ZnSphereCodecAlt_use_rec_set)
    znc_rec = property(_swigfaiss.ZnSphereCodecAlt_znc_rec_get, _swigfaiss.ZnSphereCodecAlt_znc_rec_set)

    def __init__(self, dim, r2):
        _swigfaiss.ZnSphereCodecAlt_swiginit(self, _swigfaiss.new_ZnSphereCodecAlt(dim, r2))

    def encode(self, x):
        return _swigfaiss.ZnSphereCodecAlt_encode(self, x)

    def decode(self, code, c):
        return _swigfaiss.ZnSphereCodecAlt_decode(self, code, c)
    __swig_destroy__ = _swigfaiss.delete_ZnSphereCodecAlt

# Register ZnSphereCodecAlt in _swigfaiss:
_swigfaiss.ZnSphereCodecAlt_swigregister(ZnSphereCodecAlt)
# SWIG proxy for faiss::IndexLattice.
class IndexLattice(Index):
    r""" Index that encodes a vector with a series of Zn lattice quantizers"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nsq = property(_swigfaiss.IndexLattice_nsq_get, _swigfaiss.IndexLattice_nsq_set, doc=r""" number of sub-vectors""")
    dsq = property(_swigfaiss.IndexLattice_dsq_get, _swigfaiss.IndexLattice_dsq_set, doc=r""" dimension of sub-vectors""")
    zn_sphere_codec = property(_swigfaiss.IndexLattice_zn_sphere_codec_get, _swigfaiss.IndexLattice_zn_sphere_codec_set, doc=r""" the lattice quantizer""")
    scale_nbit = property(_swigfaiss.IndexLattice_scale_nbit_get, _swigfaiss.IndexLattice_scale_nbit_set, doc=r""" nb bits used to encode the scale, per subvector""")
    lattice_nbit = property(_swigfaiss.IndexLattice_lattice_nbit_get, _swigfaiss.IndexLattice_lattice_nbit_set)
    code_size = property(_swigfaiss.IndexLattice_code_size_get, _swigfaiss.IndexLattice_code_size_set, doc=r""" total, in bytes""")
    trained = property(_swigfaiss.IndexLattice_trained_get, _swigfaiss.IndexLattice_trained_set, doc=r""" mins and maxes of the vector norms, per subquantizer""")

    def __init__(self, d, nsq, scale_nbit, r2):
        _swigfaiss.IndexLattice_swiginit(self, _swigfaiss.new_IndexLattice(d, nsq, scale_nbit, r2))

    def train(self, n, x):
        return _swigfaiss.IndexLattice_train(self, n, x)

    def sa_code_size(self):
        return _swigfaiss.IndexLattice_sa_code_size(self)

    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexLattice_sa_encode(self, n, x, bytes)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexLattice_sa_decode(self, n, bytes, x)

    def add(self, n, x):
        r""" not implemented"""
        return _swigfaiss.IndexLattice_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexLattice_search(self, n, x, k, distances, labels, params)

    def reset(self):
        return _swigfaiss.IndexLattice_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IndexLattice

# Register IndexLattice in _swigfaiss:
_swigfaiss.IndexLattice_swigregister(IndexLattice)
# SWIG proxy for faiss::IVFPQSearchParameters: per-query search-time
# parameters for IndexIVFPQ.
class IVFPQSearchParameters(SearchParametersIVF):
    # NOTE: the docstring below is the license header of the C++ source file,
    # which SWIG picked up as the class comment; it is not a class description.
    r"""
    Copyright (c) Facebook, Inc. and its affiliates.
    This source code is licensed under the MIT license found in the
    LICENSE file in the root directory of this source tree.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    scan_table_threshold = property(_swigfaiss.IVFPQSearchParameters_scan_table_threshold_get, _swigfaiss.IVFPQSearchParameters_scan_table_threshold_set, doc=r""" use table computation or on-the-fly?""")
    polysemous_ht = property(_swigfaiss.IVFPQSearchParameters_polysemous_ht_get, _swigfaiss.IVFPQSearchParameters_polysemous_ht_set, doc=r""" Hamming thresh for polysemous filtering""")

    def __init__(self):
        _swigfaiss.IVFPQSearchParameters_swiginit(self, _swigfaiss.new_IVFPQSearchParameters())
    __swig_destroy__ = _swigfaiss.delete_IVFPQSearchParameters

# Register IVFPQSearchParameters in _swigfaiss:
_swigfaiss.IVFPQSearchParameters_swigregister(IVFPQSearchParameters)
# SWIG proxy for faiss::IndexIVFPQ; all methods delegate to the native module.
class IndexIVFPQ(IndexIVF):
    r"""
    Inverted file with Product Quantizer encoding. Each residual
    vector is encoded as a product quantizer code.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    by_residual = property(_swigfaiss.IndexIVFPQ_by_residual_get, _swigfaiss.IndexIVFPQ_by_residual_set, doc=r""" Encode residual or plain vector?""")
    pq = property(_swigfaiss.IndexIVFPQ_pq_get, _swigfaiss.IndexIVFPQ_pq_set, doc=r""" produces the codes""")
    do_polysemous_training = property(_swigfaiss.IndexIVFPQ_do_polysemous_training_get, _swigfaiss.IndexIVFPQ_do_polysemous_training_set, doc=r""" reorder PQ centroids after training?""")
    polysemous_training = property(_swigfaiss.IndexIVFPQ_polysemous_training_get, _swigfaiss.IndexIVFPQ_polysemous_training_set, doc=r""" if NULL, use default""")
    scan_table_threshold = property(_swigfaiss.IndexIVFPQ_scan_table_threshold_get, _swigfaiss.IndexIVFPQ_scan_table_threshold_set, doc=r""" use table computation or on-the-fly?""")
    polysemous_ht = property(_swigfaiss.IndexIVFPQ_polysemous_ht_get, _swigfaiss.IndexIVFPQ_polysemous_ht_set, doc=r""" Hamming thresh for polysemous filtering""")
    use_precomputed_table = property(_swigfaiss.IndexIVFPQ_use_precomputed_table_get, _swigfaiss.IndexIVFPQ_use_precomputed_table_set, doc=r"""
    Precompute table that speed up query preprocessing at some
    memory cost (used only for by_residual with L2 metric)
    """)
    precomputed_table = property(_swigfaiss.IndexIVFPQ_precomputed_table_get, _swigfaiss.IndexIVFPQ_precomputed_table_set, doc=r"""
    if use_precompute_table
    size nlist * pq.M * pq.ksub
    """)

    def encode_vectors(self, n, x, list_nos, codes, include_listnos=False):
        return _swigfaiss.IndexIVFPQ_encode_vectors(self, n, x, list_nos, codes, include_listnos)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexIVFPQ_sa_decode(self, n, bytes, x)

    def add_core(self, n, x, xids, precomputed_idx):
        return _swigfaiss.IndexIVFPQ_add_core(self, n, x, xids, precomputed_idx)

    def add_core_o(self, n, x, xids, residuals_2, precomputed_idx=None):
        r"""
        same as add_core, also:
        - output 2nd level residuals if residuals_2 != NULL
        - accepts precomputed_idx = nullptr
        """
        return _swigfaiss.IndexIVFPQ_add_core_o(self, n, x, xids, residuals_2, precomputed_idx)

    def train_residual(self, n, x):
        r""" trains the product quantizer"""
        return _swigfaiss.IndexIVFPQ_train_residual(self, n, x)

    def train_residual_o(self, n, x, residuals_2):
        r""" same as train_residual, also output 2nd level residuals"""
        return _swigfaiss.IndexIVFPQ_train_residual_o(self, n, x, residuals_2)

    def reconstruct_from_offset(self, list_no, offset, recons):
        return _swigfaiss.IndexIVFPQ_reconstruct_from_offset(self, list_no, offset, recons)

    def find_duplicates(self, ids, lims):
        r"""
        Find exact duplicates in the dataset.
        the duplicates are returned in pre-allocated arrays (see the
        max sizes).
        :type lims: int
        :param lims: limits between groups of duplicates
                     (max size ntotal / 2 + 1)
        :type ids: int
        :param ids: ids[lims[i]] : ids[lims[i+1]-1] is a group of
                    duplicates (max size ntotal)
        :rtype: int
        :return: n number of groups found
        """
        return _swigfaiss.IndexIVFPQ_find_duplicates(self, ids, lims)

    def encode(self, key, x, code):
        return _swigfaiss.IndexIVFPQ_encode(self, key, x, code)

    def encode_multiple(self, n, keys, x, codes, compute_keys=False):
        r"""
        Encode multiple vectors
        :type n: int
        :param n: nb vectors to encode
        :type keys: int
        :param keys: posting list ids for those vectors (size n)
        :type x: float
        :param x: vectors (size n * d)
        :type codes: uint8_t
        :param codes: output codes (size n * code_size)
        :type compute_keys: boolean, optional
        :param compute_keys: if false, assume keys are precomputed,
                             otherwise compute them
        """
        return _swigfaiss.IndexIVFPQ_encode_multiple(self, n, keys, x, codes, compute_keys)

    def decode_multiple(self, n, keys, xcodes, x):
        r""" inverse of encode_multiple"""
        return _swigfaiss.IndexIVFPQ_decode_multiple(self, n, keys, xcodes, x)

    def get_InvertedListScanner(self, store_pairs, sel):
        return _swigfaiss.IndexIVFPQ_get_InvertedListScanner(self, store_pairs, sel)

    def precompute_table(self):
        r""" build precomputed table"""
        return _swigfaiss.IndexIVFPQ_precompute_table(self)

    def __init__(self, *args):
        _swigfaiss.IndexIVFPQ_swiginit(self, _swigfaiss.new_IndexIVFPQ(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFPQ

# Register IndexIVFPQ in _swigfaiss:
_swigfaiss.IndexIVFPQ_swigregister(IndexIVFPQ)
# Module-level SWIG wrapper for the free C++ function of the same name.
def initialize_IVFPQ_precomputed_table(use_precomputed_table, quantizer, pq, precomputed_table, verbose):
    r"""
    Pre-compute distance tables for IVFPQ with by-residual and METRIC_L2
    :type use_precomputed_table: int
    :param use_precomputed_table: (I/O)
        =-1: force disable
        =0: decide heuristically (default: use tables only if they are
            < precomputed_tables_max_bytes), set use_precomputed_table on
            output
        =1: tables that work for all quantizers (size 256 * nlist * M)
        =2: specific version for MultiIndexQuantizer (much more compact)
    :type precomputed_table: faiss::AlignedTable< float,32 >
    :param precomputed_table: precomputed table to initialize
    """
    return _swigfaiss.initialize_IVFPQ_precomputed_table(use_precomputed_table, quantizer, pq, precomputed_table, verbose)
# SWIG proxy for the global faiss::IndexIVFPQStats counters.
class IndexIVFPQStats(object):
    r"""
    statistics are robust to internal threading, but not if
    IndexIVFPQ::search_preassigned is called by multiple threads
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nrefine = property(_swigfaiss.IndexIVFPQStats_nrefine_get, _swigfaiss.IndexIVFPQStats_nrefine_set, doc=r""" nb of refines (IVFPQR)""")
    n_hamming_pass = property(_swigfaiss.IndexIVFPQStats_n_hamming_pass_get, _swigfaiss.IndexIVFPQStats_n_hamming_pass_set, doc=r""" nb of passed Hamming distance tests (for polysemous)""")
    search_cycles = property(_swigfaiss.IndexIVFPQStats_search_cycles_get, _swigfaiss.IndexIVFPQStats_search_cycles_set)
    refine_cycles = property(_swigfaiss.IndexIVFPQStats_refine_cycles_get, _swigfaiss.IndexIVFPQStats_refine_cycles_set, doc=r""" only for IVFPQR""")

    def __init__(self):
        _swigfaiss.IndexIVFPQStats_swiginit(self, _swigfaiss.new_IndexIVFPQStats())

    def reset(self):
        return _swigfaiss.IndexIVFPQStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFPQStats

# Register IndexIVFPQStats in _swigfaiss:
_swigfaiss.IndexIVFPQStats_swigregister(IndexIVFPQStats)
# SWIG proxy for faiss::IndexIVFPQR.
class IndexIVFPQR(IndexIVFPQ):
    r""" Index with an additional level of PQ refinement"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    refine_pq = property(_swigfaiss.IndexIVFPQR_refine_pq_get, _swigfaiss.IndexIVFPQR_refine_pq_set, doc=r""" 3rd level quantizer""")
    refine_codes = property(_swigfaiss.IndexIVFPQR_refine_codes_get, _swigfaiss.IndexIVFPQR_refine_codes_set, doc=r""" corresponding codes""")
    k_factor = property(_swigfaiss.IndexIVFPQR_k_factor_get, _swigfaiss.IndexIVFPQR_k_factor_set, doc=r""" factor between k requested in search and the k requested from the IVFPQ""")

    def reset(self):
        return _swigfaiss.IndexIVFPQR_reset(self)

    def remove_ids(self, sel):
        return _swigfaiss.IndexIVFPQR_remove_ids(self, sel)

    def train_residual(self, n, x):
        r""" trains the two product quantizers"""
        return _swigfaiss.IndexIVFPQR_train_residual(self, n, x)

    def add_with_ids(self, n, x, xids):
        return _swigfaiss.IndexIVFPQR_add_with_ids(self, n, x, xids)

    def add_core(self, n, x, xids, precomputed_idx):
        r""" same as add_with_ids, but optionally use the precomputed list ids"""
        return _swigfaiss.IndexIVFPQR_add_core(self, n, x, xids, precomputed_idx)

    def reconstruct_from_offset(self, list_no, offset, recons):
        return _swigfaiss.IndexIVFPQR_reconstruct_from_offset(self, list_no, offset, recons)

    def merge_from(self, otherIndex, add_id):
        return _swigfaiss.IndexIVFPQR_merge_from(self, otherIndex, add_id)

    def search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params=None, stats=None):
        return _swigfaiss.IndexIVFPQR_search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params, stats)

    def __init__(self, *args):
        _swigfaiss.IndexIVFPQR_swiginit(self, _swigfaiss.new_IndexIVFPQR(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFPQR

# Register IndexIVFPQR in _swigfaiss:
_swigfaiss.IndexIVFPQR_swigregister(IndexIVFPQR)
# SWIG proxy for faiss::Index2Layer.
class Index2Layer(IndexFlatCodes):
    r"""
    Same as an IndexIVFPQ without the inverted lists: codes are stored
    sequentially
    The class is mainly intended to store encoded vectors that can be
    accessed randomly, the search function is not implemented.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    q1 = property(_swigfaiss.Index2Layer_q1_get, _swigfaiss.Index2Layer_q1_set, doc=r""" first level quantizer""")
    pq = property(_swigfaiss.Index2Layer_pq_get, _swigfaiss.Index2Layer_pq_set, doc=r""" second level quantizer is always a PQ""")
    code_size_1 = property(_swigfaiss.Index2Layer_code_size_1_get, _swigfaiss.Index2Layer_code_size_1_set, doc=r""" size of the code for the first level (ceil(log8(q1.nlist)))""")
    code_size_2 = property(_swigfaiss.Index2Layer_code_size_2_get, _swigfaiss.Index2Layer_code_size_2_set, doc=r""" size of the code for the second level""")

    def __init__(self, *args):
        _swigfaiss.Index2Layer_swiginit(self, _swigfaiss.new_Index2Layer(*args))
    __swig_destroy__ = _swigfaiss.delete_Index2Layer

    def train(self, n, x):
        return _swigfaiss.Index2Layer_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r""" not implemented"""
        return _swigfaiss.Index2Layer_search(self, n, x, k, distances, labels, params)

    def get_distance_computer(self):
        return _swigfaiss.Index2Layer_get_distance_computer(self)

    def transfer_to_IVFPQ(self, other):
        r""" transfer the flat codes to an IVFPQ index"""
        return _swigfaiss.Index2Layer_transfer_to_IVFPQ(self, other)

    def sa_encode(self, n, x, bytes):
        return _swigfaiss.Index2Layer_sa_encode(self, n, x, bytes)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.Index2Layer_sa_decode(self, n, bytes, x)

# Register Index2Layer in _swigfaiss:
_swigfaiss.Index2Layer_swigregister(Index2Layer)
# SWIG proxy for the abstract C++ base class faiss::IndexFastScan.
class IndexFastScan(Index):
    r"""
    Fast scan version of IndexPQ and IndexAQ. Works for 4-bit PQ and AQ for now.
    The codes are not stored sequentially but grouped in blocks of size bbs.
    This makes it possible to compute distances quickly with SIMD instructions.
    The trailing codes (padding codes that are added to complete the last code)
    are garbage.
    Implementations:
    12: blocked loop with internal loop on Q with qbs
    13: same with reservoir accumulator to store results
    14: no qbs with heap accumulator
    15: no qbs with reservoir accumulator
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract in C++; SWIG blocks direct construction of the proxy.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    implem = property(_swigfaiss.IndexFastScan_implem_get, _swigfaiss.IndexFastScan_implem_set)
    skip = property(_swigfaiss.IndexFastScan_skip_get, _swigfaiss.IndexFastScan_skip_set)
    bbs = property(_swigfaiss.IndexFastScan_bbs_get, _swigfaiss.IndexFastScan_bbs_set)
    qbs = property(_swigfaiss.IndexFastScan_qbs_get, _swigfaiss.IndexFastScan_qbs_set)
    M = property(_swigfaiss.IndexFastScan_M_get, _swigfaiss.IndexFastScan_M_set)
    nbits = property(_swigfaiss.IndexFastScan_nbits_get, _swigfaiss.IndexFastScan_nbits_set)
    ksub = property(_swigfaiss.IndexFastScan_ksub_get, _swigfaiss.IndexFastScan_ksub_set)
    code_size = property(_swigfaiss.IndexFastScan_code_size_get, _swigfaiss.IndexFastScan_code_size_set)
    ntotal2 = property(_swigfaiss.IndexFastScan_ntotal2_get, _swigfaiss.IndexFastScan_ntotal2_set)
    M2 = property(_swigfaiss.IndexFastScan_M2_get, _swigfaiss.IndexFastScan_M2_set)
    codes = property(_swigfaiss.IndexFastScan_codes_get, _swigfaiss.IndexFastScan_codes_set)
    orig_codes = property(_swigfaiss.IndexFastScan_orig_codes_get, _swigfaiss.IndexFastScan_orig_codes_set)

    def init_fastscan(self, d, M, nbits, metric, bbs):
        return _swigfaiss.IndexFastScan_init_fastscan(self, d, M, nbits, metric, bbs)

    def reset(self):
        return _swigfaiss.IndexFastScan_reset(self)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexFastScan_search(self, n, x, k, distances, labels, params)

    def add(self, n, x):
        return _swigfaiss.IndexFastScan_add(self, n, x)

    def compute_codes(self, codes, n, x):
        return _swigfaiss.IndexFastScan_compute_codes(self, codes, n, x)

    def compute_float_LUT(self, lut, n, x):
        return _swigfaiss.IndexFastScan_compute_float_LUT(self, lut, n, x)

    def compute_quantized_LUT(self, n, x, lut, normalizers):
        return _swigfaiss.IndexFastScan_compute_quantized_LUT(self, n, x, lut, normalizers)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexFastScan_reconstruct(self, key, recons)

    def remove_ids(self, sel):
        return _swigfaiss.IndexFastScan_remove_ids(self, sel)

    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexFastScan_merge_from(self, otherIndex, add_id)

    def check_compatible_for_merge(self, otherIndex):
        return _swigfaiss.IndexFastScan_check_compatible_for_merge(self, otherIndex)
    __swig_destroy__ = _swigfaiss.delete_IndexFastScan

# Register IndexFastScan in _swigfaiss:
_swigfaiss.IndexFastScan_swigregister(IndexFastScan)
# SWIG proxy for the global faiss::FastScanStats timing counters
# (t0..t3 are cycle/timing accumulators maintained by the C++ code).
class FastScanStats(object):
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    t0 = property(_swigfaiss.FastScanStats_t0_get, _swigfaiss.FastScanStats_t0_set)
    t1 = property(_swigfaiss.FastScanStats_t1_get, _swigfaiss.FastScanStats_t1_set)
    t2 = property(_swigfaiss.FastScanStats_t2_get, _swigfaiss.FastScanStats_t2_set)
    t3 = property(_swigfaiss.FastScanStats_t3_get, _swigfaiss.FastScanStats_t3_set)

    def __init__(self):
        _swigfaiss.FastScanStats_swiginit(self, _swigfaiss.new_FastScanStats())

    def reset(self):
        return _swigfaiss.FastScanStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_FastScanStats

# Register FastScanStats in _swigfaiss:
_swigfaiss.FastScanStats_swigregister(FastScanStats)
# SWIG proxy for faiss::IndexAdditiveQuantizerFastScan.
class IndexAdditiveQuantizerFastScan(IndexFastScan):
    r"""
    Fast scan version of IndexAQ. Works for 4-bit AQ for now.
    The codes are not stored sequentially but grouped in blocks of size bbs.
    This makes it possible to compute distances quickly with SIMD instructions.
    Implementations:
    12: blocked loop with internal loop on Q with qbs
    13: same with reservoir accumulator to store results
    14: no qbs with heap accumulator
    15: no qbs with reservoir accumulator
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    aq = property(_swigfaiss.IndexAdditiveQuantizerFastScan_aq_get, _swigfaiss.IndexAdditiveQuantizerFastScan_aq_set)
    rescale_norm = property(_swigfaiss.IndexAdditiveQuantizerFastScan_rescale_norm_get, _swigfaiss.IndexAdditiveQuantizerFastScan_rescale_norm_set)
    norm_scale = property(_swigfaiss.IndexAdditiveQuantizerFastScan_norm_scale_get, _swigfaiss.IndexAdditiveQuantizerFastScan_norm_scale_set)
    max_train_points = property(_swigfaiss.IndexAdditiveQuantizerFastScan_max_train_points_get, _swigfaiss.IndexAdditiveQuantizerFastScan_max_train_points_set)

    def init(self, *args):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_init(self, *args)
    __swig_destroy__ = _swigfaiss.delete_IndexAdditiveQuantizerFastScan

    def __init__(self, *args):
        r"""
        *Overload 1:*
        build from an existing IndexAQ
        |
        *Overload 2:*
        build from an existing IndexAQ
        """
        _swigfaiss.IndexAdditiveQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexAdditiveQuantizerFastScan(*args))

    def train(self, n, x):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_train(self, n, x)

    def estimate_norm_scale(self, n, x):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_estimate_norm_scale(self, n, x)

    def compute_codes(self, codes, n, x):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_compute_codes(self, codes, n, x)

    def compute_float_LUT(self, lut, n, x):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_compute_float_LUT(self, lut, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexAdditiveQuantizerFastScan_search(self, n, x, k, distances, labels, params)

    def sa_decode(self, n, bytes, x):
        r"""
        Decode a set of vectors.
        NOTE: The codes in the IndexAdditiveQuantizerFastScan object are non-
        contiguous. But this method requires a contiguous representation.
        :type n: int
        :param n: number of vectors
        :type bytes: uint8_t
        :param bytes: input encoded vectors, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.IndexAdditiveQuantizerFastScan_sa_decode(self, n, bytes, x)

# Register IndexAdditiveQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexAdditiveQuantizerFastScan_swigregister(IndexAdditiveQuantizerFastScan)
# SWIG proxy for faiss::IndexResidualQuantizerFastScan.
class IndexResidualQuantizerFastScan(IndexAdditiveQuantizerFastScan):
    r"""
    Index based on a residual quantizer. Stored vectors are
    approximated by residual quantization codes.
    Can also be used as a codec
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    rq = property(_swigfaiss.IndexResidualQuantizerFastScan_rq_get, _swigfaiss.IndexResidualQuantizerFastScan_rq_set, doc=r""" The residual quantizer used to encode the vectors""")

    def __init__(self, *args):
        _swigfaiss.IndexResidualQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexResidualQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexResidualQuantizerFastScan

# Register IndexResidualQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexResidualQuantizerFastScan_swigregister(IndexResidualQuantizerFastScan)
# SWIG proxy for faiss::IndexLocalSearchQuantizerFastScan.
class IndexLocalSearchQuantizerFastScan(IndexAdditiveQuantizerFastScan):
    r"""
    Index based on a local search quantizer. Stored vectors are
    approximated by local search quantization codes.
    Can also be used as a codec
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    lsq = property(_swigfaiss.IndexLocalSearchQuantizerFastScan_lsq_get, _swigfaiss.IndexLocalSearchQuantizerFastScan_lsq_set)

    def __init__(self, *args):
        _swigfaiss.IndexLocalSearchQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexLocalSearchQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexLocalSearchQuantizerFastScan

# Register IndexLocalSearchQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexLocalSearchQuantizerFastScan_swigregister(IndexLocalSearchQuantizerFastScan)
class IndexProductResidualQuantizerFastScan(IndexAdditiveQuantizerFastScan):
    r"""
    Index based on a product residual quantizer. Stored vectors are
    approximated by product residual quantization codes.
    Can also be used as a codec
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Exposes the C++ `prq` member (the product residual quantizer).
    prq = property(_swigfaiss.IndexProductResidualQuantizerFastScan_prq_get, _swigfaiss.IndexProductResidualQuantizerFastScan_prq_set, doc=r""" The product residual quantizer used to encode the vectors""")
    def __init__(self, *args):
        # Forwards to the overloaded C++ constructors; SWIG dispatches on *args.
        _swigfaiss.IndexProductResidualQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexProductResidualQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexProductResidualQuantizerFastScan

# Register IndexProductResidualQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexProductResidualQuantizerFastScan_swigregister(IndexProductResidualQuantizerFastScan)
class IndexProductLocalSearchQuantizerFastScan(IndexAdditiveQuantizerFastScan):
    r"""
    Index based on a product local search quantizer. Stored vectors are
    approximated by product local search quantization codes.
    Can also be used as a codec
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Exposes the C++ `plsq` member (the product local search quantizer).
    plsq = property(_swigfaiss.IndexProductLocalSearchQuantizerFastScan_plsq_get, _swigfaiss.IndexProductLocalSearchQuantizerFastScan_plsq_set, doc=r""" The product local search quantizer used to encode the vectors""")
    def __init__(self, *args):
        # Forwards to the overloaded C++ constructors; SWIG dispatches on *args.
        _swigfaiss.IndexProductLocalSearchQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexProductLocalSearchQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexProductLocalSearchQuantizerFastScan

# Register IndexProductLocalSearchQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexProductLocalSearchQuantizerFastScan_swigregister(IndexProductLocalSearchQuantizerFastScan)
class IndexPQFastScan(IndexFastScan):
    r"""
    Fast scan version of IndexPQ. Works for 4-bit PQ for now.
    The codes are not stored sequentially but grouped in blocks of size bbs.
    This makes it possible to compute distances quickly with SIMD instructions.
    Implementations:
    12: blocked loop with internal loop on Q with qbs
    13: same with reservoir accumulator to store results
    14: no qbs with heap accumulator
    15: no qbs with reservoir accumulator
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # The ProductQuantizer that produces the codes (C++ `pq` member).
    pq = property(_swigfaiss.IndexPQFastScan_pq_get, _swigfaiss.IndexPQFastScan_pq_set)
    def __init__(self, *args):
        r"""
        *Overload 1:*
        build from an existing IndexPQ
        |
        *Overload 2:*
        build from an existing IndexPQ
        """
        # NOTE(review): SWIG emitted the same description for both overloads;
        # consult the C++ header for the actual signatures.
        _swigfaiss.IndexPQFastScan_swiginit(self, _swigfaiss.new_IndexPQFastScan(*args))
    def train(self, n, x):
        # Train the product quantizer on n vectors x (delegates to C++).
        return _swigfaiss.IndexPQFastScan_train(self, n, x)
    def compute_codes(self, codes, n, x):
        # Encode n vectors x into `codes` (delegates to C++).
        return _swigfaiss.IndexPQFastScan_compute_codes(self, codes, n, x)
    def compute_float_LUT(self, lut, n, x):
        # Fill `lut` with the floating-point look-up tables for queries x.
        return _swigfaiss.IndexPQFastScan_compute_float_LUT(self, lut, n, x)
    def sa_decode(self, n, bytes, x):
        r"""
        Decode a set of vectors.
        NOTE: The codes in the IndexPQFastScan object are non-contiguous.
        But this method requires a contiguous representation.
        :type n: int
        :param n: number of vectors
        :type bytes: uint8_t
        :param bytes: input encoded vectors, size n * code_size
        :type x: float
        :param x: output vectors, size n * d
        """
        return _swigfaiss.IndexPQFastScan_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexPQFastScan

# Register IndexPQFastScan in _swigfaiss:
_swigfaiss.IndexPQFastScan_swigregister(IndexPQFastScan)
class IndexIVFFastScan(IndexIVF):
    r"""
    Fast scan version of IVFPQ and IVFAQ. Works for 4-bit PQ/AQ for now.
    The codes in the inverted lists are not stored sequentially but
    grouped in blocks of size bbs. This makes it possible to very quickly
    compute distances with SIMD instructions.
    Implementations (implem):
    0: auto-select implementation (default)
    1: orig's search, re-implemented
    2: orig's search, re-ordered by invlist
    10: optimizer int16 search, collect results in heap, no qbs
    11: idem, collect results in reservoir
    12: optimizer int16 search, collect results in heap, uses qbs
    13: idem, collect results in reservoir
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract base class on the C++ side: no constructor is exposed.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    # Tunable/introspectable fields mirrored from the C++ struct.
    bbs = property(_swigfaiss.IndexIVFFastScan_bbs_get, _swigfaiss.IndexIVFFastScan_bbs_set)
    M = property(_swigfaiss.IndexIVFFastScan_M_get, _swigfaiss.IndexIVFFastScan_M_set)
    nbits = property(_swigfaiss.IndexIVFFastScan_nbits_get, _swigfaiss.IndexIVFFastScan_nbits_set)
    ksub = property(_swigfaiss.IndexIVFFastScan_ksub_get, _swigfaiss.IndexIVFFastScan_ksub_set)
    M2 = property(_swigfaiss.IndexIVFFastScan_M2_get, _swigfaiss.IndexIVFFastScan_M2_set)
    implem = property(_swigfaiss.IndexIVFFastScan_implem_get, _swigfaiss.IndexIVFFastScan_implem_set)
    skip = property(_swigfaiss.IndexIVFFastScan_skip_get, _swigfaiss.IndexIVFFastScan_skip_set)
    by_residual = property(_swigfaiss.IndexIVFFastScan_by_residual_get, _swigfaiss.IndexIVFFastScan_by_residual_set)
    qbs = property(_swigfaiss.IndexIVFFastScan_qbs_get, _swigfaiss.IndexIVFFastScan_qbs_set)
    qbs2 = property(_swigfaiss.IndexIVFFastScan_qbs2_get, _swigfaiss.IndexIVFFastScan_qbs2_set)
    def init_fastscan(self, M, nbits, nlist, metric, bbs):
        # Initialize the fast-scan layout parameters (delegates to C++).
        return _swigfaiss.IndexIVFFastScan_init_fastscan(self, M, nbits, nlist, metric, bbs)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFFastScan
    orig_invlists = property(_swigfaiss.IndexIVFFastScan_orig_invlists_get, _swigfaiss.IndexIVFFastScan_orig_invlists_set, doc=r""" orig's inverted lists (for debugging)""")
    def add_with_ids(self, n, x, xids):
        return _swigfaiss.IndexIVFFastScan_add_with_ids(self, n, x, xids)
    def lookup_table_is_3d(self):
        # True if the LUT has a separate table per probe (see compute_LUT).
        return _swigfaiss.IndexIVFFastScan_lookup_table_is_3d(self)
    def compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases):
        # Compute the floating-point look-up tables for queries x.
        return _swigfaiss.IndexIVFFastScan_compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases)
    def compute_LUT_uint8(self, n, x, coarse_ids, coarse_dis, dis_tables, biases, normalizers):
        # Same as compute_LUT but with the tables quantized to uint8.
        return _swigfaiss.IndexIVFFastScan_compute_LUT_uint8(self, n, x, coarse_ids, coarse_dis, dis_tables, biases, normalizers)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexIVFFastScan_search(self, n, x, k, distances, labels, params)
    def range_search(self, n, x, radius, result, params=None):
        r""" will just fail"""
        return _swigfaiss.IndexIVFFastScan_range_search(self, n, x, radius, result, params)
    def reconstruct_from_offset(self, list_no, offset, recons):
        return _swigfaiss.IndexIVFFastScan_reconstruct_from_offset(self, list_no, offset, recons)
    def reconstruct_orig_invlists(self):
        return _swigfaiss.IndexIVFFastScan_reconstruct_orig_invlists(self)

# Register IndexIVFFastScan in _swigfaiss:
_swigfaiss.IndexIVFFastScan_swigregister(IndexIVFFastScan)
class IVFFastScanStats(object):
    r"""
    Performance counters collected during IndexIVFFastScan searches.
    The `t_*` fields are timers and `times`/`reservoir_times` are timing
    arrays (units are defined on the C++ side -- presumably cycles or
    microseconds; confirm against the faiss headers).
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    times = property(_swigfaiss.IVFFastScanStats_times_get, _swigfaiss.IVFFastScanStats_times_set)
    t_compute_distance_tables = property(_swigfaiss.IVFFastScanStats_t_compute_distance_tables_get, _swigfaiss.IVFFastScanStats_t_compute_distance_tables_set)
    t_round = property(_swigfaiss.IVFFastScanStats_t_round_get, _swigfaiss.IVFFastScanStats_t_round_set)
    t_copy_pack = property(_swigfaiss.IVFFastScanStats_t_copy_pack_get, _swigfaiss.IVFFastScanStats_t_copy_pack_set)
    t_scan = property(_swigfaiss.IVFFastScanStats_t_scan_get, _swigfaiss.IVFFastScanStats_t_scan_set)
    t_to_flat = property(_swigfaiss.IVFFastScanStats_t_to_flat_get, _swigfaiss.IVFFastScanStats_t_to_flat_set)
    reservoir_times = property(_swigfaiss.IVFFastScanStats_reservoir_times_get, _swigfaiss.IVFFastScanStats_reservoir_times_set)
    t_aq_encode = property(_swigfaiss.IVFFastScanStats_t_aq_encode_get, _swigfaiss.IVFFastScanStats_t_aq_encode_set)
    t_aq_norm_encode = property(_swigfaiss.IVFFastScanStats_t_aq_norm_encode_get, _swigfaiss.IVFFastScanStats_t_aq_norm_encode_set)
    def Mcy_at(self, i):
        # Element accessor into the `times` counters (delegates to C++).
        return _swigfaiss.IVFFastScanStats_Mcy_at(self, i)
    def Mcy_reservoir_at(self, i):
        # Element accessor into the `reservoir_times` counters.
        return _swigfaiss.IVFFastScanStats_Mcy_reservoir_at(self, i)
    def __init__(self):
        _swigfaiss.IVFFastScanStats_swiginit(self, _swigfaiss.new_IVFFastScanStats())
    def reset(self):
        # Zero all counters.
        return _swigfaiss.IVFFastScanStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IVFFastScanStats

# Register IVFFastScanStats in _swigfaiss:
_swigfaiss.IVFFastScanStats_swigregister(IVFFastScanStats)
class IndexIVFAdditiveQuantizerFastScan(IndexIVFFastScan):
    r"""
    Fast scan version of IVFAQ. Works for 4-bit AQ for now.
    The codes in the inverted lists are not stored sequentially but
    grouped in blocks of size bbs. This makes it possible to very quickly
    compute distances with SIMD instructions.
    Implementations (implem):
    0: auto-select implementation (default)
    1: orig's search, re-implemented
    2: orig's search, re-ordered by invlist
    10: optimizer int16 search, collect results in heap, no qbs
    11: idem, collect results in reservoir
    12: optimizer int16 search, collect results in heap, uses qbs
    13: idem, collect results in reservoir
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # The additive quantizer and its norm-rescaling parameters (C++ members).
    aq = property(_swigfaiss.IndexIVFAdditiveQuantizerFastScan_aq_get, _swigfaiss.IndexIVFAdditiveQuantizerFastScan_aq_set)
    rescale_norm = property(_swigfaiss.IndexIVFAdditiveQuantizerFastScan_rescale_norm_get, _swigfaiss.IndexIVFAdditiveQuantizerFastScan_rescale_norm_set)
    norm_scale = property(_swigfaiss.IndexIVFAdditiveQuantizerFastScan_norm_scale_get, _swigfaiss.IndexIVFAdditiveQuantizerFastScan_norm_scale_set)
    max_train_points = property(_swigfaiss.IndexIVFAdditiveQuantizerFastScan_max_train_points_get, _swigfaiss.IndexIVFAdditiveQuantizerFastScan_max_train_points_set)
    def init(self, aq, nlist, metric, bbs):
        # Two-step initialization used by the C++ subclass constructors.
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_init(self, aq, nlist, metric, bbs)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFAdditiveQuantizerFastScan
    def __init__(self, *args):
        # Forwards to the overloaded C++ constructors; SWIG dispatches on *args.
        _swigfaiss.IndexIVFAdditiveQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexIVFAdditiveQuantizerFastScan(*args))
    def train_residual(self, n, x):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_train_residual(self, n, x)
    def estimate_norm_scale(self, n, x):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_estimate_norm_scale(self, n, x)
    def encode_vectors(self, n, x, list_nos, codes, include_listno=False):
        r"""
        same as the regular IVFAQ encoder. The codes are not reorganized by
        blocks at that point
        """
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_encode_vectors(self, n, x, list_nos, codes, include_listno)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_search(self, n, x, k, distances, labels, params)
    def lookup_table_is_3d(self):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_lookup_table_is_3d(self)
    def compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexIVFAdditiveQuantizerFastScan_sa_decode(self, n, bytes, x)

# Register IndexIVFAdditiveQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexIVFAdditiveQuantizerFastScan_swigregister(IndexIVFAdditiveQuantizerFastScan)
class IndexIVFLocalSearchQuantizerFastScan(IndexIVFAdditiveQuantizerFastScan):
    r"""
    IndexIVFAdditiveQuantizerFastScan specialized to a LocalSearchQuantizer;
    only adds the typed `lsq` accessor.
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    lsq = property(_swigfaiss.IndexIVFLocalSearchQuantizerFastScan_lsq_get, _swigfaiss.IndexIVFLocalSearchQuantizerFastScan_lsq_set)
    def __init__(self, *args):
        _swigfaiss.IndexIVFLocalSearchQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexIVFLocalSearchQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFLocalSearchQuantizerFastScan

# Register IndexIVFLocalSearchQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexIVFLocalSearchQuantizerFastScan_swigregister(IndexIVFLocalSearchQuantizerFastScan)
class IndexIVFResidualQuantizerFastScan(IndexIVFAdditiveQuantizerFastScan):
    r"""
    IndexIVFAdditiveQuantizerFastScan specialized to a ResidualQuantizer;
    only adds the typed `rq` accessor.
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    rq = property(_swigfaiss.IndexIVFResidualQuantizerFastScan_rq_get, _swigfaiss.IndexIVFResidualQuantizerFastScan_rq_set)
    def __init__(self, *args):
        _swigfaiss.IndexIVFResidualQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexIVFResidualQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFResidualQuantizerFastScan

# Register IndexIVFResidualQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexIVFResidualQuantizerFastScan_swigregister(IndexIVFResidualQuantizerFastScan)
class IndexIVFProductLocalSearchQuantizerFastScan(IndexIVFAdditiveQuantizerFastScan):
    r"""
    IndexIVFAdditiveQuantizerFastScan specialized to a
    ProductLocalSearchQuantizer; only adds the typed `plsq` accessor.
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    plsq = property(_swigfaiss.IndexIVFProductLocalSearchQuantizerFastScan_plsq_get, _swigfaiss.IndexIVFProductLocalSearchQuantizerFastScan_plsq_set)
    def __init__(self, *args):
        _swigfaiss.IndexIVFProductLocalSearchQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexIVFProductLocalSearchQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFProductLocalSearchQuantizerFastScan

# Register IndexIVFProductLocalSearchQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexIVFProductLocalSearchQuantizerFastScan_swigregister(IndexIVFProductLocalSearchQuantizerFastScan)
class IndexIVFProductResidualQuantizerFastScan(IndexIVFAdditiveQuantizerFastScan):
    r"""
    IndexIVFAdditiveQuantizerFastScan specialized to a
    ProductResidualQuantizer; only adds the typed `prq` accessor.
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    prq = property(_swigfaiss.IndexIVFProductResidualQuantizerFastScan_prq_get, _swigfaiss.IndexIVFProductResidualQuantizerFastScan_prq_set)
    def __init__(self, *args):
        _swigfaiss.IndexIVFProductResidualQuantizerFastScan_swiginit(self, _swigfaiss.new_IndexIVFProductResidualQuantizerFastScan(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexIVFProductResidualQuantizerFastScan

# Register IndexIVFProductResidualQuantizerFastScan in _swigfaiss:
_swigfaiss.IndexIVFProductResidualQuantizerFastScan_swigregister(IndexIVFProductResidualQuantizerFastScan)
class IndexIVFPQFastScan(IndexIVFFastScan):
    r"""
    Fast scan version of IVFPQ. Works for 4-bit PQ for now.
    The codes in the inverted lists are not stored sequentially but
    grouped in blocks of size bbs. This makes it possible to very quickly
    compute distances with SIMD instructions.
    Implementations (implem):
    0: auto-select implementation (default)
    1: orig's search, re-implemented
    2: orig's search, re-ordered by invlist
    10: optimizer int16 search, collect results in heap, no qbs
    11: idem, collect results in reservoir
    12: optimizer int16 search, collect results in heap, uses qbs
    13: idem, collect results in reservoir
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    pq = property(_swigfaiss.IndexIVFPQFastScan_pq_get, _swigfaiss.IndexIVFPQFastScan_pq_set, doc=r""" produces the codes""")
    use_precomputed_table = property(_swigfaiss.IndexIVFPQFastScan_use_precomputed_table_get, _swigfaiss.IndexIVFPQFastScan_use_precomputed_table_set, doc=r""" precomputed tables management""")
    precomputed_table = property(_swigfaiss.IndexIVFPQFastScan_precomputed_table_get, _swigfaiss.IndexIVFPQFastScan_precomputed_table_set, doc=r""" if use_precompute_table size (nlist, pq.M, pq.ksub)""")
    def __init__(self, *args):
        # Forwards to the overloaded C++ constructors; SWIG dispatches on *args.
        _swigfaiss.IndexIVFPQFastScan_swiginit(self, _swigfaiss.new_IndexIVFPQFastScan(*args))
    def train_residual(self, n, x):
        return _swigfaiss.IndexIVFPQFastScan_train_residual(self, n, x)
    def precompute_table(self):
        r""" build precomputed table, possibly updating use_precomputed_table"""
        return _swigfaiss.IndexIVFPQFastScan_precompute_table(self)
    def encode_vectors(self, n, x, list_nos, codes, include_listno=False):
        r"""
        same as the regular IVFPQ encoder. The codes are not reorganized by
        blocks at that point
        """
        return _swigfaiss.IndexIVFPQFastScan_encode_vectors(self, n, x, list_nos, codes, include_listno)
    def lookup_table_is_3d(self):
        return _swigfaiss.IndexIVFPQFastScan_lookup_table_is_3d(self)
    def compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases):
        return _swigfaiss.IndexIVFPQFastScan_compute_LUT(self, n, x, coarse_ids, coarse_dis, dis_tables, biases)
    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexIVFPQFastScan_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexIVFPQFastScan

# Register IndexIVFPQFastScan in _swigfaiss:
_swigfaiss.IndexIVFPQFastScan_swigregister(IndexIVFPQFastScan)
def round_uint8_per_column(tab, n, d, a_out=None, b_out=None):
    r"""
    Functions to quantize PQ floating-point Look Up Tables (LUT) to uint8, and
    biases to uint16. The accumulation is supposed to take place in uint16.
    The quantization coefficients are float (a, b) such that
    original_value = quantized_value * a / b
    The hardest part of the quantization is with multiple LUTs that need to be
    added up together. In that case, coefficient a has to be chosen so that
    the sum fits in a uint16 accumulator.

    Quantizes `tab` (n rows, d columns) in place, column by column; the
    (a, b) coefficients are written to a_out/b_out when provided.
    """
    return _swigfaiss.round_uint8_per_column(tab, n, d, a_out, b_out)
def round_uint8_per_column_multi(tab, m, n, d, a_out=None, b_out=None):
    r"""
    Variant of round_uint8_per_column for m stacked tables (see that
    function's docstring for the quantization scheme).
    """
    return _swigfaiss.round_uint8_per_column_multi(tab, m, n, d, a_out, b_out)
def quantize_LUT_and_bias(nprobe, M, ksub, lut_is_3d, LUT, bias, LUTq, M2, biasq, a_out=None, b_out=None):
    r"""
    LUT quantization to uint8 and bias to uint16.
    (nprobe, M, ksub, lut_is_3d) determine the size of the LUT
    LUT input:
    - 2D size (M, ksub): single matrix per probe (lut_is_3d=false)
    - 3D size (nprobe, M, ksub): separate LUT per probe (lut_is_3d=true)
    bias input:
    - nullptr: bias is 0
    - size (nprobe): one bias per probe
    Output:
    - LUTq uint8 version of the LUT (M size is rounded up to M2)
    - biasq (or nullptr): uint16 version of the bias
    - a, b: scalars to approximate the true distance
    """
    return _swigfaiss.quantize_LUT_and_bias(nprobe, M, ksub, lut_is_3d, LUT, bias, LUTq, M2, biasq, a_out, b_out)
def aq_quantize_LUT_and_bias(nprobe, M, ksub, LUT, bias, M_norm, norm_scale, LUTq, M2, biasq, a_out, b_out):
    r"""
    Additive-quantizer variant of quantize_LUT_and_bias: the last M_norm
    sub-quantizers encode norms and are rescaled by norm_scale (see
    quantize_LUT_and_bias for the remaining parameters).
    """
    return _swigfaiss.aq_quantize_LUT_and_bias(nprobe, M, ksub, LUT, bias, M_norm, norm_scale, LUTq, M2, biasq, a_out, b_out)
def aq_estimate_norm_scale(M, ksub, M_norm, LUT):
    r"""
    Estimate the norm rescaling factor for an additive-quantizer LUT of
    size (M, ksub) whose last M_norm rows encode norms.
    """
    return _swigfaiss.aq_estimate_norm_scale(M, ksub, M_norm, LUT)
class IndexBinary(object):
    r"""
    Abstract structure for a binary index.
    Supports adding vertices and searching them.
    All queries are symmetric because there is no distinction between codes and
    vectors.
    """
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    def __init__(self, *args, **kwargs):
        # Abstract base class on the C++ side: no constructor is exposed.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    d = property(_swigfaiss.IndexBinary_d_get, _swigfaiss.IndexBinary_d_set, doc=r""" vector dimension""")
    code_size = property(_swigfaiss.IndexBinary_code_size_get, _swigfaiss.IndexBinary_code_size_set, doc=r""" number of bytes per vector ( = d / 8 )""")
    ntotal = property(_swigfaiss.IndexBinary_ntotal_get, _swigfaiss.IndexBinary_ntotal_set, doc=r""" total nb of indexed vectors""")
    verbose = property(_swigfaiss.IndexBinary_verbose_get, _swigfaiss.IndexBinary_verbose_set, doc=r""" verbosity level""")
    is_trained = property(_swigfaiss.IndexBinary_is_trained_get, _swigfaiss.IndexBinary_is_trained_set, doc=r"""
    set if the Index does not require training, or if training is done
    already
    """)
    metric_type = property(_swigfaiss.IndexBinary_metric_type_get, _swigfaiss.IndexBinary_metric_type_set, doc=r""" type of metric this index uses for search""")
    __swig_destroy__ = _swigfaiss.delete_IndexBinary
    def train(self, n, x):
        r"""
        Perform training on a representative set of vectors.
        :type n: int
        :param n: nb of training vectors
        :type x: uint8_t
        :param x: training vectors, size n * d / 8
        """
        return _swigfaiss.IndexBinary_train(self, n, x)
    def add(self, n, x):
        r"""
        Add n vectors of dimension d to the index.
        Vectors are implicitly assigned labels ntotal .. ntotal + n - 1
        :type x: uint8_t
        :param x: input matrix, size n * d / 8
        """
        return _swigfaiss.IndexBinary_add(self, n, x)
    def add_with_ids(self, n, x, xids):
        r"""
        Same as add, but stores xids instead of sequential ids.
        The default implementation fails with an assertion, as it is
        not supported by all indexes.
        :type xids: int
        :param xids: if non-null, ids to store for the vectors (size n)
        """
        return _swigfaiss.IndexBinary_add_with_ids(self, n, x, xids)
    def search(self, n, x, k, distances, labels, params=None):
        r"""
        Query n vectors of dimension d to the index.
        return at most k vectors. If there are not enough results for a
        query, the result array is padded with -1s.
        :type x: uint8_t
        :param x: input vectors to search, size n * d / 8
        :type labels: int
        :param labels: output labels of the NNs, size n*k
        :type distances: int
        :param distances: output pairwise distances, size n*k
        """
        return _swigfaiss.IndexBinary_search(self, n, x, k, distances, labels, params)
    def range_search(self, n, x, radius, result, params=None):
        r"""
        Query n vectors of dimension d to the index.
        return all vectors with distance < radius. Note that many indexes
        do not implement the range_search (only the k-NN search is
        mandatory). The distances are converted to float to reuse the
        RangeSearchResult structure, but they are integer. By convention,
        only distances < radius (strict comparison) are returned,
        ie. radius = 0 does not return any result and 1 returns only
        exact same vectors.
        :type x: uint8_t
        :param x: input vectors to search, size n * d / 8
        :type radius: int
        :param radius: search radius
        :type result: :py:class:`RangeSearchResult`
        :param result: result table
        """
        return _swigfaiss.IndexBinary_range_search(self, n, x, radius, result, params)
    def assign(self, n, x, labels, k=1):
        r"""
        Return the indexes of the k vectors closest to the query x.
        This function is identical to search but only returns labels of
        neighbors.
        :type x: uint8_t
        :param x: input vectors to search, size n * d / 8
        :type labels: int
        :param labels: output labels of the NNs, size n*k
        """
        return _swigfaiss.IndexBinary_assign(self, n, x, labels, k)
    def reset(self):
        r""" Removes all elements from the database."""
        return _swigfaiss.IndexBinary_reset(self)
    def remove_ids(self, sel):
        r""" Removes IDs from the index. Not supported by all indexes."""
        return _swigfaiss.IndexBinary_remove_ids(self, sel)
    def reconstruct(self, key, recons):
        r"""
        Reconstruct a stored vector.
        This function may not be defined for some indexes.
        :type key: int
        :param key: id of the vector to reconstruct
        :type recons: uint8_t
        :param recons: reconstructed vector (size d / 8)
        """
        return _swigfaiss.IndexBinary_reconstruct(self, key, recons)
    def reconstruct_n(self, i0, ni, recons):
        r"""
        Reconstruct vectors i0 to i0 + ni - 1.
        This function may not be defined for some indexes.
        :type recons: uint8_t
        :param recons: reconstructed vectors (size ni * d / 8)
        """
        return _swigfaiss.IndexBinary_reconstruct_n(self, i0, ni, recons)
    def search_and_reconstruct(self, n, x, k, distances, labels, recons, params=None):
        r"""
        Similar to search, but also reconstructs the stored vectors (or an
        approximation in the case of lossy coding) for the search results.
        If there are not enough results for a query, the resulting array
        is padded with -1s.
        :type recons: uint8_t
        :param recons: reconstructed vectors size (n, k, d)
        """
        return _swigfaiss.IndexBinary_search_and_reconstruct(self, n, x, k, distances, labels, recons, params)
    def display(self):
        r""" Display the actual class name and some more info."""
        return _swigfaiss.IndexBinary_display(self)
    def merge_from(self, otherIndex, add_id=0):
        r"""
        moves the entries from another dataset to self.
        On output, other is empty.
        add_id is added to all moved ids
        (for sequential ids, this would be this->ntotal)
        """
        return _swigfaiss.IndexBinary_merge_from(self, otherIndex, add_id)
    def check_compatible_for_merge(self, otherIndex):
        r"""
        check that the two indexes are compatible (ie, they are
        trained in the same way and have the same
        parameters). Otherwise throw.
        """
        return _swigfaiss.IndexBinary_check_compatible_for_merge(self, otherIndex)

# Register IndexBinary in _swigfaiss:
_swigfaiss.IndexBinary_swigregister(IndexBinary)
class IndexBinaryFlat(IndexBinary):
    r""" Index that stores the full vectors and performs exhaustive search."""
    # SWIG proxy-ownership flag for the wrapped C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    xb = property(_swigfaiss.IndexBinaryFlat_xb_get, _swigfaiss.IndexBinaryFlat_xb_set, doc=r""" database vectors, size ntotal * d / 8""")
    use_heap = property(_swigfaiss.IndexBinaryFlat_use_heap_get, _swigfaiss.IndexBinaryFlat_use_heap_set, doc=r"""
    Select between using a heap or counting to select the k smallest values
    when scanning inverted lists.
    """)
    query_batch_size = property(_swigfaiss.IndexBinaryFlat_query_batch_size_get, _swigfaiss.IndexBinaryFlat_query_batch_size_set)
    def add(self, n, x):
        return _swigfaiss.IndexBinaryFlat_add(self, n, x)
    def reset(self):
        return _swigfaiss.IndexBinaryFlat_reset(self)
    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryFlat_search(self, n, x, k, distances, labels, params)
    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexBinaryFlat_range_search(self, n, x, radius, result, params)
    def reconstruct(self, key, recons):
        return _swigfaiss.IndexBinaryFlat_reconstruct(self, key, recons)
    def remove_ids(self, sel):
        r"""
        Remove some ids. Note that because of the indexing structure,
        the semantics of this operation are different from the usual ones:
        the new ids are shifted.
        """
        return _swigfaiss.IndexBinaryFlat_remove_ids(self, sel)
    def __init__(self, *args):
        # Forwards to the overloaded C++ constructors; SWIG dispatches on *args.
        _swigfaiss.IndexBinaryFlat_swiginit(self, _swigfaiss.new_IndexBinaryFlat(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryFlat

# Register IndexBinaryFlat in _swigfaiss:
_swigfaiss.IndexBinaryFlat_swigregister(IndexBinaryFlat)
class IndexBinaryIVF(IndexBinary):
r"""
Index based on a inverted file (IVF)
In the inverted file, the quantizer (an IndexBinary instance) provides a
quantization index for each vector to be added. The quantization
index maps to a list (aka inverted list or posting list), where the
id of the vector is stored.
Otherwise the object is similar to the IndexIVF
"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
invlists = property(_swigfaiss.IndexBinaryIVF_invlists_get, _swigfaiss.IndexBinaryIVF_invlists_set, doc=r""" Access to the actual data""")
own_invlists = property(_swigfaiss.IndexBinaryIVF_own_invlists_get, _swigfaiss.IndexBinaryIVF_own_invlists_set)
nprobe = property(_swigfaiss.IndexBinaryIVF_nprobe_get, _swigfaiss.IndexBinaryIVF_nprobe_set, doc=r""" number of probes at query time""")
max_codes = property(_swigfaiss.IndexBinaryIVF_max_codes_get, _swigfaiss.IndexBinaryIVF_max_codes_set, doc=r""" max nb of codes to visit to do a query""")
use_heap = property(_swigfaiss.IndexBinaryIVF_use_heap_get, _swigfaiss.IndexBinaryIVF_use_heap_set, doc=r"""
Select between using a heap or counting to select the k smallest values
when scanning inverted lists.
""")
direct_map = property(_swigfaiss.IndexBinaryIVF_direct_map_get, _swigfaiss.IndexBinaryIVF_direct_map_set, doc=r""" map for direct access to the elements. Enables reconstruct().""")
quantizer = property(_swigfaiss.IndexBinaryIVF_quantizer_get, _swigfaiss.IndexBinaryIVF_quantizer_set, doc=r""" quantizer that maps vectors to inverted lists""")
nlist = property(_swigfaiss.IndexBinaryIVF_nlist_get, _swigfaiss.IndexBinaryIVF_nlist_set, doc=r""" number of possible key values""")
own_fields = property(_swigfaiss.IndexBinaryIVF_own_fields_get, _swigfaiss.IndexBinaryIVF_own_fields_set, doc=r""" whether object owns the quantizer""")
cp = property(_swigfaiss.IndexBinaryIVF_cp_get, _swigfaiss.IndexBinaryIVF_cp_set, doc=r""" to override default clustering params""")
clustering_index = property(_swigfaiss.IndexBinaryIVF_clustering_index_get, _swigfaiss.IndexBinaryIVF_clustering_index_set, doc=r""" to override index used during clustering""")
def __init__(self, *args):
_swigfaiss.IndexBinaryIVF_swiginit(self, _swigfaiss.new_IndexBinaryIVF(*args))
__swig_destroy__ = _swigfaiss.delete_IndexBinaryIVF
def reset(self):
return _swigfaiss.IndexBinaryIVF_reset(self)
def train(self, n, x):
r""" Trains the quantizer"""
return _swigfaiss.IndexBinaryIVF_train(self, n, x)
def add(self, n, x):
return _swigfaiss.IndexBinaryIVF_add(self, n, x)
def add_with_ids(self, n, x, xids):
return _swigfaiss.IndexBinaryIVF_add_with_ids(self, n, x, xids)
def add_core(self, n, x, xids, precomputed_idx):
r"""
Implementation of vector addition where the vector assignments are
predefined.
:type precomputed_idx: int
:param precomputed_idx: quantization indices for the input vectors
(size n)
"""
return _swigfaiss.IndexBinaryIVF_add_core(self, n, x, xids, precomputed_idx)
def search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params=None):
r"""
Search a set of vectors, that are pre-quantized by the IVF
quantizer. Fill in the corresponding heaps with the query
results. search() calls this.
:type n: int
:param n: nb of vectors to query
:type x: uint8_t
:param x: query vectors, size nx * d
:type assign: int
:param assign: coarse quantization indices, size nx * nprobe
:type centroid_dis: int
:param centroid_dis:
distances to coarse centroids, size nx * nprobe
:param distance:
output distances, size n * k
:type labels: int
:param labels: output labels, size n * k
:type store_pairs: boolean
:param store_pairs: store inv list index + inv list offset
instead in upper/lower 32 bit of result,
instead of ids (used for reranking).
:type params: :py:class:`IVFSearchParameters`, optional
:param params: used to override the object's search parameters
"""
return _swigfaiss.IndexBinaryIVF_search_preassigned(self, n, x, k, assign, centroid_dis, distances, labels, store_pairs, params)
def get_InvertedListScanner(self, store_pairs=False):
return _swigfaiss.IndexBinaryIVF_get_InvertedListScanner(self, store_pairs)
def search(self, n, x, k, distances, labels, params=None):
r""" assign the vectors, then call search_preassign"""
return _swigfaiss.IndexBinaryIVF_search(self, n, x, k, distances, labels, params)
def range_search(self, n, x, radius, result, params=None):
return _swigfaiss.IndexBinaryIVF_range_search(self, n, x, radius, result, params)
def range_search_preassigned(self, n, x, radius, assign, centroid_dis, result):
return _swigfaiss.IndexBinaryIVF_range_search_preassigned(self, n, x, radius, assign, centroid_dis, result)
def reconstruct(self, key, recons):
return _swigfaiss.IndexBinaryIVF_reconstruct(self, key, recons)
def reconstruct_n(self, i0, ni, recons):
r"""
Reconstruct a subset of the indexed vectors.
Overrides default implementation to bypass reconstruct() which requires
direct_map to be maintained.
:type i0: int
:param i0: first vector to reconstruct
:type ni: int
:param ni: nb of vectors to reconstruct
:type recons: uint8_t
:param recons: output array of reconstructed vectors, size ni * d / 8
"""
return _swigfaiss.IndexBinaryIVF_reconstruct_n(self, i0, ni, recons)
def search_and_reconstruct(self, n, x, k, distances, labels, recons, params=None):
r"""
Similar to search, but also reconstructs the stored vectors (or an
approximation in the case of lossy coding) for the search results.
Overrides default implementation to avoid having to maintain direct_map
and instead fetch the code offsets through the `store_pairs` flag in
search_preassigned().
:type recons: uint8_t
:param recons: reconstructed vectors size (n, k, d / 8)
"""
return _swigfaiss.IndexBinaryIVF_search_and_reconstruct(self, n, x, k, distances, labels, recons, params)
def reconstruct_from_offset(self, list_no, offset, recons):
r"""
Reconstruct a vector given the location in terms of (inv list index +
inv list offset) instead of the id.
Useful for reconstructing when the direct_map is not maintained and
the inv list offset is computed by search_preassigned() with
`store_pairs` set.
"""
return _swigfaiss.IndexBinaryIVF_reconstruct_from_offset(self, list_no, offset, recons)
def remove_ids(self, sel):
r""" Dataset manipulation functions"""
return _swigfaiss.IndexBinaryIVF_remove_ids(self, sel)
def merge_from(self, other, add_id):
return _swigfaiss.IndexBinaryIVF_merge_from(self, other, add_id)
def check_compatible_for_merge(self, otherIndex):
return _swigfaiss.IndexBinaryIVF_check_compatible_for_merge(self, otherIndex)
def get_list_size(self, list_no):
return _swigfaiss.IndexBinaryIVF_get_list_size(self, list_no)
def make_direct_map(self, new_maintain_direct_map=True):
r"""
intialize a direct map
:type new_maintain_direct_map: boolean, optional
:param new_maintain_direct_map: if true, create a direct map,
else clear it
"""
return _swigfaiss.IndexBinaryIVF_make_direct_map(self, new_maintain_direct_map)
def set_direct_map_type(self, type):
return _swigfaiss.IndexBinaryIVF_set_direct_map_type(self, type)
def replace_invlists(self, il, own=False):
return _swigfaiss.IndexBinaryIVF_replace_invlists(self, il, own)
# Register IndexBinaryIVF in _swigfaiss so the extension module wraps C++
# objects of this type in the IndexBinaryIVF Python proxy class:
_swigfaiss.IndexBinaryIVF_swigregister(IndexBinaryIVF)
class BinaryInvertedListScanner(object):
    r"""
    SWIG proxy for the abstract C++ scanner over one binary inverted list.
    All methods delegate to the compiled ``_swigfaiss`` extension module.
    Typical usage (per the method contracts below): set_query(), then
    set_list(), then scan codes from that list.
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # The wrapped C++ class is abstract; instances are only obtained
        # from factory methods such as IndexBinaryIVF.get_InvertedListScanner().
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

    def set_query(self, query_vector):
        r""" from now on we handle this query."""
        return _swigfaiss.BinaryInvertedListScanner_set_query(self, query_vector)

    def set_list(self, list_no, coarse_dis):
        r""" following codes come from this inverted list"""
        return _swigfaiss.BinaryInvertedListScanner_set_list(self, list_no, coarse_dis)

    def distance_to_code(self, code):
        r""" compute a single query-to-code distance"""
        return _swigfaiss.BinaryInvertedListScanner_distance_to_code(self, code)

    def scan_codes(self, n, codes, ids, distances, labels, k):
        r"""
        compute the distances to codes. (distances, labels) should be
        organized as a min- or max-heap
        :type n: int
        :param n: number of codes to scan
        :type codes: uint8_t
        :param codes: codes to scan (n * code_size)
        :type ids: int
        :param ids: corresponding ids (ignored if store_pairs)
        :type distances: int
        :param distances: heap distances (size k)
        :type labels: int
        :param labels: heap labels (size k)
        :type k: int
        :param k: heap size
        """
        return _swigfaiss.BinaryInvertedListScanner_scan_codes(self, n, codes, ids, distances, labels, k)

    def scan_codes_range(self, n, codes, ids, radius, result):
        return _swigfaiss.BinaryInvertedListScanner_scan_codes_range(self, n, codes, ids, radius, result)
    __swig_destroy__ = _swigfaiss.delete_BinaryInvertedListScanner

# Register BinaryInvertedListScanner in _swigfaiss:
_swigfaiss.BinaryInvertedListScanner_swigregister(BinaryInvertedListScanner)
class IndexBinaryFromFloat(IndexBinary):
    r"""
    IndexBinary backed by a float Index.
    Supports adding vertices and searching them.
    All queries are symmetric because there is no distinction between codes and
    vectors.
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Backing float index; see own_fields for ownership of the pointer.
    index = property(_swigfaiss.IndexBinaryFromFloat_index_get, _swigfaiss.IndexBinaryFromFloat_index_set)
    own_fields = property(_swigfaiss.IndexBinaryFromFloat_own_fields_get, _swigfaiss.IndexBinaryFromFloat_own_fields_set, doc=r""" Whether object owns the index pointer.""")

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryFromFloat_swiginit(self, _swigfaiss.new_IndexBinaryFromFloat(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryFromFloat

    def add(self, n, x):
        return _swigfaiss.IndexBinaryFromFloat_add(self, n, x)

    def reset(self):
        return _swigfaiss.IndexBinaryFromFloat_reset(self)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryFromFloat_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexBinaryFromFloat_train(self, n, x)

# Register IndexBinaryFromFloat in _swigfaiss:
_swigfaiss.IndexBinaryFromFloat_swigregister(IndexBinaryFromFloat)
class IndexBinaryHNSW(IndexBinary):
    r"""
    The HNSW index is a normal random-access index with a HNSW
    link structure built on top
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # HNSW link structure built on top of the storage index.
    hnsw = property(_swigfaiss.IndexBinaryHNSW_hnsw_get, _swigfaiss.IndexBinaryHNSW_hnsw_set)
    own_fields = property(_swigfaiss.IndexBinaryHNSW_own_fields_get, _swigfaiss.IndexBinaryHNSW_own_fields_set)
    storage = property(_swigfaiss.IndexBinaryHNSW_storage_get, _swigfaiss.IndexBinaryHNSW_storage_set)

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryHNSW_swiginit(self, _swigfaiss.new_IndexBinaryHNSW(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryHNSW

    def get_distance_computer(self):
        return _swigfaiss.IndexBinaryHNSW_get_distance_computer(self)

    def add(self, n, x):
        return _swigfaiss.IndexBinaryHNSW_add(self, n, x)

    def train(self, n, x):
        r""" Trains the storage if needed"""
        return _swigfaiss.IndexBinaryHNSW_train(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r""" entry point for search"""
        return _swigfaiss.IndexBinaryHNSW_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexBinaryHNSW_reconstruct(self, key, recons)

    def reset(self):
        return _swigfaiss.IndexBinaryHNSW_reset(self)

# Register IndexBinaryHNSW in _swigfaiss:
_swigfaiss.IndexBinaryHNSW_swigregister(IndexBinaryHNSW)
class IndexBinaryHash(IndexBinary):
    r""" just uses the b first bits as a hash value"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    invlists = property(_swigfaiss.IndexBinaryHash_invlists_get, _swigfaiss.IndexBinaryHash_invlists_set)
    # b: number of leading bits used as the hash key.
    b = property(_swigfaiss.IndexBinaryHash_b_get, _swigfaiss.IndexBinaryHash_b_set)
    nflip = property(_swigfaiss.IndexBinaryHash_nflip_get, _swigfaiss.IndexBinaryHash_nflip_set)

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryHash_swiginit(self, _swigfaiss.new_IndexBinaryHash(*args))

    def reset(self):
        return _swigfaiss.IndexBinaryHash_reset(self)

    def add(self, n, x):
        return _swigfaiss.IndexBinaryHash_add(self, n, x)

    def add_with_ids(self, n, x, xids):
        return _swigfaiss.IndexBinaryHash_add_with_ids(self, n, x, xids)

    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexBinaryHash_range_search(self, n, x, radius, result, params)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryHash_search(self, n, x, k, distances, labels, params)

    def display(self):
        return _swigfaiss.IndexBinaryHash_display(self)

    def hashtable_size(self):
        return _swigfaiss.IndexBinaryHash_hashtable_size(self)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryHash

# Register IndexBinaryHash in _swigfaiss:
_swigfaiss.IndexBinaryHash_swigregister(IndexBinaryHash)
class IndexBinaryHashStats(object):
    r"""
    SWIG proxy for the C++ statistics counters of IndexBinaryHash searches.
    All fields map directly onto the corresponding C++ struct members.
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nq = property(_swigfaiss.IndexBinaryHashStats_nq_get, _swigfaiss.IndexBinaryHashStats_nq_set)
    n0 = property(_swigfaiss.IndexBinaryHashStats_n0_get, _swigfaiss.IndexBinaryHashStats_n0_set)
    nlist = property(_swigfaiss.IndexBinaryHashStats_nlist_get, _swigfaiss.IndexBinaryHashStats_nlist_set)
    ndis = property(_swigfaiss.IndexBinaryHashStats_ndis_get, _swigfaiss.IndexBinaryHashStats_ndis_set)

    def __init__(self):
        _swigfaiss.IndexBinaryHashStats_swiginit(self, _swigfaiss.new_IndexBinaryHashStats())

    def reset(self):
        return _swigfaiss.IndexBinaryHashStats_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryHashStats

# Register IndexBinaryHashStats in _swigfaiss:
_swigfaiss.IndexBinaryHashStats_swigregister(IndexBinaryHashStats)
class IndexBinaryMultiHash(IndexBinary):
    r""" just uses the b first bits as a hash value"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    storage = property(_swigfaiss.IndexBinaryMultiHash_storage_get, _swigfaiss.IndexBinaryMultiHash_storage_set)
    own_fields = property(_swigfaiss.IndexBinaryMultiHash_own_fields_get, _swigfaiss.IndexBinaryMultiHash_own_fields_set)
    maps = property(_swigfaiss.IndexBinaryMultiHash_maps_get, _swigfaiss.IndexBinaryMultiHash_maps_set)
    nhash = property(_swigfaiss.IndexBinaryMultiHash_nhash_get, _swigfaiss.IndexBinaryMultiHash_nhash_set, doc=r""" nb of hash maps""")
    b = property(_swigfaiss.IndexBinaryMultiHash_b_get, _swigfaiss.IndexBinaryMultiHash_b_set, doc=r""" nb bits per hash map""")
    nflip = property(_swigfaiss.IndexBinaryMultiHash_nflip_get, _swigfaiss.IndexBinaryMultiHash_nflip_set, doc=r""" nb bit flips to use at search time""")

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryMultiHash_swiginit(self, _swigfaiss.new_IndexBinaryMultiHash(*args))
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryMultiHash

    def reset(self):
        return _swigfaiss.IndexBinaryMultiHash_reset(self)

    def add(self, n, x):
        return _swigfaiss.IndexBinaryMultiHash_add(self, n, x)

    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexBinaryMultiHash_range_search(self, n, x, radius, result, params)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryMultiHash_search(self, n, x, k, distances, labels, params)

    def hashtable_size(self):
        return _swigfaiss.IndexBinaryMultiHash_hashtable_size(self)

# Register IndexBinaryMultiHash in _swigfaiss:
_swigfaiss.IndexBinaryMultiHash_swigregister(IndexBinaryMultiHash)
class ThreadedIndexBase(Index):
    r"""
    A holder of indices in a collection of threads
    The interface to this class itself is not thread safe
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # The wrapped C++ class is abstract; use a concrete subclass such as
        # IndexShards or IndexReplicas instead.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _swigfaiss.delete_ThreadedIndexBase

    def addIndex(self, index):
        r"""
        override an index that is managed by ourselves.
        WARNING: once an index is added, it becomes unsafe to touch it from any
        other thread than that on which is managing it, until we are shut
        down. Use runOnIndex to perform work on it instead.
        """
        return _swigfaiss.ThreadedIndexBase_addIndex(self, index)

    def removeIndex(self, index):
        r"""
        Remove an index that is managed by ourselves.
        This will flush all pending work on that index, and then shut
        down its managing thread, and will remove the index.
        """
        return _swigfaiss.ThreadedIndexBase_removeIndex(self, index)

    def runOnIndex(self, *args):
        return _swigfaiss.ThreadedIndexBase_runOnIndex(self, *args)

    def reset(self):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.ThreadedIndexBase_reset(self)

    def count(self):
        r""" Returns the number of sub-indices"""
        return _swigfaiss.ThreadedIndexBase_count(self)

    def at(self, *args):
        r"""
        *Overload 1:*
        Returns the i-th sub-index
        |
        *Overload 2:*
        Returns the i-th sub-index (const version)
        """
        return _swigfaiss.ThreadedIndexBase_at(self, *args)
    own_fields = property(_swigfaiss.ThreadedIndexBase_own_fields_get, _swigfaiss.ThreadedIndexBase_own_fields_set, doc=r""" Whether or not we are responsible for deleting our contained indices""")

# Register ThreadedIndexBase in _swigfaiss:
_swigfaiss.ThreadedIndexBase_swigregister(ThreadedIndexBase)
class ThreadedIndexBaseBinary(IndexBinary):
    r"""
    A holder of indices in a collection of threads
    The interface to this class itself is not thread safe
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # The wrapped C++ class is abstract; use a concrete subclass such as
        # IndexBinaryShards or IndexBinaryReplicas instead.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _swigfaiss.delete_ThreadedIndexBaseBinary

    def addIndex(self, index):
        r"""
        override an index that is managed by ourselves.
        WARNING: once an index is added, it becomes unsafe to touch it from any
        other thread than that on which is managing it, until we are shut
        down. Use runOnIndex to perform work on it instead.
        """
        return _swigfaiss.ThreadedIndexBaseBinary_addIndex(self, index)

    def removeIndex(self, index):
        r"""
        Remove an index that is managed by ourselves.
        This will flush all pending work on that index, and then shut
        down its managing thread, and will remove the index.
        """
        return _swigfaiss.ThreadedIndexBaseBinary_removeIndex(self, index)

    def runOnIndex(self, *args):
        return _swigfaiss.ThreadedIndexBaseBinary_runOnIndex(self, *args)

    def reset(self):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.ThreadedIndexBaseBinary_reset(self)

    def count(self):
        r""" Returns the number of sub-indices"""
        return _swigfaiss.ThreadedIndexBaseBinary_count(self)

    def at(self, *args):
        r"""
        *Overload 1:*
        Returns the i-th sub-index
        |
        *Overload 2:*
        Returns the i-th sub-index (const version)
        """
        return _swigfaiss.ThreadedIndexBaseBinary_at(self, *args)
    own_fields = property(_swigfaiss.ThreadedIndexBaseBinary_own_fields_get, _swigfaiss.ThreadedIndexBaseBinary_own_fields_set, doc=r""" Whether or not we are responsible for deleting our contained indices""")

# Register ThreadedIndexBaseBinary in _swigfaiss:
_swigfaiss.ThreadedIndexBaseBinary_swigregister(ThreadedIndexBaseBinary)
class IndexShards(ThreadedIndexBase):
    r"""Index that concatenates the results from several sub-indexes"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        *Overload 1:*
        The dimension that all sub-indices must share will be the dimension of
        the first sub-index added
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :type successive_ids: boolean, optional
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 2:*
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :type successive_ids: boolean, optional
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 3:*
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 4:*
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 5:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 6:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 7:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        """
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexShards_swiginit(self, _swigfaiss.new_IndexShards(*args))

    def add_shard(self, index):
        r""" Alias for addIndex()"""
        return _swigfaiss.IndexShards_add_shard(self, index)

    def remove_shard(self, index):
        r""" Alias for removeIndex()"""
        return _swigfaiss.IndexShards_remove_shard(self, index)

    def add(self, n, x):
        r""" supported only for sub-indices that implement add_with_ids"""
        return _swigfaiss.IndexShards_add(self, n, x)

    def add_with_ids(self, n, x, xids):
        r"""
        Cases (successive_ids, xids):
        - true, non-NULL       ERROR: it makes no sense to pass in ids and
          request them to be shifted
        - true, NULL           OK, but should be called only once (calls add()
          on sub-indexes).
        - false, non-NULL      OK: will call add_with_ids with passed in xids
          distributed evenly over shards
        - false, NULL          OK: will call add_with_ids on each sub-index,
          starting at ntotal
        """
        return _swigfaiss.IndexShards_add_with_ids(self, n, x, xids)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexShards_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexShards_train(self, n, x)
    successive_ids = property(_swigfaiss.IndexShards_successive_ids_get, _swigfaiss.IndexShards_successive_ids_set)

    def syncWithSubIndexes(self):
        r"""
        Synchronize the top-level index (IndexShards) with data in the
        sub-indices
        """
        return _swigfaiss.IndexShards_syncWithSubIndexes(self)
    __swig_destroy__ = _swigfaiss.delete_IndexShards

# Register IndexShards in _swigfaiss:
_swigfaiss.IndexShards_swigregister(IndexShards)
class IndexBinaryShards(ThreadedIndexBaseBinary):
    r"""Index that concatenates the results from several sub-indexes"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        *Overload 1:*
        The dimension that all sub-indices must share will be the dimension of
        the first sub-index added
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :type successive_ids: boolean, optional
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 2:*
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :type successive_ids: boolean, optional
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 3:*
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 4:*
        :param threaded: do we use one thread per sub_index or do
                 queries sequentially?
        :param successive_ids: should we shift the returned ids by
                 the size of each sub-index or return them
                 as they are?
        |
        *Overload 5:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 6:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 7:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        """
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryShards_swiginit(self, _swigfaiss.new_IndexBinaryShards(*args))

    def add_shard(self, index):
        r""" Alias for addIndex()"""
        return _swigfaiss.IndexBinaryShards_add_shard(self, index)

    def remove_shard(self, index):
        r""" Alias for removeIndex()"""
        return _swigfaiss.IndexBinaryShards_remove_shard(self, index)

    def add(self, n, x):
        r""" supported only for sub-indices that implement add_with_ids"""
        return _swigfaiss.IndexBinaryShards_add(self, n, x)

    def add_with_ids(self, n, x, xids):
        r"""
        Cases (successive_ids, xids):
        - true, non-NULL       ERROR: it makes no sense to pass in ids and
          request them to be shifted
        - true, NULL           OK, but should be called only once (calls add()
          on sub-indexes).
        - false, non-NULL      OK: will call add_with_ids with passed in xids
          distributed evenly over shards
        - false, NULL          OK: will call add_with_ids on each sub-index,
          starting at ntotal
        """
        return _swigfaiss.IndexBinaryShards_add_with_ids(self, n, x, xids)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryShards_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexBinaryShards_train(self, n, x)
    successive_ids = property(_swigfaiss.IndexBinaryShards_successive_ids_get, _swigfaiss.IndexBinaryShards_successive_ids_set)

    def syncWithSubIndexes(self):
        r"""
        Synchronize the top-level index (IndexShards) with data in the
        sub-indices
        """
        return _swigfaiss.IndexBinaryShards_syncWithSubIndexes(self)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryShards

# Register IndexBinaryShards in _swigfaiss:
_swigfaiss.IndexBinaryShards_swigregister(IndexBinaryShards)
class IndexReplicas(ThreadedIndexBase):
    r"""
    Takes individual faiss::Index instances, and splits queries for
    sending to each Index instance, and joins the results together
    when done.
    Each index is managed by a separate CPU thread.
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        *Overload 1:*
        The dimension that all sub-indices must share will be the dimension of
        the first sub-index added
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub-index or do queries
                 sequentially?
        |
        *Overload 2:*
        :type d: int
        :param d: the dimension that all sub-indices must share
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub index or do queries
                 sequentially?
        |
        *Overload 3:*
        :type d: int
        :param d: the dimension that all sub-indices must share
        :param threaded: do we use one thread per sub index or do queries
                 sequentially?
        |
        *Overload 4:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 5:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        """
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexReplicas_swiginit(self, _swigfaiss.new_IndexReplicas(*args))

    def add_replica(self, index):
        r""" Alias for addIndex()"""
        return _swigfaiss.IndexReplicas_add_replica(self, index)

    def remove_replica(self, index):
        r""" Alias for removeIndex()"""
        return _swigfaiss.IndexReplicas_remove_replica(self, index)

    def train(self, n, x):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.IndexReplicas_train(self, n, x)

    def add(self, n, x):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.IndexReplicas_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r"""
        faiss::Index API
        Query is partitioned into a slice for each sub-index
        split by ceil(n / #indices) for our sub-indices
        """
        return _swigfaiss.IndexReplicas_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, arg2, v):
        r""" reconstructs from the first index"""
        return _swigfaiss.IndexReplicas_reconstruct(self, arg2, v)

    def syncWithSubIndexes(self):
        r"""
        Synchronize the top-level index (IndexShards) with data in the
        sub-indices
        """
        return _swigfaiss.IndexReplicas_syncWithSubIndexes(self)
    __swig_destroy__ = _swigfaiss.delete_IndexReplicas

# Register IndexReplicas in _swigfaiss:
_swigfaiss.IndexReplicas_swigregister(IndexReplicas)
class IndexBinaryReplicas(ThreadedIndexBaseBinary):
    r"""
    Takes individual faiss::Index instances, and splits queries for
    sending to each Index instance, and joins the results together
    when done.
    Each index is managed by a separate CPU thread.
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        r"""
        *Overload 1:*
        The dimension that all sub-indices must share will be the dimension of
        the first sub-index added
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub-index or do queries
                 sequentially?
        |
        *Overload 2:*
        :type d: int
        :param d: the dimension that all sub-indices must share
        :type threaded: boolean, optional
        :param threaded: do we use one thread per sub index or do queries
                 sequentially?
        |
        *Overload 3:*
        :type d: int
        :param d: the dimension that all sub-indices must share
        :param threaded: do we use one thread per sub index or do queries
                 sequentially?
        |
        *Overload 4:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        |
        *Overload 5:*
        int version due to the implicit bool conversion ambiguity of int as
        dimension
        """
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryReplicas_swiginit(self, _swigfaiss.new_IndexBinaryReplicas(*args))

    def add_replica(self, index):
        r""" Alias for addIndex()"""
        return _swigfaiss.IndexBinaryReplicas_add_replica(self, index)

    def remove_replica(self, index):
        r""" Alias for removeIndex()"""
        return _swigfaiss.IndexBinaryReplicas_remove_replica(self, index)

    def train(self, n, x):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.IndexBinaryReplicas_train(self, n, x)

    def add(self, n, x):
        r"""
        faiss::Index API
        All indices receive the same call
        """
        return _swigfaiss.IndexBinaryReplicas_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        r"""
        faiss::Index API
        Query is partitioned into a slice for each sub-index
        split by ceil(n / #indices) for our sub-indices
        """
        return _swigfaiss.IndexBinaryReplicas_search(self, n, x, k, distances, labels, params)

    def reconstruct(self, arg2, v):
        r""" reconstructs from the first index"""
        return _swigfaiss.IndexBinaryReplicas_reconstruct(self, arg2, v)

    def syncWithSubIndexes(self):
        r"""
        Synchronize the top-level index (IndexShards) with data in the
        sub-indices
        """
        return _swigfaiss.IndexBinaryReplicas_syncWithSubIndexes(self)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryReplicas

# Register IndexBinaryReplicas in _swigfaiss:
_swigfaiss.IndexBinaryReplicas_swigregister(IndexBinaryReplicas)
class IndexSplitVectors(Index):
    r"""
    splits input vectors in segments and assigns each segment to a sub-index
    used to distribute a MultiIndexQuantizer
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    own_fields = property(_swigfaiss.IndexSplitVectors_own_fields_get, _swigfaiss.IndexSplitVectors_own_fields_set)
    threaded = property(_swigfaiss.IndexSplitVectors_threaded_get, _swigfaiss.IndexSplitVectors_threaded_set)
    sub_indexes = property(_swigfaiss.IndexSplitVectors_sub_indexes_get, _swigfaiss.IndexSplitVectors_sub_indexes_set)
    sum_d = property(_swigfaiss.IndexSplitVectors_sum_d_get, _swigfaiss.IndexSplitVectors_sum_d_set)

    def __init__(self, d, threaded=False):
        r""" sum of dimensions seen so far"""
        _swigfaiss.IndexSplitVectors_swiginit(self, _swigfaiss.new_IndexSplitVectors(d, threaded))

    def add_sub_index(self, arg2):
        return _swigfaiss.IndexSplitVectors_add_sub_index(self, arg2)

    def sync_with_sub_indexes(self):
        return _swigfaiss.IndexSplitVectors_sync_with_sub_indexes(self)

    def add(self, n, x):
        return _swigfaiss.IndexSplitVectors_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexSplitVectors_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexSplitVectors_train(self, n, x)

    def reset(self):
        return _swigfaiss.IndexSplitVectors_reset(self)
    __swig_destroy__ = _swigfaiss.delete_IndexSplitVectors

# Register IndexSplitVectors in _swigfaiss:
_swigfaiss.IndexSplitVectors_swigregister(IndexSplitVectors)
class IndexIDMap(Index):
    r""" Index that translates search results to ids"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    index = property(_swigfaiss.IndexIDMap_index_get, _swigfaiss.IndexIDMap_index_set)
    own_fields = property(_swigfaiss.IndexIDMap_own_fields_get, _swigfaiss.IndexIDMap_own_fields_set, doc=r"""the sub-index""")
    id_map = property(_swigfaiss.IndexIDMap_id_map_get, _swigfaiss.IndexIDMap_id_map_set, doc=r"""whether pointers are deleted in destructo""")

    def add_with_ids(self, n, x, xids):
        r"""
        :type xids: int
        :param xids: if non-null, ids to store for the vectors (size n)
        """
        return _swigfaiss.IndexIDMap_add_with_ids(self, n, x, xids)

    def add(self, n, x):
        r""" this will fail. Use add_with_ids"""
        return _swigfaiss.IndexIDMap_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexIDMap_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexIDMap_train(self, n, x)

    def reset(self):
        return _swigfaiss.IndexIDMap_reset(self)

    def remove_ids(self, sel):
        r""" remove ids adapted to IndexFlat"""
        return _swigfaiss.IndexIDMap_remove_ids(self, sel)

    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexIDMap_range_search(self, n, x, radius, result, params)

    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexIDMap_merge_from(self, otherIndex, add_id)

    def check_compatible_for_merge(self, otherIndex):
        return _swigfaiss.IndexIDMap_check_compatible_for_merge(self, otherIndex)
    __swig_destroy__ = _swigfaiss.delete_IndexIDMap

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexIDMap_swiginit(self, _swigfaiss.new_IndexIDMap(*args))

# Register IndexIDMap in _swigfaiss:
_swigfaiss.IndexIDMap_swigregister(IndexIDMap)
class IndexBinaryIDMap(IndexBinary):
    r""" Index that translates search results to ids"""

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    index = property(_swigfaiss.IndexBinaryIDMap_index_get, _swigfaiss.IndexBinaryIDMap_index_set)
    own_fields = property(_swigfaiss.IndexBinaryIDMap_own_fields_get, _swigfaiss.IndexBinaryIDMap_own_fields_set, doc=r"""the sub-index""")
    id_map = property(_swigfaiss.IndexBinaryIDMap_id_map_get, _swigfaiss.IndexBinaryIDMap_id_map_set, doc=r"""whether pointers are deleted in destructo""")

    def add_with_ids(self, n, x, xids):
        r"""
        :type xids: int
        :param xids: if non-null, ids to store for the vectors (size n)
        """
        return _swigfaiss.IndexBinaryIDMap_add_with_ids(self, n, x, xids)

    def add(self, n, x):
        r""" this will fail. Use add_with_ids"""
        return _swigfaiss.IndexBinaryIDMap_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexBinaryIDMap_search(self, n, x, k, distances, labels, params)

    def train(self, n, x):
        return _swigfaiss.IndexBinaryIDMap_train(self, n, x)

    def reset(self):
        return _swigfaiss.IndexBinaryIDMap_reset(self)

    def remove_ids(self, sel):
        r""" remove ids adapted to IndexFlat"""
        return _swigfaiss.IndexBinaryIDMap_remove_ids(self, sel)

    def range_search(self, n, x, radius, result, params=None):
        return _swigfaiss.IndexBinaryIDMap_range_search(self, n, x, radius, result, params)

    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexBinaryIDMap_merge_from(self, otherIndex, add_id)

    def check_compatible_for_merge(self, otherIndex):
        return _swigfaiss.IndexBinaryIDMap_check_compatible_for_merge(self, otherIndex)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryIDMap

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryIDMap_swiginit(self, _swigfaiss.new_IndexBinaryIDMap(*args))

# Register IndexBinaryIDMap in _swigfaiss:
_swigfaiss.IndexBinaryIDMap_swigregister(IndexBinaryIDMap)
class IndexIDMap2(IndexIDMap):
    r"""
    same as IndexIDMap but also provides an efficient reconstruction
    implementation via a 2-way index
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Reverse mapping: external id -> internal sequence number.
    rev_map = property(_swigfaiss.IndexIDMap2_rev_map_get, _swigfaiss.IndexIDMap2_rev_map_set)

    def construct_rev_map(self):
        r""" make the rev_map from scratch"""
        return _swigfaiss.IndexIDMap2_construct_rev_map(self)

    def add_with_ids(self, n, x, xids):
        return _swigfaiss.IndexIDMap2_add_with_ids(self, n, x, xids)

    def remove_ids(self, sel):
        return _swigfaiss.IndexIDMap2_remove_ids(self, sel)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexIDMap2_reconstruct(self, key, recons)

    def check_consistency(self):
        r""" check that the rev_map and the id_map are in sync"""
        return _swigfaiss.IndexIDMap2_check_consistency(self)

    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexIDMap2_merge_from(self, otherIndex, add_id)
    __swig_destroy__ = _swigfaiss.delete_IndexIDMap2

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexIDMap2_swiginit(self, _swigfaiss.new_IndexIDMap2(*args))

# Register IndexIDMap2 in _swigfaiss:
_swigfaiss.IndexIDMap2_swigregister(IndexIDMap2)
class IndexBinaryIDMap2(IndexBinaryIDMap):
    r"""
    same as IndexIDMap but also provides an efficient reconstruction
    implementation via a 2-way index
    """

    # Whether this proxy owns (and will delete) the underlying C++ object.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # Reverse mapping: external id -> internal sequence number.
    rev_map = property(_swigfaiss.IndexBinaryIDMap2_rev_map_get, _swigfaiss.IndexBinaryIDMap2_rev_map_set)

    def construct_rev_map(self):
        r""" make the rev_map from scratch"""
        return _swigfaiss.IndexBinaryIDMap2_construct_rev_map(self)

    def add_with_ids(self, n, x, xids):
        return _swigfaiss.IndexBinaryIDMap2_add_with_ids(self, n, x, xids)

    def remove_ids(self, sel):
        return _swigfaiss.IndexBinaryIDMap2_remove_ids(self, sel)

    def reconstruct(self, key, recons):
        return _swigfaiss.IndexBinaryIDMap2_reconstruct(self, key, recons)

    def check_consistency(self):
        r""" check that the rev_map and the id_map are in sync"""
        return _swigfaiss.IndexBinaryIDMap2_check_consistency(self)

    def merge_from(self, otherIndex, add_id=0):
        return _swigfaiss.IndexBinaryIDMap2_merge_from(self, otherIndex, add_id)
    __swig_destroy__ = _swigfaiss.delete_IndexBinaryIDMap2

    def __init__(self, *args):
        # SWIG dispatches *args to the matching C++ constructor overload.
        _swigfaiss.IndexBinaryIDMap2_swiginit(self, _swigfaiss.new_IndexBinaryIDMap2(*args))

# Register IndexBinaryIDMap2 in _swigfaiss:
_swigfaiss.IndexBinaryIDMap2_swigregister(IndexBinaryIDMap2)
class IndexRowwiseMinMaxBase(Index):
    r"""
    Index wrapper that performs rowwise normalization to [0,1], preserving
    the coefficients. This is a vector codec index only.

    Basically, this index performs a rowwise scaling to [0,1] of every row
    in an input dataset before calling subindex::train() and
    subindex::sa_encode(). sa_encode() call stores the scaling coefficients
    (scaler and minv) in the very beginning of every output code. The format:
      [scaler][minv][subindex::sa_encode() output]
    The de-scaling in sa_decode() is done using:
      output_rescaled = scaler * output + minv

    An additional ::train_inplace() function is provided in order to do
    an inplace scaling before calling subindex::train() and, thus, avoiding
    the cloning of the input dataset, but modifying the input dataset because
    of the scaling and the scaling back. It is up to user to call
    this function instead of ::train()

    Derived classes provide different data types for scaling coefficients.
    Currently, versions with fp16 and fp32 scaling coefficients are available.

    * fp16 version adds 4 extra bytes per encoded vector
    * fp32 version adds 8 extra bytes per encoded vector

    Provides base functions for rowwise normalizing indices.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract base: instantiation is blocked; use the FP16/FP32 subclasses.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    index = property(_swigfaiss.IndexRowwiseMinMaxBase_index_get, _swigfaiss.IndexRowwiseMinMaxBase_index_set, doc=r""" sub-index""")
    own_fields = property(_swigfaiss.IndexRowwiseMinMaxBase_own_fields_get, _swigfaiss.IndexRowwiseMinMaxBase_own_fields_set, doc=r""" whether the subindex needs to be freed in the destructor.""")
    __swig_destroy__ = _swigfaiss.delete_IndexRowwiseMinMaxBase

    def add(self, n, x):
        return _swigfaiss.IndexRowwiseMinMaxBase_add(self, n, x)

    def search(self, n, x, k, distances, labels, params=None):
        return _swigfaiss.IndexRowwiseMinMaxBase_search(self, n, x, k, distances, labels, params)

    def reset(self):
        return _swigfaiss.IndexRowwiseMinMaxBase_reset(self)

    def train_inplace(self, n, x):
        # NOTE: modifies x in place (scales and scales back) to avoid a copy;
        # see the class docstring.
        return _swigfaiss.IndexRowwiseMinMaxBase_train_inplace(self, n, x)

# Register IndexRowwiseMinMaxBase in _swigfaiss:
_swigfaiss.IndexRowwiseMinMaxBase_swigregister(IndexRowwiseMinMaxBase)
class IndexRowwiseMinMaxFP16(IndexRowwiseMinMaxBase):
    r""" Stores scaling coefficients as fp16 values (4 extra bytes per encoded vector)."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexRowwiseMinMaxFP16_swiginit(self, _swigfaiss.new_IndexRowwiseMinMaxFP16(*args))

    def train(self, n, x):
        return _swigfaiss.IndexRowwiseMinMaxFP16_train(self, n, x)

    def train_inplace(self, n, x):
        # In-place variant: modifies x during training (see base class docstring).
        return _swigfaiss.IndexRowwiseMinMaxFP16_train_inplace(self, n, x)

    def sa_code_size(self):
        return _swigfaiss.IndexRowwiseMinMaxFP16_sa_code_size(self)

    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexRowwiseMinMaxFP16_sa_encode(self, n, x, bytes)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexRowwiseMinMaxFP16_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexRowwiseMinMaxFP16

# Register IndexRowwiseMinMaxFP16 in _swigfaiss:
_swigfaiss.IndexRowwiseMinMaxFP16_swigregister(IndexRowwiseMinMaxFP16)
class IndexRowwiseMinMax(IndexRowwiseMinMaxBase):
    r""" Stores scaling coefficients as fp32 values (8 extra bytes per encoded vector)."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def __init__(self, *args):
        _swigfaiss.IndexRowwiseMinMax_swiginit(self, _swigfaiss.new_IndexRowwiseMinMax(*args))

    def train(self, n, x):
        return _swigfaiss.IndexRowwiseMinMax_train(self, n, x)

    def train_inplace(self, n, x):
        # In-place variant: modifies x during training (see base class docstring).
        return _swigfaiss.IndexRowwiseMinMax_train_inplace(self, n, x)

    def sa_code_size(self):
        return _swigfaiss.IndexRowwiseMinMax_sa_code_size(self)

    def sa_encode(self, n, x, bytes):
        return _swigfaiss.IndexRowwiseMinMax_sa_encode(self, n, x, bytes)

    def sa_decode(self, n, bytes, x):
        return _swigfaiss.IndexRowwiseMinMax_sa_decode(self, n, bytes, x)
    __swig_destroy__ = _swigfaiss.delete_IndexRowwiseMinMax

# Register IndexRowwiseMinMax in _swigfaiss:
_swigfaiss.IndexRowwiseMinMax_swigregister(IndexRowwiseMinMax)
class RangeSearchResult(object):
    r"""
    The objective is to have a simple result structure while
    minimizing the number of mem copies in the result. The method
    do_allocation can be overloaded to allocate the result tables in
    the matrix type of a scripting language like Lua or Python.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nq = property(_swigfaiss.RangeSearchResult_nq_get, _swigfaiss.RangeSearchResult_nq_set, doc=r""" nb of queries""")
    lims = property(_swigfaiss.RangeSearchResult_lims_get, _swigfaiss.RangeSearchResult_lims_set, doc=r""" size (nq + 1)""")
    labels = property(_swigfaiss.RangeSearchResult_labels_get, _swigfaiss.RangeSearchResult_labels_set, doc=r""" result for query i is labels[lims[i]:lims[i+1]]""")
    distances = property(_swigfaiss.RangeSearchResult_distances_get, _swigfaiss.RangeSearchResult_distances_set, doc=r""" corresponding distances (not sorted)""")
    buffer_size = property(_swigfaiss.RangeSearchResult_buffer_size_get, _swigfaiss.RangeSearchResult_buffer_size_set, doc=r""" size of the result buffers used""")

    def __init__(self, nq, alloc_lims=True):
        r""" lims must be allocated on input to range_search."""
        _swigfaiss.RangeSearchResult_swiginit(self, _swigfaiss.new_RangeSearchResult(nq, alloc_lims))

    def do_allocation(self):
        r"""
        called when lims contains the nb of elements result entries
        for each query
        """
        return _swigfaiss.RangeSearchResult_do_allocation(self)
    __swig_destroy__ = _swigfaiss.delete_RangeSearchResult

# Register RangeSearchResult in _swigfaiss:
_swigfaiss.RangeSearchResult_swigregister(RangeSearchResult)
class BufferList(object):
    r"""
    List of temporary buffers used to store results before they are
    copied to the RangeSearchResult object.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    buffer_size = property(_swigfaiss.BufferList_buffer_size_get, _swigfaiss.BufferList_buffer_size_set)
    buffers = property(_swigfaiss.BufferList_buffers_get, _swigfaiss.BufferList_buffers_set)
    wp = property(_swigfaiss.BufferList_wp_get, _swigfaiss.BufferList_wp_set, doc=r""" write pointer in the last buffer.""")

    def __init__(self, buffer_size):
        _swigfaiss.BufferList_swiginit(self, _swigfaiss.new_BufferList(buffer_size))
    __swig_destroy__ = _swigfaiss.delete_BufferList

    def append_buffer(self):
        r""" create a new buffer"""
        return _swigfaiss.BufferList_append_buffer(self)

    def add(self, id, dis):
        r""" add one result, possibly appending a new buffer if needed"""
        return _swigfaiss.BufferList_add(self, id, dis)

    def copy_range(self, ofs, n, dest_ids, dest_dis):
        r"""
        copy elements ofs:ofs+n-1 seen as linear data in the buffers to
        tables dest_ids, dest_dis
        """
        return _swigfaiss.BufferList_copy_range(self, ofs, n, dest_ids, dest_dis)

# Register BufferList in _swigfaiss:
_swigfaiss.BufferList_swigregister(BufferList)
class RangeQueryResult(object):
    r""" result structure for a single query"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    qno = property(_swigfaiss.RangeQueryResult_qno_get, _swigfaiss.RangeQueryResult_qno_set)
    nres = property(_swigfaiss.RangeQueryResult_nres_get, _swigfaiss.RangeQueryResult_nres_set)
    pres = property(_swigfaiss.RangeQueryResult_pres_get, _swigfaiss.RangeQueryResult_pres_set)

    def add(self, dis, id):
        r""" called by search function to report a new result"""
        return _swigfaiss.RangeQueryResult_add(self, dis, id)

    def __init__(self):
        _swigfaiss.RangeQueryResult_swiginit(self, _swigfaiss.new_RangeQueryResult())
    __swig_destroy__ = _swigfaiss.delete_RangeQueryResult

# Register RangeQueryResult in _swigfaiss:
_swigfaiss.RangeQueryResult_swigregister(RangeQueryResult)
class RangeSearchPartialResult(BufferList):
    r""" the entries in the buffers are split per query"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    res = property(_swigfaiss.RangeSearchPartialResult_res_get, _swigfaiss.RangeSearchPartialResult_res_set)

    def __init__(self, res_in):
        r""" eventually the result will be stored in res_in"""
        _swigfaiss.RangeSearchPartialResult_swiginit(self, _swigfaiss.new_RangeSearchPartialResult(res_in))
    queries = property(_swigfaiss.RangeSearchPartialResult_queries_get, _swigfaiss.RangeSearchPartialResult_queries_set, doc=r""" query ids + nb of results per query.""")

    def new_result(self, qno):
        r""" begin a new result"""
        return _swigfaiss.RangeSearchPartialResult_new_result(self, qno)

    def finalize(self):
        return _swigfaiss.RangeSearchPartialResult_finalize(self)

    def set_lims(self):
        r""" called by range_search before do_allocation"""
        return _swigfaiss.RangeSearchPartialResult_set_lims(self)

    def copy_result(self, incremental=False):
        r""" called by range_search after do_allocation"""
        return _swigfaiss.RangeSearchPartialResult_copy_result(self, incremental)

    @staticmethod
    def merge(partial_results, do_delete=True):
        r"""
        merge a set of PartialResult's into one RangeSearchResult
        on output the partial results are empty!
        """
        return _swigfaiss.RangeSearchPartialResult_merge(partial_results, do_delete)
    __swig_destroy__ = _swigfaiss.delete_RangeSearchPartialResult

# Register RangeSearchPartialResult in _swigfaiss:
_swigfaiss.RangeSearchPartialResult_swigregister(RangeSearchPartialResult)
# Module-level alias that SWIG generates for the static method
# RangeSearchPartialResult.merge.
def RangeSearchPartialResult_merge(partial_results, do_delete=True):
    r"""
    merge a set of PartialResult's into one RangeSearchResult
    on output the partial results are empty!
    """
    return _swigfaiss.RangeSearchPartialResult_merge(partial_results, do_delete)
class InterruptCallback(object):
    r"""
    Abstract callback used to interrupt long-running Faiss operations.
    Subclasses implement want_interrupt(); an instance is installed globally
    on the C++ side (see clear_instance / check / is_interrupted below).
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract base: instantiation is blocked.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

    def want_interrupt(self):
        return _swigfaiss.InterruptCallback_want_interrupt(self)
    __swig_destroy__ = _swigfaiss.delete_InterruptCallback

    @staticmethod
    def clear_instance():
        return _swigfaiss.InterruptCallback_clear_instance()

    @staticmethod
    def check():
        r"""
        check if:
        - an interrupt callback is set
        - the callback returns true
        if this is the case, then throw an exception. Should not be called
        from multiple threads.
        """
        return _swigfaiss.InterruptCallback_check()

    @staticmethod
    def is_interrupted():
        r"""
        same as check() but return true if is interrupted instead of
        throwing. Can be called from multiple threads.
        """
        return _swigfaiss.InterruptCallback_is_interrupted()

    @staticmethod
    def get_period_hint(flops):
        r"""
        assuming each iteration takes a certain number of flops, what
        is a reasonable interval to check for interrupts?
        """
        return _swigfaiss.InterruptCallback_get_period_hint(flops)

# Register InterruptCallback in _swigfaiss:
_swigfaiss.InterruptCallback_swigregister(InterruptCallback)
# Module-level aliases that SWIG generates for the static methods of
# InterruptCallback above.
def InterruptCallback_clear_instance():
    return _swigfaiss.InterruptCallback_clear_instance()

def InterruptCallback_check():
    r"""
    check if:
    - an interrupt callback is set
    - the callback returns true
    if this is the case, then throw an exception. Should not be called
    from multiple threads.
    """
    return _swigfaiss.InterruptCallback_check()

def InterruptCallback_is_interrupted():
    r"""
    same as check() but return true if is interrupted instead of
    throwing. Can be called from multiple threads.
    """
    return _swigfaiss.InterruptCallback_is_interrupted()

def InterruptCallback_get_period_hint(flops):
    r"""
    assuming each iteration takes a certain number of flops, what
    is a reasonable interval to check for interrupts?
    """
    return _swigfaiss.InterruptCallback_get_period_hint(flops)
class VisitedTable(object):
    r""" set implementation optimized for fast access."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    visited = property(_swigfaiss.VisitedTable_visited_get, _swigfaiss.VisitedTable_visited_set)
    visno = property(_swigfaiss.VisitedTable_visno_get, _swigfaiss.VisitedTable_visno_set)

    def __init__(self, size):
        _swigfaiss.VisitedTable_swiginit(self, _swigfaiss.new_VisitedTable(size))

    def set(self, no):
        r""" set flag #no to true"""
        return _swigfaiss.VisitedTable_set(self, no)

    def get(self, no):
        r""" get flag #no"""
        return _swigfaiss.VisitedTable_get(self, no)

    def advance(self):
        r""" reset all flags to false"""
        return _swigfaiss.VisitedTable_advance(self)
    __swig_destroy__ = _swigfaiss.delete_VisitedTable

# Register VisitedTable in _swigfaiss:
_swigfaiss.VisitedTable_swigregister(VisitedTable)
class IDSelector(object):
    r""" Encapsulates a set of ids to handle."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract base: instantiation is blocked; use a concrete subclass
        # (IDSelectorRange, IDSelectorArray, IDSelectorBatch, ...).
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr

    def is_member(self, id):
        return _swigfaiss.IDSelector_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelector

# Register IDSelector in _swigfaiss:
_swigfaiss.IDSelector_swigregister(IDSelector)
class IDSelectorRange(IDSelector):
    r""" ids between [imin, imax)"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    imin = property(_swigfaiss.IDSelectorRange_imin_get, _swigfaiss.IDSelectorRange_imin_set)
    imax = property(_swigfaiss.IDSelectorRange_imax_get, _swigfaiss.IDSelectorRange_imax_set)
    assume_sorted = property(_swigfaiss.IDSelectorRange_assume_sorted_get, _swigfaiss.IDSelectorRange_assume_sorted_set, doc=r"""
    Assume that the ids to handle are sorted. In some cases this can speed
    up processing
    """)

    def __init__(self, imin, imax, assume_sorted=False):
        _swigfaiss.IDSelectorRange_swiginit(self, _swigfaiss.new_IDSelectorRange(imin, imax, assume_sorted))

    def is_member(self, id):
        # Membership test: True iff imin <= id < imax (evaluated on the C++ side).
        return _swigfaiss.IDSelectorRange_is_member(self, id)

    def find_sorted_ids_bounds(self, list_size, ids, jmin, jmax):
        r"""
        for sorted ids, find the range of list indices where the valid ids are
        stored
        """
        return _swigfaiss.IDSelectorRange_find_sorted_ids_bounds(self, list_size, ids, jmin, jmax)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorRange

# Register IDSelectorRange in _swigfaiss:
_swigfaiss.IDSelectorRange_swigregister(IDSelectorRange)
class IDSelectorArray(IDSelector):
    r"""
    Simple array of elements

    is_member calls are very inefficient, but some operations can use the ids
    directly.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    n = property(_swigfaiss.IDSelectorArray_n_get, _swigfaiss.IDSelectorArray_n_set)
    ids = property(_swigfaiss.IDSelectorArray_ids_get, _swigfaiss.IDSelectorArray_ids_set)

    def __init__(self, n, ids):
        r"""
        Construct with an array of ids to process

        :type n: int
        :param n: number of ids to store
        :type ids: int
        :param ids: elements to store. The pointer should remain valid during
            IDSelectorArray's lifetime
        """
        _swigfaiss.IDSelectorArray_swiginit(self, _swigfaiss.new_IDSelectorArray(n, ids))

    def is_member(self, id):
        # Linear scan over the stored ids (see class docstring about cost).
        return _swigfaiss.IDSelectorArray_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorArray

# Register IDSelectorArray in _swigfaiss:
_swigfaiss.IDSelectorArray_swigregister(IDSelectorArray)
class IDSelectorBatch(IDSelector):
    r"""
    Ids from a set.

    Repetitions of ids in the indices set passed to the constructor does not hurt
    performance.

    The hash function used for the bloom filter and GCC's implementation of
    unordered_set are just the least significant bits of the id. This works fine
    for random ids or ids in sequences but will produce many hash collisions if
    lsb's are always the same
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nbits = property(_swigfaiss.IDSelectorBatch_nbits_get, _swigfaiss.IDSelectorBatch_nbits_set)
    mask = property(_swigfaiss.IDSelectorBatch_mask_get, _swigfaiss.IDSelectorBatch_mask_set)

    def __init__(self, n, indices):
        r"""
        Construct with an array of ids to process

        :type n: int
        :param n: number of ids to store
        :param indices: elements to store. The pointer can be released after
            construction
        """
        _swigfaiss.IDSelectorBatch_swiginit(self, _swigfaiss.new_IDSelectorBatch(n, indices))

    def is_member(self, id):
        return _swigfaiss.IDSelectorBatch_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorBatch

# Register IDSelectorBatch in _swigfaiss:
_swigfaiss.IDSelectorBatch_swigregister(IDSelectorBatch)
class IDSelectorBitmap(IDSelector):
    r""" One bit per element. Constructed with a bitmap, size ceil(n / 8)."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    n = property(_swigfaiss.IDSelectorBitmap_n_get, _swigfaiss.IDSelectorBitmap_n_set)
    bitmap = property(_swigfaiss.IDSelectorBitmap_bitmap_get, _swigfaiss.IDSelectorBitmap_bitmap_set)

    def __init__(self, n, bitmap):
        r"""
        Construct with a binary mask

        :type n: int
        :param n: size of the bitmap array
        :type bitmap: uint8_t
        :param bitmap: id will be selected iff id / 8 < n and bit number
            (i%8) of bitmap[floor(i / 8)] is 1.
        """
        _swigfaiss.IDSelectorBitmap_swiginit(self, _swigfaiss.new_IDSelectorBitmap(n, bitmap))

    def is_member(self, id):
        return _swigfaiss.IDSelectorBitmap_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorBitmap

# Register IDSelectorBitmap in _swigfaiss:
_swigfaiss.IDSelectorBitmap_swigregister(IDSelectorBitmap)
class IDSelectorNot(IDSelector):
    r""" reverts the membership test of another selector"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # sel: the wrapped selector whose is_member result is negated.
    sel = property(_swigfaiss.IDSelectorNot_sel_get, _swigfaiss.IDSelectorNot_sel_set)

    def __init__(self, sel):
        _swigfaiss.IDSelectorNot_swiginit(self, _swigfaiss.new_IDSelectorNot(sel))

    def is_member(self, id):
        return _swigfaiss.IDSelectorNot_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorNot

# Register IDSelectorNot in _swigfaiss:
_swigfaiss.IDSelectorNot_swigregister(IDSelectorNot)
class IDSelectorAll(IDSelector):
    r""" selects all entries (useful for benchmarking)"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def is_member(self, id):
        # Always selects; the constant result is produced on the C++ side.
        return _swigfaiss.IDSelectorAll_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_IDSelectorAll

    def __init__(self):
        _swigfaiss.IDSelectorAll_swiginit(self, _swigfaiss.new_IDSelectorAll())

# Register IDSelectorAll in _swigfaiss:
_swigfaiss.IDSelectorAll_swigregister(IDSelectorAll)
# Downcast helpers: each delegates to a C++ helper that returns the argument
# wrapped as its most specific SWIG proxy subclass.
def downcast_index(index):
    return _swigfaiss.downcast_index(index)

def downcast_VectorTransform(vt):
    return _swigfaiss.downcast_VectorTransform(vt)

def downcast_IndexBinary(index):
    return _swigfaiss.downcast_IndexBinary(index)

def downcast_InvertedLists(il):
    return _swigfaiss.downcast_InvertedLists(il)

def downcast_AdditiveQuantizer(aq):
    return _swigfaiss.downcast_AdditiveQuantizer(aq)

def downcast_Quantizer(aq):
    return _swigfaiss.downcast_Quantizer(aq)
# Serialization helpers: overloaded on the C++ side (filename / FILE* / IOReader
# / IOWriter variants), hence the *args signatures.
def write_index(*args):
    return _swigfaiss.write_index(*args)

def write_index_binary(*args):
    return _swigfaiss.write_index_binary(*args)

def read_index(*args):
    return _swigfaiss.read_index(*args)

def read_index_binary(*args):
    return _swigfaiss.read_index_binary(*args)

def write_VectorTransform(*args):
    return _swigfaiss.write_VectorTransform(*args)

def read_VectorTransform(*args):
    return _swigfaiss.read_VectorTransform(*args)

def read_ProductQuantizer(*args):
    return _swigfaiss.read_ProductQuantizer(*args)

def write_ProductQuantizer(*args):
    return _swigfaiss.write_ProductQuantizer(*args)

def write_InvertedLists(ils, f):
    return _swigfaiss.write_InvertedLists(ils, f)

def read_InvertedLists(reader, io_flags=0):
    # io_flags: combination of the IO_FLAG_* constants defined below.
    return _swigfaiss.read_InvertedLists(reader, io_flags)

def clone_index(arg1):
    return _swigfaiss.clone_index(arg1)
class Cloner(object):
    r"""
    Cloner class, useful to override classes with other cloning
    functions. The cloning function above just calls
    Cloner::clone_Index.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr

    def clone_VectorTransform(self, arg2):
        return _swigfaiss.Cloner_clone_VectorTransform(self, arg2)

    def clone_Index(self, arg2):
        return _swigfaiss.Cloner_clone_Index(self, arg2)

    def clone_IndexIVF(self, arg2):
        return _swigfaiss.Cloner_clone_IndexIVF(self, arg2)
    __swig_destroy__ = _swigfaiss.delete_Cloner

    def __init__(self):
        _swigfaiss.Cloner_swiginit(self, _swigfaiss.new_Cloner())

# Register Cloner in _swigfaiss:
_swigfaiss.Cloner_swigregister(Cloner)
# IO flags for read_index / read_InvertedLists, mirrored from the C++ global
# constants exposed through SWIG's cvar object.
IO_FLAG_READ_ONLY = cvar.IO_FLAG_READ_ONLY
IO_FLAG_ONDISK_SAME_DIR = cvar.IO_FLAG_ONDISK_SAME_DIR
IO_FLAG_SKIP_IVF_DATA = cvar.IO_FLAG_SKIP_IVF_DATA
IO_FLAG_SKIP_PRECOMPUTE_TABLE = cvar.IO_FLAG_SKIP_PRECOMPUTE_TABLE
IO_FLAG_MMAP = cvar.IO_FLAG_MMAP
def clone_Quantizer(quant):
    r""" Clone a Quantizer object (delegates to the C++ clone_Quantizer)."""
    return _swigfaiss.clone_Quantizer(quant)
class AutoTuneCriterion(object):
    r"""
    Evaluation criterion. Returns a performance measure in [0,1],
    higher is better.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")

    def __init__(self, *args, **kwargs):
        # Abstract base: instantiation is blocked; use a concrete criterion
        # such as OneRecallAtRCriterion or IntersectionCriterion.
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    nq = property(_swigfaiss.AutoTuneCriterion_nq_get, _swigfaiss.AutoTuneCriterion_nq_set, doc=r""" nb of queries this criterion is evaluated on""")
    nnn = property(_swigfaiss.AutoTuneCriterion_nnn_get, _swigfaiss.AutoTuneCriterion_nnn_set, doc=r""" nb of NNs that the query should request""")
    gt_nnn = property(_swigfaiss.AutoTuneCriterion_gt_nnn_get, _swigfaiss.AutoTuneCriterion_gt_nnn_set, doc=r""" nb of GT NNs required to evaluate criterion""")
    gt_D = property(_swigfaiss.AutoTuneCriterion_gt_D_get, _swigfaiss.AutoTuneCriterion_gt_D_set, doc=r""" Ground-truth distances (size nq * gt_nnn)""")
    gt_I = property(_swigfaiss.AutoTuneCriterion_gt_I_get, _swigfaiss.AutoTuneCriterion_gt_I_set, doc=r""" Ground-truth indexes (size nq * gt_nnn)""")

    def set_groundtruth(self, gt_nnn, gt_D_in, gt_I_in):
        r"""
        Initializes the gt_D and gt_I vectors. Must be called before evaluating

        :type gt_D_in: float
        :param gt_D_in: size nq * gt_nnn
        :type gt_I_in: int
        :param gt_I_in: size nq * gt_nnn
        """
        return _swigfaiss.AutoTuneCriterion_set_groundtruth(self, gt_nnn, gt_D_in, gt_I_in)

    def evaluate(self, D, I):
        r"""
        Evaluate the criterion.

        :type D: float
        :param D: size nq * nnn
        :type I: int
        :param I: size nq * nnn
        :rtype: float
        :return: the criterion, between 0 and 1. Larger is better.
        """
        return _swigfaiss.AutoTuneCriterion_evaluate(self, D, I)
    __swig_destroy__ = _swigfaiss.delete_AutoTuneCriterion

# Register AutoTuneCriterion in _swigfaiss:
_swigfaiss.AutoTuneCriterion_swigregister(AutoTuneCriterion)
class OneRecallAtRCriterion(AutoTuneCriterion):
    # Concrete AutoTuneCriterion parameterized by a rank R; the exact measure
    # is implemented in the C++ OneRecallAtRCriterion (presumably 1-recall@R —
    # confirm against the Faiss AutoTune documentation).

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    R = property(_swigfaiss.OneRecallAtRCriterion_R_get, _swigfaiss.OneRecallAtRCriterion_R_set)

    def __init__(self, nq, R):
        _swigfaiss.OneRecallAtRCriterion_swiginit(self, _swigfaiss.new_OneRecallAtRCriterion(nq, R))

    def evaluate(self, D, I):
        return _swigfaiss.OneRecallAtRCriterion_evaluate(self, D, I)
    __swig_destroy__ = _swigfaiss.delete_OneRecallAtRCriterion

# Register OneRecallAtRCriterion in _swigfaiss:
_swigfaiss.OneRecallAtRCriterion_swigregister(OneRecallAtRCriterion)
class IntersectionCriterion(AutoTuneCriterion):
    # Concrete AutoTuneCriterion parameterized by a rank R; the exact measure
    # is implemented in the C++ IntersectionCriterion (presumably intersection
    # with the ground truth at rank R — confirm against the Faiss docs).

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    R = property(_swigfaiss.IntersectionCriterion_R_get, _swigfaiss.IntersectionCriterion_R_set)

    def __init__(self, nq, R):
        _swigfaiss.IntersectionCriterion_swiginit(self, _swigfaiss.new_IntersectionCriterion(nq, R))

    def evaluate(self, D, I):
        return _swigfaiss.IntersectionCriterion_evaluate(self, D, I)
    __swig_destroy__ = _swigfaiss.delete_IntersectionCriterion

# Register IntersectionCriterion in _swigfaiss:
_swigfaiss.IntersectionCriterion_swigregister(IntersectionCriterion)
class OperatingPoint(object):
    r"""
    Maintains a list of experimental results. Each operating point is a
    (perf, t, key) triplet, where higher perf and lower t is
    better. The key field is an arbitrary identifier for the operating point.

    Includes primitives to extract the Pareto-optimal operating points in the
    (perf, t) space.
    """

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    perf = property(_swigfaiss.OperatingPoint_perf_get, _swigfaiss.OperatingPoint_perf_set, doc=r""" performance measure (output of a Criterion)""")
    t = property(_swigfaiss.OperatingPoint_t_get, _swigfaiss.OperatingPoint_t_set, doc=r""" corresponding execution time (ms)""")
    key = property(_swigfaiss.OperatingPoint_key_get, _swigfaiss.OperatingPoint_key_set, doc=r""" key that identifies this op pt""")
    cno = property(_swigfaiss.OperatingPoint_cno_get, _swigfaiss.OperatingPoint_cno_set, doc=r""" integer identifer""")

    def __init__(self):
        _swigfaiss.OperatingPoint_swiginit(self, _swigfaiss.new_OperatingPoint())
    __swig_destroy__ = _swigfaiss.delete_OperatingPoint

# Register OperatingPoint in _swigfaiss:
_swigfaiss.OperatingPoint_swigregister(OperatingPoint)
class OperatingPoints(object):
    r""" Collection of OperatingPoint results with Pareto-front extraction."""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    all_pts = property(_swigfaiss.OperatingPoints_all_pts_get, _swigfaiss.OperatingPoints_all_pts_set, doc=r""" all operating points""")
    optimal_pts = property(_swigfaiss.OperatingPoints_optimal_pts_get, _swigfaiss.OperatingPoints_optimal_pts_set, doc=r""" optimal operating points, sorted by perf""")

    def __init__(self):
        _swigfaiss.OperatingPoints_swiginit(self, _swigfaiss.new_OperatingPoints())

    def merge_with(self, *args):
        r""" add operating points from other to this, with a prefix to the keys"""
        return _swigfaiss.OperatingPoints_merge_with(self, *args)

    def clear(self):
        return _swigfaiss.OperatingPoints_clear(self)

    def add(self, perf, t, key, cno=0):
        r""" add a performance measure. Return whether it is an optimal point"""
        return _swigfaiss.OperatingPoints_add(self, perf, t, key, cno)

    def t_for_perf(self, perf):
        r""" get time required to obtain a given performance measure"""
        return _swigfaiss.OperatingPoints_t_for_perf(self, perf)

    def display(self, only_optimal=True):
        r""" easy-to-read output"""
        return _swigfaiss.OperatingPoints_display(self, only_optimal)

    def all_to_gnuplot(self, fname):
        r""" output to a format easy to digest by gnuplot"""
        return _swigfaiss.OperatingPoints_all_to_gnuplot(self, fname)

    def optimal_to_gnuplot(self, fname):
        return _swigfaiss.OperatingPoints_optimal_to_gnuplot(self, fname)
    __swig_destroy__ = _swigfaiss.delete_OperatingPoints

# Register OperatingPoints in _swigfaiss:
_swigfaiss.OperatingPoints_swigregister(OperatingPoints)
class ParameterRange(object):
    r""" possible values of a parameter, sorted from least to most expensive/accurate"""

    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    name = property(_swigfaiss.ParameterRange_name_get, _swigfaiss.ParameterRange_name_set)
    values = property(_swigfaiss.ParameterRange_values_get, _swigfaiss.ParameterRange_values_set)

    def __init__(self):
        _swigfaiss.ParameterRange_swiginit(self, _swigfaiss.new_ParameterRange())
    __swig_destroy__ = _swigfaiss.delete_ParameterRange

# Register ParameterRange in _swigfaiss:
_swigfaiss.ParameterRange_swigregister(ParameterRange)
class ParameterSpace(object):
r""" Uses a-priori knowledge on the Faiss indexes to extract tunable parameters."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
parameter_ranges = property(_swigfaiss.ParameterSpace_parameter_ranges_get, _swigfaiss.ParameterSpace_parameter_ranges_set, doc=r""" all tunable parameters""")
verbose = property(_swigfaiss.ParameterSpace_verbose_get, _swigfaiss.ParameterSpace_verbose_set, doc=r""" verbosity during exploration""")
n_experiments = property(_swigfaiss.ParameterSpace_n_experiments_get, _swigfaiss.ParameterSpace_n_experiments_set, doc=r""" nb of experiments during optimization (0 = try all combinations)""")
batchsize = property(_swigfaiss.ParameterSpace_batchsize_get, _swigfaiss.ParameterSpace_batchsize_set, doc=r""" maximum number of queries to submit at a time.""")
thread_over_batches = property(_swigfaiss.ParameterSpace_thread_over_batches_get, _swigfaiss.ParameterSpace_thread_over_batches_set, doc=r"""
use multithreading over batches (useful to benchmark
independent single-searches)
""")
min_test_duration = property(_swigfaiss.ParameterSpace_min_test_duration_get, _swigfaiss.ParameterSpace_min_test_duration_set, doc=r"""
run tests several times until they reach at least this
duration (to avoid jittering in MT mode)
""")
def __init__(self):
_swigfaiss.ParameterSpace_swiginit(self, _swigfaiss.new_ParameterSpace())
def n_combinations(self):
r""" nb of combinations, = product of values sizes"""
return _swigfaiss.ParameterSpace_n_combinations(self)
def combination_ge(self, c1, c2):
r""" returns whether combinations c1 >= c2 in the tuple sense"""
return _swigfaiss.ParameterSpace_combination_ge(self, c1, c2)
def combination_name(self, cno):
r""" get string representation of the combination"""
return _swigfaiss.ParameterSpace_combination_name(self, cno)
def display(self):
r""" print a description on stdout"""
return _swigfaiss.ParameterSpace_display(self)
def add_range(self, name):
r""" add a new parameter (or return it if it exists)"""
return _swigfaiss.ParameterSpace_add_range(self, name)
def initialize(self, index):
r""" initialize with reasonable parameters for the index"""
return _swigfaiss.ParameterSpace_initialize(self, index)
def set_index_parameters(self, *args):
r"""
*Overload 1:*
set a combination of parameters on an index
|
*Overload 2:*
set a combination of parameters described by a string
"""
return _swigfaiss.ParameterSpace_set_index_parameters(self, *args)
def set_index_parameter(self, index, name, val):
r""" set one of the parameters, returns whether setting was successful"""
return _swigfaiss.ParameterSpace_set_index_parameter(self, index, name, val)
def update_bounds(self, cno, op, upper_bound_perf, lower_bound_t):
r"""
find an upper bound on the performance and a lower bound on t
for configuration cno given another operating point op
"""
return _swigfaiss.ParameterSpace_update_bounds(self, cno, op, upper_bound_perf, lower_bound_t)
def explore(self, index, nq, xq, crit, ops):
    r"""
    explore operating points

    :type index: :py:class:`Index`
    :param index: index to run on
    :type nq: int
    :param nq: number of query vectors
    :type xq: float
    :param xq: query vectors (size nq * index.d)
    :type crit: :py:class:`AutoTuneCriterion`
    :param crit: selection criterion
    :type ops: :py:class:`OperatingPoints`
    :param ops: resulting operating points
    """
    return _swigfaiss.ParameterSpace_explore(self, index, nq, xq, crit, ops)
# Frees the C++ object when the proxy is garbage-collected (if owned).
__swig_destroy__ = _swigfaiss.delete_ParameterSpace

# Register ParameterSpace in _swigfaiss:
_swigfaiss.ParameterSpace_swigregister(ParameterSpace)
def index_factory(*args):
    r"""
    Build an index with the sequence of processing steps described in
    the string.
    """
    # Typical call: index_factory(d, "IVF100,PQ8"[, metric]); dispatch is
    # resolved by the SWIG overload machinery.
    return _swigfaiss.index_factory(*args)
def index_binary_factory(d, description):
    # Binary-index counterpart of index_factory: d is the dimension in bits,
    # description is the factory string.
    return _swigfaiss.index_binary_factory(d, description)
class MatrixStats(object):
    r"""
    Reports some statistics on a dataset and comments on them.
    It is a class rather than a function so that all stats can also be
    accessed from code
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    def __init__(self, n, d, x):
        # Statistics are computed at construction time; n and d presumably
        # are the number of rows and the dimension of x — TODO confirm
        # against the C++ MatrixStats constructor.
        _swigfaiss.MatrixStats_swiginit(self, _swigfaiss.new_MatrixStats(n, d, x))
    # Wrapped C++ data members (accessors generated by SWIG):
    comments = property(_swigfaiss.MatrixStats_comments_get, _swigfaiss.MatrixStats_comments_set)
    n = property(_swigfaiss.MatrixStats_n_get, _swigfaiss.MatrixStats_n_set)
    d = property(_swigfaiss.MatrixStats_d_get, _swigfaiss.MatrixStats_d_set)
    n_collision = property(_swigfaiss.MatrixStats_n_collision_get, _swigfaiss.MatrixStats_n_collision_set)
    n_valid = property(_swigfaiss.MatrixStats_n_valid_get, _swigfaiss.MatrixStats_n_valid_set)
    n0 = property(_swigfaiss.MatrixStats_n0_get, _swigfaiss.MatrixStats_n0_set)
    min_norm2 = property(_swigfaiss.MatrixStats_min_norm2_get, _swigfaiss.MatrixStats_min_norm2_set)
    max_norm2 = property(_swigfaiss.MatrixStats_max_norm2_get, _swigfaiss.MatrixStats_max_norm2_set)
    per_dim_stats = property(_swigfaiss.MatrixStats_per_dim_stats_get, _swigfaiss.MatrixStats_per_dim_stats_set)
    occurrences = property(_swigfaiss.MatrixStats_occurrences_get, _swigfaiss.MatrixStats_occurrences_set)
    buf = property(_swigfaiss.MatrixStats_buf_get, _swigfaiss.MatrixStats_buf_set)
    nbuf = property(_swigfaiss.MatrixStats_nbuf_get, _swigfaiss.MatrixStats_nbuf_set)
    def do_comment(self, fmt):
        # Appends a printf-style formatted comment — presumably into
        # `comments`; verify against the C++ implementation.
        return _swigfaiss.MatrixStats_do_comment(self, fmt)
    __swig_destroy__ = _swigfaiss.delete_MatrixStats

# Register MatrixStats in _swigfaiss:
_swigfaiss.MatrixStats_swigregister(MatrixStats)
class PyCallbackIOWriter(IOWriter):
    r"""
    IOWriter whose writes are forwarded to a Python callback
    (see __init__ for the callback contract).

    Copyright (c) Facebook, Inc. and its affiliates.
    This source code is licensed under the MIT license found in the
    LICENSE file in the root directory of this source tree.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # callback: the Python write function; bs: presumably the internal
    # buffer/block size — TODO confirm in the SWIG interface file.
    callback = property(_swigfaiss.PyCallbackIOWriter_callback_get, _swigfaiss.PyCallbackIOWriter_callback_set)
    bs = property(_swigfaiss.PyCallbackIOWriter_bs_get, _swigfaiss.PyCallbackIOWriter_bs_set)
    def __init__(self, *args):
        r"""
        Callback: Python function that takes a bytes object and
        returns the number of bytes successfully written.
        """
        _swigfaiss.PyCallbackIOWriter_swiginit(self, _swigfaiss.new_PyCallbackIOWriter(*args))
    def __call__(self, ptrv, size, nitems):
        # fwrite-style entry point invoked from the C++ IO layer:
        # nitems items of `size` bytes starting at ptrv.
        return _swigfaiss.PyCallbackIOWriter___call__(self, ptrv, size, nitems)
    __swig_destroy__ = _swigfaiss.delete_PyCallbackIOWriter

# Register PyCallbackIOWriter in _swigfaiss:
_swigfaiss.PyCallbackIOWriter_swigregister(PyCallbackIOWriter)
class PyCallbackIOReader(IOReader):
    r"""
    IOReader whose reads are served by a Python callback
    (see __init__ for the callback contract).
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # callback: the Python read function; bs: presumably the internal
    # buffer/block size — TODO confirm in the SWIG interface file.
    callback = property(_swigfaiss.PyCallbackIOReader_callback_get, _swigfaiss.PyCallbackIOReader_callback_set)
    bs = property(_swigfaiss.PyCallbackIOReader_bs_get, _swigfaiss.PyCallbackIOReader_bs_set)
    def __init__(self, *args):
        r"""
        Callback: Python function that takes a size and returns a
        bytes object with the resulting read
        """
        _swigfaiss.PyCallbackIOReader_swiginit(self, _swigfaiss.new_PyCallbackIOReader(*args))
    def __call__(self, ptrv, size, nitems):
        # fread-style entry point invoked from the C++ IO layer:
        # fills ptrv with nitems items of `size` bytes.
        return _swigfaiss.PyCallbackIOReader___call__(self, ptrv, size, nitems)
    __swig_destroy__ = _swigfaiss.delete_PyCallbackIOReader

# Register PyCallbackIOReader in _swigfaiss:
_swigfaiss.PyCallbackIOReader_swigregister(PyCallbackIOReader)
class PyCallbackIDSelector(IDSelector):
    r"""
    IDSelector that delegates the membership test to a Python callable.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    callback = property(_swigfaiss.PyCallbackIDSelector_callback_get, _swigfaiss.PyCallbackIDSelector_callback_set)
    def __init__(self, callback):
        # callback(id) is presumably expected to return a truthy value for
        # selected ids — TODO confirm in the SWIG interface file.
        _swigfaiss.PyCallbackIDSelector_swiginit(self, _swigfaiss.new_PyCallbackIDSelector(callback))
    def is_member(self, id):
        # Called (possibly per vector) from C++ search/remove loops.
        return _swigfaiss.PyCallbackIDSelector_is_member(self, id)
    __swig_destroy__ = _swigfaiss.delete_PyCallbackIDSelector

# Register PyCallbackIDSelector in _swigfaiss:
_swigfaiss.PyCallbackIDSelector_swigregister(PyCallbackIDSelector)
def swig_ptr(a):
    # Returns a SWIG pointer to the data of `a` — presumably a numpy array;
    # the pointer does not keep `a` alive, so the caller must.
    return _swigfaiss.swig_ptr(a)
def rev_swig_ptr(*args):
    # Inverse of swig_ptr: wraps a SWIG pointer (plus a size) back into an
    # array view — TODO confirm exact overloads in the interface file.
    return _swigfaiss.rev_swig_ptr(*args)
class float_minheap_array_t(object):
    r"""
    a template structure for a set of [min|max]-heaps it is tailored
    so that the actual data of the heaps can just live in compact
    arrays.
    """
    # Min-heap instantiation over float values, per the wrapper naming.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nh = property(_swigfaiss.float_minheap_array_t_nh_get, _swigfaiss.float_minheap_array_t_nh_set, doc=r""" number of heaps""")
    k = property(_swigfaiss.float_minheap_array_t_k_get, _swigfaiss.float_minheap_array_t_k_set, doc=r""" allocated size per heap""")
    ids = property(_swigfaiss.float_minheap_array_t_ids_get, _swigfaiss.float_minheap_array_t_ids_set, doc=r""" identifiers (size nh * k)""")
    val = property(_swigfaiss.float_minheap_array_t_val_get, _swigfaiss.float_minheap_array_t_val_set, doc=r""" values (distances or similarities), size nh * k""")
    def get_val(self, key):
        r""" Return the list of values for a heap"""
        return _swigfaiss.float_minheap_array_t_get_val(self, key)
    def get_ids(self, key):
        r""" Corresponding identifiers"""
        return _swigfaiss.float_minheap_array_t_get_ids(self, key)
    def heapify(self):
        r""" prepare all the heaps before adding"""
        return _swigfaiss.float_minheap_array_t_heapify(self)
    def addn(self, nj, vin, j0=0, i0=0, ni=-1):
        r"""
        add nj elements to heaps i0:i0+ni, with sequential ids

        :type nj: int
        :param nj: nb of elements to add to each heap
        :type vin: float
        :param vin: elements to add, size ni * nj
        :type j0: int, optional
        :param j0: add this to the ids that are added
        :type i0: int, optional
        :param i0: first heap to update
        :type ni: int, optional
        :param ni: nb of elements to update (-1 = use nh)
        """
        return _swigfaiss.float_minheap_array_t_addn(self, nj, vin, j0, i0, ni)
    def addn_with_ids(self, nj, vin, id_in=None, id_stride=0, i0=0, ni=-1):
        r"""
        same as addn

        :type id_in: int, optional
        :param id_in: ids of the elements to add, size ni * nj
        :type id_stride: int, optional
        :param id_stride: stride for id_in
        """
        return _swigfaiss.float_minheap_array_t_addn_with_ids(self, nj, vin, id_in, id_stride, i0, ni)
    def reorder(self):
        r""" reorder all the heaps"""
        # Call after all additions to get results in sorted order.
        return _swigfaiss.float_minheap_array_t_reorder(self)
    def per_line_extrema(self, vals_out, idx_out):
        r"""
        this is not really a heap function. It just finds the per-line
        extrema of each line of array D

        :type vals_out: float
        :param vals_out: extreme value of each line (size nh, or NULL)
        :type idx_out: int
        :param idx_out: index of extreme value (size nh or NULL)
        """
        return _swigfaiss.float_minheap_array_t_per_line_extrema(self, vals_out, idx_out)
    def __init__(self):
        _swigfaiss.float_minheap_array_t_swiginit(self, _swigfaiss.new_float_minheap_array_t())
    __swig_destroy__ = _swigfaiss.delete_float_minheap_array_t

# Register float_minheap_array_t in _swigfaiss:
_swigfaiss.float_minheap_array_t_swigregister(float_minheap_array_t)
class int_minheap_array_t(object):
    r"""
    a template structure for a set of [min|max]-heaps it is tailored
    so that the actual data of the heaps can just live in compact
    arrays.
    """
    # Min-heap instantiation over integer values, per the wrapper naming.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nh = property(_swigfaiss.int_minheap_array_t_nh_get, _swigfaiss.int_minheap_array_t_nh_set, doc=r""" number of heaps""")
    k = property(_swigfaiss.int_minheap_array_t_k_get, _swigfaiss.int_minheap_array_t_k_set, doc=r""" allocated size per heap""")
    ids = property(_swigfaiss.int_minheap_array_t_ids_get, _swigfaiss.int_minheap_array_t_ids_set, doc=r""" identifiers (size nh * k)""")
    val = property(_swigfaiss.int_minheap_array_t_val_get, _swigfaiss.int_minheap_array_t_val_set, doc=r""" values (distances or similarities), size nh * k""")
    def get_val(self, key):
        r""" Return the list of values for a heap"""
        return _swigfaiss.int_minheap_array_t_get_val(self, key)
    def get_ids(self, key):
        r""" Corresponding identifiers"""
        return _swigfaiss.int_minheap_array_t_get_ids(self, key)
    def heapify(self):
        r""" prepare all the heaps before adding"""
        return _swigfaiss.int_minheap_array_t_heapify(self)
    def addn(self, nj, vin, j0=0, i0=0, ni=-1):
        r"""
        add nj elements to heaps i0:i0+ni, with sequential ids

        :type nj: int
        :param nj: nb of elements to add to each heap
        :type vin: int
        :param vin: elements to add, size ni * nj
        :type j0: int, optional
        :param j0: add this to the ids that are added
        :type i0: int, optional
        :param i0: first heap to update
        :type ni: int, optional
        :param ni: nb of elements to update (-1 = use nh)
        """
        return _swigfaiss.int_minheap_array_t_addn(self, nj, vin, j0, i0, ni)
    def addn_with_ids(self, nj, vin, id_in=None, id_stride=0, i0=0, ni=-1):
        r"""
        same as addn

        :type id_in: int, optional
        :param id_in: ids of the elements to add, size ni * nj
        :type id_stride: int, optional
        :param id_stride: stride for id_in
        """
        return _swigfaiss.int_minheap_array_t_addn_with_ids(self, nj, vin, id_in, id_stride, i0, ni)
    def reorder(self):
        r""" reorder all the heaps"""
        # Call after all additions to get results in sorted order.
        return _swigfaiss.int_minheap_array_t_reorder(self)
    def per_line_extrema(self, vals_out, idx_out):
        r"""
        this is not really a heap function. It just finds the per-line
        extrema of each line of array D

        :type vals_out: int
        :param vals_out: extreme value of each line (size nh, or NULL)
        :type idx_out: int
        :param idx_out: index of extreme value (size nh or NULL)
        """
        return _swigfaiss.int_minheap_array_t_per_line_extrema(self, vals_out, idx_out)
    def __init__(self):
        _swigfaiss.int_minheap_array_t_swiginit(self, _swigfaiss.new_int_minheap_array_t())
    __swig_destroy__ = _swigfaiss.delete_int_minheap_array_t

# Register int_minheap_array_t in _swigfaiss:
_swigfaiss.int_minheap_array_t_swigregister(int_minheap_array_t)
class float_maxheap_array_t(object):
    r"""
    a template structure for a set of [min|max]-heaps it is tailored
    so that the actual data of the heaps can just live in compact
    arrays.
    """
    # Max-heap instantiation over float values, per the wrapper naming.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nh = property(_swigfaiss.float_maxheap_array_t_nh_get, _swigfaiss.float_maxheap_array_t_nh_set, doc=r""" number of heaps""")
    k = property(_swigfaiss.float_maxheap_array_t_k_get, _swigfaiss.float_maxheap_array_t_k_set, doc=r""" allocated size per heap""")
    ids = property(_swigfaiss.float_maxheap_array_t_ids_get, _swigfaiss.float_maxheap_array_t_ids_set, doc=r""" identifiers (size nh * k)""")
    val = property(_swigfaiss.float_maxheap_array_t_val_get, _swigfaiss.float_maxheap_array_t_val_set, doc=r""" values (distances or similarities), size nh * k""")
    def get_val(self, key):
        r""" Return the list of values for a heap"""
        return _swigfaiss.float_maxheap_array_t_get_val(self, key)
    def get_ids(self, key):
        r""" Corresponding identifiers"""
        return _swigfaiss.float_maxheap_array_t_get_ids(self, key)
    def heapify(self):
        r""" prepare all the heaps before adding"""
        return _swigfaiss.float_maxheap_array_t_heapify(self)
    def addn(self, nj, vin, j0=0, i0=0, ni=-1):
        r"""
        add nj elements to heaps i0:i0+ni, with sequential ids

        :type nj: int
        :param nj: nb of elements to add to each heap
        :type vin: float
        :param vin: elements to add, size ni * nj
        :type j0: int, optional
        :param j0: add this to the ids that are added
        :type i0: int, optional
        :param i0: first heap to update
        :type ni: int, optional
        :param ni: nb of elements to update (-1 = use nh)
        """
        return _swigfaiss.float_maxheap_array_t_addn(self, nj, vin, j0, i0, ni)
    def addn_with_ids(self, nj, vin, id_in=None, id_stride=0, i0=0, ni=-1):
        r"""
        same as addn

        :type id_in: int, optional
        :param id_in: ids of the elements to add, size ni * nj
        :type id_stride: int, optional
        :param id_stride: stride for id_in
        """
        return _swigfaiss.float_maxheap_array_t_addn_with_ids(self, nj, vin, id_in, id_stride, i0, ni)
    def reorder(self):
        r""" reorder all the heaps"""
        # Call after all additions to get results in sorted order.
        return _swigfaiss.float_maxheap_array_t_reorder(self)
    def per_line_extrema(self, vals_out, idx_out):
        r"""
        this is not really a heap function. It just finds the per-line
        extrema of each line of array D

        :type vals_out: float
        :param vals_out: extreme value of each line (size nh, or NULL)
        :type idx_out: int
        :param idx_out: index of extreme value (size nh or NULL)
        """
        return _swigfaiss.float_maxheap_array_t_per_line_extrema(self, vals_out, idx_out)
    def __init__(self):
        _swigfaiss.float_maxheap_array_t_swiginit(self, _swigfaiss.new_float_maxheap_array_t())
    __swig_destroy__ = _swigfaiss.delete_float_maxheap_array_t

# Register float_maxheap_array_t in _swigfaiss:
_swigfaiss.float_maxheap_array_t_swigregister(float_maxheap_array_t)
class int_maxheap_array_t(object):
    r"""
    a template structure for a set of [min|max]-heaps it is tailored
    so that the actual data of the heaps can just live in compact
    arrays.
    """
    # Max-heap instantiation over integer values, per the wrapper naming.
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    nh = property(_swigfaiss.int_maxheap_array_t_nh_get, _swigfaiss.int_maxheap_array_t_nh_set, doc=r""" number of heaps""")
    k = property(_swigfaiss.int_maxheap_array_t_k_get, _swigfaiss.int_maxheap_array_t_k_set, doc=r""" allocated size per heap""")
    ids = property(_swigfaiss.int_maxheap_array_t_ids_get, _swigfaiss.int_maxheap_array_t_ids_set, doc=r""" identifiers (size nh * k)""")
    val = property(_swigfaiss.int_maxheap_array_t_val_get, _swigfaiss.int_maxheap_array_t_val_set, doc=r""" values (distances or similarities), size nh * k""")
    def get_val(self, key):
        r""" Return the list of values for a heap"""
        return _swigfaiss.int_maxheap_array_t_get_val(self, key)
    def get_ids(self, key):
        r""" Corresponding identifiers"""
        return _swigfaiss.int_maxheap_array_t_get_ids(self, key)
    def heapify(self):
        r""" prepare all the heaps before adding"""
        return _swigfaiss.int_maxheap_array_t_heapify(self)
    def addn(self, nj, vin, j0=0, i0=0, ni=-1):
        r"""
        add nj elements to heaps i0:i0+ni, with sequential ids

        :type nj: int
        :param nj: nb of elements to add to each heap
        :type vin: int
        :param vin: elements to add, size ni * nj
        :type j0: int, optional
        :param j0: add this to the ids that are added
        :type i0: int, optional
        :param i0: first heap to update
        :type ni: int, optional
        :param ni: nb of elements to update (-1 = use nh)
        """
        return _swigfaiss.int_maxheap_array_t_addn(self, nj, vin, j0, i0, ni)
    def addn_with_ids(self, nj, vin, id_in=None, id_stride=0, i0=0, ni=-1):
        r"""
        same as addn

        :type id_in: int, optional
        :param id_in: ids of the elements to add, size ni * nj
        :type id_stride: int, optional
        :param id_stride: stride for id_in
        """
        return _swigfaiss.int_maxheap_array_t_addn_with_ids(self, nj, vin, id_in, id_stride, i0, ni)
    def reorder(self):
        r""" reorder all the heaps"""
        # Call after all additions to get results in sorted order.
        return _swigfaiss.int_maxheap_array_t_reorder(self)
    def per_line_extrema(self, vals_out, idx_out):
        r"""
        this is not really a heap function. It just finds the per-line
        extrema of each line of array D

        :type vals_out: int
        :param vals_out: extreme value of each line (size nh, or NULL)
        :type idx_out: int
        :param idx_out: index of extreme value (size nh or NULL)
        """
        return _swigfaiss.int_maxheap_array_t_per_line_extrema(self, vals_out, idx_out)
    def __init__(self):
        _swigfaiss.int_maxheap_array_t_swiginit(self, _swigfaiss.new_int_maxheap_array_t())
    __swig_destroy__ = _swigfaiss.delete_int_maxheap_array_t

# Register int_maxheap_array_t in _swigfaiss:
_swigfaiss.int_maxheap_array_t_swigregister(int_maxheap_array_t)
def CMin_float_partition_fuzzy(vals, ids, n, q_min, q_max, q_out):
    r"""
    partitions the table into 0:q and q:n where all elements above q are >= all
    elements below q (for C = CMax, for CMin comparisons are reversed)

    Returns the partition threshold. The elements q:n are destroyed on output.
    """
    # CMin variant: comparisons reversed relative to CMax (see docstring).
    return _swigfaiss.CMin_float_partition_fuzzy(vals, ids, n, q_min, q_max, q_out)
def CMax_float_partition_fuzzy(vals, ids, n, q_min, q_max, q_out):
    r"""
    partitions the table into 0:q and q:n where all elements above q are >= all
    elements below q (for C = CMax, for CMin comparisons are reversed)

    Returns the partition threshold. The elements q:n are destroyed on output.
    """
    # "Fuzzy": the split point q is chosen anywhere in [q_min, q_max];
    # the chosen q is presumably written to q_out — TODO confirm.
    return _swigfaiss.CMax_float_partition_fuzzy(vals, ids, n, q_min, q_max, q_out)
class AlignedTableUint8(object):
    r"""
    Table of uint8 elements — presumably backed by an aligned allocation
    (wrapper of the C++ AlignedTable template); confirm in faiss headers.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # tab: raw storage pointer; numel: number of elements.
    tab = property(_swigfaiss.AlignedTableUint8_tab_get, _swigfaiss.AlignedTableUint8_tab_set)
    numel = property(_swigfaiss.AlignedTableUint8_numel_get, _swigfaiss.AlignedTableUint8_numel_set)
    @staticmethod
    def round_capacity(n):
        # Presumably rounds n up to the table's allocation granularity.
        return _swigfaiss.AlignedTableUint8_round_capacity(n)
    def __init__(self, *args):
        # Overloaded: default-construct or construct with an initial size.
        _swigfaiss.AlignedTableUint8_swiginit(self, _swigfaiss.new_AlignedTableUint8(*args))
    def itemsize(self):
        return _swigfaiss.AlignedTableUint8_itemsize(self)
    def resize(self, n):
        return _swigfaiss.AlignedTableUint8_resize(self, n)
    def clear(self):
        return _swigfaiss.AlignedTableUint8_clear(self)
    def size(self):
        return _swigfaiss.AlignedTableUint8_size(self)
    def nbytes(self):
        return _swigfaiss.AlignedTableUint8_nbytes(self)
    def get(self, *args):
        return _swigfaiss.AlignedTableUint8_get(self, *args)
    def data(self, *args):
        return _swigfaiss.AlignedTableUint8_data(self, *args)
    __swig_destroy__ = _swigfaiss.delete_AlignedTableUint8

# Register AlignedTableUint8 in _swigfaiss:
_swigfaiss.AlignedTableUint8_swigregister(AlignedTableUint8)

# Module-level alias SWIG emits for the static method above.
def AlignedTableUint8_round_capacity(n):
    return _swigfaiss.AlignedTableUint8_round_capacity(n)
class AlignedTableUint16(object):
    r"""
    Table of uint16 elements — presumably backed by an aligned allocation
    (wrapper of the C++ AlignedTable template); confirm in faiss headers.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # tab: raw storage pointer; numel: number of elements.
    tab = property(_swigfaiss.AlignedTableUint16_tab_get, _swigfaiss.AlignedTableUint16_tab_set)
    numel = property(_swigfaiss.AlignedTableUint16_numel_get, _swigfaiss.AlignedTableUint16_numel_set)
    @staticmethod
    def round_capacity(n):
        # Presumably rounds n up to the table's allocation granularity.
        return _swigfaiss.AlignedTableUint16_round_capacity(n)
    def __init__(self, *args):
        # Overloaded: default-construct or construct with an initial size.
        _swigfaiss.AlignedTableUint16_swiginit(self, _swigfaiss.new_AlignedTableUint16(*args))
    def itemsize(self):
        return _swigfaiss.AlignedTableUint16_itemsize(self)
    def resize(self, n):
        return _swigfaiss.AlignedTableUint16_resize(self, n)
    def clear(self):
        return _swigfaiss.AlignedTableUint16_clear(self)
    def size(self):
        return _swigfaiss.AlignedTableUint16_size(self)
    def nbytes(self):
        return _swigfaiss.AlignedTableUint16_nbytes(self)
    def get(self, *args):
        return _swigfaiss.AlignedTableUint16_get(self, *args)
    def data(self, *args):
        return _swigfaiss.AlignedTableUint16_data(self, *args)
    __swig_destroy__ = _swigfaiss.delete_AlignedTableUint16

# Register AlignedTableUint16 in _swigfaiss:
_swigfaiss.AlignedTableUint16_swigregister(AlignedTableUint16)

# Module-level alias SWIG emits for the static method above.
def AlignedTableUint16_round_capacity(n):
    return _swigfaiss.AlignedTableUint16_round_capacity(n)
class AlignedTableFloat32(object):
    r"""
    Table of float32 elements — presumably backed by an aligned allocation
    (wrapper of the C++ AlignedTable template); confirm in faiss headers.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    # tab: raw storage pointer; numel: number of elements.
    tab = property(_swigfaiss.AlignedTableFloat32_tab_get, _swigfaiss.AlignedTableFloat32_tab_set)
    numel = property(_swigfaiss.AlignedTableFloat32_numel_get, _swigfaiss.AlignedTableFloat32_numel_set)
    @staticmethod
    def round_capacity(n):
        # Presumably rounds n up to the table's allocation granularity.
        return _swigfaiss.AlignedTableFloat32_round_capacity(n)
    def __init__(self, *args):
        # Overloaded: default-construct or construct with an initial size.
        _swigfaiss.AlignedTableFloat32_swiginit(self, _swigfaiss.new_AlignedTableFloat32(*args))
    def itemsize(self):
        return _swigfaiss.AlignedTableFloat32_itemsize(self)
    def resize(self, n):
        return _swigfaiss.AlignedTableFloat32_resize(self, n)
    def clear(self):
        return _swigfaiss.AlignedTableFloat32_clear(self)
    def size(self):
        return _swigfaiss.AlignedTableFloat32_size(self)
    def nbytes(self):
        return _swigfaiss.AlignedTableFloat32_nbytes(self)
    def get(self, *args):
        return _swigfaiss.AlignedTableFloat32_get(self, *args)
    def data(self, *args):
        return _swigfaiss.AlignedTableFloat32_data(self, *args)
    __swig_destroy__ = _swigfaiss.delete_AlignedTableFloat32

# Register AlignedTableFloat32 in _swigfaiss:
_swigfaiss.AlignedTableFloat32_swigregister(AlignedTableFloat32)

# Module-level alias SWIG emits for the static method above.
def AlignedTableFloat32_round_capacity(n):
    return _swigfaiss.AlignedTableFloat32_round_capacity(n)
# uint16 counterparts of the float partition_fuzzy helpers above
# (overloaded on the C++ side, hence *args).
def CMax_uint16_partition_fuzzy(*args):
    return _swigfaiss.CMax_uint16_partition_fuzzy(*args)
def CMin_uint16_partition_fuzzy(*args):
    return _swigfaiss.CMin_uint16_partition_fuzzy(*args)
def omp_set_num_threads(num_threads):
    r"""Set the number of OpenMP threads used by the library (wraps omp_set_num_threads)."""
    return _swigfaiss.omp_set_num_threads(num_threads)
def omp_get_max_threads():
    r"""Return the maximum number of OpenMP threads (wraps omp_get_max_threads)."""
    return _swigfaiss.omp_get_max_threads()
def memcpy(dest, src, n):
    # Raw C memcpy exposed to Python: copies n bytes from src to dest.
    # dest/src are SWIG pointers (e.g. from swig_ptr); no bounds checking.
    return _swigfaiss.memcpy(dest, src, n)
# Reinterpret a raw integer address x as a typed SWIG pointer.
# NOTE(review): no validation happens on the Python side — only pass
# addresses obtained from the wrapped library or a pinned buffer.
def cast_integer_to_uint8_ptr(x):
    return _swigfaiss.cast_integer_to_uint8_ptr(x)
def cast_integer_to_float_ptr(x):
    return _swigfaiss.cast_integer_to_float_ptr(x)
def cast_integer_to_idx_t_ptr(x):
    return _swigfaiss.cast_integer_to_idx_t_ptr(x)
def cast_integer_to_int_ptr(x):
    return _swigfaiss.cast_integer_to_int_ptr(x)
def cast_integer_to_void_ptr(x):
    return _swigfaiss.cast_integer_to_void_ptr(x)
class MapLong2Long(object):
    r"""
    Wrapper of a C++ map from int64 keys to int64 values, with batched
    add/search entry points.
    """
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
    __repr__ = _swig_repr
    map = property(_swigfaiss.MapLong2Long_map_get, _swigfaiss.MapLong2Long_map_set)
    def add(self, n, keys, vals):
        # Insert n entries; keys/vals are presumably parallel arrays — confirm.
        return _swigfaiss.MapLong2Long_add(self, n, keys, vals)
    def search(self, key):
        # Single-key lookup; the missing-key return value is defined on the
        # C++ side — TODO confirm (likely a sentinel such as -1).
        return _swigfaiss.MapLong2Long_search(self, key)
    def search_multiple(self, n, keys, vals):
        # Batched lookup of n keys; results are written into vals.
        return _swigfaiss.MapLong2Long_search_multiple(self, n, keys, vals)
    def __init__(self):
        _swigfaiss.MapLong2Long_swiginit(self, _swigfaiss.new_MapLong2Long())
    __swig_destroy__ = _swigfaiss.delete_MapLong2Long

# Register MapLong2Long in _swigfaiss:
_swigfaiss.MapLong2Long_swigregister(MapLong2Long)
def wait():
    # Blocks in C++ code; exact semantics are defined in the SWIG interface
    # file — NOTE(review): presumably a debugging aid (e.g. to attach gdb),
    # confirm before relying on it.
    return _swigfaiss.wait()