From ea3d44d80158136e44b3f9212bc325fdfa5f0150 Mon Sep 17 00:00:00 2001 From: wangguokun Date: Tue, 2 Apr 2024 07:26:44 +0000 Subject: [PATCH] lib/opencv: port lib-opencv to tenonos Signed-off-by: wangguokun --- .clang-format | 11 + .editorconfig | 14 + Config.uk | 3 + Makefile.uk | 400 + README.md | 35 + .../3rdparty/openjpeg/openjp2/opj_config.h | 10 + .../openjpeg/openjp2/opj_config_private.h | 49 + generated/custom_hal.hpp | 6 + generated/cv_cpu_config.h | 19 + generated/cvconfig.h | 149 + .../modules/core/arithm.simd_declarations.hpp | 4 + .../core/convert.simd_declarations.hpp | 4 + .../core/convert_scale.simd_declarations.hpp | 4 + .../core/count_non_zero.simd_declarations.hpp | 4 + .../core/mathfuncs_core.simd_declarations.hpp | 4 + .../modules/core/matmul.simd_declarations.hpp | 4 + .../modules/core/mean.simd_declarations.hpp | 4 + .../modules/core/merge.simd_declarations.hpp | 4 + .../modules/core/opencl_kernels_core.cpp | 4456 +++++ .../modules/core/opencl_kernels_core.hpp | 41 + .../modules/core/split.simd_declarations.hpp | 4 + .../modules/core/stat.simd_declarations.hpp | 4 + .../modules/core/sum.simd_declarations.hpp | 4 + generated/modules/core/version_string.inc | 97 + .../modules/highgui/opencv_highgui_config.hpp | 4 + .../imgproc/accum.simd_declarations.hpp | 4 + .../bilateral_filter.simd_declarations.hpp | 4 + .../imgproc/box_filter.simd_declarations.hpp | 4 + .../imgproc/color_hsv.simd_declarations.hpp | 4 + .../imgproc/color_rgb.simd_declarations.hpp | 4 + .../imgproc/color_yuv.simd_declarations.hpp | 4 + .../imgproc/filter.simd_declarations.hpp | 4 + .../imgproc/median_blur.simd_declarations.hpp | 4 + .../imgproc/morph.simd_declarations.hpp | 4 + .../imgproc/opencl_kernels_imgproc.cpp | 9129 +++++++++ .../imgproc/opencl_kernels_imgproc.hpp | 64 + .../imgproc/smooth.simd_declarations.hpp | 4 + .../imgproc/sumpixels.simd_declarations.hpp | 4 + generated/modules/opencv2/cvconfig.h | 149 + generated/modules/opencv2/opencv_modules.hpp | 18 + .../pyopencv_custom_headers.h | 5 + .../pyopencv_generated_enums.h | 249 + .../pyopencv_generated_funcs.h | 16085 ++++++++++++++++ .../pyopencv_generated_include.h | 22 + .../pyopencv_generated_modules.h | 13 + .../pyopencv_generated_modules_content.h | 1981 ++ .../pyopencv_generated_types.h | 29 + .../pyopencv_generated_types_content.h | 12635 ++++++++++++ generated/opencv_data_config.hpp | 11 + generated/opencv_tests_config.hpp | 4 + importfix/cv2/cv2.py | 1 + 51 files changed, 45777 insertions(+) create mode 100644 .clang-format create mode 100644 .editorconfig create mode 100644 Config.uk create mode 100644 Makefile.uk create mode 100644 README.md create mode 100644 generated/3rdparty/openjpeg/openjp2/opj_config.h create mode 100644 generated/3rdparty/openjpeg/openjp2/opj_config_private.h create mode 100644 generated/custom_hal.hpp create mode 100644 generated/cv_cpu_config.h create mode 100644 generated/cvconfig.h create mode 100644 generated/modules/core/arithm.simd_declarations.hpp create mode 100644 generated/modules/core/convert.simd_declarations.hpp create mode 100644 generated/modules/core/convert_scale.simd_declarations.hpp create mode 100644 generated/modules/core/count_non_zero.simd_declarations.hpp create mode 100644 generated/modules/core/mathfuncs_core.simd_declarations.hpp create mode 100644 generated/modules/core/matmul.simd_declarations.hpp create mode 100644 generated/modules/core/mean.simd_declarations.hpp create mode 100644 generated/modules/core/merge.simd_declarations.hpp create mode 100644 
generated/modules/core/opencl_kernels_core.cpp create mode 100644 generated/modules/core/opencl_kernels_core.hpp create mode 100644 generated/modules/core/split.simd_declarations.hpp create mode 100644 generated/modules/core/stat.simd_declarations.hpp create mode 100644 generated/modules/core/sum.simd_declarations.hpp create mode 100644 generated/modules/core/version_string.inc create mode 100644 generated/modules/highgui/opencv_highgui_config.hpp create mode 100644 generated/modules/imgproc/accum.simd_declarations.hpp create mode 100644 generated/modules/imgproc/bilateral_filter.simd_declarations.hpp create mode 100644 generated/modules/imgproc/box_filter.simd_declarations.hpp create mode 100644 generated/modules/imgproc/color_hsv.simd_declarations.hpp create mode 100644 generated/modules/imgproc/color_rgb.simd_declarations.hpp create mode 100644 generated/modules/imgproc/color_yuv.simd_declarations.hpp create mode 100644 generated/modules/imgproc/filter.simd_declarations.hpp create mode 100644 generated/modules/imgproc/median_blur.simd_declarations.hpp create mode 100644 generated/modules/imgproc/morph.simd_declarations.hpp create mode 100644 generated/modules/imgproc/opencl_kernels_imgproc.cpp create mode 100644 generated/modules/imgproc/opencl_kernels_imgproc.hpp create mode 100644 generated/modules/imgproc/smooth.simd_declarations.hpp create mode 100644 generated/modules/imgproc/sumpixels.simd_declarations.hpp create mode 100644 generated/modules/opencv2/cvconfig.h create mode 100644 generated/modules/opencv2/opencv_modules.hpp create mode 100644 generated/modules/python_bindings_generator/pyopencv_custom_headers.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_enums.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_funcs.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_include.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_modules.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_modules_content.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_types.h create mode 100644 generated/modules/python_bindings_generator/pyopencv_generated_types_content.h create mode 100644 generated/opencv_data_config.hpp create mode 100644 generated/opencv_tests_config.hpp create mode 100644 importfix/cv2/cv2.py diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..958f4c2 --- /dev/null +++ b/.clang-format @@ -0,0 +1,11 @@ +# Style is similar to Linux Kernel +# https://www.kernel.org/doc/Documentation/CodingStyle +BasedOnStyle: LLVM +IndentWidth: 8 +UseTab: Always +BreakBeforeBraces: Linux +BreakBeforeBinaryOperators: NonAssignment +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +IndentCaseLabels: false +SortIncludes: false diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..3811cc2 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,14 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_size = 8 +indent_style = tab +max_line_length = 80 +insert_final_newline = true +trim_trailing_whitespace = true + +[.git/**] +max_line_length = 75 + diff --git a/Config.uk b/Config.uk new file mode 100644 index 0000000..3df3bb2 --- /dev/null +++ b/Config.uk @@ -0,0 +1,3 @@ +menuconfig LIBPYTHON_OPENCV + bool "opencv: OpenCV is an open-source computer vision and machine learning library" + default n diff --git 
a/Makefile.uk b/Makefile.uk new file mode 100644 index 0000000..bc91733 --- /dev/null +++ b/Makefile.uk @@ -0,0 +1,400 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright 2024 The TenonOS Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +################################################################################ +# Library registration +################################################################################ +$(eval $(call addlib_s,libpython_opencv,$(CONFIG_LIBPYTHON_OPENCV))) + +################################################################################ +# Sources +################################################################################ +LIBPYTHON_OPENCV_VERSION=4.5.4 +LIBPYTHON_OPENCV_URL=https://github.com/opencv/opencv/archive/refs/tags/$(LIBPYTHON_OPENCV_VERSION).tar.gz + +ifeq ($(CONFIG_LIBPYTHON_OPENCV), y) +$(eval $(call fetch,libpython_opencv,$(LIBPYTHON_OPENCV_URL))) +LIBPYTHON_OPENCV_DIRNAME=opencv-$(LIBPYTHON_OPENCV_VERSION) +endif + +################################################################################ +# Helpers +################################################################################ +LIBPYTHON_OPENCV_SRC = $(LIBPYTHON_OPENCV_ORIGIN)/$(LIBPYTHON_OPENCV_DIRNAME) + +################################################################################ +# Library includes +################################################################################ +INCLUDES_PATH := -I$(LIBPYTHON_OPENCV_SRC)/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/core/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/core/misc/python \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/highgui/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/highgui/src \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/imgproc/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/videoio/include \ + -I$(LIBPYTHON_OPENCV_SRC)/modules/videoio/src \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/include/opencl/1.2 \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/carotene/hal \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/carotene/include \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/carotene/src \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2 \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg \ + -I$(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng \ + -I$(LIBPYTHON_OPENCV_BASE)/generated \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules/core \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules/highgui \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules/imgproc \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules/videoio \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/modules/python_bindings_generator \ + -I$(LIBPYTHON_OPENCV_BASE)/generated/3rdparty/openjpeg/openjp2 \ + + +CINCLUDES-$(CONFIG_LIBPYTHON_OPENCV) += $(INCLUDES_PATH) +CXXINCLUDES-$(CONFIG_LIBPYTHON_OPENCV) += $(INCLUDES_PATH) + +################################################################################ +# Global 
flags +################################################################################ +LIBPYTHON_OPENCV_FLAGS := -D__OPENCV_BUILD -DIW_BUILD -DICV_BASE \ + -U HAVE_OPENEXR + +GCC_INSTALLDIR_FLAGS := -idirafter $(shell LC_ALL=C $(CC) -v 2>&1 | \ + $(SED) -e '/^COLLECT_LTO_WRAPPER=\(.*\)\/lto-wrapper/!d' -e 's//\1/')/include + +ifneq ($(CONFIG_CROSS_COMPILE),"") +GCC_INCLUDEDIR_FLAGS := -idirafter $(shell LC_ALL=C $(CC) -v 2>&1 | \ + $(SED) -n -e 's/.*--includedir=\([^ ]*\).*/\1/p') +else +GCC_INCLUDEDIR_FLAGS := -idirafter $(shell LC_ALL=C $(CC) -v 2>&1 | \ + $(SED) -n -e 's/.*--prefix=\([^ ]*\).*/\1/p')/include +endif + +ifdef CONFIG_ARCH_ARM_64 +LIBPYTHON_OPENCV_CFLAGS-$(CONFIG_ARCH_ARM_64) += $(GCC_INSTALLDIR_FLAGS) $(GCC_INCLUDEDIR_FLAGS) +LIBPYTHON_OPENCV_CXXFLAGS-$(CONFIG_ARCH_ARM_64) += $(GCC_INSTALLDIR_FLAGS) $(GCC_INCLUDEDIR_FLAGS) +endif + +ifdef CONFIG_ARCH_X86_64 +LIBPYTHON_OPENCV_CFLAGS-$(CONFIG_ARCH_X86_64) += $(GCC_INSTALLDIR_FLAGS) $(GCC_INCLUDEDIR_FLAGS) +LIBPYTHON_OPENCV_CXXFLAGS-$(CONFIG_ARCH_X86_64) += $(GCC_INSTALLDIR_FLAGS) $(GCC_INCLUDEDIR_FLAGS) +endif + +LIBPYTHON_OPENCV_CFLAGS-y += $(LIBPYTHON_OPENCV_FLAGS) +LIBPYTHON_OPENCV_CXXFLAGS-y += $(LIBPYTHON_OPENCV_FLAGS) + +################################################################################ +# Library sources +################################################################################ + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/softfloat.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/system.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/lda.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/tables.cpp|core +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/va_intel.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence_yml.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/stl.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/rand.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/logger.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/cuda_stream.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_c.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence_base64_encoding.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matmul.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence_types.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_expressions.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/mathfuncs.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/umatrix.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/arithm.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/mathfuncs_core.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/parallel/parallel_openmp.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/parallel/parallel.cpp|parallel +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/parallel/parallel_tbb.cpp +LIBPYTHON_OPENCV_SRCS-y += 
$(LIBPYTHON_OPENCV_SRC)/modules/core/src/convert.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/arithm.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/buffer_area.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/kmeans.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/glob.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/ocl.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/downhill_simplex.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/lut.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/sum.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/alloc.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/minmax.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/cuda_gpu_mat.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/out.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/array.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/ovx.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/directx.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/convert_scale.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/opencl/runtime/opencl_clfft.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/opencl/runtime/opencl_clblas.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/opencl/runtime/opencl_core.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/lapack.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/cuda_host_mem.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_iterator.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/gl_core_3_1.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/convert_c.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/mean.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/types.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_operations.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/async.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/stat_c.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence_json.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/batch_distance.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/lpsolver.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/copy.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/split.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/stat.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/conjugate_gradient.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_decomp.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/persistence_xml.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/bindings_utils.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/hal_internal.cpp +LIBPYTHON_OPENCV_SRCS-y += 
$(LIBPYTHON_OPENCV_SRC)/modules/core/src/cuda_gpu_mat_nd.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_sparse.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/cuda_info.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/parallel.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/datastructs.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/opengl.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/channels.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/check.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/parallel_impl.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/trace.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/command_line_parser.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/count_non_zero.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/pca.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_wrap.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/norm.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/dxt.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/merge.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/matrix_transform.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/utils/filesystem.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/utils/logtagconfigparser.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/utils/datafile.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/utils/logtagmanager.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/utils/samples.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/core/src/algorithm.cpp + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/min_enclosing_triangle.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/color.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/pyramids.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/intersection.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/color_lab.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/blend.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/accum.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/featureselect.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/tables.cpp|imgproc +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/templmatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/imgwarp.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/moments.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/convhull.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/canny.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/segmentation.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/smooth.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/matchcontours.cpp +LIBPYTHON_OPENCV_SRCS-y += 
$(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/morph.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/lsd.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/grabcut.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/approx.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/drawing.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/floodfill.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/contours.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/phasecorr.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/geometry.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/color_hsv.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/median_blur.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/linefit.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/corner.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/deriv.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/utils.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/cornersubpix.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/connectedcomponents.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/samplers.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/hough.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/accum.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/hershey_fonts.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/intelligent_scissors.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/clahe.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/box_filter.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/emd.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/thresh.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/histogram.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/filter.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/resize.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/sumpixels.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/distransform.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/shapedescr.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/main.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/color_rgb.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/rotcalipers.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/color_yuv.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/generalized_hough.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/demosaicing.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/bilateral_filter.dispatch.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/spatialgradient.cpp +LIBPYTHON_OPENCV_SRCS-y += 
$(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/subdivision2d.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/gabor.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgproc/src/colormap.cpp + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/bitstrm.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_base.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_jpeg2000.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_jpeg.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/loadsave.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/utils.cpp|imgcodecs +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/exif.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_bmp.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_jpeg2000_openjpeg.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/grfmt_png.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/imgcodecs/src/rgbe.cpp + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/cap_images.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/container_avi.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/backend_plugin.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/cap.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/backend_static.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/cap_mjpeg_encoder.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/videoio_registry.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/cap_mjpeg_decoder.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/cap_v4l.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/videoio/src/videoio_c.cpp + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/highgui/src/window.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/highgui/src/roiSelector.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/highgui/src/backend.cpp +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/highgui/src/window_QT.cpp + + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/modules/python/src2/cv2.cpp + +# 3rdparty -------------------------------------------------------------------------------------------- +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jaricom.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcapimin.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcapistd.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcarith.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jccoefct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jccolor.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcdctmgr.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jchuff.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcinit.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcmainct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcmarker.c +LIBPYTHON_OPENCV_SRCS-y += 
$(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcmaster.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcomapi.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcparam.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcprepct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jcsample.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jctrans.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdapimin.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdapistd.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdarith.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdatadst.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdatasrc.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdcoefct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdcolor.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jddctmgr.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdhuff.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdinput.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdmainct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdmarker.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdmaster.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdmerge.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdpostct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdsample.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jdtrans.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jerror.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jfdctflt.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jfdctfst.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jfdctint.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jidctflt.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jidctfst.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jidctint.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jmemansi.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jmemmgr.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jquant1.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jquant2.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libjpeg/jutils.c + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/bio.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/cio.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/dwt.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/event.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/function_list.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/image.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/invert.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/j2k.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/jp2.c +LIBPYTHON_OPENCV_SRCS-y 
+= $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/mct.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/mqc.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/openjpeg.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/opj_clock.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/opj_malloc.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/pi.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/sparse_array.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/t1.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/t2.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/tcd.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/tgt.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/openjpeg/openjp2/thread.c + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/png.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngerror.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngget.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngmem.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngpread.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngread.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngrio.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngrtran.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngrutil.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngset.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngtrans.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngwio.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngwrite.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngwtran.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/pngwutil.c + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/arm/arm_init.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/arm/filter_neon_intrinsics.c +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_SRC)/3rdparty/libpng/arm/palette_neon_intrinsics.c + +# BUILD -------------------------------------------------------------------------------------------- + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_BASE)/generated/modules/core/opencl_kernels_core.cpp + +LIBPYTHON_OPENCV_SRCS-y += $(LIBPYTHON_OPENCV_BASE)/generated/modules/imgproc/opencl_kernels_imgproc.cpp + +ifeq ($(CONFIG_LIBPYTHON_OPENCV),y) +# Install opencv lib into main python rootfs & cleanup non-python files +$(PYTHON_ROOTFS)/.opencv_done: $(PYTHON_ROOTFS)/.keep + . 
$(PYTHON_ROOTFS)/bin/activate && pip install opencv-python==4.5.4.58
+	_dir=`find "$(PYTHON_ROOTFS)" -maxdepth 4 -type d -name cv2`; \
+	_dir_libs=`find "$(PYTHON_ROOTFS)" -maxdepth 4 -type d -name opencv_python.libs`; \
+	rm -rf "$$_dir_libs"; \
+	find "$$_dir" -type f -name '*.so' -delete; \
+	find "$$_dir" -type d -name '__pycache__' | xargs rm -rf; \
+	find "$$_dir" -type f | grep -v '\.py$$' | tr "\n" "\0" | xargs -0 rm; \
+	find "$(LIBPYTHON_OPENCV_BASE)/importfix/cv2" -mindepth 1 -maxdepth 1 | xargs $(CP) -rp -t "$$_dir"; \
+	touch $@
+
+
+# Add opencv rootfs to main python
+python-rootfs: $(PYTHON_ROOTFS)/.opencv_done
+
+endif
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..170a999
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# lib-opencv
+
+## Features provided by this micro-library
+
+OpenCV (Open Source Computer Vision Library) is an open-source, cross-platform computer vision and machine learning library. It implements many common algorithms for image processing and computer vision and has become one of the most widely used research tools in the field.
+
+For the full feature set supported by opencv, see the official repository: https://github.com/opencv/opencv
+
+This repository ports opencv to TenonOS as a micro-library. The directory layout is as follows:
+
+```powershell
+.
+├── Config.uk ------ micro-library configuration
+├── Makefile.uk ------ micro-library build rules
+├── generated ------ files produced by building opencv from source
+├── importfix ------ Python wrappers for the C/C++ modules
+└── patches ------ patches applied to the opencv sources
+```
+
+The core, imgproc, imgcodecs, videoio and highgui modules are currently supported; the Python API is also available.
+
+### About generated
+
+Cross-compile opencv with cmake or cmake-gui on a development machine, or build opencv natively on an arm host, then copy the generated content out of the build directory.
+Because opencv pulls in many modules and third-party dependencies, the set of modules to build must be specified at cmake time; currently only the core, imgproc, imgcodecs, videoio, highgui and python binding modules are selected, with png and jpg as the only enabled image formats (a configuration sketch follows below).
+
+### About importfix
+
+The file corresponds to _PyImport_Inittab in modules_config.c; the listed entries depend on the .so files shipped in the package that pip downloads.
+
+The file contents follow the approach used in lib-python_numpy.
+
+## Build and configuration
+
+The opencv micro-library does not depend on any other micro-library and is disabled by default.
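The generated/ tree described in the README can be reproduced with a cmake configuration along these lines (a minimal sketch, assuming an aarch64 GNU cross toolchain and OpenCV 4.5.x option names; the source and build paths are placeholders, and a cross build of the python bindings additionally needs the usual PYTHON3_* interpreter hints):

```shell
# Configure a minimal OpenCV 4.5.4 build matching the module list above.
# BUILD_LIST restricts the build to the named modules; CPU_BASELINE plus an
# empty CPU_DISPATCH mirror the generated cv_cpu_config.h (NEON+FP16 baseline,
# no dispatched variants); BUILD_JPEG/BUILD_PNG select the bundled 3rdparty codecs.
cmake -S opencv-4.5.4 -B build \
      -DCMAKE_TOOLCHAIN_FILE="$PWD/opencv-4.5.4/platforms/linux/aarch64-gnu.toolchain.cmake" \
      -DBUILD_LIST=core,imgproc,imgcodecs,videoio,highgui,python3 \
      -DBUILD_JPEG=ON -DBUILD_PNG=ON -DWITH_OPENJPEG=ON \
      -DWITH_TIFF=OFF -DWITH_WEBP=OFF -DWITH_OPENEXR=OFF \
      -DCPU_BASELINE=NEON,FP16 -DCPU_DISPATCH=
cmake --build build
# Copy the generated files (cvconfig.h, cv_cpu_config.h, *_declarations.hpp,
# opencl_kernels_*.cpp, version_string.inc, ...) from build/ into generated/.
```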
diff --git a/generated/3rdparty/openjpeg/openjp2/opj_config.h b/generated/3rdparty/openjpeg/openjp2/opj_config.h
new file mode 100644
index 0000000..016ceab
--- /dev/null
+++ b/generated/3rdparty/openjpeg/openjp2/opj_config.h
@@ -0,0 +1,10 @@
+/* create opj_config.h for CMake */
+#define OPJ_HAVE_STDINT_H 1
+
+/*--------------------------------------------------------------------------*/
+/* OpenJPEG Versioning */
+
+/* Version number. */
+#define OPJ_VERSION_MAJOR 2
+#define OPJ_VERSION_MINOR 4
+#define OPJ_VERSION_BUILD 0
diff --git a/generated/3rdparty/openjpeg/openjp2/opj_config_private.h b/generated/3rdparty/openjpeg/openjp2/opj_config_private.h
new file mode 100644
index 0000000..a5640b3
--- /dev/null
+++ b/generated/3rdparty/openjpeg/openjp2/opj_config_private.h
@@ -0,0 +1,49 @@
+/* create opj_config_private.h for CMake */
+#define OPJ_HAVE_INTTYPES_H 1
+
+#define OPJ_PACKAGE_VERSION "2.4.0"
+
+/* Not used by openjp2*/
+/*#define HAVE_MEMORY_H 1*/
+/*#define HAVE_STDLIB_H 1*/
+/*#define HAVE_STRINGS_H 1*/
+/*#define HAVE_STRING_H 1*/
+/*#define HAVE_SYS_STAT_H 1*/
+/*#define HAVE_SYS_TYPES_H 1 */
+/*#define HAVE_UNISTD_H 1*/
+
+/* #undef _LARGEFILE_SOURCE */
+/* #undef _LARGE_FILES */
+/* #undef _FILE_OFFSET_BITS */
+/* #undef OPJ_HAVE_FSEEKO */
+
+/* find whether or not have <malloc.h> */
+#define OPJ_HAVE_MALLOC_H
+/* check if function `aligned_alloc` exists */
+/* #undef OPJ_HAVE_ALIGNED_ALLOC */
+/* check if function `_aligned_malloc` exists */
+/* #undef OPJ_HAVE__ALIGNED_MALLOC */
+/* check if function `memalign` exists */
+#define OPJ_HAVE_MEMALIGN
+/* check if function `posix_memalign` exists */
+#define OPJ_HAVE_POSIX_MEMALIGN
+
+#if !defined(_POSIX_C_SOURCE)
+#if defined(OPJ_HAVE_FSEEKO) || defined(OPJ_HAVE_POSIX_MEMALIGN)
+/* Get declarations of fseeko, ftello, posix_memalign. */
+#define _POSIX_C_SOURCE 200112L
+#endif
+#endif
+
+/* Byte order. */
+/* All compilers that support Mac OS X define either __BIG_ENDIAN__ or
+__LITTLE_ENDIAN__ to match the endianness of the architecture being
+compiled for. This is not necessarily the same as the architecture of the
+machine doing the building. In order to support Universal Binaries on
+Mac OS X, we prefer those defines to decide the endianness.
+On other platforms we use the result of the TRY_RUN. */
+#if !defined(__APPLE__)
+/* #undef OPJ_BIG_ENDIAN */
+#elif defined(__BIG_ENDIAN__)
+# define OPJ_BIG_ENDIAN
+#endif
diff --git a/generated/custom_hal.hpp b/generated/custom_hal.hpp
new file mode 100644
index 0000000..e8f27f6
--- /dev/null
+++ b/generated/custom_hal.hpp
@@ -0,0 +1,6 @@
+#ifndef _CUSTOM_HAL_INCLUDED_
+#define _CUSTOM_HAL_INCLUDED_
+
+
+
+#endif
diff --git a/generated/cv_cpu_config.h b/generated/cv_cpu_config.h
new file mode 100644
index 0000000..7518d61
--- /dev/null
+++ b/generated/cv_cpu_config.h
@@ -0,0 +1,19 @@
+// OpenCV CPU baseline features
+
+#define CV_CPU_COMPILE_NEON 1
+#define CV_CPU_BASELINE_COMPILE_NEON 1
+
+#define CV_CPU_COMPILE_FP16 1
+#define CV_CPU_BASELINE_COMPILE_FP16 1
+
+#define CV_CPU_BASELINE_FEATURES 0 \
+    , CV_CPU_NEON \
+    , CV_CPU_FP16 \
+
+
+// OpenCV supported CPU dispatched features
+
+
+
+#define CV_CPU_DISPATCH_FEATURES 0 \
+
diff --git a/generated/cvconfig.h b/generated/cvconfig.h
new file mode 100644
index 0000000..6e37976
--- /dev/null
+++ b/generated/cvconfig.h
@@ -0,0 +1,149 @@
+#ifndef OPENCV_CVCONFIG_H_INCLUDED
+#define OPENCV_CVCONFIG_H_INCLUDED
+
+/* OpenCV compiled as static or dynamic libs */
+#define BUILD_SHARED_LIBS
+
+/* OpenCV intrinsics optimized code */
+#define CV_ENABLE_INTRINSICS
+
+/* OpenCV additional optimized code */
+/* #undef CV_DISABLE_OPTIMIZATION */
+
+/* Compile for 'real' NVIDIA GPU architectures */
+#define CUDA_ARCH_BIN ""
+
+/* NVIDIA GPU features are used */
+#define CUDA_ARCH_FEATURES ""
+
+/* Compile for 'virtual' NVIDIA PTX architectures */
+#define CUDA_ARCH_PTX ""
+
+/* AMD's Basic Linear Algebra Subprograms Library*/
+/* #undef HAVE_CLAMDBLAS */
+
+/* AMD's OpenCL Fast Fourier Transform Library*/
+/* #undef HAVE_CLAMDFFT */
+
+/* Clp support */
+/* #undef HAVE_CLP */
+
+/* NVIDIA CUDA Runtime API*/
+/* #undef HAVE_CUDA */
+
+/* NVIDIA CUDA Basic Linear Algebra Subprograms (BLAS) API*/
+/* #undef HAVE_CUBLAS */
+
+/* NVIDIA CUDA Deep Neural Network (cuDNN) API*/
+/* #undef HAVE_CUDNN */
+
+/* NVIDIA CUDA Fast Fourier Transform (FFT) API*/
+/* #undef HAVE_CUFFT */
+
+/* DirectX */
+/* #undef HAVE_DIRECTX */
+/* #undef HAVE_DIRECTX_NV12 */
+/* #undef HAVE_D3D11 */
+/* #undef HAVE_D3D10 */
+/* #undef HAVE_D3D9 */
+
+/* Eigen Matrix & Linear Algebra Library */
+/* #undef HAVE_EIGEN */
+
+/* Geospatial Data Abstraction Library */
+/* #undef HAVE_GDAL */
+
+/* Halide support */
+/* #undef HAVE_HALIDE */
+
+/* Vulkan support */
+/* #undef HAVE_VULKAN */
+
+/* Define to 1 if you have the <inttypes.h> header file.
*/ +#define HAVE_INTTYPES_H 1 + +/* Intel Integrated Performance Primitives */ +/* #undef HAVE_IPP */ +/* #undef HAVE_IPP_ICV */ +/* #undef HAVE_IPP_IW */ +/* #undef HAVE_IPP_IW_LL */ + +/* JPEG-2000 codec */ +#define HAVE_OPENJPEG +/* #undef HAVE_JASPER */ + +/* IJG JPEG codec */ +#define HAVE_JPEG + +/* libpng/png.h needs to be included */ +/* #undef HAVE_LIBPNG_PNG_H */ + +/* GDCM DICOM codec */ +/* #undef HAVE_GDCM */ + +/* NVIDIA Video Decoding API*/ +/* #undef HAVE_NVCUVID */ +/* #undef HAVE_NVCUVID_HEADER */ +/* #undef HAVE_DYNLINK_NVCUVID_HEADER */ + +/* NVIDIA Video Encoding API*/ +/* #undef HAVE_NVCUVENC */ + +/* OpenCL Support */ +#define HAVE_OPENCL +/* #undef HAVE_OPENCL_STATIC */ +/* #undef HAVE_OPENCL_SVM */ + +/* NVIDIA OpenCL D3D Extensions support */ +/* #undef HAVE_OPENCL_D3D11_NV */ + +/* OpenEXR codec */ +/* #undef HAVE_OPENEXR */ + +/* OpenGL support*/ +/* #undef HAVE_OPENGL */ + +/* PNG codec */ +#define HAVE_PNG + +/* Posix threads (pthreads) */ +#define HAVE_PTHREAD + +/* parallel_for with pthreads */ +#define HAVE_PTHREADS_PF + +/* Intel Threading Building Blocks */ +/* #undef HAVE_TBB */ + +/* Ste||ar Group High Performance ParallelX */ +/* #undef HAVE_HPX */ + +/* TIFF codec */ +/* #undef HAVE_TIFF */ + +/* Define if your processor stores words with the most significant byte + first (like Motorola and SPARC, unlike Intel and VAX). */ +/* #undef WORDS_BIGENDIAN */ + +/* VA library (libva) */ +/* #undef HAVE_VA */ + +/* Intel VA-API/OpenCL */ +/* #undef HAVE_VA_INTEL */ + +/* Lapack */ +/* #undef HAVE_LAPACK */ + +/* Library was compiled with functions instrumentation */ +/* #undef ENABLE_INSTRUMENTATION */ + +/* OpenVX */ +/* #undef HAVE_OPENVX */ + +/* OpenCV trace utilities */ +#define OPENCV_TRACE + +/* Library QR-code decoding */ +#define HAVE_QUIRC + +#endif // OPENCV_CVCONFIG_H_INCLUDED diff --git a/generated/modules/core/arithm.simd_declarations.hpp b/generated/modules/core/arithm.simd_declarations.hpp new file mode 100644 index 0000000..d9b3892 --- /dev/null +++ b/generated/modules/core/arithm.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/arithm.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/convert.simd_declarations.hpp b/generated/modules/core/convert.simd_declarations.hpp new file mode 100644 index 0000000..1e01284 --- /dev/null +++ b/generated/modules/core/convert.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/convert.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/convert_scale.simd_declarations.hpp b/generated/modules/core/convert_scale.simd_declarations.hpp new file mode 100644 index 0000000..97828a5 --- /dev/null +++ b/generated/modules/core/convert_scale.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/convert_scale.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/count_non_zero.simd_declarations.hpp b/generated/modules/core/count_non_zero.simd_declarations.hpp new file mode 100644 index 0000000..1f1b9c8 --- /dev/null +++ b/generated/modules/core/count_non_zero.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/count_non_zero.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git 
a/generated/modules/core/mathfuncs_core.simd_declarations.hpp b/generated/modules/core/mathfuncs_core.simd_declarations.hpp new file mode 100644 index 0000000..4d74c18 --- /dev/null +++ b/generated/modules/core/mathfuncs_core.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/mathfuncs_core.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/matmul.simd_declarations.hpp b/generated/modules/core/matmul.simd_declarations.hpp new file mode 100644 index 0000000..01410a4 --- /dev/null +++ b/generated/modules/core/matmul.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/matmul.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/mean.simd_declarations.hpp b/generated/modules/core/mean.simd_declarations.hpp new file mode 100644 index 0000000..4ea8502 --- /dev/null +++ b/generated/modules/core/mean.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/mean.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/merge.simd_declarations.hpp b/generated/modules/core/merge.simd_declarations.hpp new file mode 100644 index 0000000..5d47ccb --- /dev/null +++ b/generated/modules/core/merge.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/merge.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/opencl_kernels_core.cpp b/generated/modules/core/opencl_kernels_core.cpp new file mode 100644 index 0000000..224f4cb --- /dev/null +++ b/generated/modules/core/opencl_kernels_core.cpp @@ -0,0 +1,4456 @@ +// This file is auto-generated. Do not edit! 
+ +#include "opencv2/core.hpp" +#include "cvconfig.h" +#include "opencl_kernels_core.hpp" + +#ifdef HAVE_OPENCL + +namespace cv +{ +namespace ocl +{ +namespace core +{ + +static const char* const moduleName = "core"; + +struct cv::ocl::internal::ProgramEntry arithm_oclsrc={moduleName, "arithm", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined cl_khr_fp64\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#ifdef INTEL_DEVICE\n" +"#pragma OPENCL FP_CONTRACT ON\n" +"#pragma OPENCL FP_FAST_FMAF ON\n" +"#pragma OPENCL FP_FAST_FMA ON\n" +"#endif\n" +"#if !defined(DEPTH_dst)\n" +"#error \"Kernel configuration error: DEPTH_dst value is required\"\n" +"#elif !(DEPTH_dst >= 0 && DEPTH_dst <= 7)\n" +"#error \"Kernel configuration error: invalid DEPTH_dst value\"\n" +"#endif\n" +"#if defined(depth)\n" +"#error \"Kernel configuration error: ambiguous 'depth' value is defined, use 'DEPTH_dst' instead\"\n" +"#endif\n" +"#if DEPTH_dst < 5 \n" +"#define CV_DST_TYPE_IS_INTEGER\n" +"#else\n" +"#define CV_DST_TYPE_IS_FP\n" +"#endif\n" +"#if DEPTH_dst != 6 \n" +"#define CV_DST_TYPE_FIT_32F 1\n" +"#else\n" +"#define CV_DST_TYPE_FIT_32F 0\n" +"#endif\n" +"#if CV_DST_TYPE_FIT_32F\n" +"#define CV_PI M_PI_F\n" +"#else\n" +"#define CV_PI M_PI\n" +"#endif\n" +"#ifndef cn\n" +"#define cn 1\n" +"#endif\n" +"#if cn == 1\n" +"#undef srcT1_C1\n" +"#undef srcT2_C1\n" +"#undef dstT_C1\n" +"#define srcT1_C1 srcT1\n" +"#define srcT2_C1 srcT2\n" +"#define dstT_C1 dstT\n" +"#endif\n" +"#if cn != 3\n" +"#define storedst(val) *(__global dstT *)(dstptr + dst_index) = val\n" +"#define storedst2(val) *(__global dstT *)(dstptr2 + dst_index2) = val\n" +"#else\n" +"#define storedst(val) vstore3(val, 0, (__global dstT_C1 *)(dstptr + dst_index))\n" +"#define storedst2(val) vstore3(val, 0, (__global dstT_C1 *)(dstptr2 + dst_index2))\n" +"#endif\n" +"#define noconvert\n" +"#ifndef workT\n" +"#ifndef srcT1\n" +"#define srcT1 dstT\n" +"#endif\n" +"#ifndef srcT1_C1\n" +"#define srcT1_C1 dstT_C1\n" +"#endif\n" +"#ifndef srcT2\n" +"#define srcT2 dstT\n" +"#endif\n" +"#ifndef srcT2_C1\n" +"#define srcT2_C1 dstT_C1\n" +"#endif\n" +"#define workT dstT\n" +"#if cn != 3\n" +"#define srcelem1 *(__global srcT1 *)(srcptr1 + src1_index)\n" +"#define srcelem2 *(__global srcT2 *)(srcptr2 + src2_index)\n" +"#else\n" +"#define srcelem1 vload3(0, (__global srcT1_C1 *)(srcptr1 + src1_index))\n" +"#define srcelem2 vload3(0, (__global srcT2_C1 *)(srcptr2 + src2_index))\n" +"#endif\n" +"#ifndef convertToDT\n" +"#define convertToDT noconvert\n" +"#endif\n" +"#else\n" +"#ifndef convertToWT2\n" +"#define convertToWT2 convertToWT1\n" +"#endif\n" +"#if cn != 3\n" +"#define srcelem1 convertToWT1(*(__global srcT1 *)(srcptr1 + src1_index))\n" +"#define srcelem2 convertToWT2(*(__global srcT2 *)(srcptr2 + src2_index))\n" +"#else\n" +"#define srcelem1 convertToWT1(vload3(0, (__global srcT1_C1 *)(srcptr1 + src1_index)))\n" +"#define srcelem2 convertToWT2(vload3(0, (__global srcT2_C1 *)(srcptr2 + src2_index)))\n" +"#endif\n" +"#endif\n" +"#ifndef workST\n" +"#define workST workT\n" +"#endif\n" +"#define EXTRA_PARAMS\n" +"#define EXTRA_INDEX\n" +"#define EXTRA_INDEX_ADD\n" +"#if defined OP_ADD\n" +"#define PROCESS_ELEM storedst(convertToDT(srcelem1 + srcelem2))\n" +"#elif defined OP_SUB\n" +"#define PROCESS_ELEM storedst(convertToDT(srcelem1 - srcelem2))\n" +"#elif defined OP_RSUB\n" +"#define PROCESS_ELEM storedst(convertToDT(srcelem2 - srcelem1))\n" +"#elif defined OP_ABSDIFF\n" 
+"#if wdepth <= 4\n" +"#define PROCESS_ELEM \\\n" +"storedst(convertToDT(convertFromU(abs_diff(srcelem1, srcelem2))))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"storedst(convertToDT(fabs(srcelem1 - srcelem2)))\n" +"#endif\n" +"#elif defined OP_AND\n" +"#define PROCESS_ELEM storedst(srcelem1 & srcelem2)\n" +"#elif defined OP_OR\n" +"#define PROCESS_ELEM storedst(srcelem1 | srcelem2)\n" +"#elif defined OP_XOR\n" +"#define PROCESS_ELEM storedst(srcelem1 ^ srcelem2)\n" +"#elif defined OP_NOT\n" +"#define PROCESS_ELEM storedst(~srcelem1)\n" +"#elif defined OP_MIN\n" +"#define PROCESS_ELEM storedst(min(srcelem1, srcelem2))\n" +"#elif defined OP_MAX\n" +"#define PROCESS_ELEM storedst(max(srcelem1, srcelem2))\n" +"#elif defined OP_MUL\n" +"#define PROCESS_ELEM storedst(convertToDT(srcelem1 * srcelem2))\n" +"#elif defined OP_MUL_SCALE\n" +"#undef EXTRA_PARAMS\n" +"#ifdef UNARY_OP\n" +"#define EXTRA_PARAMS , workST srcelem2_, scaleT scale\n" +"#undef srcelem2\n" +"#define srcelem2 srcelem2_\n" +"#else\n" +"#define EXTRA_PARAMS , scaleT scale\n" +"#endif\n" +"#define PROCESS_ELEM storedst(convertToDT(srcelem1 * scale * srcelem2))\n" +"#elif defined OP_DIV\n" +"#ifdef CV_DST_TYPE_IS_INTEGER\n" +"#define PROCESS_ELEM \\\n" +"workT e2 = srcelem2, zero = (workT)(0); \\\n" +"storedst(convertToDT(e2 != zero ? srcelem1 / e2 : zero))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"workT e2 = srcelem2; \\\n" +"storedst(convertToDT(srcelem1 / e2))\n" +"#endif\n" +"#elif defined OP_DIV_SCALE\n" +"#undef EXTRA_PARAMS\n" +"#ifdef UNARY_OP\n" +"#define EXTRA_PARAMS , workST srcelem2_, scaleT scale\n" +"#undef srcelem2\n" +"#define srcelem2 srcelem2_\n" +"#else\n" +"#define EXTRA_PARAMS , scaleT scale\n" +"#endif\n" +"#ifdef CV_DST_TYPE_IS_INTEGER\n" +"#define PROCESS_ELEM \\\n" +"workT e2 = srcelem2, zero = (workT)(0); \\\n" +"storedst(convertToDT(e2 == zero ? zero : (srcelem1 * (workT)(scale) / e2)))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"workT e2 = srcelem2; \\\n" +"storedst(convertToDT(srcelem1 * (workT)(scale) / e2))\n" +"#endif\n" +"#elif defined OP_RDIV_SCALE\n" +"#undef EXTRA_PARAMS\n" +"#ifdef UNARY_OP\n" +"#define EXTRA_PARAMS , workST srcelem2_, scaleT scale\n" +"#undef srcelem2\n" +"#define srcelem2 srcelem2_\n" +"#else\n" +"#define EXTRA_PARAMS , scaleT scale\n" +"#endif\n" +"#ifdef CV_DST_TYPE_IS_INTEGER\n" +"#define PROCESS_ELEM \\\n" +"workT e1 = srcelem1, zero = (workT)(0); \\\n" +"storedst(convertToDT(e1 == zero ? zero : (srcelem2 * (workT)(scale) / e1)))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"workT e1 = srcelem1; \\\n" +"storedst(convertToDT(srcelem2 * (workT)(scale) / e1))\n" +"#endif\n" +"#elif defined OP_RECIP_SCALE\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , scaleT scale\n" +"#ifdef CV_DST_TYPE_IS_INTEGER\n" +"#define PROCESS_ELEM \\\n" +"workT e1 = srcelem1, zero = (workT)(0); \\\n" +"storedst(convertToDT(e1 != zero ? 
scale / e1 : zero))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"workT e1 = srcelem1; \\\n" +"storedst(convertToDT(scale / e1))\n" +"#endif\n" +"#elif defined OP_ADDW\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , scaleT alpha, scaleT beta, scaleT gamma\n" +"#if wdepth <= 4\n" +"#define PROCESS_ELEM storedst(convertToDT(mad24(srcelem1, alpha, mad24(srcelem2, beta, gamma))))\n" +"#else\n" +"#define PROCESS_ELEM storedst(convertToDT(fma(srcelem1, alpha, fma(srcelem2, beta, gamma))))\n" +"#endif\n" +"#elif defined OP_MAG\n" +"#define PROCESS_ELEM storedst(hypot(srcelem1, srcelem2))\n" +"#elif defined OP_PHASE_RADIANS\n" +"#define PROCESS_ELEM \\\n" +"workT tmp = atan2(srcelem2, srcelem1); \\\n" +"if (tmp < 0) \\\n" +"tmp += 2 * CV_PI; \\\n" +"storedst(tmp)\n" +"#elif defined OP_PHASE_DEGREES\n" +"#define PROCESS_ELEM \\\n" +"workT tmp = degrees(atan2(srcelem2, srcelem1)); \\\n" +"if (tmp < 0) \\\n" +"tmp += 360; \\\n" +"storedst(tmp)\n" +"#elif defined OP_EXP\n" +"#if wdepth == 5\n" +"#define PROCESS_ELEM storedst(native_exp(srcelem1))\n" +"#else\n" +"#define PROCESS_ELEM storedst(exp(srcelem1))\n" +"#endif\n" +"#elif defined OP_POW\n" +"#define PROCESS_ELEM storedst(pow(srcelem1, srcelem2))\n" +"#elif defined OP_POWN\n" +"#undef workT\n" +"#define workT int\n" +"#define PROCESS_ELEM storedst(pown(srcelem1, srcelem2))\n" +"#elif defined OP_SQRT\n" +"#if CV_DST_TYPE_FIT_32F\n" +"#define PROCESS_ELEM storedst(native_sqrt(srcelem1))\n" +"#else\n" +"#define PROCESS_ELEM storedst(sqrt(srcelem1))\n" +"#endif\n" +"#elif defined OP_LOG\n" +"#define PROCESS_ELEM \\\n" +"storedst(log(fabs(srcelem1)))\n" +"#elif defined OP_CMP\n" +"#define srcT2 srcT1\n" +"#ifndef convertToWT1\n" +"#define convertToWT1\n" +"#endif\n" +"#define PROCESS_ELEM \\\n" +"storedst(srcelem1 CMP_OPERATOR srcelem2 ? (dstT)(255) : (dstT)(0))\n" +"#elif defined OP_CONVERT_SCALE_ABS\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , workT1 alpha, workT1 beta\n" +"#if wdepth <= 4\n" +"#define PROCESS_ELEM \\\n" +"workT value = mad24(srcelem1, (workT)(alpha), (workT)(beta)); \\\n" +"storedst(convertToDT(abs(value)))\n" +"#else\n" +"#define PROCESS_ELEM \\\n" +"workT value = fma(srcelem1, (workT)(alpha), (workT)(beta)); \\\n" +"storedst(convertToDT(fabs(value)))\n" +"#endif\n" +"#elif defined OP_SCALE_ADD\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , workT1 alpha\n" +"#if wdepth <= 4\n" +"#define PROCESS_ELEM storedst(convertToDT(mad24(srcelem1, (workT)(alpha), srcelem2)))\n" +"#else\n" +"#define PROCESS_ELEM storedst(convertToDT(fma(srcelem1, (workT)(alpha), srcelem2)))\n" +"#endif\n" +"#elif defined OP_CTP_AD || defined OP_CTP_AR\n" +"#if CV_DST_TYPE_FIT_32F\n" +"#define CV_EPSILON FLT_EPSILON\n" +"#else\n" +"#define CV_EPSILON DBL_EPSILON\n" +"#endif\n" +"#ifdef OP_CTP_AD\n" +"#define TO_DEGREE cartToPolar = degrees(cartToPolar);\n" +"#elif defined OP_CTP_AR\n" +"#define TO_DEGREE\n" +"#endif\n" +"#define PROCESS_ELEM \\\n" +"dstT x = srcelem1, y = srcelem2; \\\n" +"dstT x2 = x * x, y2 = y * y; \\\n" +"dstT magnitude = sqrt(x2 + y2); \\\n" +"dstT tmp = y >= 0 ? 0 : CV_PI * 2; \\\n" +"tmp = x < 0 ? CV_PI : tmp; \\\n" +"dstT tmp1 = y >= 0 ? CV_PI * 0.5f : CV_PI * 1.5f; \\\n" +"dstT cartToPolar = y2 <= x2 ? 
x * y / mad((dstT)(0.28f), y2, x2 + CV_EPSILON) + tmp : (tmp1 - x * y / mad((dstT)(0.28f), x2, y2 + CV_EPSILON)); \\\n" +"TO_DEGREE \\\n" +"storedst(magnitude); \\\n" +"storedst2(cartToPolar)\n" +"#elif defined OP_PTC_AD || defined OP_PTC_AR\n" +"#ifdef OP_PTC_AD\n" +"#define FROM_DEGREE y = radians(y)\n" +"#else\n" +"#define FROM_DEGREE\n" +"#endif\n" +"#define PROCESS_ELEM \\\n" +"dstT x = srcelem1, y = srcelem2, cosval; \\\n" +"FROM_DEGREE; \\\n" +"storedst2(sincos(y, &cosval) * x); \\\n" +"storedst(cosval * x);\n" +"#elif defined OP_PATCH_NANS\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , dstT val\n" +"#define PROCESS_ELEM \\\n" +"if (isnan(srcelem1)) \\\n" +"storedst(val)\n" +"#else\n" +"#error \"unknown op type\"\n" +"#endif\n" +"#if defined OP_CTP_AD || defined OP_CTP_AR || defined OP_PTC_AD || defined OP_PTC_AR\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , __global uchar* dstptr2, int dststep2, int dstoffset2\n" +"#undef EXTRA_INDEX\n" +"#define EXTRA_INDEX int dst_index2 = mad24(y0, dststep2, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset2))\n" +"#undef EXTRA_INDEX_ADD\n" +"#define EXTRA_INDEX_ADD dst_index2 += dststep2\n" +"#endif\n" +"#if defined UNARY_OP || defined MASK_UNARY_OP\n" +"#if defined OP_AND || defined OP_OR || defined OP_XOR || defined OP_ADD || defined OP_SAT_ADD || \\\n" +"defined OP_SUB || defined OP_SAT_SUB || defined OP_RSUB || defined OP_SAT_RSUB || \\\n" +"defined OP_ABSDIFF || defined OP_CMP || defined OP_MIN || defined OP_MAX || defined OP_POW || \\\n" +"defined OP_MUL || defined OP_DIV || defined OP_POWN || defined OP_POWR || defined OP_ROOTN\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , workST srcelem2_\n" +"#undef srcelem2\n" +"#define srcelem2 srcelem2_\n" +"#endif\n" +"#if cn == 3\n" +"#undef srcelem2\n" +"#define srcelem2 (workT)(srcelem2_.x, srcelem2_.y, srcelem2_.z)\n" +"#endif\n" +"#endif\n" +"#if defined BINARY_OP\n" +"__kernel void KF(__global const uchar * srcptr1, int srcstep1, int srcoffset1,\n" +"__global const uchar * srcptr2, int srcstep2, int srcoffset2,\n" +"__global uchar * dstptr, int dststep, int dstoffset,\n" +"int rows, int cols EXTRA_PARAMS )\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));\n" +"#if !(defined(OP_RECIP_SCALE) || defined(OP_NOT))\n" +"int src2_index = mad24(y0, srcstep2, mad24(x, (int)sizeof(srcT2_C1) * cn, srcoffset2));\n" +"#endif\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));\n" +"EXTRA_INDEX;\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, dst_index += dststep)\n" +"{\n" +"PROCESS_ELEM;\n" +"#if !(defined(OP_RECIP_SCALE) || defined(OP_NOT))\n" +"src2_index += srcstep2;\n" +"#endif\n" +"EXTRA_INDEX_ADD;\n" +"}\n" +"}\n" +"}\n" +"#elif defined MASK_BINARY_OP\n" +"__kernel void KF(__global const uchar * srcptr1, int srcstep1, int srcoffset1,\n" +"__global const uchar * srcptr2, int srcstep2, int srcoffset2,\n" +"__global const uchar * mask, int maskstep, int maskoffset,\n" +"__global uchar * dstptr, int dststep, int dstoffset,\n" +"int rows, int cols EXTRA_PARAMS )\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int mask_index = mad24(y0, maskstep, x + maskoffset);\n" +"int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));\n" +"int src2_index = mad24(y0, srcstep2, 
mad24(x, (int)sizeof(srcT2_C1) * cn, srcoffset2));\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, src2_index += srcstep2,\n" +"mask_index += maskstep, dst_index += dststep)\n" +"if (mask[mask_index])\n" +"{\n" +"PROCESS_ELEM;\n" +"}\n" +"}\n" +"}\n" +"#elif defined UNARY_OP\n" +"__kernel void KF(__global const uchar * srcptr1, int srcstep1, int srcoffset1,\n" +"__global uchar * dstptr, int dststep, int dstoffset,\n" +"int rows, int cols EXTRA_PARAMS )\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, dst_index += dststep)\n" +"{\n" +"PROCESS_ELEM;\n" +"}\n" +"}\n" +"}\n" +"#elif defined MASK_UNARY_OP\n" +"__kernel void KF(__global const uchar * srcptr1, int srcstep1, int srcoffset1,\n" +"__global const uchar * mask, int maskstep, int maskoffset,\n" +"__global uchar * dstptr, int dststep, int dstoffset,\n" +"int rows, int cols EXTRA_PARAMS )\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int mask_index = mad24(y0, maskstep, x + maskoffset);\n" +"int src1_index = mad24(y0, srcstep1, mad24(x, (int)sizeof(srcT1_C1) * cn, srcoffset1));\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT_C1) * cn, dstoffset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src1_index += srcstep1, mask_index += maskstep, dst_index += dststep)\n" +"if (mask[mask_index])\n" +"{\n" +"PROCESS_ELEM;\n" +"}\n" +"}\n" +"}\n" +"#else\n" +"#error \"Unknown operation type\"\n" +"#endif\n" +, "61e807be22793264eddbe34d6e91e620", NULL}; +struct cv::ocl::internal::ProgramEntry convert_oclsrc={moduleName, "convert", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"__kernel void convertTo(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"#ifndef NO_SCALE\n" +"WT alpha, WT beta,\n" +"#endif\n" +"int rowsPerWI)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"int src_index = mad24(y0, src_step, mad24(x, (int)sizeof(srcT), src_offset));\n" +"int dst_index = mad24(y0, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));\n" +"for (int y = y0, y1 = min(dst_rows, y0 + rowsPerWI); y < y1; ++y, src_index += src_step, dst_index += dst_step)\n" +"{\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index);\n" +"__global dstT * dst = (__global dstT *)(dstptr + dst_index);\n" +"#ifdef NO_SCALE\n" +"dst[0] = convertToDT(src[0]);\n" +"#else\n" +"dst[0] = convertToDT(fma(convertToWT(src[0]), alpha, beta));\n" +"#endif\n" +"}\n" +"}\n" +"}\n" +, "2e3d527cefb9468930dac19ffd9e28bd", NULL}; +struct cv::ocl::internal::ProgramEntry copymakeborder_oclsrc={moduleName, "copymakeborder", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL 
EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = val\n" +"#define TSIZE ((int)sizeof(T))\n" +"#define convertScalar(a) (a)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1*)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#define convertScalar(a) (T)(a.x, a.y, a.z)\n" +"#endif\n" +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, cols) \\\n" +";\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, cols) \\\n" +"x = clamp(x, 0, cols - 1);\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, cols) \\\n" +"{ \\\n" +"if (x < 0) \\\n" +"x -= ((x - cols + 1) / cols) * cols; \\\n" +"if (x >= cols) \\\n" +"x %= cols; \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#ifdef BORDER_REFLECT\n" +"#define DELTA int delta = 0\n" +"#else\n" +"#define DELTA int delta = 1\n" +"#endif\n" +"#define EXTRAPOLATE(x, cols) \\\n" +"{ \\\n" +"DELTA; \\\n" +"if (cols == 1) \\\n" +"x = 0; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if( x < 0 ) \\\n" +"x = -x - 1 + delta; \\\n" +"else \\\n" +"x = cols - 1 - (x - cols) - delta; \\\n" +"} \\\n" +"while (x >= cols || x < 0); \\\n" +"}\n" +"#else\n" +"#error \"No extrapolation method\"\n" +"#endif\n" +"#define NEED_EXTRAPOLATION(x, cols) (x >= cols || x < 0)\n" +"__kernel void copyMakeBorder(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int top, int left, ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"#ifdef BORDER_CONSTANT\n" +"T scalar = convertScalar(nVal);\n" +"#endif\n" +"if (x < dst_cols)\n" +"{\n" +"int src_x = x - left, src_y;\n" +"int dst_index = mad24(y0, dst_step, mad24(x, (int)TSIZE, dst_offset));\n" +"if (NEED_EXTRAPOLATION(src_x, src_cols))\n" +"{\n" +"#ifdef BORDER_CONSTANT\n" +"for (int y = y0, y1 = min(y0 + rowsPerWI, dst_rows); y < y1; ++y, dst_index += dst_step)\n" +"storepix(scalar, dstptr + dst_index);\n" +"return;\n" +"#endif\n" +"EXTRAPOLATE(src_x, src_cols)\n" +"}\n" +"src_x = mad24(src_x, TSIZE, src_offset);\n" +"for (int y = y0, y1 = min(y0 + rowsPerWI, dst_rows); y < y1; ++y, dst_index += dst_step)\n" +"{\n" +"src_y = y - top;\n" +"if (NEED_EXTRAPOLATION(src_y, src_rows))\n" +"{\n" +"EXTRAPOLATE(src_y, src_rows)\n" +"#ifdef BORDER_CONSTANT\n" +"storepix(scalar, dstptr + dst_index);\n" +"continue;\n" +"#endif\n" +"}\n" +"int src_index = mad24(src_y, src_step, src_x);\n" +"storepix(loadpix(srcptr + src_index), dstptr + dst_index);\n" +"}\n" +"}\n" +"}\n" +, "64f03714b8763ec6c2ac2f4b2ad0cf5d", NULL}; +struct cv::ocl::internal::ProgramEntry copyset_oclsrc={moduleName, "copyset", +"#ifdef COPY_TO_MASK\n" +"#define DEFINE_DATA \\\n" +"int src_index = mad24(y, src_step, mad24(x, (int)sizeof(T1) * scn, src_offset)); \\\n" +"int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(T1) * scn, dst_offset)); \\\n" +"\\\n" +"__global const T1 * src = (__global const T1 *)(srcptr + src_index); \\\n" +"__global T1 * dst = (__global T1 *)(dstptr + dst_index)\n" +"__kernel void copyToMask(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * mask, int mask_step, int mask_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int dst_rows, int dst_cols)\n" +"{\n" 
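+/* copyToMask: copy src to dst wherever the mask is non-zero. With a
+ * single-channel mask (mcn == 1) one mask byte gates all scn channels of
+ * the pixel; with mcn == scn each channel is gated by its own mask byte.
+ * When HAVE_DST_UNINIT is defined, presumably indicating a freshly
+ * created destination, masked-out elements are written as zero rather
+ * than left untouched. */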
+"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"mask += mad24(y, mask_step, mad24(x, mcn, mask_offset));\n" +"#if mcn == 1\n" +"if (mask[0])\n" +"{\n" +"DEFINE_DATA;\n" +"#pragma unroll\n" +"for (int c = 0; c < scn; ++c)\n" +"dst[c] = src[c];\n" +"}\n" +"#ifdef HAVE_DST_UNINIT\n" +"else\n" +"{\n" +"DEFINE_DATA;\n" +"#pragma unroll\n" +"for (int c = 0; c < scn; ++c)\n" +"dst[c] = (T1)(0);\n" +"}\n" +"#endif\n" +"#elif scn == mcn\n" +"DEFINE_DATA;\n" +"#pragma unroll\n" +"for (int c = 0; c < scn; ++c)\n" +"if (mask[c])\n" +"dst[c] = src[c];\n" +"#ifdef HAVE_DST_UNINIT\n" +"else\n" +"dst[c] = (T1)(0);\n" +"#endif\n" +"#else\n" +"#error \"(mcn == 1 || mcn == scn) should be true\"\n" +"#endif\n" +"}\n" +"}\n" +"#else\n" +"#ifndef dstST\n" +"#define dstST dstT\n" +"#endif\n" +"#if cn != 3\n" +"#define value value_\n" +"#define storedst(val) *(__global dstT *)(dstptr + dst_index) = val\n" +"#else\n" +"#define value (dstT)(value_.x, value_.y, value_.z)\n" +"#define storedst(val) vstore3(val, 0, (__global dstT1 *)(dstptr + dst_index))\n" +"#endif\n" +"__kernel void setMask(__global const uchar* mask, int maskstep, int maskoffset,\n" +"__global uchar* dstptr, int dststep, int dstoffset,\n" +"int rows, int cols, dstST value_)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int mask_index = mad24(y0, maskstep, x + maskoffset);\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT1) * cn, dstoffset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y)\n" +"{\n" +"if( mask[mask_index] )\n" +"storedst(value);\n" +"mask_index += maskstep;\n" +"dst_index += dststep;\n" +"}\n" +"}\n" +"}\n" +"__kernel void set(__global uchar* dstptr, int dststep, int dstoffset,\n" +"int rows, int cols, dstST value_)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int dst_index = mad24(y0, dststep, mad24(x, (int)sizeof(dstT1) * cn, dstoffset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, dst_index += dststep)\n" +"storedst(value);\n" +"}\n" +"}\n" +"#endif\n" +, "f8f028f1776dc5c98bf03411d3b72318", NULL}; +struct cv::ocl::internal::ProgramEntry cvtclr_dx_oclsrc={moduleName, "cvtclr_dx", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined cl_khr_fp64\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#ifdef INTEL_DEVICE\n" +"#pragma OPENCL FP_CONTRACT ON\n" +"#pragma OPENCL FP_FAST_FMAF ON\n" +"#pragma OPENCL FP_FAST_FMA ON\n" +"#endif\n" +"static\n" +"__constant\n" +"float c_YUV2RGBCoeffs_420[5] =\n" +"{\n" +"1.163999557f,\n" +"2.017999649f,\n" +"-0.390999794f,\n" +"-0.812999725f,\n" +"1.5959997177f\n" +"};\n" +"static const __constant float CV_8U_MAX = 255.0f;\n" +"static const __constant float CV_8U_HALF = 128.0f;\n" +"static const __constant float BT601_BLACK_RANGE = 16.0f;\n" +"static const __constant float CV_8U_SCALE = 1.0f / 255.0f;\n" +"static const __constant float d1 = BT601_BLACK_RANGE / CV_8U_MAX;\n" +"static const __constant float d2 = CV_8U_HALF / CV_8U_MAX;\n" +"#define NCHANNELS 3\n" +"__kernel\n" +"void YUV2BGR_NV12_8u(\n" +"read_only image2d_t imgY,\n" +"read_only image2d_t imgUV,\n" +"__global unsigned char* pBGR,\n" +"int bgrStep,\n" +"int cols,\n" +"int rows)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"int x2 = x*2;\n" +"int y2 = y*2;\n" +"if (x2 + 
1 < cols) {\n" +"if (y2 + 1 < rows) {\n" +"__global uchar *pDstRow1 = pBGR + mad24(y2, bgrStep, mad24(x2, NCHANNELS, 0));\n" +"__global uchar *pDstRow2 = pDstRow1 + bgrStep;\n" +"float4 Y1 = read_imagef(imgY, (int2)(x2 + 0, y2 + 0));\n" +"float4 Y2 = read_imagef(imgY, (int2)(x2 + 1, y2 + 0));\n" +"float4 Y3 = read_imagef(imgY, (int2)(x2 + 0, y2 + 1));\n" +"float4 Y4 = read_imagef(imgY, (int2)(x2 + 1, y2 + 1));\n" +"float4 Y = (float4)(Y1.x, Y2.x, Y3.x, Y4.x);\n" +"float4 UV = read_imagef(imgUV, (int2)(x, y)) - d2;\n" +"__constant float *coeffs = c_YUV2RGBCoeffs_420;\n" +"Y = max(0.f, Y - d1) * coeffs[0];\n" +"float ruv = fma(coeffs[4], UV.y, 0.0f);\n" +"float guv = fma(coeffs[3], UV.y, fma(coeffs[2], UV.x, 0.0f));\n" +"float buv = fma(coeffs[1], UV.x, 0.0f);\n" +"float4 R = (Y + ruv) * CV_8U_MAX;\n" +"float4 G = (Y + guv) * CV_8U_MAX;\n" +"float4 B = (Y + buv) * CV_8U_MAX;\n" +"pDstRow1[0*NCHANNELS + 0] = convert_uchar_sat(B.x);\n" +"pDstRow1[0*NCHANNELS + 1] = convert_uchar_sat(G.x);\n" +"pDstRow1[0*NCHANNELS + 2] = convert_uchar_sat(R.x);\n" +"pDstRow1[1*NCHANNELS + 0] = convert_uchar_sat(B.y);\n" +"pDstRow1[1*NCHANNELS + 1] = convert_uchar_sat(G.y);\n" +"pDstRow1[1*NCHANNELS + 2] = convert_uchar_sat(R.y);\n" +"pDstRow2[0*NCHANNELS + 0] = convert_uchar_sat(B.z);\n" +"pDstRow2[0*NCHANNELS + 1] = convert_uchar_sat(G.z);\n" +"pDstRow2[0*NCHANNELS + 2] = convert_uchar_sat(R.z);\n" +"pDstRow2[1*NCHANNELS + 0] = convert_uchar_sat(B.w);\n" +"pDstRow2[1*NCHANNELS + 1] = convert_uchar_sat(G.w);\n" +"pDstRow2[1*NCHANNELS + 2] = convert_uchar_sat(R.w);\n" +"}\n" +"}\n" +"}\n" +"static\n" +"__constant float c_RGB2YUVCoeffs_420[8] =\n" +"{\n" +"0.256999969f, 0.50399971f, 0.09799957f, -0.1479988098f,\n" +"-0.2909994125f, 0.438999176f, -0.3679990768f, -0.0709991455f\n" +"};\n" +"__kernel\n" +"void BGR2YUV_NV12_8u(\n" +"__global unsigned char* pBGR,\n" +"int bgrStep,\n" +"int cols,\n" +"int rows,\n" +"write_only image2d_t imgY,\n" +"write_only image2d_t imgUV)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"int x2 = x*2;\n" +"int y2 = y*2;\n" +"if (x2 + 1 < cols)\n" +"{\n" +"if (y2 + 1 < rows)\n" +"{\n" +"__global const uchar* pSrcRow1 = pBGR + mad24(y2, bgrStep, mad24(x2, NCHANNELS, 0));\n" +"__global const uchar* pSrcRow2 = pSrcRow1 + bgrStep;\n" +"float4 src_pix1 = convert_float4(vload4(0, pSrcRow1 + 0*NCHANNELS)) * CV_8U_SCALE;\n" +"float4 src_pix2 = convert_float4(vload4(0, pSrcRow1 + 1*NCHANNELS)) * CV_8U_SCALE;\n" +"float4 src_pix3 = convert_float4(vload4(0, pSrcRow2 + 0*NCHANNELS)) * CV_8U_SCALE;\n" +"float4 src_pix4 = convert_float4(vload4(0, pSrcRow2 + 1*NCHANNELS)) * CV_8U_SCALE;\n" +"__constant float* coeffs = c_RGB2YUVCoeffs_420;\n" +"float Y1 = fma(coeffs[0], src_pix1.z, fma(coeffs[1], src_pix1.y, fma(coeffs[2], src_pix1.x, d1)));\n" +"float Y2 = fma(coeffs[0], src_pix2.z, fma(coeffs[1], src_pix2.y, fma(coeffs[2], src_pix2.x, d1)));\n" +"float Y3 = fma(coeffs[0], src_pix3.z, fma(coeffs[1], src_pix3.y, fma(coeffs[2], src_pix3.x, d1)));\n" +"float Y4 = fma(coeffs[0], src_pix4.z, fma(coeffs[1], src_pix4.y, fma(coeffs[2], src_pix4.x, d1)));\n" +"float4 UV;\n" +"UV.x = fma(coeffs[3], src_pix1.z, fma(coeffs[4], src_pix1.y, fma(coeffs[5], src_pix1.x, d2)));\n" +"UV.y = fma(coeffs[5], src_pix1.z, fma(coeffs[6], src_pix1.y, fma(coeffs[7], src_pix1.x, d2)));\n" +"write_imagef(imgY, (int2)(x2+0, y2+0), Y1);\n" +"write_imagef(imgY, (int2)(x2+1, y2+0), Y2);\n" +"write_imagef(imgY, (int2)(x2+0, y2+1), Y3);\n" +"write_imagef(imgY, (int2)(x2+1, y2+1), Y4);\n" 
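+/* NV12 is 4:2:0: one interleaved UV pair is stored per 2x2 block of Y
+ * samples. Note that the UV value written below is derived from the
+ * block's top-left pixel (src_pix1) only, not an average of all four. */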
+"write_imagef(imgUV, (int2)(x, y), UV);\n" +"}\n" +"}\n" +"}\n" +, "6f6a1321f275b328ee827cc87cb40fa1", NULL}; +struct cv::ocl::internal::ProgramEntry fft_oclsrc={moduleName, "fft", +"#define SQRT_2 0.707106781188f\n" +"#define sin_120 0.866025403784f\n" +"#define fft5_2 0.559016994374f\n" +"#define fft5_3 -0.951056516295f\n" +"#define fft5_4 -1.538841768587f\n" +"#define fft5_5 0.363271264002f\n" +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"__attribute__((always_inline))\n" +"CT mul_complex(CT a, CT b) {\n" +"return (CT)(fma(a.x, b.x, -a.y * b.y), fma(a.x, b.y, a.y * b.x));\n" +"}\n" +"__attribute__((always_inline))\n" +"CT twiddle(CT a) {\n" +"return (CT)(a.y, -a.x);\n" +"}\n" +"__attribute__((always_inline))\n" +"void butterfly2(CT a0, CT a1, __local CT* smem, __global const CT* twiddles,\n" +"const int x, const int block_size)\n" +"{\n" +"const int k = x & (block_size - 1);\n" +"a1 = mul_complex(twiddles[k], a1);\n" +"const int dst_ind = (x << 1) - k;\n" +"smem[dst_ind] = a0 + a1;\n" +"smem[dst_ind+block_size] = a0 - a1;\n" +"}\n" +"__attribute__((always_inline))\n" +"void butterfly4(CT a0, CT a1, CT a2, CT a3, __local CT* smem, __global const CT* twiddles,\n" +"const int x, const int block_size)\n" +"{\n" +"const int k = x & (block_size - 1);\n" +"a1 = mul_complex(twiddles[k], a1);\n" +"a2 = mul_complex(twiddles[k + block_size], a2);\n" +"a3 = mul_complex(twiddles[k + 2*block_size], a3);\n" +"const int dst_ind = ((x - k) << 2) + k;\n" +"CT b0 = a0 + a2;\n" +"a2 = a0 - a2;\n" +"CT b1 = a1 + a3;\n" +"a3 = twiddle(a1 - a3);\n" +"smem[dst_ind] = b0 + b1;\n" +"smem[dst_ind + block_size] = a2 + a3;\n" +"smem[dst_ind + 2*block_size] = b0 - b1;\n" +"smem[dst_ind + 3*block_size] = a2 - a3;\n" +"}\n" +"__attribute__((always_inline))\n" +"void butterfly3(CT a0, CT a1, CT a2, __local CT* smem, __global const CT* twiddles,\n" +"const int x, const int block_size)\n" +"{\n" +"const int k = x % block_size;\n" +"a1 = mul_complex(twiddles[k], a1);\n" +"a2 = mul_complex(twiddles[k+block_size], a2);\n" +"const int dst_ind = ((x - k) * 3) + k;\n" +"CT b1 = a1 + a2;\n" +"a2 = twiddle(sin_120*(a1 - a2));\n" +"CT b0 = a0 - (CT)(0.5f)*b1;\n" +"smem[dst_ind] = a0 + b1;\n" +"smem[dst_ind + block_size] = b0 + a2;\n" +"smem[dst_ind + 2*block_size] = b0 - a2;\n" +"}\n" +"__attribute__((always_inline))\n" +"void butterfly5(CT a0, CT a1, CT a2, CT a3, CT a4, __local CT* smem, __global const CT* twiddles,\n" +"const int x, const int block_size)\n" +"{\n" +"const int k = x % block_size;\n" +"a1 = mul_complex(twiddles[k], a1);\n" +"a2 = mul_complex(twiddles[k + block_size], a2);\n" +"a3 = mul_complex(twiddles[k+2*block_size], a3);\n" +"a4 = mul_complex(twiddles[k+3*block_size], a4);\n" +"const int dst_ind = ((x - k) * 5) + k;\n" +"__local CT* dst = smem + dst_ind;\n" +"CT b0, b1, b5;\n" +"b1 = a1 + a4;\n" +"a1 -= a4;\n" +"a4 = a3 + a2;\n" +"a3 -= a2;\n" +"a2 = b1 + a4;\n" +"b0 = a0 - (CT)0.25f * a2;\n" +"b1 = fft5_2 * (b1 - a4);\n" +"a4 = fft5_3 * (CT)(-a1.y - a3.y, a1.x + a3.x);\n" +"b5 = (CT)(a4.x - fft5_5 * a1.y, a4.y + fft5_5 * a1.x);\n" +"a4.x += fft5_4 * a3.y;\n" +"a4.y -= fft5_4 * a3.x;\n" +"a1 = b0 + b1;\n" +"b0 -= b1;\n" +"dst[0] = a0 + a2;\n" +"dst[block_size] = a1 + a4;\n" +"dst[2 * block_size] = b0 + b5;\n" +"dst[3 * block_size] = b0 - b5;\n" +"dst[4 * block_size] = a1 - a4;\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix2(__local 
CT* smem, __global const CT* twiddles, const int x, const int block_size, const int t)\n" +"{\n" +"CT a0, a1;\n" +"if (x < t)\n" +"{\n" +"a0 = smem[x];\n" +"a1 = smem[x+t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < t)\n" +"butterfly2(a0, a1, smem, twiddles, x, block_size);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix2_B2(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/2;\n" +"CT a0, a1, a2, a3;\n" +"if (x1 < t/2)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t];\n" +"a2 = smem[x2]; a3 = smem[x2+t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/2)\n" +"{\n" +"butterfly2(a0, a1, smem, twiddles, x1, block_size);\n" +"butterfly2(a2, a3, smem, twiddles, x2, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix2_B3(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/3;\n" +"const int x3 = x1 + 2*t/3;\n" +"CT a0, a1, a2, a3, a4, a5;\n" +"if (x1 < t/3)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t];\n" +"a2 = smem[x2]; a3 = smem[x2+t];\n" +"a4 = smem[x3]; a5 = smem[x3+t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/3)\n" +"{\n" +"butterfly2(a0, a1, smem, twiddles, x1, block_size);\n" +"butterfly2(a2, a3, smem, twiddles, x2, block_size);\n" +"butterfly2(a4, a5, smem, twiddles, x3, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix2_B4(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int thread_block = t/4;\n" +"const int x2 = x1 + thread_block;\n" +"const int x3 = x1 + 2*thread_block;\n" +"const int x4 = x1 + 3*thread_block;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7;\n" +"if (x1 < t/4)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t];\n" +"a2 = smem[x2]; a3 = smem[x2+t];\n" +"a4 = smem[x3]; a5 = smem[x3+t];\n" +"a6 = smem[x4]; a7 = smem[x4+t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/4)\n" +"{\n" +"butterfly2(a0, a1, smem, twiddles, x1, block_size);\n" +"butterfly2(a2, a3, smem, twiddles, x2, block_size);\n" +"butterfly2(a4, a5, smem, twiddles, x3, block_size);\n" +"butterfly2(a6, a7, smem, twiddles, x4, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix2_B5(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int thread_block = t/5;\n" +"const int x2 = x1 + thread_block;\n" +"const int x3 = x1 + 2*thread_block;\n" +"const int x4 = x1 + 3*thread_block;\n" +"const int x5 = x1 + 4*thread_block;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7, a8, a9;\n" +"if (x1 < t/5)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t];\n" +"a2 = smem[x2]; a3 = smem[x2+t];\n" +"a4 = smem[x3]; a5 = smem[x3+t];\n" +"a6 = smem[x4]; a7 = smem[x4+t];\n" +"a8 = smem[x5]; a9 = smem[x5+t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/5)\n" +"{\n" +"butterfly2(a0, a1, smem, twiddles, x1, block_size);\n" +"butterfly2(a2, a3, smem, twiddles, x2, block_size);\n" +"butterfly2(a4, a5, smem, twiddles, x3, block_size);\n" +"butterfly2(a6, a7, smem, twiddles, x4, block_size);\n" +"butterfly2(a8, a9, smem, twiddles, x5, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix4(__local CT* smem, __global const CT* 
twiddles, const int x, const int block_size, const int t)\n" +"{\n" +"CT a0, a1, a2, a3;\n" +"if (x < t)\n" +"{\n" +"a0 = smem[x]; a1 = smem[x+t]; a2 = smem[x+2*t]; a3 = smem[x+3*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < t)\n" +"butterfly4(a0, a1, a2, a3, smem, twiddles, x, block_size);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix4_B2(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/2;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7;\n" +"if (x1 < t/2)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t];\n" +"a4 = smem[x2]; a5 = smem[x2+t]; a6 = smem[x2+2*t]; a7 = smem[x2+3*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/2)\n" +"{\n" +"butterfly4(a0, a1, a2, a3, smem, twiddles, x1, block_size);\n" +"butterfly4(a4, a5, a6, a7, smem, twiddles, x2, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix4_B3(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/3;\n" +"const int x3 = x2 + t/3;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11;\n" +"if (x1 < t/3)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t];\n" +"a4 = smem[x2]; a5 = smem[x2+t]; a6 = smem[x2+2*t]; a7 = smem[x2+3*t];\n" +"a8 = smem[x3]; a9 = smem[x3+t]; a10 = smem[x3+2*t]; a11 = smem[x3+3*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/3)\n" +"{\n" +"butterfly4(a0, a1, a2, a3, smem, twiddles, x1, block_size);\n" +"butterfly4(a4, a5, a6, a7, smem, twiddles, x2, block_size);\n" +"butterfly4(a8, a9, a10, a11, smem, twiddles, x3, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix8(__local CT* smem, __global const CT* twiddles, const int x, const int block_size, const int t)\n" +"{\n" +"const int k = x % block_size;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7;\n" +"if (x < t)\n" +"{\n" +"int tw_ind = block_size / 8;\n" +"a0 = smem[x];\n" +"a1 = mul_complex(twiddles[k], smem[x + t]);\n" +"a2 = mul_complex(twiddles[k + block_size],smem[x+2*t]);\n" +"a3 = mul_complex(twiddles[k+2*block_size],smem[x+3*t]);\n" +"a4 = mul_complex(twiddles[k+3*block_size],smem[x+4*t]);\n" +"a5 = mul_complex(twiddles[k+4*block_size],smem[x+5*t]);\n" +"a6 = mul_complex(twiddles[k+5*block_size],smem[x+6*t]);\n" +"a7 = mul_complex(twiddles[k+6*block_size],smem[x+7*t]);\n" +"CT b0, b1, b6, b7;\n" +"b0 = a0 + a4;\n" +"a4 = a0 - a4;\n" +"b1 = a1 + a5;\n" +"a5 = a1 - a5;\n" +"a5 = (CT)(SQRT_2) * (CT)(a5.x + a5.y, -a5.x + a5.y);\n" +"b6 = twiddle(a2 - a6);\n" +"a2 = a2 + a6;\n" +"b7 = a3 - a7;\n" +"b7 = (CT)(SQRT_2) * (CT)(-b7.x + b7.y, -b7.x - b7.y);\n" +"a3 = a3 + a7;\n" +"a0 = b0 + a2;\n" +"a2 = b0 - a2;\n" +"a1 = b1 + a3;\n" +"a3 = twiddle(b1 - a3);\n" +"a6 = a4 - b6;\n" +"a4 = a4 + b6;\n" +"a7 = twiddle(a5 - b7);\n" +"a5 = a5 + b7;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < t)\n" +"{\n" +"const int dst_ind = ((x - k) << 3) + k;\n" +"__local CT* dst = smem + dst_ind;\n" +"dst[0] = a0 + a1;\n" +"dst[block_size] = a4 + a5;\n" +"dst[2 * block_size] = a2 + a3;\n" +"dst[3 * block_size] = a6 + a7;\n" +"dst[4 * block_size] = a0 - a1;\n" +"dst[5 * block_size] = a4 - a5;\n" +"dst[6 * block_size] = a2 - a3;\n" +"dst[7 * block_size] = a6 - a7;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" 
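+/* The radix-3 and radix-5 helpers below follow the same shape as the
+ * radix-2/4/8 stages above: load the inputs (strided by t) from local
+ * memory, barrier, run the butterfly, barrier again. For radix-2 the
+ * butterfly reduces to the textbook pair
+ *     smem[d]              = a0 + W*a1
+ *     smem[d + block_size] = a0 - W*a1,  W = twiddles[k], k = x & (block_size-1),
+ * and the _B2/_B3/_B4/_B5 variants are manual unrollings that give each
+ * work-item two to five butterflies per stage. */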
+"__attribute__((always_inline))\n" +"void fft_radix3(__local CT* smem, __global const CT* twiddles, const int x, const int block_size, const int t)\n" +"{\n" +"CT a0, a1, a2;\n" +"if (x < t)\n" +"{\n" +"a0 = smem[x]; a1 = smem[x+t]; a2 = smem[x+2*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < t)\n" +"butterfly3(a0, a1, a2, smem, twiddles, x, block_size);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix3_B2(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/2;\n" +"CT a0, a1, a2, a3, a4, a5;\n" +"if (x1 < t/2)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t];\n" +"a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/2)\n" +"{\n" +"butterfly3(a0, a1, a2, smem, twiddles, x1, block_size);\n" +"butterfly3(a3, a4, a5, smem, twiddles, x2, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix3_B3(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1 + t/3;\n" +"const int x3 = x2 + t/3;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7, a8;\n" +"if (x1 < t/3)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t];\n" +"a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t];\n" +"a6 = smem[x3]; a7 = smem[x3+t]; a8 = smem[x3+2*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/3)\n" +"{\n" +"butterfly3(a0, a1, a2, smem, twiddles, x1, block_size);\n" +"butterfly3(a3, a4, a5, smem, twiddles, x2, block_size);\n" +"butterfly3(a6, a7, a8, smem, twiddles, x3, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix3_B4(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int thread_block = t/4;\n" +"const int x2 = x1 + thread_block;\n" +"const int x3 = x1 + 2*thread_block;\n" +"const int x4 = x1 + 3*thread_block;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11;\n" +"if (x1 < t/4)\n" +"{\n" +"a0 = smem[x1]; a1 = smem[x1+t]; a2 = smem[x1+2*t];\n" +"a3 = smem[x2]; a4 = smem[x2+t]; a5 = smem[x2+2*t];\n" +"a6 = smem[x3]; a7 = smem[x3+t]; a8 = smem[x3+2*t];\n" +"a9 = smem[x4]; a10 = smem[x4+t]; a11 = smem[x4+2*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/4)\n" +"{\n" +"butterfly3(a0, a1, a2, smem, twiddles, x1, block_size);\n" +"butterfly3(a3, a4, a5, smem, twiddles, x2, block_size);\n" +"butterfly3(a6, a7, a8, smem, twiddles, x3, block_size);\n" +"butterfly3(a9, a10, a11, smem, twiddles, x4, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix5(__local CT* smem, __global const CT* twiddles, const int x, const int block_size, const int t)\n" +"{\n" +"const int k = x % block_size;\n" +"CT a0, a1, a2, a3, a4;\n" +"if (x < t)\n" +"{\n" +"a0 = smem[x]; a1 = smem[x + t]; a2 = smem[x+2*t]; a3 = smem[x+3*t]; a4 = smem[x+4*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < t)\n" +"butterfly5(a0, a1, a2, a3, a4, smem, twiddles, x, block_size);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"__attribute__((always_inline))\n" +"void fft_radix5_B2(__local CT* smem, __global const CT* twiddles, const int x1, const int block_size, const int t)\n" +"{\n" +"const int x2 = x1+t/2;\n" +"CT a0, a1, a2, a3, a4, a5, a6, a7, a8, a9;\n" +"if (x1 < t/2)\n" +"{\n" +"a0 = 
smem[x1]; a1 = smem[x1 + t]; a2 = smem[x1+2*t]; a3 = smem[x1+3*t]; a4 = smem[x1+4*t];\n" +"a5 = smem[x2]; a6 = smem[x2 + t]; a7 = smem[x2+2*t]; a8 = smem[x2+3*t]; a9 = smem[x2+4*t];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x1 < t/2)\n" +"{\n" +"butterfly5(a0, a1, a2, a3, a4, smem, twiddles, x1, block_size);\n" +"butterfly5(a5, a6, a7, a8, a9, smem, twiddles, x2, block_size);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"#ifdef DFT_SCALE\n" +"#define SCALE_VAL(x, scale) x*scale\n" +"#else\n" +"#define SCALE_VAL(x, scale) x\n" +"#endif\n" +"__kernel void fft_multi_radix_rows(__global const uchar* src_ptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar* dst_ptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global CT* twiddles_ptr, int twiddles_step, int twiddles_offset, const int t, const int nz)\n" +"{\n" +"const int x = get_global_id(0);\n" +"const int y = get_group_id(1);\n" +"const int block_size = LOCAL_SIZE/kercn;\n" +"__local CT smem[LOCAL_SIZE];\n" +"if (y < nz)\n" +"{\n" +"__global const CT* twiddles = (__global const CT*)(twiddles_ptr + twiddles_offset);\n" +"const int ind = x;\n" +"#ifdef IS_1D\n" +"FT scale = (FT) 1/dst_cols;\n" +"#else\n" +"FT scale = (FT) 1/(dst_cols*dst_rows);\n" +"#endif\n" +"#ifdef COMPLEX_INPUT\n" +"__global const CT* src = (__global const CT*)(src_ptr + mad24(y, src_step, mad24(x, (int)(sizeof(CT)), src_offset)));\n" +"#pragma unroll\n" +"for (int i=0; i= cn && kercn == 4 && depth <= 4 && !defined HAVE_SCALAR\n" +"srcT src1 = *(__global const srcT *)(src1ptr + src1_index);\n" +"srcT src2 = *(__global const srcT *)(src2ptr + src2_index);\n" +"srcT src3 = *(__global const srcT *)(src3ptr + src3_index);\n" +"__global dstT * dst = (__global dstT *)(dstptr + dst_index);\n" +"#if cn == 1\n" +"dst[0] = src2 > src1 || src3 < src1 ? (dstT)(0) : (dstT)(255);\n" +"#elif cn == 2\n" +"dst[0] = (dstT)(src2.xy > src1.xy || src3.xy < src1.xy ||\n" +"src2.zw > src1.zw || src3.zw < src1.zw ? (dstT)(0) : (dstT)(255));\n" +"#elif cn == 4\n" +"dst[0] = (dstT)(src2.x > src1.x || src3.x < src1.x ||\n" +"src2.y > src1.y || src3.y < src1.y ||\n" +"src2.z > src1.z || src3.z < src1.z ||\n" +"src2.w > src1.w ? 
0 : 255);\n" +"#endif\n" +"#else\n" +"__global const srcT1 * src1 = (__global const srcT1 *)(src1ptr + src1_index);\n" +"__global uchar * dst = dstptr + dst_index;\n" +"#ifndef HAVE_SCALAR\n" +"__global const srcT1 * src2 = (__global const srcT1 *)(src2ptr + src2_index);\n" +"__global const srcT1 * src3 = (__global const srcT1 *)(src3ptr + src3_index);\n" +"#endif\n" +"#pragma unroll\n" +"for (int px = 0; px < colsPerWI; ++px, src1 += cn\n" +"#ifndef HAVE_SCALAR\n" +", src2 += cn, src3 += cn\n" +"#endif\n" +")\n" +"{\n" +"dst[px] = 255;\n" +"for (int c = 0; c < cn; ++c)\n" +"if (src2[c] > src1[c] || src3[c] < src1[c])\n" +"{\n" +"dst[px] = 0;\n" +"break;\n" +"}\n" +"}\n" +"#endif\n" +"#ifndef HAVE_SCALAR\n" +"src2_index += src2_step;\n" +"src3_index += src3_step;\n" +"#endif\n" +"}\n" +"}\n" +"}\n" +, "e7220f9dc5b30fc5558622a452890287", NULL}; +struct cv::ocl::internal::ProgramEntry intel_gemm_oclsrc={moduleName, "intel_gemm", +"#if defined(cl_intel_subgroups)\n" +"#pragma OPENCL EXTENSION cl_intel_subgroups : enable\n" +"#endif\n" +"#if defined(cl_intel_subgroups)\n" +"#define VEC_SIZE 4\n" +"#define LWG_HEIGHT 4\n" +"#define TILE_M 8\n" +"#define TILE_K 16\n" +"#define TILE_N 32\n" +"__attribute__((reqd_work_group_size(8, LWG_HEIGHT, 1)))\n" +"__kernel void intelblas_gemm_buffer_NN_sp(\n" +"const __global float *src0, int off0,\n" +"const __global float *src1, int off1,\n" +"__global float *dst, int offd,\n" +"int M,\n" +"int N,\n" +"int K,\n" +"float alpha,\n" +"float beta,\n" +"int ldA,\n" +"int ldB,\n" +"int ldC,\n" +"int start_index,\n" +"int stride)\n" +"{\n" +"const int group_x = get_group_id(0);\n" +"const int group_y = get_group_id(1);\n" +"const int local_x = get_local_id(0);\n" +"const int local_y = get_local_id(1);\n" +"const int global_x = get_global_id(0);\n" +"const int global_y = get_global_id(1);\n" +"float4 brow;\n" +"float2 arow0, arow1, arow2, arow3, arow4, arow5, arow6, arow7;\n" +"__global float *dst_write0 = dst + local_x * VEC_SIZE + ( group_x * TILE_N ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M) * ldC + offd;\n" +"const __global float *src0_read = src0 + local_x * ( TILE_K / 8 ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M ) * ldA + start_index + off0;\n" +"const __global float *src1_read0 = src1 + local_x * VEC_SIZE + ( group_x * TILE_N ) + start_index * ldB + off1;\n" +"float4 dot00 = (start_index != 0) ? vload4(0, dst_write0) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 )) : (float4)(0.0));\n" +"float4 dot01 = (start_index != 0) ? vload4(0, dst_write0 + 1 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 1 * ldC)) : (float4)(0.0));\n" +"float4 dot02 = (start_index != 0) ? vload4(0, dst_write0 + 2 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 2 * ldC)) : (float4)(0.0));\n" +"float4 dot03 = (start_index != 0) ? vload4(0, dst_write0 + 3 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 3 * ldC)) : (float4)(0.0));\n" +"float4 dot04 = (start_index != 0) ? vload4(0, dst_write0 + 4 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 4 * ldC)) : (float4)(0.0));\n" +"float4 dot05 = (start_index != 0) ? vload4(0, dst_write0 + 5 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 5 * ldC)) : (float4)(0.0));\n" +"float4 dot06 = (start_index != 0) ? vload4(0, dst_write0 + 6 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 6 * ldC)) : (float4)(0.0));\n" +"float4 dot07 = (start_index != 0) ? vload4(0, dst_write0 + 7 * ldC) : ((beta != 0.0) ? 
((float)beta * vload4(0, dst_write0 + 7 * ldC)) : (float4)(0.0));\n" +"int end_index = min(start_index + stride, K);\n" +"int w = start_index;\n" +"while( w + TILE_K <= end_index ) {\n" +"arow0 = (float)alpha * vload2(0, src0_read + 0 * ldA);\n" +"arow1 = (float)alpha * vload2(0, src0_read + 1 * ldA);\n" +"arow2 = (float)alpha * vload2(0, src0_read + 2 * ldA);\n" +"arow3 = (float)alpha * vload2(0, src0_read + 3 * ldA);\n" +"arow4 = (float)alpha * vload2(0, src0_read + 4 * ldA);\n" +"arow5 = (float)alpha * vload2(0, src0_read + 5 * ldA);\n" +"arow6 = (float)alpha * vload2(0, src0_read + 6 * ldA);\n" +"arow7 = (float)alpha * vload2(0, src0_read + 7 * ldA);\n" +"#define MM_DOT_PRODUCT(index, suffix) \\\n" +"brow = vload4(0, src1_read0); src1_read0 += ldB; \\\n" +"dot00 = mad((float4)(intel_sub_group_shuffle(arow0.s##suffix,index)),brow,dot00); \\\n" +"dot01 = mad((float4)(intel_sub_group_shuffle(arow1.s##suffix,index)),brow,dot01); \\\n" +"dot02 = mad((float4)(intel_sub_group_shuffle(arow2.s##suffix,index)),brow,dot02); \\\n" +"dot03 = mad((float4)(intel_sub_group_shuffle(arow3.s##suffix,index)),brow,dot03); \\\n" +"dot04 = mad((float4)(intel_sub_group_shuffle(arow4.s##suffix,index)),brow,dot04); \\\n" +"dot05 = mad((float4)(intel_sub_group_shuffle(arow5.s##suffix,index)),brow,dot05); \\\n" +"dot06 = mad((float4)(intel_sub_group_shuffle(arow6.s##suffix,index)),brow,dot06); \\\n" +"dot07 = mad((float4)(intel_sub_group_shuffle(arow7.s##suffix,index)),brow,dot07);\n" +"MM_DOT_PRODUCT(0,0);\n" +"MM_DOT_PRODUCT(0,1);\n" +"MM_DOT_PRODUCT(1,0);\n" +"MM_DOT_PRODUCT(1,1);\n" +"MM_DOT_PRODUCT(2,0);\n" +"MM_DOT_PRODUCT(2,1);\n" +"MM_DOT_PRODUCT(3,0);\n" +"MM_DOT_PRODUCT(3,1);\n" +"MM_DOT_PRODUCT(4,0);\n" +"MM_DOT_PRODUCT(4,1);\n" +"MM_DOT_PRODUCT(5,0);\n" +"MM_DOT_PRODUCT(5,1);\n" +"MM_DOT_PRODUCT(6,0);\n" +"MM_DOT_PRODUCT(6,1);\n" +"MM_DOT_PRODUCT(7,0);\n" +"MM_DOT_PRODUCT(7,1);\n" +"#undef MM_DOT_PRODUCT\n" +"src0_read += TILE_K;\n" +"w += TILE_K;\n" +"}\n" +"vstore4(dot00, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot01, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot02, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot03, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot04, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot05, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot06, 0, dst_write0); dst_write0 += ldC;\n" +"vstore4(dot07, 0, dst_write0); dst_write0 += ldC;\n" +"}\n" +"#undef VEC_SIZE\n" +"#undef LWG_HEIGHT\n" +"#undef TILE_M\n" +"#undef TILE_K\n" +"#undef TILE_N\n" +"#define VEC_SIZE 4\n" +"#define LWG_HEIGHT 4\n" +"#define TILE_M 8\n" +"#define TILE_K 16\n" +"#define TILE_N 32\n" +"__attribute__((reqd_work_group_size(8, LWG_HEIGHT, 1)))\n" +"__kernel void intelblas_gemm_buffer_NN(\n" +"const __global float *src0, int off0,\n" +"const __global float *src1, int off1,\n" +"__global float *dst, int offd,\n" +"int M,\n" +"int N,\n" +"int K,\n" +"float alpha,\n" +"float beta,\n" +"int ldA,\n" +"int ldB,\n" +"int ldC,\n" +"int start_index,\n" +"int stride)\n" +"{\n" +"const int group_x = get_group_id(0);\n" +"const int group_y = get_group_id(1);\n" +"const int local_x = get_local_id(0);\n" +"const int local_y = get_local_id(1);\n" +"const int global_x = get_global_id(0);\n" +"const int global_y = get_global_id(1);\n" +"float4 brow;\n" +"float2 arow0, arow1, arow2, arow3, arow4, arow5, arow6, arow7;\n" +"__global float *dst_write0 = dst + local_x * VEC_SIZE + ( group_x * TILE_N ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M) * ldC + offd;\n" +"const __global float 
*src0_read = src0 + local_x * ( TILE_K / 8 ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M ) * ldA + start_index + off0;\n" +"const __global float *src1_read0 = src1 + local_x * VEC_SIZE + ( group_x * TILE_N ) + start_index * ldB + off1;\n" +"int border = -(group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M);\n" +"int row0 = mad24(global_y, TILE_M, 0) < M ? 0 : border;\n" +"int row1 = mad24(global_y, TILE_M, 1) < M ? 1 : border;\n" +"int row2 = mad24(global_y, TILE_M, 2) < M ? 2 : border;\n" +"int row3 = mad24(global_y, TILE_M, 3) < M ? 3 : border;\n" +"int row4 = mad24(global_y, TILE_M, 4) < M ? 4 : border;\n" +"int row5 = mad24(global_y, TILE_M, 5) < M ? 5 : border;\n" +"int row6 = mad24(global_y, TILE_M, 6) < M ? 6 : border;\n" +"int row7 = mad24(global_y, TILE_M, 7) < M ? 7 : border;\n" +"float4 dot00 = (start_index != 0) ? vload4(0, dst_write0) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 )) : (float4)(0.0));\n" +"float4 dot01 = (start_index != 0) ? vload4(0, dst_write0 + 1 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 1 * ldC)) : (float4)(0.0));\n" +"float4 dot02 = (start_index != 0) ? vload4(0, dst_write0 + 2 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 2 * ldC)) : (float4)(0.0));\n" +"float4 dot03 = (start_index != 0) ? vload4(0, dst_write0 + 3 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 3 * ldC)) : (float4)(0.0));\n" +"float4 dot04 = (start_index != 0) ? vload4(0, dst_write0 + 4 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 4 * ldC)) : (float4)(0.0));\n" +"float4 dot05 = (start_index != 0) ? vload4(0, dst_write0 + 5 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 5 * ldC)) : (float4)(0.0));\n" +"float4 dot06 = (start_index != 0) ? vload4(0, dst_write0 + 6 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 6 * ldC)) : (float4)(0.0));\n" +"float4 dot07 = (start_index != 0) ? vload4(0, dst_write0 + 7 * ldC) : ((beta != 0.0) ? 
((float)beta * vload4(0, dst_write0 + 7 * ldC)) : (float4)(0.0));\n" +"int end_index = min(start_index + stride, K);\n" +"int w = start_index;\n" +"while( w + TILE_K <= end_index ) {\n" +"arow0 = (float)alpha * vload2(0, src0_read + row0 * ldA);\n" +"arow1 = (float)alpha * vload2(0, src0_read + row1 * ldA);\n" +"arow2 = (float)alpha * vload2(0, src0_read + row2 * ldA);\n" +"arow3 = (float)alpha * vload2(0, src0_read + row3 * ldA);\n" +"arow4 = (float)alpha * vload2(0, src0_read + row4 * ldA);\n" +"arow5 = (float)alpha * vload2(0, src0_read + row5 * ldA);\n" +"arow6 = (float)alpha * vload2(0, src0_read + row6 * ldA);\n" +"arow7 = (float)alpha * vload2(0, src0_read + row7 * ldA);\n" +"#define MM_DOT_PRODUCT(index,suffix) \\\n" +"brow = vload4(0, src1_read0); src1_read0 += ldB; \\\n" +"dot00 = mad((float4)(intel_sub_group_shuffle(arow0.s##suffix,index)),brow,dot00); \\\n" +"dot01 = mad((float4)(intel_sub_group_shuffle(arow1.s##suffix,index)),brow,dot01); \\\n" +"dot02 = mad((float4)(intel_sub_group_shuffle(arow2.s##suffix,index)),brow,dot02); \\\n" +"dot03 = mad((float4)(intel_sub_group_shuffle(arow3.s##suffix,index)),brow,dot03); \\\n" +"dot04 = mad((float4)(intel_sub_group_shuffle(arow4.s##suffix,index)),brow,dot04); \\\n" +"dot05 = mad((float4)(intel_sub_group_shuffle(arow5.s##suffix,index)),brow,dot05); \\\n" +"dot06 = mad((float4)(intel_sub_group_shuffle(arow6.s##suffix,index)),brow,dot06); \\\n" +"dot07 = mad((float4)(intel_sub_group_shuffle(arow7.s##suffix,index)),brow,dot07);\n" +"MM_DOT_PRODUCT(0,0);\n" +"MM_DOT_PRODUCT(0,1);\n" +"MM_DOT_PRODUCT(1,0);\n" +"MM_DOT_PRODUCT(1,1);\n" +"MM_DOT_PRODUCT(2,0);\n" +"MM_DOT_PRODUCT(2,1);\n" +"MM_DOT_PRODUCT(3,0);\n" +"MM_DOT_PRODUCT(3,1);\n" +"MM_DOT_PRODUCT(4,0);\n" +"MM_DOT_PRODUCT(4,1);\n" +"MM_DOT_PRODUCT(5,0);\n" +"MM_DOT_PRODUCT(5,1);\n" +"MM_DOT_PRODUCT(6,0);\n" +"MM_DOT_PRODUCT(6,1);\n" +"MM_DOT_PRODUCT(7,0);\n" +"MM_DOT_PRODUCT(7,1);\n" +"#undef MM_DOT_PRODUCT\n" +"src0_read += TILE_K;\n" +"w += TILE_K;\n" +"}\n" +"if(w < end_index) {\n" +"arow0.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row0 * ldA)[0] : 0.0f;\n" +"arow0.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row0 * ldA)[1] : 0.0f;\n" +"arow1.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row1 * ldA)[0] : 0.0f;\n" +"arow1.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row1 * ldA)[1] : 0.0f;\n" +"arow2.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row2 * ldA)[0] : 0.0f;\n" +"arow2.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row2 * ldA)[1] : 0.0f;\n" +"arow3.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row3 * ldA)[0] : 0.0f;\n" +"arow3.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row3 * ldA)[1] : 0.0f;\n" +"arow4.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row4 * ldA)[0] : 0.0f;\n" +"arow4.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row4 * ldA)[1] : 0.0f;\n" +"arow5.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row5 * ldA)[0] : 0.0f;\n" +"arow5.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row5 * ldA)[1] : 0.0f;\n" +"arow6.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row6 * ldA)[0] : 0.0f;\n" +"arow6.y = ((w + local_x * 2 + 1) < K) ? (float)alpha * (src0_read + row6 * ldA)[1] : 0.0f;\n" +"arow7.x = ((w + local_x * 2) < K) ? (float)alpha * (src0_read + row7 * ldA)[0] : 0.0f;\n" +"arow7.y = ((w + local_x * 2 + 1) < K) ? 
(float)alpha * (src0_read + row7 * ldA)[1] : 0.0f;\n" +"#define MM_DOT_PRODUCT(index,suffix) \\\n" +"brow = (w < K) ? vload4(0, src1_read0) : (float)0.0f; src1_read0 += ldB; w++; \\\n" +"dot00 = mad((float4)(intel_sub_group_shuffle( arow0.s##suffix, index )),brow,dot00 ); \\\n" +"dot01 = mad((float4)(intel_sub_group_shuffle( arow1.s##suffix, index )),brow,dot01 ); \\\n" +"dot02 = mad((float4)(intel_sub_group_shuffle( arow2.s##suffix, index )),brow,dot02 ); \\\n" +"dot03 = mad((float4)(intel_sub_group_shuffle( arow3.s##suffix, index )),brow,dot03 ); \\\n" +"dot04 = mad((float4)(intel_sub_group_shuffle( arow4.s##suffix, index )),brow,dot04 ); \\\n" +"dot05 = mad((float4)(intel_sub_group_shuffle( arow5.s##suffix, index )),brow,dot05 ); \\\n" +"dot06 = mad((float4)(intel_sub_group_shuffle( arow6.s##suffix, index )),brow,dot06 ); \\\n" +"dot07 = mad((float4)(intel_sub_group_shuffle( arow7.s##suffix, index )),brow,dot07 );\n" +"MM_DOT_PRODUCT(0,0);\n" +"MM_DOT_PRODUCT(0,1);\n" +"MM_DOT_PRODUCT(1,0);\n" +"MM_DOT_PRODUCT(1,1);\n" +"MM_DOT_PRODUCT(2,0);\n" +"MM_DOT_PRODUCT(2,1);\n" +"MM_DOT_PRODUCT(3,0);\n" +"MM_DOT_PRODUCT(3,1);\n" +"MM_DOT_PRODUCT(4,0);\n" +"MM_DOT_PRODUCT(4,1);\n" +"MM_DOT_PRODUCT(5,0);\n" +"MM_DOT_PRODUCT(5,1);\n" +"MM_DOT_PRODUCT(6,0);\n" +"MM_DOT_PRODUCT(6,1);\n" +"MM_DOT_PRODUCT(7,0);\n" +"MM_DOT_PRODUCT(7,1);\n" +"#undef MM_DOT_PRODUCT\n" +"}\n" +"if(global_x * 4 < N && global_y * 8 < M) {\n" +"if(mad24(global_x, 4, 3) < N) {\n" +"vstore4(dot00, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { vstore4(dot01, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { vstore4(dot02, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore4(dot03, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore4(dot04, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore4(dot05, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore4(dot06, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore4(dot07, 0, dst_write0); }\n" +"} else if(mad24(global_x, 4, 2) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0);\n" +"dst_write0[2] = dot00.z;\n" +"dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) {\n" +"vstore2(dot01.xy, 0, dst_write0);\n" +"dst_write0[2] = dot01.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 2) < M) {\n" +"vstore2(dot02.xy, 0, dst_write0);\n" +"dst_write0[2] = dot02.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 3) < M) {\n" +"vstore2(dot03.xy, 0, dst_write0);\n" +"dst_write0[2] = dot03.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 4) < M) {\n" +"vstore2(dot04.xy, 0, dst_write0);\n" +"dst_write0[2] = dot04.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 5) < M) {\n" +"vstore2(dot05.xy, 0, dst_write0);\n" +"dst_write0[2] = dot05.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 6) < M) {\n" +"vstore2(dot06.xy, 0, dst_write0);\n" +"dst_write0[2] = dot06.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 7) < M) {\n" +"vstore2(dot07.xy, 0, dst_write0);\n" +"dst_write0[2] = dot07.z;\n" +"}\n" +"} else if(mad24(global_x, 4, 1) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { 
vstore2(dot01.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { vstore2(dot02.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore2(dot03.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore2(dot04.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore2(dot05.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore2(dot06.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore2(dot07.xy, 0, dst_write0); }\n" +"} else {\n" +"dst_write0[0] = dot00.x; dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { dst_write0[0] = dot01.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { dst_write0[0] = dot02.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { dst_write0[0] = dot03.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { dst_write0[0] = dot04.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { dst_write0[0] = dot05.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { dst_write0[0] = dot06.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { dst_write0[0] = dot07.x; }\n" +"}\n" +"}\n" +"}\n" +"#undef VEC_SIZE\n" +"#undef LWG_HEIGHT\n" +"#undef TILE_M\n" +"#undef TILE_K\n" +"#undef TILE_N\n" +"#define VEC_SIZE 1\n" +"#define LWG_HEIGHT 16\n" +"#define TILE_M 8\n" +"#define TILE_K 32\n" +"#define TILE_N 8\n" +"#define SLM_BLOCK 512\n" +"__attribute__((reqd_work_group_size(8, LWG_HEIGHT, 1)))\n" +"__kernel void intelblas_gemm_buffer_NT(\n" +"const __global float *src0, int off0,\n" +"const __global float *src1, int off1,\n" +"__global float *dst, int offd,\n" +"int M,\n" +"int N,\n" +"int K,\n" +"float alpha,\n" +"float beta,\n" +"int ldA,\n" +"int ldB,\n" +"int ldC)\n" +"{\n" +"const int group_x = get_group_id(0);\n" +"const int group_y = get_group_id(1);\n" +"const int local_x = get_local_id(0);\n" +"const int local_y = get_local_id(1);\n" +"const int global_x = get_global_id(0);\n" +"const int global_y = get_global_id(1);\n" +"float8 dot00 = 0.f;\n" +"float8 dot01 = 0.f;\n" +"float8 dot02 = 0.f;\n" +"float8 dot03 = 0.f;\n" +"float8 dot04 = 0.f;\n" +"float8 dot05 = 0.f;\n" +"float8 dot06 = 0.f;\n" +"float8 dot07 = 0.f;\n" +"const int dst_row = (global_y * TILE_M);\n" +"__global float *dst_write0 = dst + global_x + dst_row * ldC + offd;\n" +"const __global float *src0_read00 = src0 + off0;\n" +"const int a_row_base = global_y * TILE_M;\n" +"const int a_col_base = local_x * (TILE_K / 8);\n" +"const __global float *src1_read00 = src1 + off1;\n" +"const int b_row_base = (group_x * TILE_N);\n" +"__local float slm_brow[8 * SLM_BLOCK];\n" +"int local_index = mad24(local_y, 8, local_x) * 4;\n" +"int w = 0;\n" +"for (int b_tile = 0; b_tile < K; b_tile += SLM_BLOCK)\n" +"{\n" +"#define UPDATE_BROW(_row) \\\n" +"{ \\\n" +"float4 brow; \\\n" +"int b_row = b_row_base + _row; \\\n" +"int b_col = b_tile + local_index; \\\n" +"if (b_row < N && b_col <= K - 4 ) \\\n" +"brow = vload4(0, src1_read00 + mad24(b_row, ldB, b_col)); \\\n" +"else \\\n" +"brow = (float4)0; \\\n" +"vstore4(brow, 0, slm_brow + mad24(_row, SLM_BLOCK, local_index)); \\\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"UPDATE_BROW(0);\n" +"UPDATE_BROW(1);\n" +"UPDATE_BROW(2);\n" +"UPDATE_BROW(3);\n" 
+"UPDATE_BROW(4);\n" +"UPDATE_BROW(5);\n" +"UPDATE_BROW(6);\n" +"UPDATE_BROW(7);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#undef UPDATE_BROW\n" +"for (int k_tile_offset = 0; k_tile_offset < SLM_BLOCK; k_tile_offset += TILE_K)\n" +"{\n" +"int a_col = a_col_base + b_tile + k_tile_offset;\n" +"if (a_col > K - 4 )\n" +"break;\n" +"int slm_brow_col = a_col_base + k_tile_offset;\n" +"#define READ_SLM_BROW(_row) \\\n" +"float4 brow##_row = vload4(0, slm_brow + mad24(_row, SLM_BLOCK, slm_brow_col));\n" +"READ_SLM_BROW(0);\n" +"READ_SLM_BROW(1);\n" +"READ_SLM_BROW(2);\n" +"READ_SLM_BROW(3);\n" +"READ_SLM_BROW(4);\n" +"READ_SLM_BROW(5);\n" +"READ_SLM_BROW(6);\n" +"READ_SLM_BROW(7);\n" +"#undef READ_SLM_BROW\n" +"#define MM_DOT_PRODUCT(_row,_dot) \\\n" +"{ \\\n" +"int a_row = a_row_base + _row; \\\n" +"if (a_row < M) { \\\n" +"float4 arow = vload4(0, src0_read00 + mad24(a_row, ldA, a_col)); \\\n" +"_dot = mad( (float8)(arow.x), (float8)(brow0.x, brow1.x, brow2.x, brow3.x, brow4.x, brow5.x, brow6.x, brow7.x), _dot ); \\\n" +"_dot = mad( (float8)(arow.y), (float8)(brow0.y, brow1.y, brow2.y, brow3.y, brow4.y, brow5.y, brow6.y, brow7.y), _dot ); \\\n" +"_dot = mad( (float8)(arow.z), (float8)(brow0.z, brow1.z, brow2.z, brow3.z, brow4.z, brow5.z, brow6.z, brow7.z), _dot ); \\\n" +"_dot = mad( (float8)(arow.w), (float8)(brow0.w, brow1.w, brow2.w, brow3.w, brow4.w, brow5.w, brow6.w, brow7.w), _dot ); \\\n" +"} \\\n" +"}\n" +"MM_DOT_PRODUCT(0,dot00);\n" +"MM_DOT_PRODUCT(1,dot01);\n" +"MM_DOT_PRODUCT(2,dot02);\n" +"MM_DOT_PRODUCT(3,dot03);\n" +"MM_DOT_PRODUCT(4,dot04);\n" +"MM_DOT_PRODUCT(5,dot05);\n" +"MM_DOT_PRODUCT(6,dot06);\n" +"MM_DOT_PRODUCT(7,dot07);\n" +"#undef MM_DOT_PRODUCT\n" +"}\n" +"}\n" +"#define REDUCE(_dot) \\\n" +"_dot.s0 = intel_sub_group_shuffle(_dot.s0, 0) + intel_sub_group_shuffle(_dot.s0, 1) + intel_sub_group_shuffle(_dot.s0, 2) + intel_sub_group_shuffle(_dot.s0, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s0, 4) + intel_sub_group_shuffle(_dot.s0, 5) + intel_sub_group_shuffle(_dot.s0, 6) + intel_sub_group_shuffle(_dot.s0, 7); \\\n" +"_dot.s1 = intel_sub_group_shuffle(_dot.s1, 0) + intel_sub_group_shuffle(_dot.s1, 1) + intel_sub_group_shuffle(_dot.s1, 2) + intel_sub_group_shuffle(_dot.s1, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s1, 4) + intel_sub_group_shuffle(_dot.s1, 5) + intel_sub_group_shuffle(_dot.s1, 6) + intel_sub_group_shuffle(_dot.s1, 7); \\\n" +"_dot.s2 = intel_sub_group_shuffle(_dot.s2, 0) + intel_sub_group_shuffle(_dot.s2, 1) + intel_sub_group_shuffle(_dot.s2, 2) + intel_sub_group_shuffle(_dot.s2, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s2, 4) + intel_sub_group_shuffle(_dot.s2, 5) + intel_sub_group_shuffle(_dot.s2, 6) + intel_sub_group_shuffle(_dot.s2, 7); \\\n" +"_dot.s3 = intel_sub_group_shuffle(_dot.s3, 0) + intel_sub_group_shuffle(_dot.s3, 1) + intel_sub_group_shuffle(_dot.s3, 2) + intel_sub_group_shuffle(_dot.s3, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s3, 4) + intel_sub_group_shuffle(_dot.s3, 5) + intel_sub_group_shuffle(_dot.s3, 6) + intel_sub_group_shuffle(_dot.s3, 7); \\\n" +"_dot.s4 = intel_sub_group_shuffle(_dot.s4, 0) + intel_sub_group_shuffle(_dot.s4, 1) + intel_sub_group_shuffle(_dot.s4, 2) + intel_sub_group_shuffle(_dot.s4, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s4, 4) + intel_sub_group_shuffle(_dot.s4, 5) + intel_sub_group_shuffle(_dot.s4, 6) + intel_sub_group_shuffle(_dot.s4, 7); \\\n" +"_dot.s5 = intel_sub_group_shuffle(_dot.s5, 0) + intel_sub_group_shuffle(_dot.s5, 1) + intel_sub_group_shuffle(_dot.s5, 2) + intel_sub_group_shuffle(_dot.s5, 3) + \\\n" 
+"intel_sub_group_shuffle(_dot.s5, 4) + intel_sub_group_shuffle(_dot.s5, 5) + intel_sub_group_shuffle(_dot.s5, 6) + intel_sub_group_shuffle(_dot.s5, 7); \\\n" +"_dot.s6 = intel_sub_group_shuffle(_dot.s6, 0) + intel_sub_group_shuffle(_dot.s6, 1) + intel_sub_group_shuffle(_dot.s6, 2) + intel_sub_group_shuffle(_dot.s6, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s6, 4) + intel_sub_group_shuffle(_dot.s6, 5) + intel_sub_group_shuffle(_dot.s6, 6) + intel_sub_group_shuffle(_dot.s6, 7); \\\n" +"_dot.s7 = intel_sub_group_shuffle(_dot.s7, 0) + intel_sub_group_shuffle(_dot.s7, 1) + intel_sub_group_shuffle(_dot.s7, 2) + intel_sub_group_shuffle(_dot.s7, 3) + \\\n" +"intel_sub_group_shuffle(_dot.s7, 4) + intel_sub_group_shuffle(_dot.s7, 5) + intel_sub_group_shuffle(_dot.s7, 6) + intel_sub_group_shuffle(_dot.s7, 7);\n" +"REDUCE(dot00);\n" +"REDUCE(dot01);\n" +"REDUCE(dot02);\n" +"REDUCE(dot03);\n" +"REDUCE(dot04);\n" +"REDUCE(dot05);\n" +"REDUCE(dot06);\n" +"REDUCE(dot07);\n" +"#undef REDUCE\n" +"float output = 0.0f;\n" +"#define OUTPUT(_dot) \\\n" +"output = (local_x == 0) ? _dot.s0 : output; \\\n" +"output = (local_x == 1) ? _dot.s1 : output; \\\n" +"output = (local_x == 2) ? _dot.s2 : output; \\\n" +"output = (local_x == 3) ? _dot.s3 : output; \\\n" +"output = (local_x == 4) ? _dot.s4 : output; \\\n" +"output = (local_x == 5) ? _dot.s5 : output; \\\n" +"output = (local_x == 6) ? _dot.s6 : output; \\\n" +"output = (local_x == 7) ? _dot.s7 : output; \\\n" +"if (beta != 0.0f) \\\n" +"dst_write0[0] = mad(output, (float)alpha, ((float)beta * dst_write0[0])); \\\n" +"else \\\n" +"dst_write0[0] = output * (float)alpha; \\\n" +"dst_write0 += ldC;\n" +"if (global_x < N && dst_row < M)\n" +"{\n" +"{ OUTPUT(dot00); }\n" +"if (dst_row + 1 < M) { OUTPUT(dot01); }\n" +"if (dst_row + 2 < M) { OUTPUT(dot02); }\n" +"if (dst_row + 3 < M) { OUTPUT(dot03); }\n" +"if (dst_row + 4 < M) { OUTPUT(dot04); }\n" +"if (dst_row + 5 < M) { OUTPUT(dot05); }\n" +"if (dst_row + 6 < M) { OUTPUT(dot06); }\n" +"if (dst_row + 7 < M) { OUTPUT(dot07); }\n" +"}\n" +"#undef OUTPUT\n" +"}\n" +"#undef VEC_SIZE\n" +"#undef LWG_HEIGHT\n" +"#undef TILE_M\n" +"#undef TILE_K\n" +"#undef TILE_N\n" +"#undef SLM_BLOCK\n" +"#define VEC_SIZE 4\n" +"#define LWG_HEIGHT 4\n" +"#define TILE_M 8\n" +"#define TILE_K 16\n" +"#define TILE_N 32\n" +"__attribute__((reqd_work_group_size(8, LWG_HEIGHT, 1)))\n" +"__kernel void intelblas_gemm_buffer_TN(\n" +"const __global float *src0, int off0,\n" +"const __global float *src1, int off1,\n" +"__global float *dst, int offd,\n" +"int M,\n" +"int N,\n" +"int K,\n" +"float alpha,\n" +"float beta,\n" +"int ldA,\n" +"int ldB,\n" +"int ldC,\n" +"int start_index,\n" +"int stride)\n" +"{\n" +"const int group_x = get_group_id(0);\n" +"const int group_y = get_group_id(1);\n" +"const int local_x = get_local_id(0);\n" +"const int local_y = get_local_id(1);\n" +"const int global_x = get_global_id(0);\n" +"const int global_y = get_global_id(1);\n" +"float4 brow;\n" +"__global float *dst_write0 = dst + local_x * VEC_SIZE + ( group_x * TILE_N ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M) * ldC + offd;\n" +"const __global float *src0_read = src0 + (local_x * ( TILE_K / 8 ) + start_index) * ldA + group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M + off0;\n" +"const __global float *src1_read0 = src1 + local_x * VEC_SIZE + ( group_x * TILE_N ) + start_index * ldB + off1;\n" +"float4 dot00 = (start_index != 0) ? vload4(0, dst_write0) : ((beta != 0.0) ? 
((float)beta * vload4(0, dst_write0 )) : (float4)(0.0));\n" +"float4 dot01 = (start_index != 0) ? vload4(0, dst_write0 + 1 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 1 * ldC)) : (float4)(0.0));\n" +"float4 dot02 = (start_index != 0) ? vload4(0, dst_write0 + 2 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 2 * ldC)) : (float4)(0.0));\n" +"float4 dot03 = (start_index != 0) ? vload4(0, dst_write0 + 3 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 3 * ldC)) : (float4)(0.0));\n" +"float4 dot04 = (start_index != 0) ? vload4(0, dst_write0 + 4 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 4 * ldC)) : (float4)(0.0));\n" +"float4 dot05 = (start_index != 0) ? vload4(0, dst_write0 + 5 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 5 * ldC)) : (float4)(0.0));\n" +"float4 dot06 = (start_index != 0) ? vload4(0, dst_write0 + 6 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 6 * ldC)) : (float4)(0.0));\n" +"float4 dot07 = (start_index != 0) ? vload4(0, dst_write0 + 7 * ldC) : ((beta != 0.0) ? ((float)beta * vload4(0, dst_write0 + 7 * ldC)) : (float4)(0.0));\n" +"int end_index = min(start_index + stride, K);\n" +"while( start_index + TILE_K <= end_index ) {\n" +"float8 arow0 = (float)alpha * vload8(0, src0_read);\n" +"float8 arow1 = (float)alpha * vload8(0, src0_read + ldA);\n" +"#define MM_DOT_PRODUCT(_arow,index) \\\n" +"brow = vload4(0, src1_read0); src1_read0 += ldB; \\\n" +"dot00 = mad( (float4)(intel_sub_group_shuffle(_arow.s0, index)), brow, dot00 ); \\\n" +"dot01 = mad( (float4)(intel_sub_group_shuffle(_arow.s1, index)), brow, dot01 ); \\\n" +"dot02 = mad( (float4)(intel_sub_group_shuffle(_arow.s2, index)), brow, dot02 ); \\\n" +"dot03 = mad( (float4)(intel_sub_group_shuffle(_arow.s3, index)), brow, dot03 ); \\\n" +"dot04 = mad( (float4)(intel_sub_group_shuffle(_arow.s4, index)), brow, dot04 ); \\\n" +"dot05 = mad( (float4)(intel_sub_group_shuffle(_arow.s5, index)), brow, dot05 ); \\\n" +"dot06 = mad( (float4)(intel_sub_group_shuffle(_arow.s6, index)), brow, dot06 ); \\\n" +"dot07 = mad( (float4)(intel_sub_group_shuffle(_arow.s7, index)), brow, dot07 );\n" +"MM_DOT_PRODUCT(arow0,0);\n" +"MM_DOT_PRODUCT(arow1,0);\n" +"MM_DOT_PRODUCT(arow0,1);\n" +"MM_DOT_PRODUCT(arow1,1);\n" +"MM_DOT_PRODUCT(arow0,2);\n" +"MM_DOT_PRODUCT(arow1,2);\n" +"MM_DOT_PRODUCT(arow0,3);\n" +"MM_DOT_PRODUCT(arow1,3);\n" +"MM_DOT_PRODUCT(arow0,4);\n" +"MM_DOT_PRODUCT(arow1,4);\n" +"MM_DOT_PRODUCT(arow0,5);\n" +"MM_DOT_PRODUCT(arow1,5);\n" +"MM_DOT_PRODUCT(arow0,6);\n" +"MM_DOT_PRODUCT(arow1,6);\n" +"MM_DOT_PRODUCT(arow0,7);\n" +"MM_DOT_PRODUCT(arow1,7);\n" +"#undef MM_DOT_PRODUCT\n" +"src0_read += TILE_K * ldA;\n" +"start_index += TILE_K;\n" +"}\n" +"if(start_index < end_index) {\n" +"float8 arow0 = ((start_index + local_x * 2) < K) ? ((float)alpha * vload8(0, src0_read)) : (float)0.0f;\n" +"float8 arow1 = ((start_index + local_x * 2 + 1) < K) ? ((float)alpha * vload8(0, src0_read + ldA)) : (float)0.0f;\n" +"#define MM_DOT_PRODUCT(_arow,index) \\\n" +"brow = (start_index < K) ? 
vload4(0, src1_read0) : (float)0.0f; src1_read0 += ldB; start_index++; \\\n" +"dot00 = mad( (float4)(intel_sub_group_shuffle(_arow.s0, index)), brow, dot00 ); \\\n" +"dot01 = mad( (float4)(intel_sub_group_shuffle(_arow.s1, index)), brow, dot01 ); \\\n" +"dot02 = mad( (float4)(intel_sub_group_shuffle(_arow.s2, index)), brow, dot02 ); \\\n" +"dot03 = mad( (float4)(intel_sub_group_shuffle(_arow.s3, index)), brow, dot03 ); \\\n" +"dot04 = mad( (float4)(intel_sub_group_shuffle(_arow.s4, index)), brow, dot04 ); \\\n" +"dot05 = mad( (float4)(intel_sub_group_shuffle(_arow.s5, index)), brow, dot05 ); \\\n" +"dot06 = mad( (float4)(intel_sub_group_shuffle(_arow.s6, index)), brow, dot06 ); \\\n" +"dot07 = mad( (float4)(intel_sub_group_shuffle(_arow.s7, index)), brow, dot07 );\n" +"MM_DOT_PRODUCT(arow0,0);\n" +"MM_DOT_PRODUCT(arow1,0);\n" +"MM_DOT_PRODUCT(arow0,1);\n" +"MM_DOT_PRODUCT(arow1,1);\n" +"MM_DOT_PRODUCT(arow0,2);\n" +"MM_DOT_PRODUCT(arow1,2);\n" +"MM_DOT_PRODUCT(arow0,3);\n" +"MM_DOT_PRODUCT(arow1,3);\n" +"MM_DOT_PRODUCT(arow0,4);\n" +"MM_DOT_PRODUCT(arow1,4);\n" +"MM_DOT_PRODUCT(arow0,5);\n" +"MM_DOT_PRODUCT(arow1,5);\n" +"MM_DOT_PRODUCT(arow0,6);\n" +"MM_DOT_PRODUCT(arow1,6);\n" +"MM_DOT_PRODUCT(arow0,7);\n" +"MM_DOT_PRODUCT(arow1,7);\n" +"#undef MM_DOT_PRODUCT\n" +"}\n" +"if(global_x * 4 < N && global_y * 8 < M) {\n" +"if(mad24(global_x, 4, 3) < N) {\n" +"vstore4(dot00, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { vstore4(dot01, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { vstore4(dot02, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore4(dot03, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore4(dot04, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore4(dot05, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore4(dot06, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore4(dot07, 0, dst_write0); }\n" +"} else if(mad24(global_x, 4, 2) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0);\n" +"dst_write0[2] = dot00.z;\n" +"dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) {\n" +"vstore2(dot01.xy, 0, dst_write0);\n" +"dst_write0[2] = dot01.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 2) < M) {\n" +"vstore2(dot02.xy, 0, dst_write0);\n" +"dst_write0[2] = dot02.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 3) < M) {\n" +"vstore2(dot03.xy, 0, dst_write0);\n" +"dst_write0[2] = dot03.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 4) < M) {\n" +"vstore2(dot04.xy, 0, dst_write0);\n" +"dst_write0[2] = dot04.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 5) < M) {\n" +"vstore2(dot05.xy, 0, dst_write0);\n" +"dst_write0[2] = dot05.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 6) < M) {\n" +"vstore2(dot06.xy, 0, dst_write0);\n" +"dst_write0[2] = dot06.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 7) < M) {\n" +"vstore2(dot07.xy, 0, dst_write0);\n" +"dst_write0[2] = dot07.z;\n" +"}\n" +"} else if(mad24(global_x, 4, 1) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { vstore2(dot01.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" 
+"if(mad24(global_y, 8, 2) < M) { vstore2(dot02.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore2(dot03.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore2(dot04.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore2(dot05.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore2(dot06.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore2(dot07.xy, 0, dst_write0); }\n" +"} else {\n" +"dst_write0[0] = dot00.x; dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { dst_write0[0] = dot01.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { dst_write0[0] = dot02.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { dst_write0[0] = dot03.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { dst_write0[0] = dot04.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { dst_write0[0] = dot05.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { dst_write0[0] = dot06.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { dst_write0[0] = dot07.x; }\n" +"}\n" +"}\n" +"}\n" +"#undef VEC_SIZE\n" +"#undef LWG_HEIGHT\n" +"#undef TILE_M\n" +"#undef TILE_K\n" +"#undef TILE_N\n" +"#define VEC_SIZE 4\n" +"#define LWG_HEIGHT 4\n" +"#define TILE_M 8\n" +"#define TILE_K 16\n" +"#define TILE_N 32\n" +"__attribute__((reqd_work_group_size(8, LWG_HEIGHT, 1)))\n" +"__kernel void intelblas_gemm_buffer_TT(\n" +"const __global float *src0, int off0,\n" +"const __global float *src1, int off1,\n" +"__global float *dst, int offd,\n" +"int M,\n" +"int N,\n" +"int K,\n" +"float alpha,\n" +"float beta,\n" +"int ldA,\n" +"int ldB,\n" +"int ldC,\n" +"int start_index,\n" +"int stride)\n" +"{\n" +"const int group_x = get_group_id(0);\n" +"const int group_y = get_group_id(1);\n" +"const int local_x = get_local_id(0);\n" +"const int local_y = get_local_id(1);\n" +"const int global_x = get_global_id(0);\n" +"const int global_y = get_global_id(1);\n" +"float8 dot0 = 0.f;\n" +"float8 dot1 = 0.f;\n" +"float8 dot2 = 0.f;\n" +"float8 dot3 = 0.f;\n" +"float16 brow0;\n" +"float16 brow1;\n" +"float16 brow2;\n" +"float16 brow3;\n" +"__global float *dst_write0 = dst + local_x * VEC_SIZE + ( group_x * TILE_N ) + ( group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M) * ldC + offd;\n" +"const __global float *src0_read = src0 + (local_x * ( TILE_K / 8 ) + start_index) * ldA + group_y * LWG_HEIGHT * TILE_M + local_y * TILE_M + off0;\n" +"const __global float *src1_read0 = src1 + (local_x * VEC_SIZE + ( group_x * TILE_N )) * ldB + start_index + off1;\n" +"float4 dot00 = (start_index != 0) ? vload4(0, dst_write0) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 )) : (float4)(0.0));\n" +"float4 dot01 = (start_index != 0) ? vload4(0, dst_write0 + ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + ldC )) : (float4)(0.0));\n" +"float4 dot02 = (start_index != 0) ? vload4(0, dst_write0 + 2 * ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + 2 * ldC)) : (float4)(0.0));\n" +"float4 dot03 = (start_index != 0) ? vload4(0, dst_write0 + 3 * ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + 3 * ldC)) : (float4)(0.0));\n" +"float4 dot04 = (start_index != 0) ? vload4(0, dst_write0 + 4 * ldC) : ((beta != 0.0)? 
((float)beta * vload4(0, dst_write0 + 4 * ldC)) : (float4)(0.0));\n" +"float4 dot05 = (start_index != 0) ? vload4(0, dst_write0 + 5 * ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + 5 * ldC)) : (float4)(0.0));\n" +"float4 dot06 = (start_index != 0) ? vload4(0, dst_write0 + 6 * ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + 6 * ldC)) : (float4)(0.0));\n" +"float4 dot07 = (start_index != 0) ? vload4(0, dst_write0 + 7 * ldC) : ((beta != 0.0)? ((float)beta * vload4(0, dst_write0 + 7 * ldC)) : (float4)(0.0));\n" +"int end_index = min(start_index + stride, K);\n" +"while( start_index + TILE_K <= end_index ) {\n" +"brow0 = vload16(0, src1_read0);\n" +"brow1 = vload16(0, src1_read0 + ldB);\n" +"brow2 = vload16(0, src1_read0 + 2 * ldB);\n" +"brow3 = vload16(0, src1_read0 + 3 * ldB);\n" +"float8 arow0 = (float)alpha * vload8(0, src0_read);\n" +"float8 arow1 = (float)alpha * vload8(0, src0_read + ldA);\n" +"#define DOT_PRODUCT( _dot, _arow, index, _brow) \\\n" +"_dot.s0 = mad( intel_sub_group_shuffle( _arow.s0, index ), _brow, _dot.s0 ); \\\n" +"_dot.s1 = mad( intel_sub_group_shuffle( _arow.s1, index ), _brow, _dot.s1 ); \\\n" +"_dot.s2 = mad( intel_sub_group_shuffle( _arow.s2, index ), _brow, _dot.s2 ); \\\n" +"_dot.s3 = mad( intel_sub_group_shuffle( _arow.s3, index ), _brow, _dot.s3 ); \\\n" +"_dot.s4 = mad( intel_sub_group_shuffle( _arow.s4, index ), _brow, _dot.s4 ); \\\n" +"_dot.s5 = mad( intel_sub_group_shuffle( _arow.s5, index ), _brow, _dot.s5 ); \\\n" +"_dot.s6 = mad( intel_sub_group_shuffle( _arow.s6, index ), _brow, _dot.s6 ); \\\n" +"_dot.s7 = mad( intel_sub_group_shuffle( _arow.s7, index ), _brow, _dot.s7 );\n" +"#define MM_DOT_PRODUCT( _brow, _dot) \\\n" +"DOT_PRODUCT(_dot, arow0, 0, _brow.s0); \\\n" +"DOT_PRODUCT(_dot, arow1, 0, _brow.s1); \\\n" +"DOT_PRODUCT(_dot, arow0, 1, _brow.s2); \\\n" +"DOT_PRODUCT(_dot, arow1, 1, _brow.s3); \\\n" +"DOT_PRODUCT(_dot, arow0, 2, _brow.s4); \\\n" +"DOT_PRODUCT(_dot, arow1, 2, _brow.s5); \\\n" +"DOT_PRODUCT(_dot, arow0, 3, _brow.s6); \\\n" +"DOT_PRODUCT(_dot, arow1, 3, _brow.s7); \\\n" +"DOT_PRODUCT(_dot, arow0, 4, _brow.s8); \\\n" +"DOT_PRODUCT(_dot, arow1, 4, _brow.s9); \\\n" +"DOT_PRODUCT(_dot, arow0, 5, _brow.sa); \\\n" +"DOT_PRODUCT(_dot, arow1, 5, _brow.sb); \\\n" +"DOT_PRODUCT(_dot, arow0, 6, _brow.sc); \\\n" +"DOT_PRODUCT(_dot, arow1, 6, _brow.sd); \\\n" +"DOT_PRODUCT(_dot, arow0, 7, _brow.se); \\\n" +"DOT_PRODUCT(_dot, arow1, 7, _brow.sf);\n" +"MM_DOT_PRODUCT( brow0, dot0 );\n" +"MM_DOT_PRODUCT( brow1, dot1 );\n" +"MM_DOT_PRODUCT( brow2, dot2 );\n" +"MM_DOT_PRODUCT( brow3, dot3 );\n" +"#undef MM_DOT_PRODUCT\n" +"#undef DOT_PRODUCT\n" +"src1_read0 += TILE_K;\n" +"src0_read += TILE_K * ldA;\n" +"start_index += TILE_K;\n" +"}\n" +"if(start_index < end_index) {\n" +"brow0 = vload16(0, src1_read0); src1_read0 += ldB;\n" +"brow1 = vload16(0, src1_read0); src1_read0 += ldB;\n" +"brow2 = vload16(0, src1_read0); src1_read0 += ldB;\n" +"brow3 = vload16(0, src1_read0);\n" +"float8 arow0 = (float)alpha * vload8(0, src0_read);\n" +"float8 arow1 = (float)alpha * vload8(0, src0_read + ldA);\n" +"#define DOT_PRODUCT( _dot, _arow, index, _brow) \\\n" +"_dot.s0 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s0, index ), _brow, _dot.s0 ) : _dot.s0; \\\n" +"_dot.s1 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s1, index ), _brow, _dot.s1 ) : _dot.s1; \\\n" +"_dot.s2 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s2, index ), _brow, _dot.s2 ) : _dot.s2; \\\n" +"_dot.s3 = (w < K) ? 
mad( intel_sub_group_shuffle( _arow.s3, index ), _brow, _dot.s3 ) : _dot.s3; \\\n" +"_dot.s4 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s4, index ), _brow, _dot.s4 ) : _dot.s4; \\\n" +"_dot.s5 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s5, index ), _brow, _dot.s5 ) : _dot.s5; \\\n" +"_dot.s6 = (w < K) ? mad( intel_sub_group_shuffle( _arow.s6, index ), _brow, _dot.s6 ) : _dot.s6; \\\n" +"_dot.s7 = (w++ < K) ? mad( intel_sub_group_shuffle( _arow.s7, index ), _brow, _dot.s7 ) : _dot.s7;\n" +"#define MM_DOT_PRODUCT( _brow, _dot) \\\n" +"DOT_PRODUCT(_dot, arow0, 0, _brow.s0); \\\n" +"DOT_PRODUCT(_dot, arow1, 0, _brow.s1); \\\n" +"DOT_PRODUCT(_dot, arow0, 1, _brow.s2); \\\n" +"DOT_PRODUCT(_dot, arow1, 1, _brow.s3); \\\n" +"DOT_PRODUCT(_dot, arow0, 2, _brow.s4); \\\n" +"DOT_PRODUCT(_dot, arow1, 2, _brow.s5); \\\n" +"DOT_PRODUCT(_dot, arow0, 3, _brow.s6); \\\n" +"DOT_PRODUCT(_dot, arow1, 3, _brow.s7); \\\n" +"DOT_PRODUCT(_dot, arow0, 4, _brow.s8); \\\n" +"DOT_PRODUCT(_dot, arow1, 4, _brow.s9); \\\n" +"DOT_PRODUCT(_dot, arow0, 5, _brow.sa); \\\n" +"DOT_PRODUCT(_dot, arow1, 5, _brow.sb); \\\n" +"DOT_PRODUCT(_dot, arow0, 6, _brow.sc); \\\n" +"DOT_PRODUCT(_dot, arow1, 6, _brow.sd); \\\n" +"DOT_PRODUCT(_dot, arow0, 7, _brow.se); \\\n" +"DOT_PRODUCT(_dot, arow1, 7, _brow.sf);\n" +"int w = start_index;\n" +"MM_DOT_PRODUCT( brow0, dot0 );\n" +"w = start_index;\n" +"MM_DOT_PRODUCT( brow1, dot1 );\n" +"w = start_index;\n" +"MM_DOT_PRODUCT( brow2, dot2 );\n" +"w = start_index;\n" +"MM_DOT_PRODUCT( brow3, dot3 );\n" +"#undef MM_DOT_PRODUCT\n" +"#undef DOT_PRODUCT\n" +"}\n" +"dot00 += (float4)(dot0.s0, dot1.s0, dot2.s0, dot3.s0);\n" +"dot01 += (float4)(dot0.s1, dot1.s1, dot2.s1, dot3.s1);\n" +"dot02 += (float4)(dot0.s2, dot1.s2, dot2.s2, dot3.s2);\n" +"dot03 += (float4)(dot0.s3, dot1.s3, dot2.s3, dot3.s3);\n" +"dot04 += (float4)(dot0.s4, dot1.s4, dot2.s4, dot3.s4);\n" +"dot05 += (float4)(dot0.s5, dot1.s5, dot2.s5, dot3.s5);\n" +"dot06 += (float4)(dot0.s6, dot1.s6, dot2.s6, dot3.s6);\n" +"dot07 += (float4)(dot0.s7, dot1.s7, dot2.s7, dot3.s7);\n" +"if(global_x * 4 < N && global_y * 8 < M) {\n" +"if(mad24(global_x, 4, 3) < N) {\n" +"vstore4(dot00, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { vstore4(dot01, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { vstore4(dot02, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore4(dot03, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore4(dot04, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore4(dot05, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore4(dot06, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore4(dot07, 0, dst_write0); }\n" +"} else if(mad24(global_x, 4, 2) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0);\n" +"dst_write0[2] = dot00.z;\n" +"dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) {\n" +"vstore2(dot01.xy, 0, dst_write0);\n" +"dst_write0[2] = dot01.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 2) < M) {\n" +"vstore2(dot02.xy, 0, dst_write0);\n" +"dst_write0[2] = dot02.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 3) < M) {\n" +"vstore2(dot03.xy, 0, dst_write0);\n" +"dst_write0[2] = dot03.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 4) < 
M) {\n" +"vstore2(dot04.xy, 0, dst_write0);\n" +"dst_write0[2] = dot04.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 5) < M) {\n" +"vstore2(dot05.xy, 0, dst_write0);\n" +"dst_write0[2] = dot05.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 6) < M) {\n" +"vstore2(dot06.xy, 0, dst_write0);\n" +"dst_write0[2] = dot06.z;\n" +"dst_write0 += ldC;\n" +"} else\n" +"return;\n" +"if(mad24(global_y, 8, 7) < M) {\n" +"vstore2(dot07.xy, 0, dst_write0);\n" +"dst_write0[2] = dot07.z;\n" +"}\n" +"} else if(mad24(global_x, 4, 1) < N) {\n" +"vstore2(dot00.xy, 0, dst_write0); dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { vstore2(dot01.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { vstore2(dot02.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { vstore2(dot03.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { vstore2(dot04.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { vstore2(dot05.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { vstore2(dot06.xy, 0, dst_write0); dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { vstore2(dot07.xy, 0, dst_write0); }\n" +"} else {\n" +"dst_write0[0] = dot00.x; dst_write0 += ldC;\n" +"if(mad24(global_y, 8, 1) < M) { dst_write0[0] = dot01.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 2) < M) { dst_write0[0] = dot02.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 3) < M) { dst_write0[0] = dot03.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 4) < M) { dst_write0[0] = dot04.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 5) < M) { dst_write0[0] = dot05.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 6) < M) { dst_write0[0] = dot06.x; dst_write0 += ldC; }\n" +"else return;\n" +"if(mad24(global_y, 8, 7) < M) { dst_write0[0] = dot07.x; }\n" +"}\n" +"}\n" +"}\n" +"#undef VEC_SIZE\n" +"#undef LWG_HEIGHT\n" +"#undef TILE_M\n" +"#undef TILE_K\n" +"#undef TILE_N\n" +"#endif\n" +, "6ca287dab5507df0be8e2d9bb7eff4ad", NULL}; +struct cv::ocl::internal::ProgramEntry lut_oclsrc={moduleName, "lut", +"#if lcn == 1\n" +"#if dcn == 4\n" +"#define LUT_OP \\\n" +"int idx = *(__global const int *)(srcptr + src_index); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx & 0xff]; \\\n" +"dst[1] = lut_l[(idx >> 8) & 0xff]; \\\n" +"dst[2] = lut_l[(idx >> 16) & 0xff]; \\\n" +"dst[3] = lut_l[(idx >> 24) & 0xff];\n" +"#elif dcn == 3\n" +"#define LUT_OP \\\n" +"uchar3 idx = vload3(0, srcptr + src_index); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx.x]; \\\n" +"dst[1] = lut_l[idx.y]; \\\n" +"dst[2] = lut_l[idx.z];\n" +"#elif dcn == 2\n" +"#define LUT_OP \\\n" +"short idx = *(__global const short *)(srcptr + src_index); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx & 0xff]; \\\n" +"dst[1] = lut_l[(idx >> 8) & 0xff];\n" +"#elif dcn == 1\n" +"#define LUT_OP \\\n" +"uchar idx = (srcptr + src_index)[0]; \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx];\n" +"#else\n" +"#define LUT_OP \\\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"for (int cn = 0; cn < dcn; ++cn) 
\\\n" +"dst[cn] = lut_l[src[cn]];\n" +"#endif\n" +"#else\n" +"#if dcn == 4\n" +"#define LUT_OP \\\n" +"__global const uchar4 * src_pixel = (__global const uchar4 *)(srcptr + src_index); \\\n" +"int4 idx = mad24(convert_int4(src_pixel[0]), (int4)(lcn), (int4)(0, 1, 2, 3)); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx.x]; \\\n" +"dst[1] = lut_l[idx.y]; \\\n" +"dst[2] = lut_l[idx.z]; \\\n" +"dst[3] = lut_l[idx.w];\n" +"#elif dcn == 3\n" +"#define LUT_OP \\\n" +"uchar3 src_pixel = vload3(0, srcptr + src_index); \\\n" +"int3 idx = mad24(convert_int3(src_pixel), (int3)(lcn), (int3)(0, 1, 2)); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx.x]; \\\n" +"dst[1] = lut_l[idx.y]; \\\n" +"dst[2] = lut_l[idx.z];\n" +"#elif dcn == 2\n" +"#define LUT_OP \\\n" +"__global const uchar2 * src_pixel = (__global const uchar2 *)(srcptr + src_index); \\\n" +"int2 idx = mad24(convert_int2(src_pixel[0]), lcn, (int2)(0, 1)); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx.x]; \\\n" +"dst[1] = lut_l[idx.y];\n" +"#elif dcn == 1\n" +"#define LUT_OP \\\n" +"uchar idx = (srcptr + src_index)[0]; \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"dst[0] = lut_l[idx];\n" +"#else\n" +"#define LUT_OP \\\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index); \\\n" +"dst = (__global dstT *)(dstptr + dst_index); \\\n" +"for (int cn = 0; cn < dcn; ++cn) \\\n" +"dst[cn] = lut_l[mad24(src[cn], lcn, cn)];\n" +"#endif\n" +"#endif\n" +"__kernel void LUT(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * lutptr, int lut_step, int lut_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) << 2;\n" +"__local dstT lut_l[256 * lcn];\n" +"__global const dstT * lut = (__global const dstT *)(lutptr + lut_offset);\n" +"for (int i = mad24((int)get_local_id(1), (int)get_local_size(0), (int)get_local_id(0)),\n" +"step = get_local_size(0) * get_local_size(1); i < 256 * lcn; i += step)\n" +"lut_l[i] = lut[i];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < cols && y < rows)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, (int)sizeof(srcT) * dcn, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT) * dcn, dst_offset));\n" +"__global dstT * dst;\n" +"LUT_OP;\n" +"if (y < rows - 1)\n" +"{\n" +"src_index += src_step;\n" +"dst_index += dst_step;\n" +"LUT_OP;\n" +"if (y < rows - 2)\n" +"{\n" +"src_index += src_step;\n" +"dst_index += dst_step;\n" +"LUT_OP;\n" +"if (y < rows - 3)\n" +"{\n" +"src_index += src_step;\n" +"dst_index += dst_step;\n" +"LUT_OP;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"}\n" +, "02217d060320fc126306ad16885be711", NULL}; +struct cv::ocl::internal::ProgramEntry meanstddev_oclsrc={moduleName, "meanstddev", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define storesqpix(val, addr) *(__global sqdstT *)(addr) = val\n" +"#define srcTSIZE (int)sizeof(srcT)\n" +"#define dstTSIZE (int)sizeof(dstT)\n" +"#define sqdstTSIZE (int)sizeof(sqdstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" 
+"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define storesqpix(val, addr) vstore3(val, 0, (__global sqdstT1 *)(addr))\n" +"#define srcTSIZE ((int)sizeof(srcT1)*3)\n" +"#define dstTSIZE ((int)sizeof(dstT1)*3)\n" +"#define sqdstTSIZE ((int)sizeof(sqdstT1)*3)\n" +"#endif\n" +"__kernel void meanStdDev(__global const uchar * srcptr, int src_step, int src_offset, int cols,\n" +"int total, int groups, __global uchar * dstptr\n" +"#ifdef HAVE_MASK\n" +", __global const uchar * mask, int mask_step, int mask_offset\n" +"#endif\n" +")\n" +"{\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int id = get_global_id(0);\n" +"__local dstT localMemSum[WGS2_ALIGNED];\n" +"__local sqdstT localMemSqSum[WGS2_ALIGNED];\n" +"#ifdef HAVE_MASK\n" +"__local int localMemNonZero[WGS2_ALIGNED];\n" +"#endif\n" +"dstT accSum = (dstT)(0);\n" +"sqdstT accSqSum = (sqdstT)(0);\n" +"#ifdef HAVE_MASK\n" +"int accNonZero = 0;\n" +"mask += mask_offset;\n" +"#endif\n" +"srcptr += src_offset;\n" +"for (int grain = groups * WGS; id < total; id += grain)\n" +"{\n" +"#ifdef HAVE_MASK\n" +"#ifdef HAVE_MASK_CONT\n" +"int mask_index = id;\n" +"#else\n" +"int mask_index = mad24(id / cols, mask_step, id % cols);\n" +"#endif\n" +"if (mask[mask_index])\n" +"#endif\n" +"{\n" +"#ifdef HAVE_SRC_CONT\n" +"int src_index = id * srcTSIZE;\n" +"#else\n" +"int src_index = mad24(id / cols, src_step, mul24(id % cols, srcTSIZE));\n" +"#endif\n" +"srcT value = loadpix(srcptr + src_index);\n" +"accSum += convertToDT(value);\n" +"sqdstT dvalue = convertToSDT(value);\n" +"accSqSum = fma(dvalue, dvalue, accSqSum);\n" +"#ifdef HAVE_MASK\n" +"++accNonZero;\n" +"#endif\n" +"}\n" +"}\n" +"if (lid < WGS2_ALIGNED)\n" +"{\n" +"localMemSum[lid] = accSum;\n" +"localMemSqSum[lid] = accSqSum;\n" +"#ifdef HAVE_MASK\n" +"localMemNonZero[lid] = accNonZero;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid >= WGS2_ALIGNED && total >= WGS2_ALIGNED)\n" +"{\n" +"localMemSum[lid - WGS2_ALIGNED] += accSum;\n" +"localMemSqSum[lid - WGS2_ALIGNED] += accSqSum;\n" +"#ifdef HAVE_MASK\n" +"localMemNonZero[lid - WGS2_ALIGNED] += accNonZero;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int lsize = WGS2_ALIGNED >> 1; lsize > 0; lsize >>= 1)\n" +"{\n" +"if (lid < lsize)\n" +"{\n" +"int lid2 = lsize + lid;\n" +"localMemSum[lid] += localMemSum[lid2];\n" +"localMemSqSum[lid] += localMemSqSum[lid2];\n" +"#ifdef HAVE_MASK\n" +"localMemNonZero[lid] += localMemNonZero[lid2];\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"if (lid == 0)\n" +"{\n" +"storepix(localMemSum[0], dstptr + dstTSIZE * gid);\n" +"storesqpix(localMemSqSum[0], dstptr + mad24(dstTSIZE, groups, sqdstTSIZE * gid));\n" +"#ifdef HAVE_MASK\n" +"*(__global int *)(dstptr + mad24(dstTSIZE + sqdstTSIZE, groups, (int)sizeof(int) * gid)) = localMemNonZero[0];\n" +"#endif\n" +"}\n" +"}\n" +, "1284edd21da32ce135cd26c0c897bd08", NULL}; +struct cv::ocl::internal::ProgramEntry minmaxloc_oclsrc={moduleName, "minmaxloc", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"static inline int align(int pos)\n" +"{\n" +"return (pos + (MINMAX_STRUCT_ALIGNMENT - 1)) & (~(MINMAX_STRUCT_ALIGNMENT - 1));\n" +"}\n" +"#ifdef DEPTH_0\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL UCHAR_MAX\n" +"#elif defined DEPTH_1\n" +"#define MIN_VAL SCHAR_MIN\n" +"#define MAX_VAL SCHAR_MAX\n" +"#elif defined 
DEPTH_2\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL USHRT_MAX\n" +"#elif defined DEPTH_3\n" +"#define MIN_VAL SHRT_MIN\n" +"#define MAX_VAL SHRT_MAX\n" +"#elif defined DEPTH_4\n" +"#define MIN_VAL INT_MIN\n" +"#define MAX_VAL INT_MAX\n" +"#elif defined DEPTH_5\n" +"#define MIN_VAL (-FLT_MAX)\n" +"#define MAX_VAL FLT_MAX\n" +"#elif defined DEPTH_6\n" +"#define MIN_VAL (-DBL_MAX)\n" +"#define MAX_VAL DBL_MAX\n" +"#endif\n" +"#define noconvert\n" +"#define INDEX_MAX UINT_MAX\n" +"#if wdepth <= 4\n" +"#define MIN_ABS(a) convertFromU(abs(a))\n" +"#define MIN_ABS2(a, b) convertFromU(abs_diff(a, b))\n" +"#define MIN(a, b) min(a, b)\n" +"#define MAX(a, b) max(a, b)\n" +"#else\n" +"#define MIN_ABS(a) fabs(a)\n" +"#define MIN_ABS2(a, b) fabs(a - b)\n" +"#define MIN(a, b) fmin(a, b)\n" +"#define MAX(a, b) fmax(a, b)\n" +"#endif\n" +"#if kercn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define srcTSIZE (int)sizeof(srcT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define srcTSIZE ((int)sizeof(srcT1) * 3)\n" +"#endif\n" +"#ifndef HAVE_MASK\n" +"#undef srcTSIZE\n" +"#define srcTSIZE (int)sizeof(srcT1)\n" +"#endif\n" +"#ifdef NEED_MINVAL\n" +"#ifdef NEED_MINLOC\n" +"#define CALC_MIN(p, inc) \\\n" +"if (minval > temp.p) \\\n" +"{ \\\n" +"minval = temp.p; \\\n" +"minloc = id + inc; \\\n" +"}\n" +"#else\n" +"#define CALC_MIN(p, inc) \\\n" +"minval = MIN(minval, temp.p);\n" +"#endif\n" +"#else\n" +"#define CALC_MIN(p, inc)\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"#ifdef NEED_MAXLOC\n" +"#define CALC_MAX(p, inc) \\\n" +"if (maxval < temp.p) \\\n" +"{ \\\n" +"maxval = temp.p; \\\n" +"maxloc = id + inc; \\\n" +"}\n" +"#else\n" +"#define CALC_MAX(p, inc) \\\n" +"maxval = MAX(maxval, temp.p);\n" +"#endif\n" +"#else\n" +"#define CALC_MAX(p, inc)\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"#define CALC_MAX2(p) \\\n" +"maxval2 = MAX(maxval2, temp2.p);\n" +"#else\n" +"#define CALC_MAX2(p)\n" +"#endif\n" +"#define CALC_P(p, inc) \\\n" +"CALC_MIN(p, inc) \\\n" +"CALC_MAX(p, inc) \\\n" +"CALC_MAX2(p)\n" +"__kernel void minmaxloc(__global const uchar * srcptr, int src_step, int src_offset, int cols,\n" +"int total, int groupnum, __global uchar * dstptr\n" +"#ifdef HAVE_MASK\n" +", __global const uchar * mask, int mask_step, int mask_offset\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +", __global const uchar * src2ptr, int src2_step, int src2_offset\n" +"#endif\n" +")\n" +"{\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int id = get_global_id(0)\n" +"#ifndef HAVE_MASK\n" +"* kercn;\n" +"#else\n" +";\n" +"#endif\n" +"srcptr += src_offset;\n" +"#ifdef HAVE_MASK\n" +"mask += mask_offset;\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +"src2ptr += src2_offset;\n" +"#endif\n" +"#ifdef NEED_MINVAL\n" +"__local dstT1 localmem_min[WGS2_ALIGNED];\n" +"dstT1 minval = MAX_VAL;\n" +"#ifdef NEED_MINLOC\n" +"__local uint localmem_minloc[WGS2_ALIGNED];\n" +"uint minloc = INDEX_MAX;\n" +"#endif\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"dstT1 maxval = MIN_VAL;\n" +"__local dstT1 localmem_max[WGS2_ALIGNED];\n" +"#ifdef NEED_MAXLOC\n" +"__local uint localmem_maxloc[WGS2_ALIGNED];\n" +"uint maxloc = INDEX_MAX;\n" +"#endif\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"__local dstT1 localmem_max2[WGS2_ALIGNED];\n" +"dstT1 maxval2 = MIN_VAL;\n" +"#endif\n" +"int src_index;\n" +"#ifdef HAVE_MASK\n" +"int mask_index;\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +"int src2_index;\n" +"#endif\n" +"dstT temp;\n" +"#ifdef HAVE_SRC2\n" +"dstT temp2;\n" +"#endif\n" +"for (int grain = groupnum * WGS\n" 
+"#ifndef HAVE_MASK\n" +"* kercn\n" +"#endif\n" +"; id < total; id += grain)\n" +"{\n" +"#ifdef HAVE_MASK\n" +"#ifdef HAVE_MASK_CONT\n" +"mask_index = id;\n" +"#else\n" +"mask_index = mad24(id / cols, mask_step, id % cols);\n" +"#endif\n" +"if (mask[mask_index])\n" +"#endif\n" +"{\n" +"#ifdef HAVE_SRC_CONT\n" +"src_index = id * srcTSIZE;\n" +"#else\n" +"src_index = mad24(id / cols, src_step, mul24(id % cols, srcTSIZE));\n" +"#endif\n" +"temp = convertToDT(loadpix(srcptr + src_index));\n" +"#ifdef OP_ABS\n" +"temp = MIN_ABS(temp);\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +"#ifdef HAVE_SRC2_CONT\n" +"src2_index = id * srcTSIZE;\n" +"#else\n" +"src2_index = mad24(id / cols, src2_step, mul24(id % cols, srcTSIZE));\n" +"#endif\n" +"temp2 = convertToDT(loadpix(src2ptr + src2_index));\n" +"temp = MIN_ABS2(temp, temp2);\n" +"#ifdef OP_CALC2\n" +"temp2 = MIN_ABS(temp2);\n" +"#endif\n" +"#endif\n" +"#if kercn == 1\n" +"#ifdef NEED_MINVAL\n" +"#ifdef NEED_MINLOC\n" +"if (minval > temp)\n" +"{\n" +"minval = temp;\n" +"minloc = id;\n" +"}\n" +"#else\n" +"minval = MIN(minval, temp);\n" +"#endif\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"#ifdef NEED_MAXLOC\n" +"if (maxval < temp)\n" +"{\n" +"maxval = temp;\n" +"maxloc = id;\n" +"}\n" +"#else\n" +"maxval = MAX(maxval, temp);\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"maxval2 = MAX(maxval2, temp2);\n" +"#endif\n" +"#endif\n" +"#elif kercn >= 2\n" +"CALC_P(s0, 0)\n" +"CALC_P(s1, 1)\n" +"#if kercn >= 3\n" +"CALC_P(s2, 2)\n" +"#if kercn >= 4\n" +"CALC_P(s3, 3)\n" +"#if kercn >= 8\n" +"CALC_P(s4, 4)\n" +"CALC_P(s5, 5)\n" +"CALC_P(s6, 6)\n" +"CALC_P(s7, 7)\n" +"#if kercn == 16\n" +"CALC_P(s8, 8)\n" +"CALC_P(s9, 9)\n" +"CALC_P(sA, 10)\n" +"CALC_P(sB, 11)\n" +"CALC_P(sC, 12)\n" +"CALC_P(sD, 13)\n" +"CALC_P(sE, 14)\n" +"CALC_P(sF, 15)\n" +"#endif\n" +"#endif\n" +"#endif\n" +"#endif\n" +"#endif\n" +"}\n" +"}\n" +"if (lid < WGS2_ALIGNED)\n" +"{\n" +"#ifdef NEED_MINVAL\n" +"localmem_min[lid] = minval;\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"localmem_max[lid] = maxval;\n" +"#endif\n" +"#ifdef NEED_MINLOC\n" +"localmem_minloc[lid] = minloc;\n" +"#endif\n" +"#ifdef NEED_MAXLOC\n" +"localmem_maxloc[lid] = maxloc;\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"localmem_max2[lid] = maxval2;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid >= WGS2_ALIGNED && total >= WGS2_ALIGNED)\n" +"{\n" +"int lid3 = lid - WGS2_ALIGNED;\n" +"#ifdef NEED_MINVAL\n" +"#ifdef NEED_MINLOC\n" +"if (localmem_min[lid3] >= minval)\n" +"{\n" +"if (localmem_min[lid3] == minval)\n" +"localmem_minloc[lid3] = min(localmem_minloc[lid3], minloc);\n" +"else\n" +"localmem_minloc[lid3] = minloc,\n" +"localmem_min[lid3] = minval;\n" +"}\n" +"#else\n" +"localmem_min[lid3] = MIN(localmem_min[lid3], minval);\n" +"#endif\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"#ifdef NEED_MAXLOC\n" +"if (localmem_max[lid3] <= maxval)\n" +"{\n" +"if (localmem_max[lid3] == maxval)\n" +"localmem_maxloc[lid3] = min(localmem_maxloc[lid3], maxloc);\n" +"else\n" +"localmem_maxloc[lid3] = maxloc,\n" +"localmem_max[lid3] = maxval;\n" +"}\n" +"#else\n" +"localmem_max[lid3] = MAX(localmem_max[lid3], maxval);\n" +"#endif\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"localmem_max2[lid3] = MAX(localmem_max2[lid3], maxval2);\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int lsize = WGS2_ALIGNED >> 1; lsize > 0; lsize >>= 1)\n" +"{\n" +"if (lid < lsize)\n" +"{\n" +"int lid2 = lsize + lid;\n" +"#ifdef NEED_MINVAL\n" +"#ifdef NEED_MINLOC\n" +"if (localmem_min[lid] >= localmem_min[lid2])\n" +"{\n" +"if (localmem_min[lid] == 
localmem_min[lid2])\n" +"localmem_minloc[lid] = min(localmem_minloc[lid2], localmem_minloc[lid]);\n" +"else\n" +"localmem_minloc[lid] = localmem_minloc[lid2],\n" +"localmem_min[lid] = localmem_min[lid2];\n" +"}\n" +"#else\n" +"localmem_min[lid] = MIN(localmem_min[lid], localmem_min[lid2]);\n" +"#endif\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"#ifdef NEED_MAXLOC\n" +"if (localmem_max[lid] <= localmem_max[lid2])\n" +"{\n" +"if (localmem_max[lid] == localmem_max[lid2])\n" +"localmem_maxloc[lid] = min(localmem_maxloc[lid2], localmem_maxloc[lid]);\n" +"else\n" +"localmem_maxloc[lid] = localmem_maxloc[lid2],\n" +"localmem_max[lid] = localmem_max[lid2];\n" +"}\n" +"#else\n" +"localmem_max[lid] = MAX(localmem_max[lid], localmem_max[lid2]);\n" +"#endif\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"localmem_max2[lid] = MAX(localmem_max2[lid], localmem_max2[lid2]);\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"if (lid == 0)\n" +"{\n" +"int pos = 0;\n" +"#ifdef NEED_MINVAL\n" +"*(__global dstT1 *)(dstptr + mad24(gid, (int)sizeof(dstT1), pos)) = localmem_min[0];\n" +"pos = mad24(groupnum, (int)sizeof(dstT1), pos);\n" +"pos = align(pos);\n" +"#endif\n" +"#ifdef NEED_MAXVAL\n" +"*(__global dstT1 *)(dstptr + mad24(gid, (int)sizeof(dstT1), pos)) = localmem_max[0];\n" +"pos = mad24(groupnum, (int)sizeof(dstT1), pos);\n" +"pos = align(pos);\n" +"#endif\n" +"#ifdef NEED_MINLOC\n" +"*(__global uint *)(dstptr + mad24(gid, (int)sizeof(uint), pos)) = localmem_minloc[0];\n" +"pos = mad24(groupnum, (int)sizeof(uint), pos);\n" +"pos = align(pos);\n" +"#endif\n" +"#ifdef NEED_MAXLOC\n" +"*(__global uint *)(dstptr + mad24(gid, (int)sizeof(uint), pos)) = localmem_maxloc[0];\n" +"#ifdef OP_CALC2\n" +"pos = mad24(groupnum, (int)sizeof(uint), pos);\n" +"pos = align(pos);\n" +"#endif\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"*(__global dstT1 *)(dstptr + mad24(gid, (int)sizeof(dstT1), pos)) = localmem_max2[0];\n" +"#endif\n" +"}\n" +"}\n" +, "db5f17eea45ac79530d7323906118580", NULL}; +struct cv::ocl::internal::ProgramEntry mixchannels_oclsrc={moduleName, "mixchannels", +"#define DECLARE_INPUT_MAT(i) \\\n" +"__global const uchar * src##i##ptr, int src##i##_step, int src##i##_offset,\n" +"#define DECLARE_OUTPUT_MAT(i) \\\n" +"__global uchar * dst##i##ptr, int dst##i##_step, int dst##i##_offset,\n" +"#define DECLARE_INDEX(i) \\\n" +"int src##i##_index = mad24(src##i##_step, y0, mad24(x, (int)sizeof(T) * scn##i, src##i##_offset)); \\\n" +"int dst##i##_index = mad24(dst##i##_step, y0, mad24(x, (int)sizeof(T) * dcn##i, dst##i##_offset));\n" +"#define PROCESS_ELEM(i) \\\n" +"__global const T * src##i = (__global const T *)(src##i##ptr + src##i##_index); \\\n" +"__global T * dst##i = (__global T *)(dst##i##ptr + dst##i##_index); \\\n" +"dst##i[0] = src##i[0]; \\\n" +"src##i##_index += src##i##_step; \\\n" +"dst##i##_index += dst##i##_step;\n" +"__kernel void mixChannels(DECLARE_INPUT_MAT_N DECLARE_OUTPUT_MAT_N int rows, int cols, int rowsPerWI)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"DECLARE_INDEX_N\n" +"for (int y = y0, y1 = min(y0 + rowsPerWI, rows); y < y1; ++y)\n" +"{\n" +"PROCESS_ELEM_N\n" +"}\n" +"}\n" +"}\n" +, "26a27b81c3e2524a8eb918b3a518da0a", NULL}; +struct cv::ocl::internal::ProgramEntry mulspectrums_oclsrc={moduleName, "mulspectrums", +"inline float2 cmulf(float2 a, float2 b)\n" +"{\n" +"return (float2)(mad(a.x, b.x, - a.y * b.y), mad(a.x, b.y, a.y * b.x));\n" +"}\n" +"inline float2 conjf(float2 a)\n" +"{\n" +"return (float2)(a.x, - 
a.y);\n" +"}\n" +"__kernel void mulAndScaleSpectrums(__global const uchar * src1ptr, int src1_step, int src1_offset,\n" +"__global const uchar * src2ptr, int src2_step, int src2_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int dst_rows, int dst_cols, int rowsPerWI)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"int src1_index = mad24(y0, src1_step, mad24(x, (int)sizeof(float2), src1_offset));\n" +"int src2_index = mad24(y0, src2_step, mad24(x, (int)sizeof(float2), src2_offset));\n" +"int dst_index = mad24(y0, dst_step, mad24(x, (int)sizeof(float2), dst_offset));\n" +"for (int y = y0, y1 = min(dst_rows, y0 + rowsPerWI); y < y1; ++y,\n" +"src1_index += src1_step, src2_index += src2_step, dst_index += dst_step)\n" +"{\n" +"float2 src0 = *(__global const float2 *)(src1ptr + src1_index);\n" +"float2 src1 = *(__global const float2 *)(src2ptr + src2_index);\n" +"__global float2 * dst = (__global float2 *)(dstptr + dst_index);\n" +"#ifdef CONJ\n" +"float2 v = cmulf(src0, conjf(src1));\n" +"#else\n" +"float2 v = cmulf(src0, src1);\n" +"#endif\n" +"dst[0] = v;\n" +"}\n" +"}\n" +"}\n" +, "0ffb2c858f6664aa3e56efb81f025f5c", NULL}; +struct cv::ocl::internal::ProgramEntry normalize_oclsrc={moduleName, "normalize", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define srcTSIZE (int)sizeof(srcT)\n" +"#define dstTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define srcTSIZE ((int)sizeof(srcT1)*3)\n" +"#define dstTSIZE ((int)sizeof(dstT1)*3)\n" +"#endif\n" +"__kernel void normalizek(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * mask, int mask_step, int mask_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols\n" +"#ifdef HAVE_SCALE\n" +", float scale\n" +"#endif\n" +"#ifdef HAVE_DELTA\n" +", float delta\n" +"#endif\n" +")\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"int src_index = mad24(y0, src_step, mad24(x, srcTSIZE, src_offset));\n" +"int mask_index = mad24(y0, mask_step, x + mask_offset);\n" +"int dst_index = mad24(y0, dst_step, mad24(x, dstTSIZE, dst_offset));\n" +"for (int y = y0, y1 = min(y0 + rowsPerWI, dst_rows); y < y1;\n" +"++y, src_index += src_step, dst_index += dst_step, mask_index += mask_step)\n" +"{\n" +"if (mask[mask_index])\n" +"{\n" +"workT value = convertToWT(loadpix(srcptr + src_index));\n" +"#ifdef HAVE_SCALE\n" +"#ifdef HAVE_DELTA\n" +"value = fma(value, (workT)(scale), (workT)(delta));\n" +"#else\n" +"value *= (workT)(scale);\n" +"#endif\n" +"#else\n" +"#ifdef HAVE_DELTA\n" +"value += (workT)(delta);\n" +"#endif\n" +"#endif\n" +"storepix(convertToDT(value), dstptr + dst_index);\n" +"}\n" +"}\n" +"}\n" +"}\n" +, "05e23451b4bf16c50a0eba9d6c5c0012", NULL}; +struct cv::ocl::internal::ProgramEntry reduce_oclsrc={moduleName, "reduce", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL 
EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if defined OP_NORM_INF_MASK\n" +"#ifdef DEPTH_0\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL 255\n" +"#elif defined DEPTH_1\n" +"#define MIN_VAL -128\n" +"#define MAX_VAL 127\n" +"#elif defined DEPTH_2\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL 65535\n" +"#elif defined DEPTH_3\n" +"#define MIN_VAL -32768\n" +"#define MAX_VAL 32767\n" +"#elif defined DEPTH_4\n" +"#define MIN_VAL INT_MIN\n" +"#define MAX_VAL INT_MAX\n" +"#elif defined DEPTH_5\n" +"#define MIN_VAL (-FLT_MAX)\n" +"#define MAX_VAL FLT_MAX\n" +"#elif defined DEPTH_6\n" +"#define MIN_VAL (-DBL_MAX)\n" +"#define MAX_VAL DBL_MAX\n" +"#endif\n" +"#define dstT srcT\n" +"#define dstT1 srcT1\n" +"#endif\n" +"#define noconvert\n" +"#ifndef kercn\n" +"#define kercn 1\n" +"#endif\n" +"#ifdef HAVE_MASK_CONT\n" +"#define MASK_INDEX int mask_index = id + mask_offset;\n" +"#else\n" +"#define MASK_INDEX int mask_index = mad24(id / cols, mask_step, mask_offset + (id % cols))\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#if kercn == 1\n" +"#define srcTSIZE (int)sizeof(srcT)\n" +"#else\n" +"#define srcTSIZE (int)sizeof(srcT1)\n" +"#endif\n" +"#define dstTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define srcTSIZE ((int)sizeof(srcT1)*3)\n" +"#define dstTSIZE ((int)sizeof(dstT1)*3)\n" +"#endif\n" +"#if ddepth <= 4\n" +"#define SUM_ABS(a) convertFromU(abs(a))\n" +"#define SUM_ABS2(a, b) convertFromU(abs_diff(a, b))\n" +"#else\n" +"#define SUM_ABS(a) fabs(a)\n" +"#define SUM_ABS2(a, b) fabs(a - b)\n" +"#endif\n" +"#ifdef HAVE_MASK\n" +"#ifdef HAVE_SRC2\n" +"#define EXTRA_PARAMS , __global const uchar * mask, int mask_step, int mask_offset, __global const uchar * src2ptr, int src2_step, int src2_offset\n" +"#else\n" +"#define EXTRA_PARAMS , __global const uchar * mask, int mask_step, int mask_offset\n" +"#endif\n" +"#else\n" +"#ifdef HAVE_SRC2\n" +"#define EXTRA_PARAMS , __global const uchar * src2ptr, int src2_step, int src2_offset\n" +"#else\n" +"#define EXTRA_PARAMS\n" +"#endif\n" +"#endif\n" +"#if defined OP_SUM || defined OP_SUM_ABS || defined OP_SUM_SQR || defined OP_DOT\n" +"#ifdef OP_DOT\n" +"#if ddepth <= 4\n" +"#define FUNC(a, b, c) a = mad24(b, c, a)\n" +"#else\n" +"#define FUNC(a, b, c) a = mad(b, c, a)\n" +"#endif\n" +"#elif defined OP_SUM\n" +"#define FUNC(a, b) a += b\n" +"#elif defined OP_SUM_ABS\n" +"#define FUNC(a, b) a += SUM_ABS(b)\n" +"#elif defined OP_SUM_SQR\n" +"#if ddepth <= 4\n" +"#define FUNC(a, b) a = mad24(b, b, a)\n" +"#else\n" +"#define FUNC(a, b) a = mad(b, b, a)\n" +"#endif\n" +"#endif\n" +"#ifdef OP_CALC2\n" +"#define DECLARE_LOCAL_MEM \\\n" +"__local dstT localmem[WGS2_ALIGNED], localmem2[WGS2_ALIGNED]\n" +"#define DEFINE_ACCUMULATOR \\\n" +"dstT accumulator = (dstT)(0), accumulator2 = (dstT)(0)\n" +"#else\n" +"#define DECLARE_LOCAL_MEM \\\n" +"__local dstT localmem[WGS2_ALIGNED]\n" +"#define DEFINE_ACCUMULATOR \\\n" +"dstT accumulator = (dstT)(0)\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +"#ifdef OP_CALC2\n" +"#define PROCESS_ELEMS \\\n" +"dstT temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstT temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator2, temp2); \\\n" +"FUNC(accumulator, temp)\n" +"#else\n" +"#define 
PROCESS_ELEMS \\\n" +"dstT temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstT temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp)\n" +"#endif\n" +"#else\n" +"#define PROCESS_ELEMS \\\n" +"dstT temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp)\n" +"#endif\n" +"#ifdef HAVE_MASK\n" +"#define REDUCE_GLOBAL \\\n" +"MASK_INDEX; \\\n" +"if (mask[mask_index]) \\\n" +"{ \\\n" +"PROCESS_ELEMS; \\\n" +"}\n" +"#elif defined OP_DOT\n" +"#ifdef HAVE_SRC2_CONT\n" +"#define SRC2_INDEX int src2_index = mad24(id, srcTSIZE, src2_offset);\n" +"#else\n" +"#define SRC2_INDEX int src2_index = mad24(id / cols, src2_step, mad24(id % cols, srcTSIZE, src2_offset))\n" +"#endif\n" +"#if kercn == 1\n" +"#define REDUCE_GLOBAL \\\n" +"SRC2_INDEX; \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)), temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"FUNC(accumulator, temp, temp2)\n" +"#elif kercn == 2\n" +"#define REDUCE_GLOBAL \\\n" +"SRC2_INDEX; \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)), temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"FUNC(accumulator, temp.s0, temp2.s0); \\\n" +"FUNC(accumulator, temp.s1, temp2.s1)\n" +"#elif kercn == 4\n" +"#define REDUCE_GLOBAL \\\n" +"SRC2_INDEX; \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)), temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"FUNC(accumulator, temp.s0, temp2.s0); \\\n" +"FUNC(accumulator, temp.s1, temp2.s1); \\\n" +"FUNC(accumulator, temp.s2, temp2.s2); \\\n" +"FUNC(accumulator, temp.s3, temp2.s3)\n" +"#elif kercn == 8\n" +"#define REDUCE_GLOBAL \\\n" +"SRC2_INDEX; \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)), temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"FUNC(accumulator, temp.s0, temp2.s0); \\\n" +"FUNC(accumulator, temp.s1, temp2.s1); \\\n" +"FUNC(accumulator, temp.s2, temp2.s2); \\\n" +"FUNC(accumulator, temp.s3, temp2.s3); \\\n" +"FUNC(accumulator, temp.s4, temp2.s4); \\\n" +"FUNC(accumulator, temp.s5, temp2.s5); \\\n" +"FUNC(accumulator, temp.s6, temp2.s6); \\\n" +"FUNC(accumulator, temp.s7, temp2.s7)\n" +"#elif kercn == 16\n" +"#define REDUCE_GLOBAL \\\n" +"SRC2_INDEX; \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)), temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"FUNC(accumulator, temp.s0, temp2.s0); \\\n" +"FUNC(accumulator, temp.s1, temp2.s1); \\\n" +"FUNC(accumulator, temp.s2, temp2.s2); \\\n" +"FUNC(accumulator, temp.s3, temp2.s3); \\\n" +"FUNC(accumulator, temp.s4, temp2.s4); \\\n" +"FUNC(accumulator, temp.s5, temp2.s5); \\\n" +"FUNC(accumulator, temp.s6, temp2.s6); \\\n" +"FUNC(accumulator, temp.s7, temp2.s7); \\\n" +"FUNC(accumulator, temp.s8, temp2.s8); \\\n" +"FUNC(accumulator, temp.s9, temp2.s9); \\\n" +"FUNC(accumulator, temp.sA, temp2.sA); \\\n" +"FUNC(accumulator, temp.sB, temp2.sB); \\\n" +"FUNC(accumulator, temp.sC, temp2.sC); \\\n" +"FUNC(accumulator, temp.sD, temp2.sD); \\\n" +"FUNC(accumulator, temp.sE, temp2.sE); \\\n" +"FUNC(accumulator, temp.sF, temp2.sF)\n" +"#endif\n" +"#else\n" +"#ifdef HAVE_SRC2\n" +"#ifdef OP_CALC2\n" +"#if kercn == 1\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator, temp); \\\n" +"FUNC(accumulator2, temp2)\n" +"#elif kercn == 2\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = 
convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator2, temp2.s0); \\\n" +"FUNC(accumulator2, temp2.s1)\n" +"#elif kercn == 4\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator2, temp2.s0); \\\n" +"FUNC(accumulator2, temp2.s1); \\\n" +"FUNC(accumulator2, temp2.s2); \\\n" +"FUNC(accumulator2, temp2.s3)\n" +"#elif kercn == 8\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7); \\\n" +"FUNC(accumulator2, temp2.s0); \\\n" +"FUNC(accumulator2, temp2.s1); \\\n" +"FUNC(accumulator2, temp2.s2); \\\n" +"FUNC(accumulator2, temp2.s3); \\\n" +"FUNC(accumulator2, temp2.s4); \\\n" +"FUNC(accumulator2, temp2.s5); \\\n" +"FUNC(accumulator2, temp2.s6); \\\n" +"FUNC(accumulator2, temp2.s7)\n" +"#elif kercn == 16\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"temp2 = SUM_ABS(temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7); \\\n" +"FUNC(accumulator, temp.s8); \\\n" +"FUNC(accumulator, temp.s9); \\\n" +"FUNC(accumulator, temp.sA); \\\n" +"FUNC(accumulator, temp.sB); \\\n" +"FUNC(accumulator, temp.sC); \\\n" +"FUNC(accumulator, temp.sD); \\\n" +"FUNC(accumulator, temp.sE); \\\n" +"FUNC(accumulator, temp.sF); \\\n" +"FUNC(accumulator2, temp2.s0); \\\n" +"FUNC(accumulator2, temp2.s1); \\\n" +"FUNC(accumulator2, temp2.s2); \\\n" +"FUNC(accumulator2, temp2.s3); \\\n" +"FUNC(accumulator2, temp2.s4); \\\n" +"FUNC(accumulator2, temp2.s5); \\\n" +"FUNC(accumulator2, temp2.s6); \\\n" +"FUNC(accumulator2, temp2.s7); \\\n" +"FUNC(accumulator2, temp2.s8); \\\n" +"FUNC(accumulator2, temp2.s9); \\\n" +"FUNC(accumulator2, temp2.sA); \\\n" +"FUNC(accumulator2, temp2.sB); \\\n" +"FUNC(accumulator2, temp2.sC); \\\n" +"FUNC(accumulator2, temp2.sD); \\\n" +"FUNC(accumulator2, temp2.sE); \\\n" +"FUNC(accumulator2, temp2.sF)\n" +"#endif\n" +"#else\n" +"#if kercn == 1\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp)\n" +"#elif kercn == 2\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = 
convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1)\n" +"#elif kercn == 4\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3)\n" +"#elif kercn == 8\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7)\n" +"#elif kercn == 16\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"dstTK temp2 = convertToDT(loadpix(src2ptr + src2_index)); \\\n" +"temp = SUM_ABS2(temp, temp2); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7); \\\n" +"FUNC(accumulator, temp.s8); \\\n" +"FUNC(accumulator, temp.s9); \\\n" +"FUNC(accumulator, temp.sA); \\\n" +"FUNC(accumulator, temp.sB); \\\n" +"FUNC(accumulator, temp.sC); \\\n" +"FUNC(accumulator, temp.sD); \\\n" +"FUNC(accumulator, temp.sE); \\\n" +"FUNC(accumulator, temp.sF)\n" +"#endif\n" +"#endif\n" +"#else\n" +"#if kercn == 1\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp)\n" +"#elif kercn == 2\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1)\n" +"#elif kercn == 4\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3)\n" +"#elif kercn == 8\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7)\n" +"#elif kercn == 16\n" +"#define REDUCE_GLOBAL \\\n" +"dstTK temp = convertToDT(loadpix(srcptr + src_index)); \\\n" +"FUNC(accumulator, temp.s0); \\\n" +"FUNC(accumulator, temp.s1); \\\n" +"FUNC(accumulator, temp.s2); \\\n" +"FUNC(accumulator, temp.s3); \\\n" +"FUNC(accumulator, temp.s4); \\\n" +"FUNC(accumulator, temp.s5); \\\n" +"FUNC(accumulator, temp.s6); \\\n" +"FUNC(accumulator, temp.s7); \\\n" +"FUNC(accumulator, temp.s8); \\\n" +"FUNC(accumulator, temp.s9); \\\n" +"FUNC(accumulator, temp.sA); \\\n" +"FUNC(accumulator, temp.sB); \\\n" +"FUNC(accumulator, temp.sC); \\\n" +"FUNC(accumulator, temp.sD); \\\n" +"FUNC(accumulator, temp.sE); \\\n" +"FUNC(accumulator, temp.sF)\n" +"#endif\n" +"#endif\n" +"#endif\n" 
+"#ifdef OP_CALC2\n" +"#define SET_LOCAL_1 \\\n" +"localmem[lid] = accumulator; \\\n" +"localmem2[lid] = accumulator2\n" +"#define REDUCE_LOCAL_1 \\\n" +"localmem[lid - WGS2_ALIGNED] += accumulator; \\\n" +"localmem2[lid - WGS2_ALIGNED] += accumulator2\n" +"#define REDUCE_LOCAL_2 \\\n" +"localmem[lid] += localmem[lid2]; \\\n" +"localmem2[lid] += localmem2[lid2]\n" +"#define CALC_RESULT \\\n" +"storepix(localmem[0], dstptr + dstTSIZE * gid); \\\n" +"storepix(localmem2[0], dstptr + mad24(groupnum, dstTSIZE, dstTSIZE * gid))\n" +"#else\n" +"#define SET_LOCAL_1 \\\n" +"localmem[lid] = accumulator\n" +"#define REDUCE_LOCAL_1 \\\n" +"localmem[lid - WGS2_ALIGNED] += accumulator\n" +"#define REDUCE_LOCAL_2 \\\n" +"localmem[lid] += localmem[lid2]\n" +"#define CALC_RESULT \\\n" +"storepix(localmem[0], dstptr + dstTSIZE * gid)\n" +"#endif\n" +"#elif defined OP_COUNT_NON_ZERO\n" +"#define dstT int\n" +"#define DECLARE_LOCAL_MEM \\\n" +"__local dstT localmem[WGS2_ALIGNED]\n" +"#define DEFINE_ACCUMULATOR \\\n" +"dstT accumulator = (dstT)(0); \\\n" +"srcT1 zero = (srcT1)(0), one = (srcT1)(1)\n" +"#if kercn == 1\n" +"#define REDUCE_GLOBAL \\\n" +"accumulator += loadpix(srcptr + src_index) == zero ? zero : one\n" +"#elif kercn == 2\n" +"#define REDUCE_GLOBAL \\\n" +"srcT value = loadpix(srcptr + src_index); \\\n" +"accumulator += value.s0 == zero ? zero : one; \\\n" +"accumulator += value.s1 == zero ? zero : one\n" +"#elif kercn == 4\n" +"#define REDUCE_GLOBAL \\\n" +"srcT value = loadpix(srcptr + src_index); \\\n" +"accumulator += value.s0 == zero ? zero : one; \\\n" +"accumulator += value.s1 == zero ? zero : one; \\\n" +"accumulator += value.s2 == zero ? zero : one; \\\n" +"accumulator += value.s3 == zero ? zero : one\n" +"#elif kercn == 8\n" +"#define REDUCE_GLOBAL \\\n" +"srcT value = loadpix(srcptr + src_index); \\\n" +"accumulator += value.s0 == zero ? zero : one; \\\n" +"accumulator += value.s1 == zero ? zero : one; \\\n" +"accumulator += value.s2 == zero ? zero : one; \\\n" +"accumulator += value.s3 == zero ? zero : one; \\\n" +"accumulator += value.s4 == zero ? zero : one; \\\n" +"accumulator += value.s5 == zero ? zero : one; \\\n" +"accumulator += value.s6 == zero ? zero : one; \\\n" +"accumulator += value.s7 == zero ? zero : one\n" +"#elif kercn == 16\n" +"#define REDUCE_GLOBAL \\\n" +"srcT value = loadpix(srcptr + src_index); \\\n" +"accumulator += value.s0 == zero ? zero : one; \\\n" +"accumulator += value.s1 == zero ? zero : one; \\\n" +"accumulator += value.s2 == zero ? zero : one; \\\n" +"accumulator += value.s3 == zero ? zero : one; \\\n" +"accumulator += value.s4 == zero ? zero : one; \\\n" +"accumulator += value.s5 == zero ? zero : one; \\\n" +"accumulator += value.s6 == zero ? zero : one; \\\n" +"accumulator += value.s7 == zero ? zero : one; \\\n" +"accumulator += value.s8 == zero ? zero : one; \\\n" +"accumulator += value.s9 == zero ? zero : one; \\\n" +"accumulator += value.sA == zero ? zero : one; \\\n" +"accumulator += value.sB == zero ? zero : one; \\\n" +"accumulator += value.sC == zero ? zero : one; \\\n" +"accumulator += value.sD == zero ? zero : one; \\\n" +"accumulator += value.sE == zero ? zero : one; \\\n" +"accumulator += value.sF == zero ? 
zero : one\n" +"#endif\n" +"#define SET_LOCAL_1 \\\n" +"localmem[lid] = accumulator\n" +"#define REDUCE_LOCAL_1 \\\n" +"localmem[lid - WGS2_ALIGNED] += accumulator\n" +"#define REDUCE_LOCAL_2 \\\n" +"localmem[lid] += localmem[lid2]\n" +"#define CALC_RESULT \\\n" +"storepix(localmem[0], dstptr + dstTSIZE * gid)\n" +"#else\n" +"#error \"No operation\"\n" +"#endif\n" +"#ifdef OP_DOT\n" +"#undef EXTRA_PARAMS\n" +"#define EXTRA_PARAMS , __global uchar * src2ptr, int src2_step, int src2_offset\n" +"#endif\n" +"__kernel void reduce(__global const uchar * srcptr, int src_step, int src_offset, int cols,\n" +"int total, int groupnum, __global uchar * dstptr EXTRA_PARAMS)\n" +"{\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int id = get_global_id(0) * kercn;\n" +"srcptr += src_offset;\n" +"#ifdef HAVE_SRC2\n" +"src2ptr += src2_offset;\n" +"#endif\n" +"DECLARE_LOCAL_MEM;\n" +"DEFINE_ACCUMULATOR;\n" +"for (int grain = groupnum * WGS * kercn; id < total; id += grain)\n" +"{\n" +"#ifdef HAVE_SRC_CONT\n" +"int src_index = id * srcTSIZE;\n" +"#else\n" +"int src_index = mad24(id / cols, src_step, mul24(id % cols, srcTSIZE));\n" +"#endif\n" +"#ifdef HAVE_SRC2\n" +"#ifdef HAVE_SRC2_CONT\n" +"int src2_index = id * srcTSIZE;\n" +"#else\n" +"int src2_index = mad24(id / cols, src2_step, mul24(id % cols, srcTSIZE));\n" +"#endif\n" +"#endif\n" +"REDUCE_GLOBAL;\n" +"}\n" +"if (lid < WGS2_ALIGNED)\n" +"{\n" +"SET_LOCAL_1;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid >= WGS2_ALIGNED && total >= WGS2_ALIGNED)\n" +"{\n" +"REDUCE_LOCAL_1;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int lsize = WGS2_ALIGNED >> 1; lsize > 0; lsize >>= 1)\n" +"{\n" +"if (lid < lsize)\n" +"{\n" +"int lid2 = lsize + lid;\n" +"REDUCE_LOCAL_2;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"if (lid == 0)\n" +"{\n" +"CALC_RESULT;\n" +"}\n" +"}\n" +, "2bd554448b0b0af7e1a1ddd57a55f5a6", NULL}; +struct cv::ocl::internal::ProgramEntry reduce2_oclsrc={moduleName, "reduce2", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if ddepth == 0\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL 255\n" +"#elif ddepth == 1\n" +"#define MIN_VAL -128\n" +"#define MAX_VAL 127\n" +"#elif ddepth == 2\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL 65535\n" +"#elif ddepth == 3\n" +"#define MIN_VAL -32768\n" +"#define MAX_VAL 32767\n" +"#elif ddepth == 4\n" +"#define MIN_VAL INT_MIN\n" +"#define MAX_VAL INT_MAX\n" +"#elif ddepth == 5\n" +"#define MIN_VAL (-FLT_MAX)\n" +"#define MAX_VAL FLT_MAX\n" +"#elif ddepth == 6\n" +"#define MIN_VAL (-DBL_MAX)\n" +"#define MAX_VAL DBL_MAX\n" +"#else\n" +"#error \"Unsupported depth\"\n" +"#endif\n" +"#define noconvert\n" +"#if defined OCL_CV_REDUCE_SUM || defined OCL_CV_REDUCE_AVG\n" +"#define INIT_VALUE 0\n" +"#define PROCESS_ELEM(acc, value) acc += value\n" +"#elif defined OCL_CV_REDUCE_MAX\n" +"#define INIT_VALUE MIN_VAL\n" +"#define PROCESS_ELEM(acc, value) acc = max(value, acc)\n" +"#elif defined OCL_CV_REDUCE_MIN\n" +"#define INIT_VALUE MAX_VAL\n" +"#define PROCESS_ELEM(acc, value) acc = min(value, acc)\n" +"#else\n" +"#error \"No operation is specified\"\n" +"#endif\n" +"#ifdef OP_REDUCE_PRE\n" +"__kernel void reduce_horz_opt(__global const uchar * srcptr, int src_step, int src_offset, int rows, int cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset\n" +"#ifdef OCL_CV_REDUCE_AVG\n" +", float fscale\n" +"#endif\n" 
+")\n" +"{\n" +"__local bufT lsmem[TILE_HEIGHT][BUF_COLS][cn];\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"int liy = get_local_id(1);\n" +"if ((x < BUF_COLS) && (y < rows))\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, (int)sizeof(srcT) * cn, src_offset));\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index);\n" +"bufT tmp[cn];\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"tmp[c] = INIT_VALUE;\n" +"int src_step_mul = BUF_COLS * cn;\n" +"for (int idx = x; idx < cols; idx += BUF_COLS, src += src_step_mul)\n" +"{\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"bufT value = convertToBufT(src[c]);\n" +"PROCESS_ELEM(tmp[c], value);\n" +"}\n" +"}\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"lsmem[liy][x][c] = tmp[c];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if ((x < BUF_COLS / 2) && (y < rows))\n" +"{\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"PROCESS_ELEM(lsmem[liy][x][c], lsmem[liy][x + BUF_COLS / 2][c]);\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if ((x == 0) && (y < rows))\n" +"{\n" +"int dst_index = mad24(y, dst_step, dst_offset);\n" +"__global dstT * dst = (__global dstT *)(dstptr + dst_index);\n" +"bufT tmp[cn];\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"tmp[c] = INIT_VALUE;\n" +"#pragma unroll\n" +"for (int xin = 0; xin < BUF_COLS / 2; xin ++)\n" +"{\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"PROCESS_ELEM(tmp[c], lsmem[liy][xin][c]);\n" +"}\n" +"}\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"#ifdef OCL_CV_REDUCE_AVG\n" +"dst[c] = convertToDT(convertToWT(tmp[c]) * fscale);\n" +"#else\n" +"dst[c] = convertToDT(tmp[c]);\n" +"#endif\n" +"}\n" +"}\n" +"#else\n" +"__kernel void reduce(__global const uchar * srcptr, int src_step, int src_offset, int rows, int cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset\n" +"#ifdef OCL_CV_REDUCE_AVG\n" +", float fscale\n" +"#endif\n" +")\n" +"{\n" +"#if dim == 0\n" +"int x = get_global_id(0);\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(x, (int)sizeof(srcT) * cn, src_offset);\n" +"int dst_index = mad24(x, (int)sizeof(dstT0) * cn, dst_offset);\n" +"__global dstT0 * dst = (__global dstT0 *)(dstptr + dst_index);\n" +"dstT tmp[cn];\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"tmp[c] = INIT_VALUE;\n" +"for (int y = 0; y < rows; ++y, src_index += src_step)\n" +"{\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index);\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"dstT value = convertToDT(src[c]);\n" +"PROCESS_ELEM(tmp[c], value);\n" +"}\n" +"}\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"#ifdef OCL_CV_REDUCE_AVG\n" +"dst[c] = convertToDT0(convertToWT(tmp[c]) * fscale);\n" +"#else\n" +"dst[c] = convertToDT0(tmp[c]);\n" +"#endif\n" +"}\n" +"#elif dim == 1\n" +"int y = get_global_id(0);\n" +"if (y < rows)\n" +"{\n" +"int src_index = mad24(y, src_step, src_offset);\n" +"int dst_index = mad24(y, dst_step, dst_offset);\n" +"__global const srcT * src = (__global const srcT *)(srcptr + src_index);\n" +"__global dstT * dst = (__global dstT *)(dstptr + dst_index);\n" +"dstT tmp[cn];\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"tmp[c] = INIT_VALUE;\n" +"for (int x = 0; x < cols; ++x, src += cn)\n" +"{\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"dstT value = convertToDT(src[c]);\n" +"PROCESS_ELEM(tmp[c], value);\n" +"}\n" +"}\n" +"#pragma unroll\n" +"for 
(int c = 0; c < cn; ++c)\n" +"#ifdef OCL_CV_REDUCE_AVG\n" +"dst[c] = convertToDT0(convertToWT(tmp[c]) * fscale);\n" +"#else\n" +"dst[c] = convertToDT0(tmp[c]);\n" +"#endif\n" +"}\n" +"#else\n" +"#error \"Dims must be either 0 or 1\"\n" +"#endif\n" +"}\n" +"#endif\n" +, "675811294a4da68880f2ace25764d371", NULL}; +struct cv::ocl::internal::ProgramEntry repeat_oclsrc={moduleName, "repeat", +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T *)(addr)\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#endif\n" +"__kernel void repeat(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < src_cols)\n" +"{\n" +"int src_index = mad24(y0, src_step, mad24(x, (int)sizeof(T), src_offset));\n" +"int dst_index0 = mad24(y0, dst_step, mad24(x, (int)sizeof(T), dst_offset));\n" +"for (int y = y0, y1 = min(src_rows, y0 + rowsPerWI); y < y1; ++y, src_index += src_step, dst_index0 += dst_step)\n" +"{\n" +"T srcelem = loadpix(srcptr + src_index);\n" +"#pragma unroll\n" +"for (int ey = 0; ey < ny; ++ey)\n" +"{\n" +"int dst_index = mad24(ey * src_rows, dst_step, dst_index0);\n" +"#pragma unroll\n" +"for (int ex = 0; ex < nx; ++ex)\n" +"{\n" +"storepix(srcelem, dstptr + dst_index);\n" +"dst_index = mad24(src_cols, (int)sizeof(T), dst_index);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"}\n" +, "d7a6b479ac9abf39f50a2d86c0b50863", NULL}; +struct cv::ocl::internal::ProgramEntry set_identity_oclsrc={moduleName, "set_identity", +"#if kercn != 3\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#define scalar scalar_\n" +"#else\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#define scalar (T)(scalar_.x, scalar_.y, scalar_.z)\n" +"#endif\n" +"__kernel void setIdentity(__global uchar * srcptr, int src_step, int src_offset, int rows, int cols,\n" +"ST scalar_)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y0, src_step, mad24(x, TSIZE, src_offset));\n" +"#if kercn == cn\n" +"#pragma unroll\n" +"for (int y = y0, i = 0, y1 = min(rows, y0 + rowsPerWI); i < rowsPerWI; ++y, ++i, src_index += src_step)\n" +"if (y < y1)\n" +"storepix(x == y ? scalar : (T)(0), srcptr + src_index);\n" +"#elif kercn == 4 && cn == 1\n" +"if (y0 < rows)\n" +"{\n" +"storepix(x == y0 >> 2 ? (T)(scalar, 0, 0, 0) : (T)(0), srcptr + src_index);\n" +"if (++y0 < rows)\n" +"{\n" +"src_index += src_step;\n" +"storepix(x == y0 >> 2 ? (T)(0, scalar, 0, 0) : (T)(0), srcptr + src_index);\n" +"if (++y0 < rows)\n" +"{\n" +"src_index += src_step;\n" +"storepix(x == y0 >> 2 ? (T)(0, 0, scalar, 0) : (T)(0), srcptr + src_index);\n" +"if (++y0 < rows)\n" +"{\n" +"src_index += src_step;\n" +"storepix(x == y0 >> 2 ? 
(T)(0, 0, 0, scalar) : (T)(0), srcptr + src_index);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#else\n" +"#error \"Incorrect combination of cn && kercn\"\n" +"#endif\n" +"}\n" +"}\n" +, "75020e8c1da6cf8aece6bd5cc5b9ed4f", NULL}; +struct cv::ocl::internal::ProgramEntry split_merge_oclsrc={moduleName, "split_merge", +"#ifdef OP_MERGE\n" +"#define DECLARE_SRC_PARAM(index) __global const uchar * src##index##ptr, int src##index##_step, int src##index##_offset,\n" +"#define DECLARE_INDEX(index) int src##index##_index = mad24(src##index##_step, y0, mad24(x, (int)sizeof(T) * scn##index, src##index##_offset));\n" +"#define PROCESS_ELEM(index) \\\n" +"__global const T * src##index = (__global const T *)(src##index##ptr + src##index##_index); \\\n" +"dst[index] = src##index[0]; \\\n" +"src##index##_index += src##index##_step;\n" +"__kernel void merge(DECLARE_SRC_PARAMS_N\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols, int rowsPerWI)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"DECLARE_INDEX_N\n" +"int dst_index = mad24(dst_step, y0, mad24(x, (int)sizeof(T) * cn, dst_offset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, dst_index += dst_step)\n" +"{\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"PROCESS_ELEMS_N\n" +"}\n" +"}\n" +"}\n" +"#elif defined OP_SPLIT\n" +"#define DECLARE_DST_PARAM(index) , __global uchar * dst##index##ptr, int dst##index##_step, int dst##index##_offset\n" +"#define DECLARE_INDEX(index) int dst##index##_index = mad24(y0, dst##index##_step, mad24(x, (int)sizeof(T), dst##index##_offset));\n" +"#define PROCESS_ELEM(index) \\\n" +"__global T * dst##index = (__global T *)(dst##index##ptr + dst##index##_index); \\\n" +"dst##index[0] = src[index]; \\\n" +"dst##index##_index += dst##index##_step;\n" +"__kernel void split(__global uchar* srcptr, int src_step, int src_offset, int rows, int cols DECLARE_DST_PARAMS, int rowsPerWI)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y0 = get_global_id(1) * rowsPerWI;\n" +"if (x < cols)\n" +"{\n" +"DECLARE_INDEX_N\n" +"int src_index = mad24(y0, src_step, mad24(x, cn * (int)sizeof(T), src_offset));\n" +"for (int y = y0, y1 = min(rows, y0 + rowsPerWI); y < y1; ++y, src_index += src_step)\n" +"{\n" +"__global const T * src = (__global const T *)(srcptr + src_index);\n" +"PROCESS_ELEMS_N\n" +"}\n" +"}\n" +"}\n" +"#else\n" +"#error \"No operation\"\n" +"#endif\n" +, "11e06966b3c2f2081fd02cf70337b495", NULL}; +struct cv::ocl::internal::ProgramEntry transpose_oclsrc={moduleName, "transpose", +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T *)(addr)\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#endif\n" +"#ifndef INPLACE\n" +"#define LDS_STEP (TILE_DIM + 1)\n" +"__kernel void transpose(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset)\n" +"{\n" +"int gp_x = get_group_id(0), gp_y = get_group_id(1);\n" +"int gs_x = get_num_groups(0), gs_y = get_num_groups(1);\n" +"int groupId_x, groupId_y;\n" +"if (src_rows == src_cols)\n" +"{\n" +"groupId_y = gp_x;\n" +"groupId_x = (gp_x + gp_y) % gs_x;\n" +"}\n" +"else\n" +"{\n" +"int bid = mad24(gs_x, gp_y, gp_x);\n" +"groupId_y = bid % 
gs_y;\n" +"groupId_x = ((bid / gs_y) + groupId_y) % gs_x;\n" +"}\n" +"int lx = get_local_id(0);\n" +"int ly = get_local_id(1);\n" +"int x = mad24(groupId_x, TILE_DIM, lx);\n" +"int y = mad24(groupId_y, TILE_DIM, ly);\n" +"int x_index = mad24(groupId_y, TILE_DIM, lx);\n" +"int y_index = mad24(groupId_x, TILE_DIM, ly);\n" +"__local T tile[TILE_DIM * LDS_STEP];\n" +"if (x < src_cols && y < src_rows)\n" +"{\n" +"int index_src = mad24(y, src_step, mad24(x, TSIZE, src_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)\n" +"if (y + i < src_rows)\n" +"{\n" +"tile[mad24(ly + i, LDS_STEP, lx)] = loadpix(srcptr + index_src);\n" +"index_src = mad24(BLOCK_ROWS, src_step, index_src);\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x_index < src_rows && y_index < src_cols)\n" +"{\n" +"int index_dst = mad24(y_index, dst_step, mad24(x_index, TSIZE, dst_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS)\n" +"if ((y_index + i) < src_cols)\n" +"{\n" +"storepix(tile[mad24(lx, LDS_STEP, ly + i)], dstptr + index_dst);\n" +"index_dst = mad24(BLOCK_ROWS, dst_step, index_dst);\n" +"}\n" +"}\n" +"}\n" +"#else\n" +"__kernel void transpose_inplace(__global uchar * srcptr, int src_step, int src_offset, int src_rows)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < y + rowsPerWI)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, TSIZE, src_offset));\n" +"int dst_index = mad24(x, src_step, mad24(y, TSIZE, src_offset));\n" +"T tmp;\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y, src_index += src_step, dst_index += TSIZE)\n" +"if (y < src_rows && x < y)\n" +"{\n" +"__global uchar * src = srcptr + src_index;\n" +"__global uchar * dst = srcptr + dst_index;\n" +"tmp = loadpix(dst);\n" +"storepix(loadpix(src), dst);\n" +"storepix(tmp, src);\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "f938bc7c686ae7bca004809df19ef032", NULL}; + +}}} +#endif diff --git a/generated/modules/core/opencl_kernels_core.hpp b/generated/modules/core/opencl_kernels_core.hpp new file mode 100644 index 0000000..a385cee --- /dev/null +++ b/generated/modules/core/opencl_kernels_core.hpp @@ -0,0 +1,41 @@ +// This file is auto-generated. Do not edit! 
+ +#include "opencv2/core/ocl.hpp" +#include "opencv2/core/ocl_genbase.hpp" +#include "opencv2/core/opencl/ocl_defs.hpp" + +#ifdef HAVE_OPENCL + +namespace cv +{ +namespace ocl +{ +namespace core +{ + +extern struct cv::ocl::internal::ProgramEntry arithm_oclsrc; +extern struct cv::ocl::internal::ProgramEntry convert_oclsrc; +extern struct cv::ocl::internal::ProgramEntry copymakeborder_oclsrc; +extern struct cv::ocl::internal::ProgramEntry copyset_oclsrc; +extern struct cv::ocl::internal::ProgramEntry cvtclr_dx_oclsrc; +extern struct cv::ocl::internal::ProgramEntry fft_oclsrc; +extern struct cv::ocl::internal::ProgramEntry flip_oclsrc; +extern struct cv::ocl::internal::ProgramEntry gemm_oclsrc; +extern struct cv::ocl::internal::ProgramEntry halfconvert_oclsrc; +extern struct cv::ocl::internal::ProgramEntry inrange_oclsrc; +extern struct cv::ocl::internal::ProgramEntry intel_gemm_oclsrc; +extern struct cv::ocl::internal::ProgramEntry lut_oclsrc; +extern struct cv::ocl::internal::ProgramEntry meanstddev_oclsrc; +extern struct cv::ocl::internal::ProgramEntry minmaxloc_oclsrc; +extern struct cv::ocl::internal::ProgramEntry mixchannels_oclsrc; +extern struct cv::ocl::internal::ProgramEntry mulspectrums_oclsrc; +extern struct cv::ocl::internal::ProgramEntry normalize_oclsrc; +extern struct cv::ocl::internal::ProgramEntry reduce_oclsrc; +extern struct cv::ocl::internal::ProgramEntry reduce2_oclsrc; +extern struct cv::ocl::internal::ProgramEntry repeat_oclsrc; +extern struct cv::ocl::internal::ProgramEntry set_identity_oclsrc; +extern struct cv::ocl::internal::ProgramEntry split_merge_oclsrc; +extern struct cv::ocl::internal::ProgramEntry transpose_oclsrc; + +}}} +#endif diff --git a/generated/modules/core/split.simd_declarations.hpp b/generated/modules/core/split.simd_declarations.hpp new file mode 100644 index 0000000..fa6bb44 --- /dev/null +++ b/generated/modules/core/split.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/split.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/stat.simd_declarations.hpp b/generated/modules/core/stat.simd_declarations.hpp new file mode 100644 index 0000000..3c01c89 --- /dev/null +++ b/generated/modules/core/stat.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/stat.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/sum.simd_declarations.hpp b/generated/modules/core/sum.simd_declarations.hpp new file mode 100644 index 0000000..5153063 --- /dev/null +++ b/generated/modules/core/sum.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/core/src/sum.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/core/version_string.inc b/generated/modules/core/version_string.inc new file mode 100644 index 0000000..008ac23 --- /dev/null +++ b/generated/modules/core/version_string.inc @@ -0,0 +1,97 @@ +"\n" +"General configuration for OpenCV 4.5.4 =====================================\n" +" Version control: 4.5.4\n" +"\n" +" Platform:\n" +" Timestamp: 2024-03-28T06:23:21Z\n" +" Host: Linux 4.15.0-70-generic aarch64\n" +" CMake: 3.10.2\n" +" CMake generator: Unix Makefiles\n" +" CMake build tool: /usr/bin/make\n" +" Configuration: Release\n" +"\n" +" CPU/HW features:\n" +" Baseline: NEON FP16\n" +"\n" +" C/C++:\n" +" Built as dynamic libs?: YES\n" 
+" C++ standard: 11\n" +" C++ Compiler: /usr/bin/c++ (ver 7.4.0)\n" +" C++ flags (Release): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wuninitialized -Wsuggest-override -Wno-delete-non-virtual-dtor -Wno-comment -Wimplicit-fallthrough=3 -Wno-strict-overflow -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -fvisibility=hidden -fvisibility-inlines-hidden -O3 -DNDEBUG -DNDEBUG\n" +" C++ flags (Debug): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wuninitialized -Wsuggest-override -Wno-delete-non-virtual-dtor -Wno-comment -Wimplicit-fallthrough=3 -Wno-strict-overflow -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -fvisibility=hidden -fvisibility-inlines-hidden -g -O0 -DDEBUG -D_DEBUG\n" +" C Compiler: /usr/bin/cc\n" +" C flags (Release): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wuninitialized -Wno-comment -Wimplicit-fallthrough=3 -Wno-strict-overflow -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -fvisibility=hidden -O3 -DNDEBUG -DNDEBUG\n" +" C flags (Debug): -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wuninitialized -Wno-comment -Wimplicit-fallthrough=3 -Wno-strict-overflow -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -fvisibility=hidden -g -O0 -DDEBUG -D_DEBUG\n" +" Linker flags (Release): -Wl,--gc-sections -Wl,--as-needed \n" +" Linker flags (Debug): -Wl,--gc-sections -Wl,--as-needed \n" +" ccache: NO\n" +" Precompiled headers: NO\n" +" Extra dependencies: dl m pthread rt\n" +" 3rdparty dependencies:\n" +"\n" +" OpenCV modules:\n" +" To be built: core imgcodecs imgproc python3 videoio\n" +" Disabled: calib3d dnn features2d flann gapi highgui java_bindings_generator js_bindings_generator ml objc_bindings_generator objdetect photo python_tests stitching ts video world\n" +" Disabled by dependency: -\n" +" Unavailable: java python2\n" +" Applications: -\n" +" Documentation: NO\n" +" Non-free algorithms: NO\n" +"\n" +" GUI: \n" +" GTK+: NO\n" +" VTK support: NO\n" +"\n" +" Media I/O: \n" +" ZLib: zlib (ver 1.2.11)\n" +" JPEG: libjpeg-turbo (ver 2.1.0-62)\n" +" WEBP: build (ver encoder: 0x020f)\n" +" PNG: build (ver 1.6.37)\n" +" JPEG 2000: build (ver 2.4.0)\n" +" HDR: NO\n" +" SUNRASTER: NO\n" +" PXM: NO\n" +" PFM: NO\n" +"\n" +" Video I/O:\n" +" DC1394: NO\n" +" FFMPEG: NO\n" +" avcodec: NO\n" +" avformat: NO\n" +" avutil: NO\n" +" swscale: NO\n" +" avresample: NO\n" +" GStreamer: NO\n" +" v4l/v4l2: YES (linux/videodev2.h)\n" +"\n" +" Parallel framework: pthreads\n" +"\n" +" Trace: YES (with Intel ITT)\n" +"\n" +" Other third-party libraries:\n" +" Lapack: NO\n" +" Eigen: NO\n" +" Custom HAL: NO\n" +" Protobuf: build (3.5.1)\n" +"\n" +" OpenCL: YES (no extra 
features)\n" +" Include path: /root/opencv/3rdparty/include/opencl/1.2\n" +" Link libraries: Dynamic load\n" +"\n" +" Python 3:\n" +" Interpreter: /usr/bin/python3 (ver 3.6.9)\n" +" Libraries: /usr/lib/aarch64-linux-gnu/libpython3.6m.so (ver 3.6.9)\n" +" numpy: /usr/local/lib/python3.6/dist-packages/numpy/core/include (ver 1.19.5)\n" +" install path: lib/python3.6/dist-packages/cv2/python-3.6\n" +"\n" +" Python (for build): /usr/bin/python3\n" +"\n" +" Java: \n" +" ant: NO\n" +" JNI: NO\n" +" Java wrappers: NO\n" +" Java tests: NO\n" +"\n" +" Install to: /usr/local\n" +"-----------------------------------------------------------------\n" +"\n" diff --git a/generated/modules/highgui/opencv_highgui_config.hpp b/generated/modules/highgui/opencv_highgui_config.hpp new file mode 100644 index 0000000..e164a87 --- /dev/null +++ b/generated/modules/highgui/opencv_highgui_config.hpp @@ -0,0 +1,4 @@ +// Auto-generated file +#define OPENCV_HIGHGUI_BUILTIN_BACKEND_STR "NONE" + +#define OPENCV_HIGHGUI_WITHOUT_BUILTIN_BACKEND 1 diff --git a/generated/modules/imgproc/accum.simd_declarations.hpp b/generated/modules/imgproc/accum.simd_declarations.hpp new file mode 100644 index 0000000..c4e0088 --- /dev/null +++ b/generated/modules/imgproc/accum.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/accum.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/bilateral_filter.simd_declarations.hpp b/generated/modules/imgproc/bilateral_filter.simd_declarations.hpp new file mode 100644 index 0000000..73918ee --- /dev/null +++ b/generated/modules/imgproc/bilateral_filter.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/bilateral_filter.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/box_filter.simd_declarations.hpp b/generated/modules/imgproc/box_filter.simd_declarations.hpp new file mode 100644 index 0000000..e0fde47 --- /dev/null +++ b/generated/modules/imgproc/box_filter.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/box_filter.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/color_hsv.simd_declarations.hpp b/generated/modules/imgproc/color_hsv.simd_declarations.hpp new file mode 100644 index 0000000..488136f --- /dev/null +++ b/generated/modules/imgproc/color_hsv.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/color_hsv.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/color_rgb.simd_declarations.hpp b/generated/modules/imgproc/color_rgb.simd_declarations.hpp new file mode 100644 index 0000000..89aab19 --- /dev/null +++ b/generated/modules/imgproc/color_rgb.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/color_rgb.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/color_yuv.simd_declarations.hpp b/generated/modules/imgproc/color_yuv.simd_declarations.hpp new file mode 100644 index 0000000..282e785 --- /dev/null +++ b/generated/modules/imgproc/color_yuv.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/color_yuv.simd.hpp" +#define 
CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/filter.simd_declarations.hpp b/generated/modules/imgproc/filter.simd_declarations.hpp new file mode 100644 index 0000000..5162e06 --- /dev/null +++ b/generated/modules/imgproc/filter.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/filter.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/median_blur.simd_declarations.hpp b/generated/modules/imgproc/median_blur.simd_declarations.hpp new file mode 100644 index 0000000..b18b5af --- /dev/null +++ b/generated/modules/imgproc/median_blur.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/median_blur.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/morph.simd_declarations.hpp b/generated/modules/imgproc/morph.simd_declarations.hpp new file mode 100644 index 0000000..98736fd --- /dev/null +++ b/generated/modules/imgproc/morph.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/morph.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/opencl_kernels_imgproc.cpp b/generated/modules/imgproc/opencl_kernels_imgproc.cpp new file mode 100644 index 0000000..f04c8e8 --- /dev/null +++ b/generated/modules/imgproc/opencl_kernels_imgproc.cpp @@ -0,0 +1,9129 @@ +// This file is auto-generated. Do not edit! + +#include "opencv2/core.hpp" +#include "cvconfig.h" +#include "opencl_kernels_imgproc.hpp" + +#ifdef HAVE_OPENCL + +namespace cv +{ +namespace ocl +{ +namespace imgproc +{ + +static const char* const moduleName = "imgproc"; + +struct cv::ocl::internal::ProgramEntry accumulate_oclsrc={moduleName, "accumulate", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define SRC_TSIZE cn * (int)sizeof(srcT1)\n" +"#define DST_TSIZE cn * (int)sizeof(dstT1)\n" +"#define noconvert\n" +"__kernel void accumulate(__global const uchar * srcptr, int src_step, int src_offset,\n" +"#ifdef ACCUMULATE_PRODUCT\n" +"__global const uchar * src2ptr, int src2_step, int src2_offset,\n" +"#endif\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols\n" +"#ifdef ACCUMULATE_WEIGHTED\n" +", dstT1 alpha\n" +"#endif\n" +"#ifdef HAVE_MASK\n" +", __global const uchar * mask, int mask_step, int mask_offset\n" +"#endif\n" +")\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, SRC_TSIZE, src_offset));\n" +"#ifdef HAVE_MASK\n" +"int mask_index = mad24(y, mask_step, mask_offset + x);\n" +"mask += mask_index;\n" +"#endif\n" +"#ifdef ACCUMULATE_PRODUCT\n" +"int src2_index = mad24(y, src2_step, mad24(x, SRC_TSIZE, src2_offset));\n" +"#endif\n" +"int dst_index = mad24(y, dst_step, mad24(x, DST_TSIZE, dst_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const srcT1 * src = (__global const srcT1 *)(srcptr + src_index);\n" +"#ifdef ACCUMULATE_PRODUCT\n" +"__global const srcT1 * src2 = (__global const srcT1 *)(src2ptr + src2_index);\n" +"#endif\n" +"__global dstT1 * dst = 
(__global dstT1 *)(dstptr + dst_index);\n" +"#ifdef HAVE_MASK\n" +"if (mask[0])\n" +"#endif\n" +"#pragma unroll\n" +"for (int c = 0; c < cn; ++c)\n" +"{\n" +"#ifdef ACCUMULATE\n" +"dst[c] += convertToDT(src[c]);\n" +"#elif defined ACCUMULATE_SQUARE\n" +"dstT1 val = convertToDT(src[c]);\n" +"dst[c] = fma(val, val, dst[c]);\n" +"#elif defined ACCUMULATE_PRODUCT\n" +"dst[c] = fma(convertToDT(src[c]), convertToDT(src2[c]), dst[c]);\n" +"#elif defined ACCUMULATE_WEIGHTED\n" +"dst[c] = fma(1 - alpha, dst[c], src[c] * alpha);\n" +"#else\n" +"#error \"Unknown accumulation type\"\n" +"#endif\n" +"}\n" +"src_index += src_step;\n" +"#ifdef ACCUMULATE_PRODUCT\n" +"src2_index += src2_step;\n" +"#endif\n" +"#ifdef HAVE_MASK\n" +"mask += mask_step;\n" +"#endif\n" +"dst_index += dst_step;\n" +"++y;\n" +"}\n" +"}\n" +"}\n" +, "5f2c2d40f721d738ad2b8ef755376c6f", NULL}; +struct cv::ocl::internal::ProgramEntry bilateral_oclsrc={moduleName, "bilateral", +"#if cn != 3\n" +"#define loadpix(addr) *(__global const uchar_t *)(addr)\n" +"#define storepix(val, addr) *(__global uchar_t *)(addr) = val\n" +"#define TSIZE cn\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const uchar *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global uchar *)(addr))\n" +"#define TSIZE 3\n" +"#endif\n" +"#if cn == 1\n" +"#define SUM(a) a\n" +"#elif cn == 2\n" +"#define SUM(a) a.x + a.y\n" +"#elif cn == 3\n" +"#define SUM(a) a.x + a.y + a.z\n" +"#elif cn == 4\n" +"#define SUM(a) a.x + a.y + a.z + a.w\n" +"#else\n" +"#error \"cn should be <= 4\"\n" +"#endif\n" +"__kernel void bilateral(__global const uchar * src, int src_step, int src_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * space_weight, __constant int * space_ofs)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (y < dst_rows && x < dst_cols)\n" +"{\n" +"int src_index = mad24(y + radius, src_step, mad24(x + radius, TSIZE, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"float_t sum = (float_t)(0.0f);\n" +"float wsum = 0.0f;\n" +"#ifdef INTEL_DEVICE\n" +"float_t val0 = convert_float_t(loadpix(src + src_index));\n" +"#else\n" +"int_t val0 = convert_int_t(loadpix(src + src_index));\n" +"#endif\n" +"#pragma unroll\n" +"for (int k = 0; k < maxk; k++ )\n" +"{\n" +"#ifdef INTEL_DEVICE\n" +"float_t val = convert_float_t(loadpix(src + src_index + space_ofs[k]));\n" +"float diff = SUM(fabs(val - val0));\n" +"#else\n" +"int_t val = convert_int_t(loadpix(src + src_index + space_ofs[k]));\n" +"int diff = SUM(abs(val - val0));\n" +"#endif\n" +"float w = space_weight[k] * native_exp((float)(diff * diff * gauss_color_coeff));\n" +"sum += convert_float_t(val) * (float_t)(w);\n" +"wsum += w;\n" +"}\n" +"storepix(convert_uchar_t(sum / (float_t)(wsum)), dst + dst_index);\n" +"}\n" +"}\n" +"#ifdef INTEL_DEVICE\n" +"#if cn == 1\n" +"__kernel void bilateral_float4(__global const uchar * src, int src_step, int src_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * space_weight, __constant int * space_ofs)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (y < dst_rows && x < dst_cols / 4 )\n" +"{\n" +"int src_index = ((y + radius) * src_step) + x * 4 + (radius + src_offset);\n" +"int dst_index = (y * dst_step) + x * 4 + dst_offset ;\n" +"float4 sum = 0.f, wsum = 0.f;\n" +"float4 val0 = convert_float4(vload4(0, src + src_index));\n" +"#pragma unroll\n" 
+"for (int k = 0; k < maxk; k++ )\n" +"{\n" +"float4 val = convert_float4(vload4(0, src + src_index + space_ofs[k]));\n" +"float4 w = space_weight[k] * native_exp((val - val0) * (val - val0) * gauss_color_coeff);\n" +"sum += val * w;\n" +"wsum += w;\n" +"}\n" +"sum = sum / wsum + .5f;\n" +"vstore4(convert_uchar4_rtz(sum), 0, dst + dst_index);\n" +"}\n" +"}\n" +"#endif\n" +"#endif\n" +, "1cc12569fdb93cbfa05bb215d3d42e64", NULL}; +struct cv::ocl::internal::ProgramEntry blend_linear_oclsrc={moduleName, "blend_linear", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"__kernel void blendLinear(__global const uchar * src1ptr, int src1_step, int src1_offset,\n" +"__global const uchar * src2ptr, int src2_step, int src2_offset,\n" +"__global const uchar * weight1, int weight1_step, int weight1_offset,\n" +"__global const uchar * weight2, int weight2_step, int weight2_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int src1_index = mad24(y, src1_step, src1_offset + x * cn * (int)sizeof(T));\n" +"int src2_index = mad24(y, src2_step, src2_offset + x * cn * (int)sizeof(T));\n" +"int weight1_index = mad24(y, weight1_step, weight1_offset + x * (int)sizeof(float));\n" +"int weight2_index = mad24(y, weight2_step, weight2_offset + x * (int)sizeof(float));\n" +"int dst_index = mad24(y, dst_step, dst_offset + x * cn * (int)sizeof(T));\n" +"float w1 = *(__global const float *)(weight1 + weight1_index),\n" +"w2 = *(__global const float *)(weight2 + weight2_index);\n" +"float den = w1 + w2 + 1e-5f;\n" +"__global const T * src1 = (__global const T *)(src1ptr + src1_index);\n" +"__global const T * src2 = (__global const T *)(src2ptr + src2_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"#pragma unroll\n" +"for (int i = 0; i < cn; ++i)\n" +"{\n" +"float num = w1 * convert_float(src1[i]) + w2 * convert_float(src2[i]);\n" +"dst[i] = convertToT(num / den);\n" +"}\n" +"}\n" +"}\n" +, "76072b51c3ede4951ee0200aa33297dc", NULL}; +struct cv::ocl::internal::ProgramEntry boxFilter_oclsrc={moduleName, "boxFilter", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const ST *)(addr)\n" +"#define storepix(val, addr) *(__global DT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(ST)\n" +"#define DSTSIZE (int)sizeof(DT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const ST1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global DT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(ST1)*cn\n" +"#define DSTSIZE (int)sizeof(DT1)*cn\n" +"#endif\n" +"#ifdef BORDER_CONSTANT\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"x = max(min(x, maxX - 1), minX); \\\n" +"y = max(min(y, maxY - 1), minY); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x -= ((x - maxX + 1) / maxX) * maxX; \\\n" +"if (x >= maxX) \\\n" +"x %= maxX; \\\n" +"if (y < minY) \\\n" +"y -= ((y - maxY + 1) / maxY) * maxY; \\\n" +"if (y >= 
maxY) \\\n" +"y %= maxY; \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, delta) \\\n" +"{ \\\n" +"if (maxX - minX == 1) \\\n" +"x = minX; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x = minX - (x - minX) - 1 + delta; \\\n" +"else \\\n" +"x = maxX - 1 - (x - maxX) - delta; \\\n" +"} \\\n" +"while (x >= maxX || x < minX); \\\n" +"\\\n" +"if (maxY - minY == 1) \\\n" +"y = minY; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (y < minY) \\\n" +"y = minY - (y - minY) - 1 + delta; \\\n" +"else \\\n" +"y = maxY - 1 - (y - maxY) - delta; \\\n" +"} \\\n" +"while (y >= maxY || y < minY); \\\n" +"}\n" +"#ifdef BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 0)\n" +"#elif defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 1)\n" +"#endif\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#define noconvert\n" +"#ifdef SQR\n" +"#define PROCESS_ELEM(value) (value * value)\n" +"#else\n" +"#define PROCESS_ELEM(value) value\n" +"#endif\n" +"struct RectCoords\n" +"{\n" +"int x1, y1, x2, y2;\n" +"};\n" +"inline WT readSrcPixel(int2 pos, __global const uchar * srcptr, int src_step, const struct RectCoords srcCoords)\n" +"{\n" +"#ifdef BORDER_ISOLATED\n" +"if (pos.x >= srcCoords.x1 && pos.y >= srcCoords.y1 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)\n" +"#else\n" +"if (pos.x >= 0 && pos.y >= 0 && pos.x < srcCoords.x2 && pos.y < srcCoords.y2)\n" +"#endif\n" +"{\n" +"int src_index = mad24(pos.y, src_step, pos.x * SRCSIZE);\n" +"WT value = convertToWT(loadpix(srcptr + src_index));\n" +"return PROCESS_ELEM(value);\n" +"}\n" +"else\n" +"{\n" +"#ifdef BORDER_CONSTANT\n" +"return (WT)(0);\n" +"#else\n" +"int selected_col = pos.x, selected_row = pos.y;\n" +"EXTRAPOLATE(selected_col, selected_row,\n" +"#ifdef BORDER_ISOLATED\n" +"srcCoords.x1, srcCoords.y1,\n" +"#else\n" +"0, 0,\n" +"#endif\n" +"srcCoords.x2, srcCoords.y2);\n" +"int src_index = mad24(selected_row, src_step, selected_col * SRCSIZE);\n" +"WT value = convertToWT(loadpix(srcptr + src_index));\n" +"return PROCESS_ELEM(value);\n" +"#endif\n" +"}\n" +"}\n" +"__kernel void boxFilter(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols\n" +"#ifdef NORMALIZE\n" +", float alpha\n" +"#endif\n" +")\n" +"{\n" +"const struct RectCoords srcCoords = { srcOffsetX, srcOffsetY, srcEndX, srcEndY };\n" +"int x = get_local_id(0) + (LOCAL_SIZE_X - (KERNEL_SIZE_X - 1)) * get_group_id(0) - ANCHOR_X;\n" +"int y = get_global_id(1) * BLOCK_SIZE_Y;\n" +"int local_id = get_local_id(0);\n" +"WT data[KERNEL_SIZE_Y];\n" +"__local WT sumOfCols[LOCAL_SIZE_X];\n" +"int2 srcPos = (int2)(srcCoords.x1 + x, srcCoords.y1 + y - ANCHOR_Y);\n" +"#pragma unroll\n" +"for (int sy = 0; sy < KERNEL_SIZE_Y; sy++, srcPos.y++)\n" +"data[sy] = readSrcPixel(srcPos, srcptr, src_step, srcCoords);\n" +"WT tmp_sum = (WT)(0);\n" +"#pragma unroll\n" +"for (int sy = 0; sy < KERNEL_SIZE_Y; sy++)\n" +"tmp_sum += data[sy];\n" +"sumOfCols[local_id] = tmp_sum;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int dst_index = mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset));\n" +"__global DT * dst = (__global DT *)(dstptr + dst_index);\n" +"int sy_index = 0;\n" +"for (int i = 0, stepY = min(rows - y, BLOCK_SIZE_Y); i < stepY; ++i)\n" +"{\n" +"if 
(local_id >= ANCHOR_X && local_id < LOCAL_SIZE_X - (KERNEL_SIZE_X - 1 - ANCHOR_X) &&\n" +"x >= 0 && x < cols)\n" +"{\n" +"WT total_sum = (WT)(0);\n" +"#pragma unroll\n" +"for (int sx = 0; sx < KERNEL_SIZE_X; sx++)\n" +"total_sum += sumOfCols[local_id + sx - ANCHOR_X];\n" +"#ifdef NORMALIZE\n" +"DT dstval = convertToDT((WT)(alpha) * total_sum);\n" +"#else\n" +"DT dstval = convertToDT(total_sum);\n" +"#endif\n" +"storepix(dstval, dst);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"tmp_sum = sumOfCols[local_id];\n" +"tmp_sum -= data[sy_index];\n" +"data[sy_index] = readSrcPixel(srcPos, srcptr, src_step, srcCoords);\n" +"srcPos.y++;\n" +"tmp_sum += data[sy_index];\n" +"sumOfCols[local_id] = tmp_sum;\n" +"sy_index = sy_index + 1 < KERNEL_SIZE_Y ? sy_index + 1 : 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"dst = (__global DT *)((__global uchar *)dst + dst_step);\n" +"}\n" +"}\n" +, "d3e542270fa2ea1fc3744043dad50cb4", NULL}; +struct cv::ocl::internal::ProgramEntry boxFilter3x3_oclsrc={moduleName, "boxFilter3x3", +"__kernel void boxFilter3x3_8UC1_cols16_rows2(__global const uint* src, int src_step,\n" +"__global uint* dst, int dst_step, int rows, int cols\n" +"#ifdef NORMALIZE\n" +", float alpha\n" +"#endif\n" +")\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"int ssx, dsx;\n" +"if ((block_x * 16) >= cols || y >= rows) return;\n" +"uint4 line[4];\n" +"uint4 line_out[2];\n" +"ushort a; ushort16 b; ushort c;\n" +"ushort d; ushort16 e; ushort f;\n" +"ushort g; ushort16 h; ushort i;\n" +"ushort j; ushort16 k; ushort l;\n" +"ssx = dsx = 1;\n" +"int src_index = block_x * 4 * ssx + (y - 1) * (src_step / 4);\n" +"line[1] = vload4(0, src + src_index + (src_step / 4));\n" +"line[2] = vload4(0, src + src_index + 2 * (src_step / 4));\n" +"#ifdef BORDER_CONSTANT\n" +"line[0] = (y == 0) ? (uint4)0 : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? (uint4)0 : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined BORDER_REFLECT_101\n" +"line[0] = (y == 0) ? line[2] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[1] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"line[0] = (y == 0) ? line[1] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[2] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#endif\n" +"ushort16 sum, mid;\n" +"__global uchar *src_p = (__global uchar *)src;\n" +"src_index = block_x * 16 * ssx + (y - 1) * src_step;\n" +"bool line_end = ((block_x + 1) * 16 == cols);\n" +"b = convert_ushort16(as_uchar16(line[0]));\n" +"e = convert_ushort16(as_uchar16(line[1]));\n" +"h = convert_ushort16(as_uchar16(line[2]));\n" +"k = convert_ushort16(as_uchar16(line[3]));\n" +"#ifdef BORDER_CONSTANT\n" +"a = (block_x == 0 || y == 0) ? 0 : convert_ushort(src_p[src_index - 1]);\n" +"c = (line_end || y == 0) ? 0 : convert_ushort(src_p[src_index + 16]);\n" +"d = (block_x == 0) ? 0 : convert_ushort(src_p[src_index + src_step - 1]);\n" +"f = line_end ? 0 : convert_ushort(src_p[src_index + src_step + 16]);\n" +"g = (block_x == 0) ? 0 : convert_ushort(src_p[src_index + 2 * src_step - 1]);\n" +"i = line_end ? 0 : convert_ushort(src_p[src_index + 2 * src_step + 16]);\n" +"j = (block_x == 0 || y == (rows - 2)) ? 0 : convert_ushort(src_p[src_index + 3 * src_step - 1]);\n" +"l = (line_end || y == (rows - 2))? 0 : convert_ushort(src_p[src_index + 3 * src_step + 16]);\n" +"#elif defined BORDER_REFLECT_101\n" +"int offset;\n" +"offset = (y == 0) ? 
(2 * src_step) : 0;\n" +"a = (block_x == 0) ? convert_ushort(src_p[src_index + offset + 1]) : convert_ushort(src_p[src_index + offset - 1]);\n" +"c = line_end ? convert_ushort(src_p[src_index + offset + 14]) : convert_ushort(src_p[src_index + offset + 16]);\n" +"d = (block_x == 0) ? convert_ushort(src_p[src_index + src_step + 1]) : convert_ushort(src_p[src_index + src_step - 1]);\n" +"f = line_end ? convert_ushort(src_p[src_index + src_step + 14]) : convert_ushort(src_p[src_index + src_step + 16]);\n" +"g = (block_x == 0) ? convert_ushort(src_p[src_index + 2 * src_step + 1]) : convert_ushort(src_p[src_index + 2 * src_step - 1]);\n" +"i = line_end ? convert_ushort(src_p[src_index + 2 * src_step + 14]) : convert_ushort(src_p[src_index + 2 * src_step + 16]);\n" +"offset = (y == (rows - 2)) ? (1 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? convert_ushort(src_p[src_index + offset + 1]) : convert_ushort(src_p[src_index + offset - 1]);\n" +"l = line_end ? convert_ushort(src_p[src_index + offset + 14]) : convert_ushort(src_p[src_index + offset + 16]);\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"int offset;\n" +"offset = (y == 0) ? (1 * src_step) : 0;\n" +"a = (block_x == 0) ? convert_ushort(src_p[src_index + offset]) : convert_ushort(src_p[src_index + offset - 1]);\n" +"c = line_end ? convert_ushort(src_p[src_index + offset + 15]) : convert_ushort(src_p[src_index + offset + 16]);\n" +"d = (block_x == 0) ? convert_ushort(src_p[src_index + src_step]) : convert_ushort(src_p[src_index + src_step - 1]);\n" +"f = line_end ? convert_ushort(src_p[src_index + src_step + 15]) : convert_ushort(src_p[src_index + src_step + 16]);\n" +"g = (block_x == 0) ? convert_ushort(src_p[src_index + 2 * src_step]) : convert_ushort(src_p[src_index + 2 * src_step - 1]);\n" +"i = line_end ? convert_ushort(src_p[src_index + 2 * src_step + 15]) : convert_ushort(src_p[src_index + 2 * src_step + 16]);\n" +"offset = (y == (rows - 2)) ? (2 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? convert_ushort(src_p[src_index + offset]) : convert_ushort(src_p[src_index + offset - 1]);\n" +"l = line_end ? 
convert_ushort(src_p[src_index + offset + 15]) : convert_ushort(src_p[src_index + offset + 16]);\n" +"#endif\n" +"mid = (ushort16)(d, e.s0123, e.s456789ab, e.scde) + e + (ushort16)(e.s123, e.s4567, e.s89abcdef, f) +\n" +"(ushort16)(g, h.s0123, h.s456789ab, h.scde) + h + (ushort16)(h.s123, h.s4567, h.s89abcdef, i);\n" +"sum = (ushort16)(a, b.s0123, b.s456789ab, b.scde) + b + (ushort16)(b.s123, b.s4567, b.s89abcdef, c) +\n" +"mid;\n" +"#ifdef NORMALIZE\n" +"line_out[0] = as_uint4(convert_uchar16_sat_rte((convert_float16(sum) * alpha)));\n" +"#else\n" +"line_out[0] = as_uint4(convert_uchar16_sat_rte(sum));\n" +"#endif\n" +"sum = mid +\n" +"(ushort16)(j, k.s0123, k.s456789ab, k.scde) + k + (ushort16)(k.s123, k.s4567, k.s89abcdef, l);\n" +"#ifdef NORMALIZE\n" +"line_out[1] = as_uint4(convert_uchar16_sat_rte((convert_float16(sum) * alpha)));\n" +"#else\n" +"line_out[1] = as_uint4(convert_uchar16_sat_rte(sum));\n" +"#endif\n" +"int dst_index = block_x * 4 * dsx + y * (dst_step / 4);\n" +"vstore4(line_out[0], 0, dst + dst_index);\n" +"vstore4(line_out[1], 0, dst + dst_index + (dst_step / 4));\n" +"}\n" +, "72a31e7e412911731db2747210c1b3d4", NULL}; +struct cv::ocl::internal::ProgramEntry calc_back_project_oclsrc={moduleName, "calc_back_project", +"#define OUT_OF_RANGE -1\n" +"#define ROUNDING_EPS 0.000001f\n" +"#if histdims == 1\n" +"__kernel void calcLUT(__global const uchar * histptr, int hist_step, int hist_offset, int hist_bins,\n" +"__global int * lut, float scale, __constant float * ranges)\n" +"{\n" +"int x = get_global_id(0);\n" +"float value = convert_float(x);\n" +"if (value > ranges[1] || value < ranges[0])\n" +"lut[x] = OUT_OF_RANGE;\n" +"else\n" +"{\n" +"float lb = ranges[0], ub = ranges[1], gap = (ub - lb) / hist_bins;\n" +"value -= lb;\n" +"int bin = convert_int_sat_rtn(value / gap + ROUNDING_EPS);\n" +"if (bin >= hist_bins)\n" +"lut[x] = OUT_OF_RANGE;\n" +"else\n" +"{\n" +"int hist_index = mad24(hist_step, bin, hist_offset);\n" +"__global const float * hist = (__global const float *)(histptr + hist_index);\n" +"lut[x] = (int)convert_uchar_sat_rte(hist[0] * scale);\n" +"}\n" +"}\n" +"}\n" +"__kernel void LUT(__global const uchar * src, int src_step, int src_offset,\n" +"__constant int * lut,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int src_index = mad24(y, src_step, src_offset + x * scn);\n" +"int dst_index = mad24(y, dst_step, dst_offset + x);\n" +"int value = lut[src[src_index]];\n" +"dst[dst_index] = value == OUT_OF_RANGE ? 0 : convert_uchar(value);\n" +"}\n" +"}\n" +"#elif histdims == 2\n" +"__kernel void calcLUT(int hist_bins, __global int * lut, int lut_offset,\n" +"__constant float * ranges, int roffset)\n" +"{\n" +"int x = get_global_id(0);\n" +"float value = convert_float(x);\n" +"ranges += roffset;\n" +"lut += lut_offset;\n" +"if (value > ranges[1] || value < ranges[0])\n" +"lut[x] = OUT_OF_RANGE;\n" +"else\n" +"{\n" +"float lb = ranges[0], ub = ranges[1], gap = (ub - lb) / hist_bins;\n" +"value -= lb;\n" +"int bin = convert_int_sat_rtn(value / gap + ROUNDING_EPS);\n" +"lut[x] = bin >= hist_bins ? 
OUT_OF_RANGE : bin;\n" +"}\n" +"}\n" +"__kernel void LUT(__global const uchar * src1, int src1_step, int src1_offset,\n" +"__global const uchar * src2, int src2_step, int src2_offset,\n" +"__global const uchar * histptr, int hist_step, int hist_offset,\n" +"__constant int * lut, float scale,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int src1_index = mad24(y, src1_step, src1_offset + x * scn1);\n" +"int src2_index = mad24(y, src2_step, src2_offset + x * scn2);\n" +"int dst_index = mad24(y, dst_step, dst_offset + x);\n" +"int bin1 = lut[src1[src1_index]];\n" +"int bin2 = lut[src2[src2_index] + 256];\n" +"dst[dst_index] = bin1 == OUT_OF_RANGE || bin2 == OUT_OF_RANGE ? 0 :\n" +"convert_uchar_sat_rte(*(__global const float *)(histptr +\n" +"mad24(hist_step, bin1, hist_offset + bin2 * (int)sizeof(float))) * scale);\n" +"}\n" +"}\n" +"#else\n" +"#error \"(nimages <= 2) should be true\"\n" +"#endif\n" +, "6bab391f796ff5b2ba3d38f23929307e", NULL}; +struct cv::ocl::internal::ProgramEntry canny_oclsrc={moduleName, "canny", +"#define TG22 0.4142135623730950488016887242097f\n" +"#define TG67 2.4142135623730950488016887242097f\n" +"#ifdef WITH_SOBEL\n" +"#if cn == 1\n" +"#define loadpix(addr) convert_floatN(*(__global const TYPE *)(addr))\n" +"#else\n" +"#define loadpix(addr) convert_floatN(vload3(0, (__global const TYPE *)(addr)))\n" +"#endif\n" +"#define storepix(value, addr) *(__global int *)(addr) = (int)(value)\n" +"__constant int prev[4][2] = {\n" +"{ 0, -1 },\n" +"{ -1, 1 },\n" +"{ -1, 0 },\n" +"{ -1, -1 }\n" +"};\n" +"__constant int next[4][2] = {\n" +"{ 0, 1 },\n" +"{ 1, -1 },\n" +"{ 1, 0 },\n" +"{ 1, 1 }\n" +"};\n" +"inline float3 sobel(int idx, __local const floatN *smem)\n" +"{\n" +"float3 res;\n" +"floatN dx = fma((floatN)2, smem[idx + GRP_SIZEX + 6] - smem[idx + GRP_SIZEX + 4],\n" +"smem[idx + 2] - smem[idx] + smem[idx + 2 * GRP_SIZEX + 10] - smem[idx + 2 * GRP_SIZEX + 8]);\n" +"floatN dy = fma((floatN)2, smem[idx + 1] - smem[idx + 2 * GRP_SIZEX + 9],\n" +"smem[idx + 2] - smem[idx + 2 * GRP_SIZEX + 10] + smem[idx] - smem[idx + 2 * GRP_SIZEX + 8]);\n" +"#ifdef L2GRAD\n" +"floatN magN = fma(dx, dx, dy * dy);\n" +"#else\n" +"floatN magN = fabs(dx) + fabs(dy);\n" +"#endif\n" +"#if cn == 1\n" +"res.z = magN;\n" +"res.x = dx;\n" +"res.y = dy;\n" +"#else\n" +"res.z = max(magN.x, max(magN.y, magN.z));\n" +"if (res.z == magN.y)\n" +"{\n" +"dx.x = dx.y;\n" +"dy.x = dy.y;\n" +"}\n" +"else if (res.z == magN.z)\n" +"{\n" +"dx.x = dx.z;\n" +"dy.x = dy.z;\n" +"}\n" +"res.x = dx.x;\n" +"res.y = dy.x;\n" +"#endif\n" +"return res;\n" +"}\n" +"__kernel void stage1_with_sobel(__global const uchar *src, int src_step, int src_offset, int rows, int cols,\n" +"__global uchar *map, int map_step, int map_offset,\n" +"float low_thr, float high_thr)\n" +"{\n" +"__local floatN smem[(GRP_SIZEX + 4) * (GRP_SIZEY + 4)];\n" +"int lidx = get_local_id(0);\n" +"int lidy = get_local_id(1);\n" +"int start_x = GRP_SIZEX * get_group_id(0);\n" +"int start_y = GRP_SIZEY * get_group_id(1);\n" +"int i = lidx + lidy * GRP_SIZEX;\n" +"for (int j = i; j < (GRP_SIZEX + 4) * (GRP_SIZEY + 4); j += GRP_SIZEX * GRP_SIZEY)\n" +"{\n" +"int x = clamp(start_x - 2 + (j % (GRP_SIZEX + 4)), 0, cols - 1);\n" +"int y = clamp(start_y - 2 + (j / (GRP_SIZEX + 4)), 0, rows - 1);\n" +"smem[j] = loadpix(src + mad24(y, src_step, mad24(x, cn * (int)sizeof(TYPE), src_offset)));\n" +"}\n" 
+"barrier(CLK_LOCAL_MEM_FENCE);\n" +"__local float mag[(GRP_SIZEX + 2) * (GRP_SIZEY + 2)];\n" +"lidx++;\n" +"lidy++;\n" +"if (i < GRP_SIZEX + 2)\n" +"{\n" +"int grp_sizey = min(GRP_SIZEY + 1, rows - start_y);\n" +"mag[i] = (sobel(i, smem)).z;\n" +"mag[i + grp_sizey * (GRP_SIZEX + 2)] = (sobel(i + grp_sizey * (GRP_SIZEX + 4), smem)).z;\n" +"}\n" +"if (i < GRP_SIZEY + 2)\n" +"{\n" +"int grp_sizex = min(GRP_SIZEX + 1, cols - start_x);\n" +"mag[i * (GRP_SIZEX + 2)] = (sobel(i * (GRP_SIZEX + 4), smem)).z;\n" +"mag[i * (GRP_SIZEX + 2) + grp_sizex] = (sobel(i * (GRP_SIZEX + 4) + grp_sizex, smem)).z;\n" +"}\n" +"int idx = lidx + lidy * (GRP_SIZEX + 4);\n" +"i = lidx + lidy * (GRP_SIZEX + 2);\n" +"float3 res = sobel(idx, smem);\n" +"mag[i] = res.z;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int x = (int) res.x;\n" +"int y = (int) res.y;\n" +"int gidx = get_global_id(0);\n" +"int gidy = get_global_id(1);\n" +"if (gidx >= cols || gidy >= rows)\n" +"return;\n" +"float mag0 = mag[i];\n" +"int value = 1;\n" +"if (mag0 > low_thr)\n" +"{\n" +"float x_ = abs(x);\n" +"float y_ = abs(y);\n" +"int a = (y_ * TG22 >= x_) ? 2 : 1;\n" +"int b = (y_ * TG67 >= x_) ? 1 : 0;\n" +"int dir3 = (a * b) & (((x ^ y) & 0x80000000) >> 31);\n" +"int dir = a * b + 2 * dir3;\n" +"float prev_mag = mag[(lidy + prev[dir][0]) * (GRP_SIZEX + 2) + lidx + prev[dir][1]];\n" +"float next_mag = mag[(lidy + next[dir][0]) * (GRP_SIZEX + 2) + lidx + next[dir][1]] + (dir & 1);\n" +"if (mag0 > prev_mag && mag0 >= next_mag)\n" +"{\n" +"value = (mag0 > high_thr) ? 2 : 0;\n" +"}\n" +"}\n" +"storepix(value, map + mad24(gidy, map_step, mad24(gidx, (int)sizeof(int), map_offset)));\n" +"}\n" +"#elif defined WITHOUT_SOBEL\n" +"#define loadpix(addr) (__global short *)(addr)\n" +"#define storepix(val, addr) *(__global int *)(addr) = (int)(val)\n" +"#ifdef L2GRAD\n" +"#define dist(x, y) ((int)(x) * (x) + (int)(y) * (y))\n" +"#else\n" +"#define dist(x, y) (abs((int)(x)) + abs((int)(y)))\n" +"#endif\n" +"__constant int prev[4][2] = {\n" +"{ 0, -1 },\n" +"{ -1, -1 },\n" +"{ -1, 0 },\n" +"{ -1, 1 }\n" +"};\n" +"__constant int next[4][2] = {\n" +"{ 0, 1 },\n" +"{ 1, 1 },\n" +"{ 1, 0 },\n" +"{ 1, -1 }\n" +"};\n" +"__kernel void stage1_without_sobel(__global const uchar *dxptr, int dx_step, int dx_offset,\n" +"__global const uchar *dyptr, int dy_step, int dy_offset,\n" +"__global uchar *map, int map_step, int map_offset, int rows, int cols,\n" +"int low_thr, int high_thr)\n" +"{\n" +"int start_x = get_group_id(0) * GRP_SIZEX;\n" +"int start_y = get_group_id(1) * GRP_SIZEY;\n" +"int lidx = get_local_id(0);\n" +"int lidy = get_local_id(1);\n" +"__local int mag[(GRP_SIZEX + 2) * (GRP_SIZEY + 2)];\n" +"__local short2 sigma[(GRP_SIZEX + 2) * (GRP_SIZEY + 2)];\n" +"#pragma unroll\n" +"for (int i = lidx + lidy * GRP_SIZEX; i < (GRP_SIZEX + 2) * (GRP_SIZEY + 2); i += GRP_SIZEX * GRP_SIZEY)\n" +"{\n" +"int x = clamp(start_x - 1 + i % (GRP_SIZEX + 2), 0, cols - 1);\n" +"int y = clamp(start_y - 1 + i / (GRP_SIZEX + 2), 0, rows - 1);\n" +"int dx_index = mad24(y, dx_step, mad24(x, cn * (int)sizeof(short), dx_offset));\n" +"int dy_index = mad24(y, dy_step, mad24(x, cn * (int)sizeof(short), dy_offset));\n" +"__global short *dx = loadpix(dxptr + dx_index);\n" +"__global short *dy = loadpix(dyptr + dy_index);\n" +"int mag0 = dist(dx[0], dy[0]);\n" +"#if cn > 1\n" +"short cdx = dx[0], cdy = dy[0];\n" +"#pragma unroll\n" +"for (int j = 1; j < cn; ++j)\n" +"{\n" +"int mag1 = dist(dx[j], dy[j]);\n" +"if (mag1 > mag0)\n" +"{\n" +"mag0 = mag1;\n" +"cdx = dx[j];\n" +"cdy = dy[j];\n" 
+"}\n" +"}\n" +"dx[0] = cdx;\n" +"dy[0] = cdy;\n" +"#endif\n" +"mag[i] = mag0;\n" +"sigma[i] = (short2)(dx[0], dy[0]);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int gidx = get_global_id(0);\n" +"int gidy = get_global_id(1);\n" +"if (gidx >= cols || gidy >= rows)\n" +"return;\n" +"lidx++;\n" +"lidy++;\n" +"int mag0 = mag[lidx + lidy * (GRP_SIZEX + 2)];\n" +"short x = (sigma[lidx + lidy * (GRP_SIZEX + 2)]).x;\n" +"short y = (sigma[lidx + lidy * (GRP_SIZEX + 2)]).y;\n" +"int value = 1;\n" +"if (mag0 > low_thr)\n" +"{\n" +"float x_ = abs(x);\n" +"float y_ = abs(y);\n" +"int a = (y_ * TG22 >= x_) ? 2 : 1;\n" +"int b = (y_ * TG67 >= x_) ? 1 : 0;\n" +"int dir3 = (a * b) & (((x ^ y) & 0x80000000) >> 31);\n" +"int dir = a * b + 2 * dir3;\n" +"int prev_mag = mag[(lidy + prev[dir][0]) * (GRP_SIZEX + 2) + lidx + prev[dir][1]];\n" +"int next_mag = mag[(lidy + next[dir][0]) * (GRP_SIZEX + 2) + lidx + next[dir][1]] + (dir & 1);\n" +"if (mag0 > prev_mag && mag0 >= next_mag)\n" +"{\n" +"value = (mag0 > high_thr) ? 2 : 0;\n" +"}\n" +"}\n" +"storepix(value, map + mad24(gidy, map_step, mad24(gidx, (int)sizeof(int), map_offset)));\n" +"}\n" +"#undef TG22\n" +"#undef CANNY_SHIFT\n" +"#elif defined STAGE2\n" +"#define loadpix(addr) *(__global int *)(addr)\n" +"#define storepix(val, addr) *(__global int *)(addr) = (int)(val)\n" +"#define LOCAL_TOTAL (LOCAL_X*LOCAL_Y)\n" +"#define l_stack_size (4*LOCAL_TOTAL)\n" +"#define p_stack_size 8\n" +"__constant short move_dir[2][8] = {\n" +"{ -1, -1, -1, 0, 0, 1, 1, 1 },\n" +"{ -1, 0, 1, -1, 1, -1, 0, 1 }\n" +"};\n" +"__kernel void stage2_hysteresis(__global uchar *map_ptr, int map_step, int map_offset, int rows, int cols)\n" +"{\n" +"map_ptr += map_offset;\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI;\n" +"int lid = get_local_id(0) + get_local_id(1) * LOCAL_X;\n" +"__local ushort2 l_stack[l_stack_size];\n" +"__local int l_counter;\n" +"if (lid == 0)\n" +"l_counter = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x < cols)\n" +"{\n" +"__global uchar* map = map_ptr + mad24(y, map_step, x * (int)sizeof(int));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"int type = loadpix(map);\n" +"if (type == 2)\n" +"{\n" +"l_stack[atomic_inc(&l_counter)] = (ushort2)(x, y);\n" +"}\n" +"y++;\n" +"map += map_step;\n" +"}\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"ushort2 p_stack[p_stack_size];\n" +"int p_counter = 0;\n" +"while(l_counter != 0)\n" +"{\n" +"int mod = l_counter % LOCAL_TOTAL;\n" +"int pix_per_thr = l_counter / LOCAL_TOTAL + ((lid < mod) ? 
1 : 0);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int i = 0; i < pix_per_thr; ++i)\n" +"{\n" +"int index = atomic_dec(&l_counter) - 1;\n" +"if (index < 0)\n" +"continue;\n" +"ushort2 pos = l_stack[ index ];\n" +"#pragma unroll\n" +"for (int j = 0; j < 8; ++j)\n" +"{\n" +"ushort posx = pos.x + move_dir[0][j];\n" +"ushort posy = pos.y + move_dir[1][j];\n" +"if (posx < 0 || posy < 0 || posx >= cols || posy >= rows)\n" +"continue;\n" +"__global uchar *addr = map_ptr + mad24(posy, map_step, posx * (int)sizeof(int));\n" +"int type = loadpix(addr);\n" +"if (type == 0)\n" +"{\n" +"p_stack[p_counter++] = (ushort2)(posx, posy);\n" +"storepix(2, addr);\n" +"}\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (l_counter < 0)\n" +"l_counter = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"while (p_counter > 0)\n" +"{\n" +"l_stack[ atomic_inc(&l_counter) ] = p_stack[--p_counter];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"}\n" +"#elif defined GET_EDGES\n" +"__kernel void getEdges(__global const uchar *mapptr, int map_step, int map_offset, int rows, int cols,\n" +"__global uchar *dst, int dst_step, int dst_offset)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI;\n" +"if (x < cols)\n" +"{\n" +"int map_index = mad24(map_step, y, mad24(x, (int)sizeof(int), map_offset));\n" +"int dst_index = mad24(dst_step, y, x + dst_offset);\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const int * map = (__global const int *)(mapptr + map_index);\n" +"dst[dst_index] = (uchar)(-(map[0] >> 1));\n" +"y++;\n" +"map_index += map_step;\n" +"dst_index += dst_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "2e3c93ad703f4bcee1efbbcc0107fc8d", NULL}; +struct cv::ocl::internal::ProgramEntry clahe_oclsrc={moduleName, "clahe", +"inline int calc_lut(__local int* smem, int val, int tid)\n" +"{\n" +"smem[tid] = val;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid == 0)\n" +"for (int i = 1; i < 256; ++i)\n" +"smem[i] += smem[i - 1];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"return smem[tid];\n" +"}\n" +"inline int reduce(__local volatile int* smem, int val, int tid)\n" +"{\n" +"smem[tid] = val;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 128)\n" +"smem[tid] = val += smem[tid + 128];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 64)\n" +"smem[tid] = val += smem[tid + 64];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 32)\n" +"{\n" +"smem[tid] += smem[tid + 32];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 16)\n" +"{\n" +"smem[tid] += smem[tid + 16];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 8)\n" +"{\n" +"smem[tid] += smem[tid + 8];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid < 4)\n" +"{\n" +"smem[tid] += smem[tid + 4];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (tid == 0)\n" +"{\n" +"smem[0] = (smem[0] + smem[1]) + (smem[2] + smem[3]);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"val = smem[0];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"return val;\n" +"}\n" +"__kernel void calcLut(__global __const uchar * src, const int srcStep,\n" +"const int src_offset, __global uchar * lut,\n" +"const int dstStep, const int dst_offset,\n" +"const int2 tileSize, const int tilesX,\n" +"const int clipLimit, const float lutScale)\n" +"{\n" +"__local int smem[512];\n" +"int tx = get_group_id(0);\n" +"int ty = get_group_id(1);\n" +"int tid = get_local_id(1) * get_local_size(0)\n" +"+ get_local_id(0);\n" +"smem[tid] = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int i = 
get_local_id(1); i < tileSize.y; i += get_local_size(1))\n" +"{\n" +"__global const uchar* srcPtr = src + mad24(ty * tileSize.y + i, srcStep, tx * tileSize.x + src_offset);\n" +"for (int j = get_local_id(0); j < tileSize.x; j += get_local_size(0))\n" +"{\n" +"const int data = srcPtr[j];\n" +"atomic_inc(&smem[data]);\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int tHistVal = smem[tid];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (clipLimit > 0)\n" +"{\n" +"int clipped = 0;\n" +"if (tHistVal > clipLimit)\n" +"{\n" +"clipped = tHistVal - clipLimit;\n" +"tHistVal = clipLimit;\n" +"}\n" +"clipped = reduce(smem, clipped, tid);\n" +"int redistBatch = clipped / 256;\n" +"tHistVal += redistBatch;\n" +"int residual = clipped - redistBatch * 256;\n" +"int rStep = 256 / residual;\n" +"if (rStep < 1)\n" +"rStep = 1;\n" +"if (tid%rStep == 0 && (tid/rStep)<residual)\n" +"tHistVal++;\n" +"}\n" +"const int lutVal = calc_lut(smem, tHistVal, tid);\n" +"uint ires = (uint)convert_int_rte(lutScale * lutVal);\n" +"lut[(ty * tilesX + tx) * dstStep + tid + dst_offset] =\n" +"convert_uchar(clamp(ires, (uint)0, (uint)255));\n" +"}\n" +"__kernel void transform(__global const uchar * src, const int srcStep, const int src_offset,\n" +"__global uchar * dst, const int dstStep, const int dst_offset,\n" +"__global uchar * lut, const int lutStep, int lut_offset,\n" +"const int cols, const int rows,\n" +"const int2 tileSize, const int tilesX, const int tilesY)\n" +"{\n" +"const int x = get_global_id(0);\n" +"const int y = get_global_id(1);\n" +"if (x >= cols || y >= rows)\n" +"return;\n" +"const float tyf = (convert_float(y) / tileSize.y) - 0.5f;\n" +"int ty1 = convert_int_rtn(tyf);\n" +"int ty2 = ty1 + 1;\n" +"const float ya = tyf - ty1;\n" +"ty1 = max(ty1, 0);\n" +"ty2 = min(ty2, tilesY - 1);\n" +"const float txf = (convert_float(x) / tileSize.x) - 0.5f;\n" +"int tx1 = convert_int_rtn(txf);\n" +"int tx2 = tx1 + 1;\n" +"const float xa = txf - tx1;\n" +"tx1 = max(tx1, 0);\n" +"tx2 = min(tx2, tilesX - 1);\n" +"const int srcVal = src[mad24(y, srcStep, x + src_offset)];\n" +"float res = 0;\n" +"res += lut[mad24(ty1 * tilesX + tx1, lutStep, srcVal + lut_offset)] * ((1.0f - xa) * (1.0f - ya));\n" +"res += lut[mad24(ty1 * tilesX + tx2, lutStep, srcVal + lut_offset)] * ((xa) * (1.0f - ya));\n" +"res += lut[mad24(ty2 * tilesX + tx1, lutStep, srcVal + lut_offset)] * ((1.0f - xa) * (ya));\n" +"res += lut[mad24(ty2 * tilesX + tx2, lutStep, srcVal + lut_offset)] * ((xa) * (ya));\n" +"uint ires = (uint)convert_int_rte(res);\n" +"dst[mad24(y, dstStep, x + dst_offset)] = convert_uchar(clamp(ires, (uint)0, (uint)255));\n" +"}\n" +, "7399905b8fbf557bd2d5336b25327a7c", NULL}; +struct cv::ocl::internal::ProgramEntry color_hsv_oclsrc={moduleName, "color_hsv", +"#if depth == 0\n" +"#define DATA_TYPE uchar\n" +"#define MAX_NUM 255\n" +"#define HALF_MAX_NUM 128\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_uchar_sat(num)\n" +"#define DEPTH_0\n" +"#elif depth == 2\n" +"#define DATA_TYPE ushort\n" +"#define MAX_NUM 65535\n" +"#define HALF_MAX_NUM 32768\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_ushort_sat(num)\n" +"#define DEPTH_2\n" +"#elif depth == 5\n" +"#define DATA_TYPE float\n" +"#define MAX_NUM 1.0f\n" +"#define HALF_MAX_NUM 0.5f\n" +"#define COEFF_TYPE float\n" +"#define SAT_CAST(num) (num)\n" +"#define DEPTH_5\n" +"#else\n" +"#error \"invalid depth: should be 0 (CV_8U), 2 (CV_16U) or 5 (CV_32F)\"\n" +"#endif\n" +"#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))\n" +"enum\n" +"{\n" +"hsv_shift = 12\n" +"};\n" +"#define scnbytes ((int)sizeof(DATA_TYPE)*scn)\n" +"#define dcnbytes ((int)sizeof(DATA_TYPE)*dcn)\n" +"#ifndef hscale\n" +"#define hscale 0\n" +"#endif\n" +"#ifndef hrange\n" +"#define hrange 0\n" +"#endif\n" +"#if bidx == 0\n" +"#define R_COMP z\n" +"#define G_COMP y\n" +"#define B_COMP x\n" +"#else\n" +"#define R_COMP x\n" +"#define G_COMP y\n" +"#define B_COMP z\n" +"#endif\n" +"__constant int sector_data[][3] = { { 1, 3, 0 },\n" +"{ 1, 0, 2 },\n" +"{ 3, 0, 1 },\n" +"{ 0, 2, 1 },\n" +"{ 0, 1, 3 },\n" +"{ 2, 1, 0 } };\n" +"#ifdef DEPTH_0\n" +"__kernel void RGB2HSV(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int 
dst_step, int dst_offset,\n" +"int rows, int cols,\n" +"__constant int * sdiv_table, __constant int * hdiv_table)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = vload4(0, src + src_index);\n" +"int b = src_pix.B_COMP, g = src_pix.G_COMP, r = src_pix.R_COMP;\n" +"int h, s, v = b;\n" +"int vmin = b, diff;\n" +"int vr, vg;\n" +"v = max(v, g);\n" +"v = max(v, r);\n" +"vmin = min(vmin, g);\n" +"vmin = min(vmin, r);\n" +"diff = v - vmin;\n" +"vr = v == r ? -1 : 0;\n" +"vg = v == g ? -1 : 0;\n" +"s = mad24(diff, sdiv_table[v], (1 << (hsv_shift-1))) >> hsv_shift;\n" +"h = (vr & (g - b)) +\n" +"(~vr & ((vg & mad24(diff, 2, b - r)) + ((~vg) & mad24(4, diff, r - g))));\n" +"h = mad24(h, hdiv_table[diff], (1 << (hsv_shift-1))) >> hsv_shift;\n" +"h += h < 0 ? hrange : 0;\n" +"dst[dst_index] = convert_uchar_sat_rte(h);\n" +"dst[dst_index + 1] = (uchar)s;\n" +"dst[dst_index + 2] = (uchar)v;\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void HSV2RGB(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = vload4(0, src + src_index);\n" +"float h = src_pix.x, s = src_pix.y*(1/255.f), v = src_pix.z*(1/255.f);\n" +"float b, g, r;\n" +"if (s != 0)\n" +"{\n" +"float tab[4];\n" +"int sector;\n" +"h *= hscale;\n" +"if( h < 0 )\n" +"do h += 6; while( h < 0 );\n" +"else if( h >= 6 )\n" +"do h -= 6; while( h >= 6 );\n" +"sector = convert_int_sat_rtn(h);\n" +"h -= sector;\n" +"if( (unsigned)sector >= 6u )\n" +"{\n" +"sector = 0;\n" +"h = 0.f;\n" +"}\n" +"tab[0] = v;\n" +"tab[1] = v*(1.f - s);\n" +"tab[2] = v*(1.f - s*h);\n" +"tab[3] = v*(1.f - s*(1.f - h));\n" +"b = tab[sector_data[sector][0]];\n" +"g = tab[sector_data[sector][1]];\n" +"r = tab[sector_data[sector][2]];\n" +"}\n" +"else\n" +"b = g = r = v;\n" +"dst[dst_index + bidx] = convert_uchar_sat_rte(b*255.f);\n" +"dst[dst_index + 1] = convert_uchar_sat_rte(g*255.f);\n" +"dst[dst_index + (bidx^2)] = convert_uchar_sat_rte(r*255.f);\n" +"#if dcn == 4\n" +"dst[dst_index + 3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#elif defined DEPTH_5\n" +"__kernel void RGB2HSV(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global 
float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float b = src_pix.B_COMP, g = src_pix.G_COMP, r = src_pix.R_COMP;\n" +"float h, s, v;\n" +"float vmin, diff;\n" +"v = vmin = r;\n" +"if( v < g ) v = g;\n" +"if( v < b ) v = b;\n" +"if( vmin > g ) vmin = g;\n" +"if( vmin > b ) vmin = b;\n" +"diff = v - vmin;\n" +"s = diff/(float)(fabs(v) + FLT_EPSILON);\n" +"diff = (float)(60.f/(diff + FLT_EPSILON));\n" +"if( v == r )\n" +"h = (g - b)*diff;\n" +"else if( v == g )\n" +"h = fma(b - r, diff, 120.f);\n" +"else\n" +"h = fma(r - g, diff, 240.f);\n" +"if( h < 0 )\n" +"h += 360.f;\n" +"dst[0] = h*hscale;\n" +"dst[1] = s;\n" +"dst[2] = v;\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void HSV2RGB(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float h = src_pix.x, s = src_pix.y, v = src_pix.z;\n" +"float b, g, r;\n" +"if (s != 0)\n" +"{\n" +"float tab[4];\n" +"int sector;\n" +"h *= hscale;\n" +"if(h < 0)\n" +"do h += 6; while (h < 0);\n" +"else if (h >= 6)\n" +"do h -= 6; while (h >= 6);\n" +"sector = convert_int_sat_rtn(h);\n" +"h -= sector;\n" +"if ((unsigned)sector >= 6u)\n" +"{\n" +"sector = 0;\n" +"h = 0.f;\n" +"}\n" +"tab[0] = v;\n" +"tab[1] = v*(1.f - s);\n" +"tab[2] = v*(1.f - s*h);\n" +"tab[3] = v*(1.f - s*(1.f - h));\n" +"b = tab[sector_data[sector][0]];\n" +"g = tab[sector_data[sector][1]];\n" +"r = tab[sector_data[sector][2]];\n" +"}\n" +"else\n" +"b = g = r = v;\n" +"dst[bidx] = b;\n" +"dst[1] = g;\n" +"dst[bidx^2] = r;\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +"#ifdef DEPTH_0\n" +"__kernel void RGB2HLS(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = vload4(0, src + src_index);\n" +"float b = src_pix.B_COMP*(1/255.f), g = src_pix.G_COMP*(1/255.f), r = src_pix.R_COMP*(1/255.f);\n" +"float h = 0.f, s = 0.f, l;\n" +"float vmin, vmax, diff;\n" +"vmax = vmin = r;\n" +"if (vmax < g) vmax = g;\n" +"if (vmax < b) vmax = b;\n" +"if (vmin > g) vmin = g;\n" +"if (vmin > b) vmin = b;\n" +"diff = vmax - vmin;\n" +"l = (vmax + vmin)*0.5f;\n" +"if (diff > FLT_EPSILON)\n" +"{\n" +"s = l < 0.5f ? 
diff/(vmax + vmin) : diff/(2 - vmax - vmin);\n" +"diff = 60.f/diff;\n" +"if( vmax == r )\n" +"h = (g - b)*diff;\n" +"else if( vmax == g )\n" +"h = fma(b - r, diff, 120.f);\n" +"else\n" +"h = fma(r - g, diff, 240.f);\n" +"if( h < 0.f )\n" +"h += 360.f;\n" +"}\n" +"dst[dst_index] = convert_uchar_sat_rte(h*hscale);\n" +"dst[dst_index + 1] = convert_uchar_sat_rte(l*255.f);\n" +"dst[dst_index + 2] = convert_uchar_sat_rte(s*255.f);\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void HLS2RGB(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = vload4(0, src + src_index);\n" +"float h = src_pix.x, l = src_pix.y*(1.f/255.f), s = src_pix.z*(1.f/255.f);\n" +"float b, g, r;\n" +"if (s != 0)\n" +"{\n" +"float tab[4];\n" +"float p2 = l <= 0.5f ? l*(1 + s) : l + s - l*s;\n" +"float p1 = 2*l - p2;\n" +"h *= hscale;\n" +"if( h < 0 )\n" +"do h += 6; while( h < 0 );\n" +"else if( h >= 6 )\n" +"do h -= 6; while( h >= 6 );\n" +"int sector = convert_int_sat_rtn(h);\n" +"h -= sector;\n" +"tab[0] = p2;\n" +"tab[1] = p1;\n" +"tab[2] = fma(p2 - p1, 1-h, p1);\n" +"tab[3] = fma(p2 - p1, h, p1);\n" +"b = tab[sector_data[sector][0]];\n" +"g = tab[sector_data[sector][1]];\n" +"r = tab[sector_data[sector][2]];\n" +"}\n" +"else\n" +"b = g = r = l;\n" +"dst[dst_index + bidx] = convert_uchar_sat_rte(b*255.f);\n" +"dst[dst_index + 1] = convert_uchar_sat_rte(g*255.f);\n" +"dst[dst_index + (bidx^2)] = convert_uchar_sat_rte(r*255.f);\n" +"#if dcn == 4\n" +"dst[dst_index + 3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#elif defined DEPTH_5\n" +"__kernel void RGB2HLS(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float b = src_pix.B_COMP, g = src_pix.G_COMP, r = src_pix.R_COMP;\n" +"float h = 0.f, s = 0.f, l;\n" +"float vmin, vmax, diff;\n" +"vmax = vmin = r;\n" +"if (vmax < g) vmax = g;\n" +"if (vmax < b) vmax = b;\n" +"if (vmin > g) vmin = g;\n" +"if (vmin > b) vmin = b;\n" +"diff = vmax - vmin;\n" +"l = (vmax + vmin)*0.5f;\n" +"if (diff > FLT_EPSILON)\n" +"{\n" +"s = l < 0.5f ? 
diff/(vmax + vmin) : diff/(2 - vmax - vmin);\n" +"diff = 60.f/diff;\n" +"if( vmax == r )\n" +"h = (g - b)*diff;\n" +"else if( vmax == g )\n" +"h = fma(b - r, diff, 120.f);\n" +"else\n" +"h = fma(r - g, diff, 240.f);\n" +"if( h < 0.f ) h += 360.f;\n" +"}\n" +"dst[0] = h*hscale;\n" +"dst[1] = l;\n" +"dst[2] = s;\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void HLS2RGB(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float h = src_pix.x, l = src_pix.y, s = src_pix.z;\n" +"float b, g, r;\n" +"if (s != 0)\n" +"{\n" +"float tab[4];\n" +"int sector;\n" +"float p2 = l <= 0.5f ? l*(1 + s) : l + s - l*s;\n" +"float p1 = 2*l - p2;\n" +"h *= hscale;\n" +"if( h < 0 )\n" +"do h += 6; while( h < 0 );\n" +"else if( h >= 6 )\n" +"do h -= 6; while( h >= 6 );\n" +"sector = convert_int_sat_rtn(h);\n" +"h -= sector;\n" +"tab[0] = p2;\n" +"tab[1] = p1;\n" +"tab[2] = fma(p2 - p1, 1-h, p1);\n" +"tab[3] = fma(p2 - p1, h, p1);\n" +"b = tab[sector_data[sector][0]];\n" +"g = tab[sector_data[sector][1]];\n" +"r = tab[sector_data[sector][2]];\n" +"}\n" +"else\n" +"b = g = r = l;\n" +"dst[bidx] = b;\n" +"dst[1] = g;\n" +"dst[bidx^2] = r;\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "7a58346cde62814d89c0233200848842", NULL}; +struct cv::ocl::internal::ProgramEntry color_lab_oclsrc={moduleName, "color_lab", +"#if depth == 0\n" +"#define DATA_TYPE uchar\n" +"#define MAX_NUM 255\n" +"#define HALF_MAX_NUM 128\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_uchar_sat(num)\n" +"#define DEPTH_0\n" +"#elif depth == 2\n" +"#define DATA_TYPE ushort\n" +"#define MAX_NUM 65535\n" +"#define HALF_MAX_NUM 32768\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_ushort_sat(num)\n" +"#define DEPTH_2\n" +"#elif depth == 5\n" +"#define DATA_TYPE float\n" +"#define MAX_NUM 1.0f\n" +"#define HALF_MAX_NUM 0.5f\n" +"#define COEFF_TYPE float\n" +"#define SAT_CAST(num) (num)\n" +"#define DEPTH_5\n" +"#else\n" +"#error \"invalid depth: should be 0 (CV_8U), 2 (CV_16U) or 5 (CV_32F)\"\n" +"#endif\n" +"#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))\n" +"enum\n" +"{\n" +"xyz_shift = 12,\n" +"};\n" +"#define scnbytes ((int)sizeof(DATA_TYPE)*scn)\n" +"#define dcnbytes ((int)sizeof(DATA_TYPE)*dcn)\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define DATA_TYPE_4 CAT(DATA_TYPE, 4)\n" +"#define DATA_TYPE_3 CAT(DATA_TYPE, 3)\n" +"__kernel void RGB2XYZ(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols, __constant COEFF_TYPE * coeffs)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (dx < cols)\n" +"{\n" +"int src_index = mad24(dy, src_step, mad24(dx, 
scnbytes, src_offset));\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (dy < rows)\n" +"{\n" +"__global const DATA_TYPE * src = (__global const DATA_TYPE *)(srcptr + src_index);\n" +"__global DATA_TYPE * dst = (__global DATA_TYPE *)(dstptr + dst_index);\n" +"DATA_TYPE_4 src_pix = vload4(0, src);\n" +"DATA_TYPE r = src_pix.x, g = src_pix.y, b = src_pix.z;\n" +"#ifdef DEPTH_5\n" +"float x = fma(r, coeffs[0], fma(g, coeffs[1], b * coeffs[2]));\n" +"float y = fma(r, coeffs[3], fma(g, coeffs[4], b * coeffs[5]));\n" +"float z = fma(r, coeffs[6], fma(g, coeffs[7], b * coeffs[8]));\n" +"#else\n" +"int x = CV_DESCALE(mad24(r, coeffs[0], mad24(g, coeffs[1], b * coeffs[2])), xyz_shift);\n" +"int y = CV_DESCALE(mad24(r, coeffs[3], mad24(g, coeffs[4], b * coeffs[5])), xyz_shift);\n" +"int z = CV_DESCALE(mad24(r, coeffs[6], mad24(g, coeffs[7], b * coeffs[8])), xyz_shift);\n" +"#endif\n" +"dst[0] = SAT_CAST(x);\n" +"dst[1] = SAT_CAST(y);\n" +"dst[2] = SAT_CAST(z);\n" +"++dy;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void XYZ2RGB(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols, __constant COEFF_TYPE * coeffs)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (dx < cols)\n" +"{\n" +"int src_index = mad24(dy, src_step, mad24(dx, scnbytes, src_offset));\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (dy < rows)\n" +"{\n" +"__global const DATA_TYPE * src = (__global const DATA_TYPE *)(srcptr + src_index);\n" +"__global DATA_TYPE * dst = (__global DATA_TYPE *)(dstptr + dst_index);\n" +"DATA_TYPE_4 src_pix = vload4(0, src);\n" +"DATA_TYPE x = src_pix.x, y = src_pix.y, z = src_pix.z;\n" +"#ifdef DEPTH_5\n" +"float b = fma(x, coeffs[0], fma(y, coeffs[1], z * coeffs[2]));\n" +"float g = fma(x, coeffs[3], fma(y, coeffs[4], z * coeffs[5]));\n" +"float r = fma(x, coeffs[6], fma(y, coeffs[7], z * coeffs[8]));\n" +"#else\n" +"int b = CV_DESCALE(mad24(x, coeffs[0], mad24(y, coeffs[1], z * coeffs[2])), xyz_shift);\n" +"int g = CV_DESCALE(mad24(x, coeffs[3], mad24(y, coeffs[4], z * coeffs[5])), xyz_shift);\n" +"int r = CV_DESCALE(mad24(x, coeffs[6], mad24(y, coeffs[7], z * coeffs[8])), xyz_shift);\n" +"#endif\n" +"DATA_TYPE dst0 = SAT_CAST(b);\n" +"DATA_TYPE dst1 = SAT_CAST(g);\n" +"DATA_TYPE dst2 = SAT_CAST(r);\n" +"#if dcn == 3 || defined DEPTH_5\n" +"dst[0] = dst0;\n" +"dst[1] = dst1;\n" +"dst[2] = dst2;\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"#else\n" +"*(__global DATA_TYPE_4 *)dst = (DATA_TYPE_4)(dst0, dst1, dst2, MAX_NUM);\n" +"#endif\n" +"++dy;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#define lab_shift xyz_shift\n" +"#define gamma_shift 3\n" +"#define lab_shift2 (lab_shift + gamma_shift)\n" +"#define GAMMA_TAB_SIZE 1024\n" +"#define GammaTabScale (float)GAMMA_TAB_SIZE\n" +"inline float splineInterpolate(float x, __global const float * tab, int n)\n" +"{\n" +"int ix = clamp(convert_int_sat_rtn(x), 0, n-1);\n" +"x -= ix;\n" +"tab += ix << 2;\n" +"return fma(fma(fma(tab[3], x, tab[2]), x, tab[1]), x, tab[0]);\n" +"}\n" +"#ifdef DEPTH_0\n" +"__kernel void BGR2Lab(__global const uchar * src, int src_step, int src_offset,\n" +"__global uchar * 
dst, int dst_step, int dst_offset, int rows, int cols,\n" +"__global const ushort * gammaTab, __global ushort * LabCbrtTab_b,\n" +"__constant int * coeffs, int Lscale, int Lshift)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const uchar* src_ptr = src + src_index;\n" +"__global uchar* dst_ptr = dst + dst_index;\n" +"uchar4 src_pix = vload4(0, src_ptr);\n" +"int C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2],\n" +"C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],\n" +"C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];\n" +"int R = gammaTab[src_pix.x], G = gammaTab[src_pix.y], B = gammaTab[src_pix.z];\n" +"int fX = LabCbrtTab_b[CV_DESCALE(mad24(R, C0, mad24(G, C1, B*C2)), lab_shift)];\n" +"int fY = LabCbrtTab_b[CV_DESCALE(mad24(R, C3, mad24(G, C4, B*C5)), lab_shift)];\n" +"int fZ = LabCbrtTab_b[CV_DESCALE(mad24(R, C6, mad24(G, C7, B*C8)), lab_shift)];\n" +"int L = CV_DESCALE( Lscale*fY + Lshift, lab_shift2 );\n" +"int a = CV_DESCALE( mad24(500, fX - fY, 128*(1 << lab_shift2)), lab_shift2 );\n" +"int b = CV_DESCALE( mad24(200, fY - fZ, 128*(1 << lab_shift2)), lab_shift2 );\n" +"dst_ptr[0] = SAT_CAST(L);\n" +"dst_ptr[1] = SAT_CAST(a);\n" +"dst_ptr[2] = SAT_CAST(b);\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#elif defined DEPTH_5\n" +"__kernel void BGR2Lab(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols,\n" +"#ifdef SRGB\n" +"__global const float * gammaTab,\n" +"#endif\n" +"__constant float * coeffs, float _1_3, float _a)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2],\n" +"C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],\n" +"C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];\n" +"float R = clamp(src_pix.x, 0.0f, 1.0f);\n" +"float G = clamp(src_pix.y, 0.0f, 1.0f);\n" +"float B = clamp(src_pix.z, 0.0f, 1.0f);\n" +"#ifdef SRGB\n" +"R = splineInterpolate(R * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"G = splineInterpolate(G * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"B = splineInterpolate(B * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"#endif\n" +"float X = fma(R, C0, fma(G, C1, B*C2));\n" +"float Y = fma(R, C3, fma(G, C4, B*C5));\n" +"float Z = fma(R, C6, fma(G, C7, B*C8));\n" +"float FX = X > 0.008856f ? rootn(X, 3) : fma(7.787f, X, _a);\n" +"float FY = Y > 0.008856f ? rootn(Y, 3) : fma(7.787f, Y, _a);\n" +"float FZ = Z > 0.008856f ? rootn(Z, 3) : fma(7.787f, Z, _a);\n" +"float L = Y > 0.008856f ? 
fma(116.f, FY, -16.f) : (903.3f * Y);\n" +"float a = 500.f * (FX - FY);\n" +"float b = 200.f * (FY - FZ);\n" +"dst[0] = L;\n" +"dst[1] = a;\n" +"dst[2] = b;\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +"inline void Lab2BGR_f(const float * srcbuf, float * dstbuf,\n" +"#ifdef SRGB\n" +"__global const float * gammaTab,\n" +"#endif\n" +"__constant float * coeffs, float lThresh, float fThresh)\n" +"{\n" +"float li = srcbuf[0], ai = srcbuf[1], bi = srcbuf[2];\n" +"float C0 = coeffs[0], C1 = coeffs[1], C2 = coeffs[2],\n" +"C3 = coeffs[3], C4 = coeffs[4], C5 = coeffs[5],\n" +"C6 = coeffs[6], C7 = coeffs[7], C8 = coeffs[8];\n" +"float y, fy;\n" +"if (li <= lThresh)\n" +"{\n" +"y = li / 903.3f;\n" +"fy = fma(7.787f, y, 16.0f / 116.0f);\n" +"}\n" +"else\n" +"{\n" +"fy = (li + 16.0f) / 116.0f;\n" +"y = fy * fy * fy;\n" +"}\n" +"float fxz[] = { ai / 500.0f + fy, fy - bi / 200.0f };\n" +"#pragma unroll\n" +"for (int j = 0; j < 2; j++)\n" +"if (fxz[j] <= fThresh)\n" +"fxz[j] = (fxz[j] - 16.0f / 116.0f) / 7.787f;\n" +"else\n" +"fxz[j] = fxz[j] * fxz[j] * fxz[j];\n" +"float x = fxz[0], z = fxz[1];\n" +"float ro = clamp(fma(C0, x, fma(C1, y, C2 * z)), 0.0f, 1.0f);\n" +"float go = clamp(fma(C3, x, fma(C4, y, C5 * z)), 0.0f, 1.0f);\n" +"float bo = clamp(fma(C6, x, fma(C7, y, C8 * z)), 0.0f, 1.0f);\n" +"#ifdef SRGB\n" +"ro = splineInterpolate(ro * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"go = splineInterpolate(go * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"bo = splineInterpolate(bo * GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"#endif\n" +"dstbuf[0] = ro, dstbuf[1] = go, dstbuf[2] = bo;\n" +"}\n" +"#ifdef DEPTH_0\n" +"__kernel void Lab2BGR(__global const uchar * src, int src_step, int src_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int rows, int cols,\n" +"#ifdef SRGB\n" +"__global const float * gammaTab,\n" +"#endif\n" +"__constant float * coeffs, float lThresh, float fThresh)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const uchar* src_ptr = src + src_index;\n" +"__global uchar * dst_ptr = dst + dst_index;\n" +"uchar4 src_pix = vload4(0, src_ptr);\n" +"float srcbuf[3], dstbuf[3];\n" +"srcbuf[0] = src_pix.x*(100.f/255.f);\n" +"srcbuf[1] = convert_float(src_pix.y - 128);\n" +"srcbuf[2] = convert_float(src_pix.z - 128);\n" +"Lab2BGR_f(&srcbuf[0], &dstbuf[0],\n" +"#ifdef SRGB\n" +"gammaTab,\n" +"#endif\n" +"coeffs, lThresh, fThresh);\n" +"#if dcn == 3\n" +"dst_ptr[0] = SAT_CAST(dstbuf[0] * 255.0f);\n" +"dst_ptr[1] = SAT_CAST(dstbuf[1] * 255.0f);\n" +"dst_ptr[2] = SAT_CAST(dstbuf[2] * 255.0f);\n" +"#else\n" +"*(__global uchar4 *)dst_ptr = (uchar4)(SAT_CAST(dstbuf[0] * 255.0f),\n" +"SAT_CAST(dstbuf[1] * 255.0f), SAT_CAST(dstbuf[2] * 255.0f), MAX_NUM);\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#elif defined DEPTH_5\n" +"__kernel void Lab2BGR(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols,\n" +"#ifdef SRGB\n" +"__global const float * gammaTab,\n" +"#endif\n" +"__constant float * coeffs, float lThresh, float fThresh)\n" +"{\n" +"int x = 
get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const float * src = (__global const float *)(srcptr + src_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"float4 src_pix = vload4(0, src);\n" +"float srcbuf[3], dstbuf[3];\n" +"srcbuf[0] = src_pix.x, srcbuf[1] = src_pix.y, srcbuf[2] = src_pix.z;\n" +"Lab2BGR_f(&srcbuf[0], &dstbuf[0],\n" +"#ifdef SRGB\n" +"gammaTab,\n" +"#endif\n" +"coeffs, lThresh, fThresh);\n" +"dst[0] = dstbuf[0], dst[1] = dstbuf[1], dst[2] = dstbuf[2];\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +"#define LAB_CBRT_TAB_SIZE 1024\n" +"#define LAB_CBRT_TAB_SIZE_B (256*3/2*(1<= 8)\n" +"{\n" +"Y = fma(L, 1.f/116.f, 16.f/116.f);\n" +"Y = Y*Y*Y;\n" +"}\n" +"else\n" +"{\n" +"Y = L * (1.0f/903.3f);\n" +"}\n" +"float up = 3.f*fma(L, _un, u);\n" +"float vp = 0.25f/fma(L, _vn, v);\n" +"vp = clamp(vp, -0.25f, 0.25f);\n" +"X = 3.f*Y*up*vp;\n" +"Z = Y*fma(fma(12.f*13.f, L, -up), vp, -5.f);\n" +"float R = fma(X, coeffs[0], fma(Y, coeffs[1], Z * coeffs[2]));\n" +"float G = fma(X, coeffs[3], fma(Y, coeffs[4], Z * coeffs[5]));\n" +"float B = fma(X, coeffs[6], fma(Y, coeffs[7], Z * coeffs[8]));\n" +"R = clamp(R, 0.f, 1.f);\n" +"G = clamp(G, 0.f, 1.f);\n" +"B = clamp(B, 0.f, 1.f);\n" +"#ifdef SRGB\n" +"R = splineInterpolate(R*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"G = splineInterpolate(G*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"B = splineInterpolate(B*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"#endif\n" +"dst[0] = R;\n" +"dst[1] = G;\n" +"dst[2] = B;\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"#elif defined DEPTH_0\n" +"__kernel void Luv2BGR(__global const uchar * src, int src_step, int src_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int rows, int cols,\n" +"#ifdef SRGB\n" +"__global const float * gammaTab,\n" +"#endif\n" +"__constant float * coeffs, float _un, float _vn)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"src += mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"dst += mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"if (y < rows)\n" +"{\n" +"float d, X, Y, Z;\n" +"float L = src[0]*(100.f/255.f);\n" +"float u = fma(convert_float(src[1]), 1.388235294117647f, -134.f);\n" +"float v = fma(convert_float(src[2]), 1.027450980392157f, - 140.f);\n" +"if(L >= 8)\n" +"{\n" +"Y = fma(L, 1.f/116.f, 16.f/116.f);\n" +"Y = Y*Y*Y;\n" +"}\n" +"else\n" +"{\n" +"Y = L * (1.0f/903.3f);\n" +"}\n" +"float up = 3.f*fma(L, _un, u);\n" +"float vp = 0.25f/fma(L, _vn, v);\n" +"vp = clamp(vp, -0.25f, 0.25f);\n" +"X = 3.f*Y*up*vp;\n" +"Z = Y*fma(fma(12.f*13.f, L, -up), vp, -5.f);\n" +"X = clamp(X, 0.f, 2.f); Z = clamp(Z, 0.f, 2.f);\n" +"float R = fma(X, coeffs[0], fma(Y, coeffs[1], Z * coeffs[2]));\n" +"float G = fma(X, coeffs[3], fma(Y, coeffs[4], Z * coeffs[5]));\n" +"float B = fma(X, coeffs[6], fma(Y, coeffs[7], Z * coeffs[8]));\n" +"R = clamp(R, 0.f, 1.f);\n" +"G = clamp(G, 0.f, 
1.f);\n" +"B = clamp(B, 0.f, 1.f);\n" +"#ifdef SRGB\n" +"R = splineInterpolate(R*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"G = splineInterpolate(G*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"B = splineInterpolate(B*GammaTabScale, gammaTab, GAMMA_TAB_SIZE);\n" +"#endif\n" +"uchar dst0 = SAT_CAST(R * 255.0f);\n" +"uchar dst1 = SAT_CAST(G * 255.0f);\n" +"uchar dst2 = SAT_CAST(B * 255.0f);\n" +"#if dcn == 4\n" +"*(__global uchar4 *)dst = (uchar4)(dst0, dst1, dst2, MAX_NUM);\n" +"#else\n" +"dst[0] = dst0;\n" +"dst[1] = dst1;\n" +"dst[2] = dst2;\n" +"#endif\n" +"++y;\n" +"dst += dst_step;\n" +"src += src_step;\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "5321f879afd5057810f4e7d4db9b2e78", NULL}; +struct cv::ocl::internal::ProgramEntry color_rgb_oclsrc={moduleName, "color_rgb", +"#if depth == 0\n" +"#define DATA_TYPE uchar\n" +"#define MAX_NUM 255\n" +"#define HALF_MAX_NUM 128\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_uchar_sat(num)\n" +"#define DEPTH_0\n" +"#elif depth == 2\n" +"#define DATA_TYPE ushort\n" +"#define MAX_NUM 65535\n" +"#define HALF_MAX_NUM 32768\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_ushort_sat(num)\n" +"#define DEPTH_2\n" +"#elif depth == 5\n" +"#define DATA_TYPE float\n" +"#define MAX_NUM 1.0f\n" +"#define HALF_MAX_NUM 0.5f\n" +"#define COEFF_TYPE float\n" +"#define SAT_CAST(num) (num)\n" +"#define DEPTH_5\n" +"#else\n" +"#error \"invalid depth: should be 0 (CV_8U), 2 (CV_16U) or 5 (CV_32F)\"\n" +"#endif\n" +"#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))\n" +"enum\n" +"{\n" +"gray_shift = 15,\n" +"RY15 = 9798,\n" +"GY15 = 19235,\n" +"BY15 = 3735\n" +"};\n" +"#define B2YF 0.114f\n" +"#define G2YF 0.587f\n" +"#define R2YF 0.299f\n" +"#define scnbytes ((int)sizeof(DATA_TYPE)*scn)\n" +"#define dcnbytes ((int)sizeof(DATA_TYPE)*dcn)\n" +"#if bidx == 0\n" +"#define R_COMP z\n" +"#define G_COMP y\n" +"#define B_COMP x\n" +"#else\n" +"#define R_COMP x\n" +"#define G_COMP y\n" +"#define B_COMP z\n" +"#endif\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define DATA_TYPE_4 CAT(DATA_TYPE, 4)\n" +"#define DATA_TYPE_3 CAT(DATA_TYPE, 3)\n" +"__kernel void RGB2Gray(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE* src = (__global const DATA_TYPE*)(srcptr + src_index);\n" +"__global DATA_TYPE* dst = (__global DATA_TYPE*)(dstptr + dst_index);\n" +"DATA_TYPE_3 src_pix = vload3(0, src);\n" +"#ifdef DEPTH_5\n" +"dst[0] = fma(src_pix.B_COMP, B2YF, fma(src_pix.G_COMP, G2YF, src_pix.R_COMP * R2YF));\n" +"#else\n" +"dst[0] = (DATA_TYPE)CV_DESCALE(mad24(src_pix.B_COMP, BY15, mad24(src_pix.G_COMP, GY15, mul24(src_pix.R_COMP, RY15))), gray_shift);\n" +"#endif\n" +"++y;\n" +"src_index += src_step;\n" +"dst_index += dst_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void Gray2RGB(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = 
mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE* src = (__global const DATA_TYPE*)(srcptr + src_index);\n" +"__global DATA_TYPE* dst = (__global DATA_TYPE*)(dstptr + dst_index);\n" +"DATA_TYPE val = src[0];\n" +"#if dcn == 3 || defined DEPTH_5\n" +"dst[0] = dst[1] = dst[2] = val;\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"#else\n" +"*(__global DATA_TYPE_4 *)dst = (DATA_TYPE_4)(val, val, val, MAX_NUM);\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void RGB(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE * src = (__global const DATA_TYPE *)(srcptr + src_index);\n" +"__global DATA_TYPE * dst = (__global DATA_TYPE *)(dstptr + dst_index);\n" +"#if scn == 3\n" +"DATA_TYPE_3 src_pix = vload3(0, src);\n" +"#else\n" +"DATA_TYPE_4 src_pix = vload4(0, src);\n" +"#endif\n" +"#ifdef REVERSE\n" +"dst[0] = src_pix.z;\n" +"dst[1] = src_pix.y;\n" +"dst[2] = src_pix.x;\n" +"#else\n" +"dst[0] = src_pix.x;\n" +"dst[1] = src_pix.y;\n" +"dst[2] = src_pix.z;\n" +"#endif\n" +"#if dcn == 4\n" +"#if scn == 3\n" +"dst[3] = MAX_NUM;\n" +"#else\n" +"dst[3] = src[3];\n" +"#endif\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void RGB5x52RGB(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"ushort t = *((__global const ushort*)(src + src_index));\n" +"#if greenbits == 6\n" +"dst[dst_index + bidx] = (uchar)(t << 3);\n" +"dst[dst_index + 1] = (uchar)((t >> 3) & ~3);\n" +"dst[dst_index + (bidx^2)] = (uchar)((t >> 8) & ~7);\n" +"#else\n" +"dst[dst_index + bidx] = (uchar)(t << 3);\n" +"dst[dst_index + 1] = (uchar)((t >> 2) & ~7);\n" +"dst[dst_index + (bidx^2)] = (uchar)((t >> 7) & ~7);\n" +"#endif\n" +"#if dcn == 4\n" +"#if greenbits == 6\n" +"dst[dst_index + 3] = 255;\n" +"#else\n" +"dst[dst_index + 3] = t & 0x8000 ? 
255 : 0;\n" +"#endif\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void RGB2RGB5x5(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = vload4(0, src + src_index);\n" +"#if greenbits == 6\n" +"*((__global ushort*)(dst + dst_index)) = (ushort)((src_pix.B_COMP >> 3)|((src_pix.G_COMP&~3) << 3)|((src_pix.R_COMP&~7) << 8));\n" +"#elif scn == 3\n" +"*((__global ushort*)(dst + dst_index)) = (ushort)((src_pix.B_COMP >> 3)|((src_pix.G_COMP&~7) << 2)|((src_pix.R_COMP&~7) << 7));\n" +"#else\n" +"*((__global ushort*)(dst + dst_index)) = (ushort)((src_pix.B_COMP >> 3)|((src_pix.G_COMP&~7) << 2)|\n" +"((src_pix.R_COMP&~7) << 7)|(src_pix.w ? 0x8000 : 0));\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void BGR5x52Gray(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, dst_offset + x);\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"int t = *((__global const ushort*)(src + src_index));\n" +"#if greenbits == 6\n" +"dst[dst_index] = (uchar)CV_DESCALE(mad24((t << 3) & 0xf8, BY15, mad24((t >> 3) & 0xfc, GY15, ((t >> 8) & 0xf8) * RY15)), gray_shift);\n" +"#else\n" +"dst[dst_index] = (uchar)CV_DESCALE(mad24((t << 3) & 0xf8, BY15, mad24((t >> 2) & 0xf8, GY15, ((t >> 7) & 0xf8) * RY15)), gray_shift);\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void Gray2BGR5x5(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, src_offset + x);\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"int t = src[src_index];\n" +"#if greenbits == 6\n" +"*((__global ushort*)(dst + dst_index)) = (ushort)((t >> 3) | ((t & ~3) << 3) | ((t & ~7) << 8));\n" +"#else\n" +"t >>= 3;\n" +"*((__global ushort*)(dst + dst_index)) = (ushort)(t|(t << 5)|(t << 10));\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#ifdef DEPTH_0\n" +"__kernel void RGBA2mRGBA(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, src_offset + (x << 2));\n" +"int dst_index = mad24(y, dst_step, 
dst_offset + (x << 2));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = *(__global const uchar4 *)(src + src_index);\n" +"*(__global uchar4 *)(dst + dst_index) =\n" +"(uchar4)(mad24(src_pix.x, src_pix.w, HALF_MAX_NUM) / MAX_NUM,\n" +"mad24(src_pix.y, src_pix.w, HALF_MAX_NUM) / MAX_NUM,\n" +"mad24(src_pix.z, src_pix.w, HALF_MAX_NUM) / MAX_NUM, src_pix.w);\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void mRGBA2RGBA(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, 4, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, 4, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"uchar4 src_pix = *(__global const uchar4 *)(src + src_index);\n" +"uchar v3 = src_pix.w, v3_half = v3 / 2;\n" +"if (v3 == 0)\n" +"*(__global uchar4 *)(dst + dst_index) = (uchar4)(0, 0, 0, 0);\n" +"else\n" +"*(__global uchar4 *)(dst + dst_index) =\n" +"(uchar4)(SAT_CAST(mad24(src_pix.x, MAX_NUM, v3_half) / v3),\n" +"SAT_CAST(mad24(src_pix.y, MAX_NUM, v3_half) / v3),\n" +"SAT_CAST(mad24(src_pix.z, MAX_NUM, v3_half) / v3),\n" +"SAT_CAST(v3));\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "7c7f5a9dd8757ef67722074f65469f9a", NULL}; +struct cv::ocl::internal::ProgramEntry color_yuv_oclsrc={moduleName, "color_yuv", +"#if depth == 0\n" +"#define DATA_TYPE uchar\n" +"#define MAX_NUM 255\n" +"#define HALF_MAX_NUM 128\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_uchar_sat(num)\n" +"#define DEPTH_0\n" +"#elif depth == 2\n" +"#define DATA_TYPE ushort\n" +"#define MAX_NUM 65535\n" +"#define HALF_MAX_NUM 32768\n" +"#define COEFF_TYPE int\n" +"#define SAT_CAST(num) convert_ushort_sat(num)\n" +"#define DEPTH_2\n" +"#elif depth == 5\n" +"#define DATA_TYPE float\n" +"#define MAX_NUM 1.0f\n" +"#define HALF_MAX_NUM 0.5f\n" +"#define COEFF_TYPE float\n" +"#define SAT_CAST(num) (num)\n" +"#define DEPTH_5\n" +"#else\n" +"#error \"invalid depth: should be 0 (CV_8U), 2 (CV_16U) or 5 (CV_32F)\"\n" +"#endif\n" +"#define CV_DESCALE(x,n) (((x) + (1 << ((n)-1))) >> (n))\n" +"enum\n" +"{\n" +"yuv_shift = 14,\n" +"R2Y = 4899,\n" +"G2Y = 9617,\n" +"B2Y = 1868,\n" +"};\n" +"#define B2YF 0.114f\n" +"#define G2YF 0.587f\n" +"#define R2YF 0.299f\n" +"#define YCBF 0.564f\n" +"#define YCRF 0.713f\n" +"#define YCBI 9241\n" +"#define YCRI 11682\n" +"#define B2UF 0.492f\n" +"#define R2VF 0.877f\n" +"#define B2UI 8061\n" +"#define R2VI 14369\n" +"#define U2BF 2.032f\n" +"#define U2GF -0.395f\n" +"#define V2GF -0.581f\n" +"#define V2RF 1.140f\n" +"#define U2BI 33292\n" +"#define U2GI -6472\n" +"#define V2GI -9519\n" +"#define V2RI 18678\n" +"#define CR2RF 1.403f\n" +"#define CB2GF -0.344f\n" +"#define CR2GF -0.714f\n" +"#define CB2BF 1.773f\n" +"#define CR2RI 22987\n" +"#define CB2GI -5636\n" +"#define CR2GI -11698\n" +"#define CB2BI 29049\n" +"#define scnbytes ((int)sizeof(DATA_TYPE)*scn)\n" +"#define dcnbytes ((int)sizeof(DATA_TYPE)*dcn)\n" +"#if bidx == 0\n" +"#define R_COMP z\n" +"#define G_COMP y\n" +"#define B_COMP x\n" +"#else\n" +"#define R_COMP x\n" +"#define G_COMP y\n" +"#define B_COMP z\n" +"#endif\n" +"#ifndef uidx\n" 
+"#define uidx 0\n" +"#endif\n" +"#ifndef yidx\n" +"#define yidx 0\n" +"#endif\n" +"#ifndef PIX_PER_WI_X\n" +"#define PIX_PER_WI_X 1\n" +"#endif\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define DATA_TYPE_4 CAT(DATA_TYPE, 4)\n" +"#define DATA_TYPE_3 CAT(DATA_TYPE, 3)\n" +"__constant float c_RGB2YUVCoeffs_f[5] = { B2YF, G2YF, R2YF, B2UF, R2VF };\n" +"__constant int c_RGB2YUVCoeffs_i[5] = { B2Y, G2Y, R2Y, B2UI, R2VI };\n" +"__kernel void RGB2YUV(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dt_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dt_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE* src = (__global const DATA_TYPE*)(srcptr + src_index);\n" +"__global DATA_TYPE* dst = (__global DATA_TYPE*)(dstptr + dst_index);\n" +"DATA_TYPE_3 src_pix = vload3(0, src);\n" +"DATA_TYPE b = src_pix.B_COMP, g = src_pix.G_COMP, r = src_pix.R_COMP;\n" +"#ifdef DEPTH_5\n" +"__constant float * coeffs = c_RGB2YUVCoeffs_f;\n" +"const DATA_TYPE Y = fma(b, coeffs[0], fma(g, coeffs[1], r * coeffs[2]));\n" +"const DATA_TYPE U = fma(b - Y, coeffs[3], HALF_MAX_NUM);\n" +"const DATA_TYPE V = fma(r - Y, coeffs[4], HALF_MAX_NUM);\n" +"#else\n" +"__constant int * coeffs = c_RGB2YUVCoeffs_i;\n" +"const int delta = HALF_MAX_NUM * (1 << yuv_shift);\n" +"const int Y = CV_DESCALE(mad24(b, coeffs[0], mad24(g, coeffs[1], mul24(r, coeffs[2]))), yuv_shift);\n" +"const int U = CV_DESCALE(mad24(b - Y, coeffs[3], delta), yuv_shift);\n" +"const int V = CV_DESCALE(mad24(r - Y, coeffs[4], delta), yuv_shift);\n" +"#endif\n" +"dst[0] = SAT_CAST( Y );\n" +"dst[1] = SAT_CAST( U );\n" +"dst[2] = SAT_CAST( V );\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__constant float c_YUV2RGBCoeffs_f[4] = { U2BF, U2GF, V2GF, V2RF };\n" +"__constant int c_YUV2RGBCoeffs_i[4] = { U2BI, U2GI, V2GI, V2RI };\n" +"__kernel void YUV2RGB(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dt_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dt_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE* src = (__global const DATA_TYPE*)(srcptr + src_index);\n" +"__global DATA_TYPE* dst = (__global DATA_TYPE*)(dstptr + dst_index);\n" +"DATA_TYPE_4 src_pix = vload4(0, src);\n" +"DATA_TYPE Y = src_pix.x, U = src_pix.y, V = src_pix.z;\n" +"#ifdef DEPTH_5\n" +"__constant float * coeffs = c_YUV2RGBCoeffs_f;\n" +"float r = fma(V - HALF_MAX_NUM, coeffs[3], Y);\n" +"float g = fma(V - HALF_MAX_NUM, coeffs[2], fma(U - HALF_MAX_NUM, coeffs[1], Y));\n" +"float b = fma(U - HALF_MAX_NUM, coeffs[0], Y);\n" +"#else\n" +"__constant int * coeffs = c_YUV2RGBCoeffs_i;\n" +"const int r = Y + CV_DESCALE(mul24(V - HALF_MAX_NUM, coeffs[3]), yuv_shift);\n" +"const int g = Y + CV_DESCALE(mad24(V - HALF_MAX_NUM, coeffs[2], mul24(U - HALF_MAX_NUM, coeffs[1])), yuv_shift);\n" +"const 
int b = Y + CV_DESCALE(mul24(U - HALF_MAX_NUM, coeffs[0]), yuv_shift);\n" +"#endif\n" +"dst[bidx] = SAT_CAST( b );\n" +"dst[1] = SAT_CAST( g );\n" +"dst[bidx^2] = SAT_CAST( r );\n" +"#if dcn == 4\n" +"dst[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__constant float c_YUV2RGBCoeffs_420[5] = { 1.163999557f, 2.017999649f, -0.390999794f,\n" +"-0.812999725f, 1.5959997177f };\n" +"__kernel void YUV2RGB_NVx(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dt_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols / 2)\n" +"{\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows / 2 )\n" +"{\n" +"__global const uchar* ysrc = srcptr + mad24(y << 1, src_step, (x << 1) + src_offset);\n" +"__global const uchar* usrc = srcptr + mad24(rows + y, src_step, (x << 1) + src_offset);\n" +"__global uchar* dst1 = dstptr + mad24(y << 1, dst_step, mad24(x, dcn<<1, dt_offset));\n" +"__global uchar* dst2 = dst1 + dst_step;\n" +"float Y1 = ysrc[0];\n" +"float Y2 = ysrc[1];\n" +"float Y3 = ysrc[src_step];\n" +"float Y4 = ysrc[src_step + 1];\n" +"float U = ((float)usrc[uidx]) - HALF_MAX_NUM;\n" +"float V = ((float)usrc[1-uidx]) - HALF_MAX_NUM;\n" +"__constant float* coeffs = c_YUV2RGBCoeffs_420;\n" +"float ruv = fma(coeffs[4], V, 0.5f);\n" +"float guv = fma(coeffs[3], V, fma(coeffs[2], U, 0.5f));\n" +"float buv = fma(coeffs[1], U, 0.5f);\n" +"Y1 = max(0.f, Y1 - 16.f) * coeffs[0];\n" +"dst1[2 - bidx] = convert_uchar_sat(Y1 + ruv);\n" +"dst1[1] = convert_uchar_sat(Y1 + guv);\n" +"dst1[bidx] = convert_uchar_sat(Y1 + buv);\n" +"#if dcn == 4\n" +"dst1[3] = 255;\n" +"#endif\n" +"Y2 = max(0.f, Y2 - 16.f) * coeffs[0];\n" +"dst1[dcn + 2 - bidx] = convert_uchar_sat(Y2 + ruv);\n" +"dst1[dcn + 1] = convert_uchar_sat(Y2 + guv);\n" +"dst1[dcn + bidx] = convert_uchar_sat(Y2 + buv);\n" +"#if dcn == 4\n" +"dst1[7] = 255;\n" +"#endif\n" +"Y3 = max(0.f, Y3 - 16.f) * coeffs[0];\n" +"dst2[2 - bidx] = convert_uchar_sat(Y3 + ruv);\n" +"dst2[1] = convert_uchar_sat(Y3 + guv);\n" +"dst2[bidx] = convert_uchar_sat(Y3 + buv);\n" +"#if dcn == 4\n" +"dst2[3] = 255;\n" +"#endif\n" +"Y4 = max(0.f, Y4 - 16.f) * coeffs[0];\n" +"dst2[dcn + 2 - bidx] = convert_uchar_sat(Y4 + ruv);\n" +"dst2[dcn + 1] = convert_uchar_sat(Y4 + guv);\n" +"dst2[dcn + bidx] = convert_uchar_sat(Y4 + buv);\n" +"#if dcn == 4\n" +"dst2[7] = 255;\n" +"#endif\n" +"}\n" +"++y;\n" +"}\n" +"}\n" +"}\n" +"#if uidx < 2\n" +"__kernel void YUV2RGB_YV12_IYUV(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dt_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols / 2)\n" +"{\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows / 2 )\n" +"{\n" +"__global const uchar* ysrc = srcptr + mad24(y << 1, src_step, (x << 1) + src_offset);\n" +"__global uchar* dst1 = dstptr + mad24(y << 1, dst_step, x * (dcn<<1) + dt_offset);\n" +"__global uchar* dst2 = dst1 + dst_step;\n" +"float Y1 = ysrc[0];\n" +"float Y2 = ysrc[1];\n" +"float Y3 = ysrc[src_step];\n" +"float Y4 = ysrc[src_step + 1];\n" +"#ifdef SRC_CONT\n" +"__global const uchar* uvsrc = srcptr + mad24(rows, src_step, src_offset);\n" +"int u_ind = mad24(y, cols >> 1, x);\n" +"float uv[2] = { ((float)uvsrc[u_ind]) - 
HALF_MAX_NUM, ((float)uvsrc[u_ind + ((rows * cols) >> 2)]) - HALF_MAX_NUM };\n" +"#else\n" +"int vsteps[2] = { cols >> 1, src_step - (cols >> 1)};\n" +"__global const uchar* usrc = srcptr + mad24(rows + (y>>1), src_step, src_offset + (y%2)*(cols >> 1) + x);\n" +"__global const uchar* vsrc = usrc + mad24(rows >> 2, src_step, rows % 4 ? vsteps[y%2] : 0);\n" +"float uv[2] = { ((float)usrc[0]) - HALF_MAX_NUM, ((float)vsrc[0]) - HALF_MAX_NUM };\n" +"#endif\n" +"float U = uv[uidx];\n" +"float V = uv[1-uidx];\n" +"__constant float* coeffs = c_YUV2RGBCoeffs_420;\n" +"float ruv = fma(coeffs[4], V, 0.5f);\n" +"float guv = fma(coeffs[3], V, fma(coeffs[2], U, 0.5f));\n" +"float buv = fma(coeffs[1], U, 0.5f);\n" +"Y1 = max(0.f, Y1 - 16.f) * coeffs[0];\n" +"dst1[2 - bidx] = convert_uchar_sat(Y1 + ruv);\n" +"dst1[1] = convert_uchar_sat(Y1 + guv);\n" +"dst1[bidx] = convert_uchar_sat(Y1 + buv);\n" +"#if dcn == 4\n" +"dst1[3] = 255;\n" +"#endif\n" +"Y2 = max(0.f, Y2 - 16.f) * coeffs[0];\n" +"dst1[dcn + 2 - bidx] = convert_uchar_sat(Y2 + ruv);\n" +"dst1[dcn + 1] = convert_uchar_sat(Y2 + guv);\n" +"dst1[dcn + bidx] = convert_uchar_sat(Y2 + buv);\n" +"#if dcn == 4\n" +"dst1[7] = 255;\n" +"#endif\n" +"Y3 = max(0.f, Y3 - 16.f) * coeffs[0];\n" +"dst2[2 - bidx] = convert_uchar_sat(Y3 + ruv);\n" +"dst2[1] = convert_uchar_sat(Y3 + guv);\n" +"dst2[bidx] = convert_uchar_sat(Y3 + buv);\n" +"#if dcn == 4\n" +"dst2[3] = 255;\n" +"#endif\n" +"Y4 = max(0.f, Y4 - 16.f) * coeffs[0];\n" +"dst2[dcn + 2 - bidx] = convert_uchar_sat(Y4 + ruv);\n" +"dst2[dcn + 1] = convert_uchar_sat(Y4 + guv);\n" +"dst2[dcn + bidx] = convert_uchar_sat(Y4 + buv);\n" +"#if dcn == 4\n" +"dst2[7] = 255;\n" +"#endif\n" +"}\n" +"++y;\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +"#if uidx < 2\n" +"__constant float c_RGB2YUVCoeffs_420[8] = { 0.256999969f, 0.50399971f, 0.09799957f, -0.1479988098f, -0.2909994125f,\n" +"0.438999176f, -0.3679990768f, -0.0709991455f };\n" +"__kernel void RGB2YUV_YV12_IYUV(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0) * PIX_PER_WI_X;\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols/2)\n" +"{\n" +"int src_index = mad24(y << 1, src_step, mad24(x << 1, scn, src_offset));\n" +"int ydst_index = mad24(y << 1, dst_step, (x << 1) + dst_offset);\n" +"int y_rows = rows / 3 * 2;\n" +"int vsteps[2] = { cols >> 1, dst_step - (cols >> 1)};\n" +"__constant float* coeffs = c_RGB2YUVCoeffs_420;\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows / 3)\n" +"{\n" +"__global const uchar* src1 = srcptr + src_index;\n" +"__global const uchar* src2 = src1 + src_step;\n" +"__global uchar* ydst1 = dstptr + ydst_index;\n" +"__global uchar* ydst2 = ydst1 + dst_step;\n" +"__global uchar* udst = dstptr + mad24(y_rows + (y>>1), dst_step, dst_offset + (y%2)*(cols >> 1) + x);\n" +"__global uchar* vdst = udst + mad24(y_rows >> 2, dst_step, y_rows % 4 ? 
vsteps[y%2] : 0);\n" +"#if PIX_PER_WI_X == 2\n" +"int s11 = *((__global const int*) src1);\n" +"int s12 = *((__global const int*) src1 + 1);\n" +"int s13 = *((__global const int*) src1 + 2);\n" +"#if scn == 4\n" +"int s14 = *((__global const int*) src1 + 3);\n" +"#endif\n" +"int s21 = *((__global const int*) src2);\n" +"int s22 = *((__global const int*) src2 + 1);\n" +"int s23 = *((__global const int*) src2 + 2);\n" +"#if scn == 4\n" +"int s24 = *((__global const int*) src2 + 3);\n" +"#endif\n" +"float src_pix1[scn * 4], src_pix2[scn * 4];\n" +"*((float4*) src_pix1) = convert_float4(as_uchar4(s11));\n" +"*((float4*) src_pix1 + 1) = convert_float4(as_uchar4(s12));\n" +"*((float4*) src_pix1 + 2) = convert_float4(as_uchar4(s13));\n" +"#if scn == 4\n" +"*((float4*) src_pix1 + 3) = convert_float4(as_uchar4(s14));\n" +"#endif\n" +"*((float4*) src_pix2) = convert_float4(as_uchar4(s21));\n" +"*((float4*) src_pix2 + 1) = convert_float4(as_uchar4(s22));\n" +"*((float4*) src_pix2 + 2) = convert_float4(as_uchar4(s23));\n" +"#if scn == 4\n" +"*((float4*) src_pix2 + 3) = convert_float4(as_uchar4(s24));\n" +"#endif\n" +"uchar4 y1, y2;\n" +"y1.x = convert_uchar_sat(fma(coeffs[0], src_pix1[ 2-bidx], fma(coeffs[1], src_pix1[ 1], fma(coeffs[2], src_pix1[ bidx], 16.5f))));\n" +"y1.y = convert_uchar_sat(fma(coeffs[0], src_pix1[ scn+2-bidx], fma(coeffs[1], src_pix1[ scn+1], fma(coeffs[2], src_pix1[ scn+bidx], 16.5f))));\n" +"y1.z = convert_uchar_sat(fma(coeffs[0], src_pix1[2*scn+2-bidx], fma(coeffs[1], src_pix1[2*scn+1], fma(coeffs[2], src_pix1[2*scn+bidx], 16.5f))));\n" +"y1.w = convert_uchar_sat(fma(coeffs[0], src_pix1[3*scn+2-bidx], fma(coeffs[1], src_pix1[3*scn+1], fma(coeffs[2], src_pix1[3*scn+bidx], 16.5f))));\n" +"y2.x = convert_uchar_sat(fma(coeffs[0], src_pix2[ 2-bidx], fma(coeffs[1], src_pix2[ 1], fma(coeffs[2], src_pix2[ bidx], 16.5f))));\n" +"y2.y = convert_uchar_sat(fma(coeffs[0], src_pix2[ scn+2-bidx], fma(coeffs[1], src_pix2[ scn+1], fma(coeffs[2], src_pix2[ scn+bidx], 16.5f))));\n" +"y2.z = convert_uchar_sat(fma(coeffs[0], src_pix2[2*scn+2-bidx], fma(coeffs[1], src_pix2[2*scn+1], fma(coeffs[2], src_pix2[2*scn+bidx], 16.5f))));\n" +"y2.w = convert_uchar_sat(fma(coeffs[0], src_pix2[3*scn+2-bidx], fma(coeffs[1], src_pix2[3*scn+1], fma(coeffs[2], src_pix2[3*scn+bidx], 16.5f))));\n" +"*((__global int*) ydst1) = as_int(y1);\n" +"*((__global int*) ydst2) = as_int(y2);\n" +"float uv[4] = { fma(coeffs[3], src_pix1[ 2-bidx], fma(coeffs[4], src_pix1[ 1], fma(coeffs[5], src_pix1[ bidx], 128.5f))),\n" +"fma(coeffs[5], src_pix1[ 2-bidx], fma(coeffs[6], src_pix1[ 1], fma(coeffs[7], src_pix1[ bidx], 128.5f))),\n" +"fma(coeffs[3], src_pix1[2*scn+2-bidx], fma(coeffs[4], src_pix1[2*scn+1], fma(coeffs[5], src_pix1[2*scn+bidx], 128.5f))),\n" +"fma(coeffs[5], src_pix1[2*scn+2-bidx], fma(coeffs[6], src_pix1[2*scn+1], fma(coeffs[7], src_pix1[2*scn+bidx], 128.5f))) };\n" +"udst[0] = convert_uchar_sat(uv[uidx] );\n" +"vdst[0] = convert_uchar_sat(uv[1 - uidx]);\n" +"udst[1] = convert_uchar_sat(uv[2 + uidx]);\n" +"vdst[1] = convert_uchar_sat(uv[3 - uidx]);\n" +"#else\n" +"float4 src_pix1 = convert_float4(vload4(0, src1));\n" +"float4 src_pix2 = convert_float4(vload4(0, src1+scn));\n" +"float4 src_pix3 = convert_float4(vload4(0, src2));\n" +"float4 src_pix4 = convert_float4(vload4(0, src2+scn));\n" +"ydst1[0] = convert_uchar_sat(fma(coeffs[0], src_pix1.R_COMP, fma(coeffs[1], src_pix1.G_COMP, fma(coeffs[2], src_pix1.B_COMP, 16.5f))));\n" +"ydst1[1] = convert_uchar_sat(fma(coeffs[0], src_pix2.R_COMP, fma(coeffs[1], 
src_pix2.G_COMP, fma(coeffs[2], src_pix2.B_COMP, 16.5f))));\n" +"ydst2[0] = convert_uchar_sat(fma(coeffs[0], src_pix3.R_COMP, fma(coeffs[1], src_pix3.G_COMP, fma(coeffs[2], src_pix3.B_COMP, 16.5f))));\n" +"ydst2[1] = convert_uchar_sat(fma(coeffs[0], src_pix4.R_COMP, fma(coeffs[1], src_pix4.G_COMP, fma(coeffs[2], src_pix4.B_COMP, 16.5f))));\n" +"float uv[2] = { fma(coeffs[3], src_pix1.R_COMP, fma(coeffs[4], src_pix1.G_COMP, fma(coeffs[5], src_pix1.B_COMP, 128.5f))),\n" +"fma(coeffs[5], src_pix1.R_COMP, fma(coeffs[6], src_pix1.G_COMP, fma(coeffs[7], src_pix1.B_COMP, 128.5f))) };\n" +"udst[0] = convert_uchar_sat(uv[uidx] );\n" +"vdst[0] = convert_uchar_sat(uv[1-uidx]);\n" +"#endif\n" +"++y;\n" +"src_index += 2*src_step;\n" +"ydst_index += 2*dst_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +"__kernel void YUV2RGB_422(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols / 2)\n" +"{\n" +"__global const uchar* src = srcptr + mad24(y, src_step, (x << 2) + src_offset);\n" +"__global uchar* dst = dstptr + mad24(y, dst_step, mad24(x << 1, dcn, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows )\n" +"{\n" +"__constant float* coeffs = c_YUV2RGBCoeffs_420;\n" +"#ifndef USE_OPTIMIZED_LOAD\n" +"float U = ((float) src[uidx]) - HALF_MAX_NUM;\n" +"float V = ((float) src[(2 + uidx) % 4]) - HALF_MAX_NUM;\n" +"float y00 = max(0.f, ((float) src[yidx]) - 16.f) * coeffs[0];\n" +"float y01 = max(0.f, ((float) src[yidx + 2]) - 16.f) * coeffs[0];\n" +"#else\n" +"int load_src = *((__global int*) src);\n" +"float vec_src[4] = { load_src & 0xff, (load_src >> 8) & 0xff, (load_src >> 16) & 0xff, (load_src >> 24) & 0xff};\n" +"float U = vec_src[uidx] - HALF_MAX_NUM;\n" +"float V = vec_src[(2 + uidx) % 4] - HALF_MAX_NUM;\n" +"float y00 = max(0.f, vec_src[yidx] - 16.f) * coeffs[0];\n" +"float y01 = max(0.f, vec_src[yidx + 2] - 16.f) * coeffs[0];\n" +"#endif\n" +"float ruv = fma(coeffs[4], V, 0.5f);\n" +"float guv = fma(coeffs[3], V, fma(coeffs[2], U, 0.5f));\n" +"float buv = fma(coeffs[1], U, 0.5f);\n" +"dst[2 - bidx] = convert_uchar_sat(y00 + ruv);\n" +"dst[1] = convert_uchar_sat(y00 + guv);\n" +"dst[bidx] = convert_uchar_sat(y00 + buv);\n" +"#if dcn == 4\n" +"dst[3] = 255;\n" +"#endif\n" +"dst[dcn + 2 - bidx] = convert_uchar_sat(y01 + ruv);\n" +"dst[dcn + 1] = convert_uchar_sat(y01 + guv);\n" +"dst[dcn + bidx] = convert_uchar_sat(y01 + buv);\n" +"#if dcn == 4\n" +"dst[7] = 255;\n" +"#endif\n" +"}\n" +"++y;\n" +"src += src_step;\n" +"dst += dst_step;\n" +"}\n" +"}\n" +"}\n" +"__constant float c_RGB2YCrCbCoeffs_f[5] = {R2YF, G2YF, B2YF, YCRF, YCBF};\n" +"__constant int c_RGB2YCrCbCoeffs_i[5] = {R2Y, G2Y, B2Y, YCRI, YCBI};\n" +"__kernel void RGB2YCrCb(__global const uchar* srcptr, int src_step, int src_offset,\n" +"__global uchar* dstptr, int dst_step, int dt_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dt_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE* src = (__global const DATA_TYPE*)(srcptr + src_index);\n" +"__global DATA_TYPE* dst = (__global 
DATA_TYPE*)(dstptr + dst_index);\n" +"DATA_TYPE_4 src_pix = vload4(0, src);\n" +"DATA_TYPE b = src_pix.B_COMP, g = src_pix.G_COMP, r = src_pix.R_COMP;\n" +"#ifdef DEPTH_5\n" +"__constant float * coeffs = c_RGB2YCrCbCoeffs_f;\n" +"DATA_TYPE Y = fma(b, coeffs[2], fma(g, coeffs[1], r * coeffs[0]));\n" +"DATA_TYPE Cr = fma(r - Y, coeffs[3], HALF_MAX_NUM);\n" +"DATA_TYPE Cb = fma(b - Y, coeffs[4], HALF_MAX_NUM);\n" +"#else\n" +"__constant int * coeffs = c_RGB2YCrCbCoeffs_i;\n" +"int delta = HALF_MAX_NUM * (1 << yuv_shift);\n" +"int Y = CV_DESCALE(mad24(b, coeffs[2], mad24(g, coeffs[1], mul24(r, coeffs[0]))), yuv_shift);\n" +"int Cr = CV_DESCALE(mad24(r - Y, coeffs[3], delta), yuv_shift);\n" +"int Cb = CV_DESCALE(mad24(b - Y, coeffs[4], delta), yuv_shift);\n" +"#endif\n" +"dst[0] = SAT_CAST( Y );\n" +"dst[1] = SAT_CAST( Cr );\n" +"dst[2] = SAT_CAST( Cb );\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__constant float c_YCrCb2RGBCoeffs_f[4] = { CR2RF, CR2GF, CB2GF, CB2BF };\n" +"__constant int c_YCrCb2RGBCoeffs_i[4] = { CR2RI, CR2GI, CB2GI, CB2BI };\n" +"__kernel void YCrCb2RGB(__global const uchar* src, int src_step, int src_offset,\n" +"__global uchar* dst, int dst_step, int dst_offset,\n" +"int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * PIX_PER_WI_Y;\n" +"if (x < cols)\n" +"{\n" +"int src_index = mad24(y, src_step, mad24(x, scnbytes, src_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, dcnbytes, dst_offset));\n" +"#pragma unroll\n" +"for (int cy = 0; cy < PIX_PER_WI_Y; ++cy)\n" +"{\n" +"if (y < rows)\n" +"{\n" +"__global const DATA_TYPE * srcptr = (__global const DATA_TYPE*)(src + src_index);\n" +"__global DATA_TYPE * dstptr = (__global DATA_TYPE*)(dst + dst_index);\n" +"DATA_TYPE_4 src_pix = vload4(0, srcptr);\n" +"DATA_TYPE yp = src_pix.x, cr = src_pix.y, cb = src_pix.z;\n" +"#ifdef DEPTH_5\n" +"__constant float * coeff = c_YCrCb2RGBCoeffs_f;\n" +"float r = fma(coeff[0], cr - HALF_MAX_NUM, yp);\n" +"float g = fma(coeff[1], cr - HALF_MAX_NUM, fma(coeff[2], cb - HALF_MAX_NUM, yp));\n" +"float b = fma(coeff[3], cb - HALF_MAX_NUM, yp);\n" +"#else\n" +"__constant int * coeff = c_YCrCb2RGBCoeffs_i;\n" +"int r = yp + CV_DESCALE(coeff[0] * (cr - HALF_MAX_NUM), yuv_shift);\n" +"int g = yp + CV_DESCALE(mad24(coeff[1], cr - HALF_MAX_NUM, coeff[2] * (cb - HALF_MAX_NUM)), yuv_shift);\n" +"int b = yp + CV_DESCALE(coeff[3] * (cb - HALF_MAX_NUM), yuv_shift);\n" +"#endif\n" +"dstptr[(bidx^2)] = SAT_CAST(r);\n" +"dstptr[1] = SAT_CAST(g);\n" +"dstptr[bidx] = SAT_CAST(b);\n" +"#if dcn == 4\n" +"dstptr[3] = MAX_NUM;\n" +"#endif\n" +"++y;\n" +"dst_index += dst_step;\n" +"src_index += src_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +, "5c9da97816c27d6319efa89b2fd7c1ea", NULL}; +struct cv::ocl::internal::ProgramEntry corner_oclsrc={moduleName, "corner", +"#ifdef BORDER_CONSTANT\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"x = max(min(x, maxV - 1), 0); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"if (x < 0) \\\n" +"x -= ((x - maxV + 1) / maxV) * maxV; \\\n" +"if (x >= maxV) \\\n" +"x %= maxV; \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT101)\n" +"#define EXTRAPOLATE_(x, maxV, delta) \\\n" +"{ \\\n" +"if (maxV == 1) \\\n" +"x = 0; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if ( x < 0 ) \\\n" +"x = -x - 1 + delta; \\\n" +"else \\\n" +"x = maxV - 1 - (x - maxV) - delta; \\\n" +"} \\\n" +"while (x >= maxV || x 
< 0); \\\n" +"}\n" +"#ifdef BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, maxV) EXTRAPOLATE_(x, maxV, 0)\n" +"#else\n" +"#define EXTRAPOLATE(x, maxV) EXTRAPOLATE_(x, maxV, 1)\n" +"#endif\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#define THREADS 256\n" +"__kernel void corner(__global const float * Dx, int dx_step, int dx_offset, int dx_whole_rows, int dx_whole_cols,\n" +"__global const float * Dy, int dy_step, int dy_offset, int dy_whole_rows, int dy_whole_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols, float k)\n" +"{\n" +"int col = get_local_id(0);\n" +"int gX = get_group_id(0);\n" +"int gY = get_group_id(1);\n" +"int gly = get_global_id(1);\n" +"int dx_x_off = (dx_offset % dx_step) >> 2;\n" +"int dx_y_off = dx_offset / dx_step;\n" +"int dy_x_off = (dy_offset % dy_step) >> 2;\n" +"int dy_y_off = dy_offset / dy_step;\n" +"int dst_x_off = (dst_offset % dst_step) >> 2;\n" +"int dst_y_off = dst_offset / dst_step;\n" +"int dx_startX = gX * (THREADS-ksX+1) - anX + dx_x_off;\n" +"int dx_startY = (gY << 1) - anY + dx_y_off;\n" +"int dy_startX = gX * (THREADS-ksX+1) - anX + dy_x_off;\n" +"int dy_startY = (gY << 1) - anY + dy_y_off;\n" +"int dst_startX = gX * (THREADS-ksX+1) + dst_x_off;\n" +"int dst_startY = (gY << 1) + dst_y_off;\n" +"float data[3][ksY+1];\n" +"__local float temp[6][THREADS];\n" +"#ifdef BORDER_CONSTANT\n" +"for (int i=0; i < ksY+1; i++)\n" +"{\n" +"bool dx_con = dx_startX+col >= 0 && dx_startX+col < dx_whole_cols && dx_startY+i >= 0 && dx_startY+i < dx_whole_rows;\n" +"int indexDx = mad24(dx_startY+i, dx_step>>2, dx_startX+col);\n" +"float dx_s = dx_con ? Dx[indexDx] : 0.0f;\n" +"bool dy_con = dy_startX+col >= 0 && dy_startX+col < dy_whole_cols && dy_startY+i >= 0 && dy_startY+i < dy_whole_rows;\n" +"int indexDy = mad24(dy_startY+i, dy_step>>2, dy_startX+col);\n" +"float dy_s = dy_con ? 
Dy[indexDy] : 0.0f;\n" +"data[0][i] = dx_s * dx_s;\n" +"data[1][i] = dx_s * dy_s;\n" +"data[2][i] = dy_s * dy_s;\n" +"}\n" +"#else\n" +"int clamped_col = min(2*dst_cols, col);\n" +"for (int i=0; i < ksY+1; i++)\n" +"{\n" +"int dx_selected_row = dx_startY+i, dx_selected_col = dx_startX+clamped_col;\n" +"EXTRAPOLATE(dx_selected_row, dx_whole_rows)\n" +"EXTRAPOLATE(dx_selected_col, dx_whole_cols)\n" +"float dx_s = Dx[mad24(dx_selected_row, dx_step>>2, dx_selected_col)];\n" +"int dy_selected_row = dy_startY+i, dy_selected_col = dy_startX+clamped_col;\n" +"EXTRAPOLATE(dy_selected_row, dy_whole_rows)\n" +"EXTRAPOLATE(dy_selected_col, dy_whole_cols)\n" +"float dy_s = Dy[mad24(dy_selected_row, dy_step>>2, dy_selected_col)];\n" +"data[0][i] = dx_s * dx_s;\n" +"data[1][i] = dx_s * dy_s;\n" +"data[2][i] = dy_s * dy_s;\n" +"}\n" +"#endif\n" +"float sum0 = 0.0f, sum1 = 0.0f, sum2 = 0.0f;\n" +"for (int i=1; i < ksY; i++)\n" +"{\n" +"sum0 += data[0][i];\n" +"sum1 += data[1][i];\n" +"sum2 += data[2][i];\n" +"}\n" +"float sum01 = sum0 + data[0][0];\n" +"float sum02 = sum0 + data[0][ksY];\n" +"temp[0][col] = sum01;\n" +"temp[1][col] = sum02;\n" +"float sum11 = sum1 + data[1][0];\n" +"float sum12 = sum1 + data[1][ksY];\n" +"temp[2][col] = sum11;\n" +"temp[3][col] = sum12;\n" +"float sum21 = sum2 + data[2][0];\n" +"float sum22 = sum2 + data[2][ksY];\n" +"temp[4][col] = sum21;\n" +"temp[5][col] = sum22;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (col < (THREADS - (ksX - 1)))\n" +"{\n" +"col += anX;\n" +"int posX = dst_startX - dst_x_off + col - anX;\n" +"int posY = (gly << 1);\n" +"int till = (ksX + 1) & 1;\n" +"float tmp_sum[6] = { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f };\n" +"for (int k=0; k<6; k++)\n" +"{\n" +"float temp_sum = 0;\n" +"for (int i=-anX; i<=anX - till; i++)\n" +"temp_sum += temp[k][col+i];\n" +"tmp_sum[k] = temp_sum;\n" +"}\n" +"#ifdef CORNER_HARRIS\n" +"if (posX < dst_cols && (posY) < dst_rows)\n" +"{\n" +"int dst_index = mad24(dst_step, dst_startY, (int)sizeof(float) * (dst_startX + col - anX));\n" +"*(__global float *)(dst + dst_index) =\n" +"tmp_sum[0] * tmp_sum[4] - tmp_sum[2] * tmp_sum[2] - k * (tmp_sum[0] + tmp_sum[4]) * (tmp_sum[0] + tmp_sum[4]);\n" +"}\n" +"if (posX < dst_cols && (posY + 1) < dst_rows)\n" +"{\n" +"int dst_index = mad24(dst_step, dst_startY + 1, (int)sizeof(float) * (dst_startX + col - anX));\n" +"*(__global float *)(dst + dst_index) =\n" +"tmp_sum[1] * tmp_sum[5] - tmp_sum[3] * tmp_sum[3] - k * (tmp_sum[1] + tmp_sum[5]) * (tmp_sum[1] + tmp_sum[5]);\n" +"}\n" +"#elif defined CORNER_MINEIGENVAL\n" +"if (posX < dst_cols && (posY) < dst_rows)\n" +"{\n" +"int dst_index = mad24(dst_step, dst_startY, (int)sizeof(float) * (dst_startX + col - anX));\n" +"float a = tmp_sum[0] * 0.5f;\n" +"float b = tmp_sum[2];\n" +"float c = tmp_sum[4] * 0.5f;\n" +"*(__global float *)(dst + dst_index) = (float)((a+c) - native_sqrt((a-c)*(a-c) + b*b));\n" +"}\n" +"if (posX < dst_cols && (posY + 1) < dst_rows)\n" +"{\n" +"int dst_index = mad24(dst_step, dst_startY + 1, (int)sizeof(float) * (dst_startX + col - anX));\n" +"float a = tmp_sum[1] * 0.5f;\n" +"float b = tmp_sum[3];\n" +"float c = tmp_sum[5] * 0.5f;\n" +"*(__global float *)(dst + dst_index) = (float)((a+c) - native_sqrt((a-c)*(a-c) + b*b));\n" +"}\n" +"#else\n" +"#error \"No such corners type\"\n" +"#endif\n" +"}\n" +"}\n" +, "0b0ba9ee4305009cb2433737f7ed5bcd", NULL}; +struct cv::ocl::internal::ProgramEntry covardata_oclsrc={moduleName, "covardata", +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, maxV)\n" +"#elif defined 
BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), 0, (maxV)-1); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = ( (x) + (maxV) ) % (maxV); \\\n" +"}\n" +"#elif defined BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min( mad24((maxV)-1,2,-(x))+1 , max((x),-(x)-1) ); \\\n" +"}\n" +"#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min( mad24((maxV)-1,2,-(x)), max((x),-(x)) ); \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#define SRC(_x,_y) convert_float(((global SRCTYPE*)(Src+(_y)*src_step))[_x])\n" +"#ifdef BORDER_CONSTANT\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) (_x)<0 | (_x) >= (r_edge) | (_y)<0 | (_y) >= (t_edge) ? (const_v) : SRC((_x),(_y))\n" +"#else\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) SRC((_x),(_y))\n" +"#endif\n" +"#define DSTX(_x,_y) (((global float*)(DstX+DstXOffset+(_y)*DstXPitch))[_x])\n" +"#define DSTY(_x,_y) (((global float*)(DstY+DstYOffset+(_y)*DstYPitch))[_x])\n" +"#define INIT_AND_READ_LOCAL_SOURCE(width, height, fill_const, kernel_border) \\\n" +"int srcX = x + srcOffsetX - (kernel_border); \\\n" +"int srcY = y + srcOffsetY - (kernel_border); \\\n" +"int xb = srcX; \\\n" +"int yb = srcY; \\\n" +"\\\n" +"EXTRAPOLATE(xb, (width)); \\\n" +"EXTRAPOLATE(yb, (height)); \\\n" +"lsmem[liy][lix] = ELEM(xb, yb, (width), (height), (fill_const) ); \\\n" +"\\\n" +"if(lix < ((kernel_border)*2)) \\\n" +"{ \\\n" +"int xb = srcX+BLK_X; \\\n" +"EXTRAPOLATE(xb,(width)); \\\n" +"lsmem[liy][lix+BLK_X] = ELEM(xb, yb, (width), (height), (fill_const) ); \\\n" +"} \\\n" +"if(liy< ((kernel_border)*2)) \\\n" +"{ \\\n" +"int yb = srcY+BLK_Y; \\\n" +"EXTRAPOLATE(yb, (height)); \\\n" +"lsmem[liy+BLK_Y][lix] = ELEM(xb, yb, (width), (height), (fill_const) ); \\\n" +"} \\\n" +"if(lix<((kernel_border)*2) && liy<((kernel_border)*2)) \\\n" +"{ \\\n" +"int xb = srcX+BLK_X; \\\n" +"int yb = srcY+BLK_Y; \\\n" +"EXTRAPOLATE(xb,(width)); \\\n" +"EXTRAPOLATE(yb,(height)); \\\n" +"lsmem[liy+BLK_Y][lix+BLK_X] = ELEM(xb, yb, (width), (height), (fill_const) ); \\\n" +"}\n" +"__kernel void sobel3(__global const uchar * Src, int src_step, int srcOffsetX, int srcOffsetY,\n" +"__global uchar * DstX, int DstXPitch, int DstXOffset,\n" +"__global uchar * DstY, int DstYPitch, int DstYOffset, int dstHeight, int dstWidth,\n" +"int height, int width, float scale)\n" +"{\n" +"__local float lsmem[BLK_Y+2][BLK_X+2];\n" +"int lix = get_local_id(0);\n" +"int liy = get_local_id(1);\n" +"int x = (int)get_global_id(0);\n" +"int y = (int)get_global_id(1);\n" +"INIT_AND_READ_LOCAL_SOURCE(width, height, 0, 1)\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if( x >= dstWidth || y >=dstHeight ) return;\n" +"float u1 = lsmem[liy][lix];\n" +"float u2 = lsmem[liy][lix+1];\n" +"float u3 = lsmem[liy][lix+2];\n" +"float m1 = lsmem[liy+1][lix];\n" +"float m3 = lsmem[liy+1][lix+2];\n" +"float b1 = lsmem[liy+2][lix];\n" +"float b2 = lsmem[liy+2][lix+1];\n" +"float b3 = lsmem[liy+2][lix+2];\n" +"#ifdef SCHARR\n" +"DSTX(x,y) = mad(10.0f, m3 - m1, 3.0f * (u3 - u1 + b3 - b1)) * scale;\n" +"DSTY(x,y) = mad(10.0f, b2 - u2, 3.0f * (b1 - u1 + b3 - u3)) * scale;\n" +"#else\n" +"DSTX(x,y) = mad(2.0f, m3 - m1, u3 - u1 + b3 - b1) * scale;\n" +"DSTY(x,y) = mad(2.0f, b2 - u2, b1 - u1 + b3 - u3) * scale;\n" +"#endif\n" +"}\n" +"__kernel void sobel5(__global const uchar * Src, int src_step, int srcOffsetX, int srcOffsetY,\n" +"__global uchar * DstX, 
int DstXPitch, int DstXOffset,\n" +"__global uchar * DstY, int DstYPitch, int DstYOffset, int dstHeight, int dstWidth,\n" +"int height, int width, float scale)\n" +"{\n" +"__local float lsmem[BLK_Y+4][BLK_X+4];\n" +"int lix = get_local_id(0);\n" +"int liy = get_local_id(1);\n" +"int x = (int)get_global_id(0);\n" +"int y = (int)get_global_id(1);\n" +"INIT_AND_READ_LOCAL_SOURCE(width, height, 0, 2)\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if( x >= dstWidth || y >=dstHeight ) return;\n" +"float t1 = lsmem[liy][lix];\n" +"float t2 = lsmem[liy][lix+1];\n" +"float t3 = lsmem[liy][lix+2];\n" +"float t4 = lsmem[liy][lix+3];\n" +"float t5 = lsmem[liy][lix+4];\n" +"float u1 = lsmem[liy+1][lix];\n" +"float u2 = lsmem[liy+1][lix+1];\n" +"float u3 = lsmem[liy+1][lix+2];\n" +"float u4 = lsmem[liy+1][lix+3];\n" +"float u5 = lsmem[liy+1][lix+4];\n" +"float m1 = lsmem[liy+2][lix];\n" +"float m2 = lsmem[liy+2][lix+1];\n" +"float m4 = lsmem[liy+2][lix+3];\n" +"float m5 = lsmem[liy+2][lix+4];\n" +"float l1 = lsmem[liy+3][lix];\n" +"float l2 = lsmem[liy+3][lix+1];\n" +"float l3 = lsmem[liy+3][lix+2];\n" +"float l4 = lsmem[liy+3][lix+3];\n" +"float l5 = lsmem[liy+3][lix+4];\n" +"float b1 = lsmem[liy+4][lix];\n" +"float b2 = lsmem[liy+4][lix+1];\n" +"float b3 = lsmem[liy+4][lix+2];\n" +"float b4 = lsmem[liy+4][lix+3];\n" +"float b5 = lsmem[liy+4][lix+4];\n" +"DSTX(x,y) = scale *\n" +"mad(12.0f, m4 - m2,\n" +"mad(6.0f, m5 - m1,\n" +"mad(8.0f, u4 - u2 + l4 - l2,\n" +"mad(4.0f, u5 - u1 + l5 - l1,\n" +"mad(2.0f, t4 - t2 + b4 - b2, t5 - t1 + b5 - b1 )\n" +")\n" +")\n" +")\n" +");\n" +"DSTY(x,y) = scale *\n" +"mad(12.0f, l3 - u3,\n" +"mad(6.0f, b3 - t3,\n" +"mad(8.0f, l2 - u2 + l4 - u4,\n" +"mad(4.0f, b2 - t2 + b4 - t4,\n" +"mad(2.0f, l1 - u1 + l5 - u5, b1 - t1 + b5 - t5 )\n" +")\n" +")\n" +")\n" +");\n" +"}\n" +"__kernel void sobel7(__global const uchar * Src, int src_step, int srcOffsetX, int srcOffsetY,\n" +"__global uchar * DstX, int DstXPitch, int DstXOffset,\n" +"__global uchar * DstY, int DstYPitch, int DstYOffset, int dstHeight, int dstWidth,\n" +"int height, int width, float scale)\n" +"{\n" +"__local float lsmem[BLK_Y+6][BLK_X+6];\n" +"int lix = get_local_id(0);\n" +"int liy = get_local_id(1);\n" +"int x = (int)get_global_id(0);\n" +"int y = (int)get_global_id(1);\n" +"INIT_AND_READ_LOCAL_SOURCE(width, height, 0, 3)\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if( x >= dstWidth || y >=dstHeight ) return;\n" +"float tt1 = lsmem[liy][lix];\n" +"float tt2 = lsmem[liy][lix+1];\n" +"float tt3 = lsmem[liy][lix+2];\n" +"float tt4 = lsmem[liy][lix+3];\n" +"float tt5 = lsmem[liy][lix+4];\n" +"float tt6 = lsmem[liy][lix+5];\n" +"float tt7 = lsmem[liy][lix+6];\n" +"float t1 = lsmem[liy+1][lix];\n" +"float t2 = lsmem[liy+1][lix+1];\n" +"float t3 = lsmem[liy+1][lix+2];\n" +"float t4 = lsmem[liy+1][lix+3];\n" +"float t5 = lsmem[liy+1][lix+4];\n" +"float t6 = lsmem[liy+1][lix+5];\n" +"float t7 = lsmem[liy+1][lix+6];\n" +"float u1 = lsmem[liy+2][lix];\n" +"float u2 = lsmem[liy+2][lix+1];\n" +"float u3 = lsmem[liy+2][lix+2];\n" +"float u4 = lsmem[liy+2][lix+3];\n" +"float u5 = lsmem[liy+2][lix+4];\n" +"float u6 = lsmem[liy+2][lix+5];\n" +"float u7 = lsmem[liy+2][lix+6];\n" +"float m1 = lsmem[liy+3][lix];\n" +"float m2 = lsmem[liy+3][lix+1];\n" +"float m3 = lsmem[liy+3][lix+2];\n" +"float m5 = lsmem[liy+3][lix+4];\n" +"float m6 = lsmem[liy+3][lix+5];\n" +"float m7 = lsmem[liy+3][lix+6];\n" +"float l1 = lsmem[liy+4][lix];\n" +"float l2 = lsmem[liy+4][lix+1];\n" +"float l3 = lsmem[liy+4][lix+2];\n" +"float l4 = 
lsmem[liy+4][lix+3];\n" +"float l5 = lsmem[liy+4][lix+4];\n" +"float l6 = lsmem[liy+4][lix+5];\n" +"float l7 = lsmem[liy+4][lix+6];\n" +"float b1 = lsmem[liy+5][lix];\n" +"float b2 = lsmem[liy+5][lix+1];\n" +"float b3 = lsmem[liy+5][lix+2];\n" +"float b4 = lsmem[liy+5][lix+3];\n" +"float b5 = lsmem[liy+5][lix+4];\n" +"float b6 = lsmem[liy+5][lix+5];\n" +"float b7 = lsmem[liy+5][lix+6];\n" +"float bb1 = lsmem[liy+6][lix];\n" +"float bb2 = lsmem[liy+6][lix+1];\n" +"float bb3 = lsmem[liy+6][lix+2];\n" +"float bb4 = lsmem[liy+6][lix+3];\n" +"float bb5 = lsmem[liy+6][lix+4];\n" +"float bb6 = lsmem[liy+6][lix+5];\n" +"float bb7 = lsmem[liy+6][lix+6];\n" +"DSTX(x,y) = scale *\n" +"mad(100.0f, m5 - m3,\n" +"mad(80.0f, m6 - m2,\n" +"mad(20.0f, m7 - m1,\n" +"mad(75.0f, u5 - u3 + l5 - l3,\n" +"mad(60.0f, u6 - u2 + l6 - l2,\n" +"mad(15.0f, u7 - u1 + l7 - l1,\n" +"mad(30.0f, t5 - t3 + b5 - b3,\n" +"mad(24.0f, t6 - t2 + b6 - b2,\n" +"mad(6.0f, t7 - t1 + b7 - b1,\n" +"mad(5.0f, tt5 - tt3 + bb5 - bb3,\n" +"mad(4.0f, tt6 - tt2 + bb6 - bb2, tt7 - tt1 + bb7 - bb1 )\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +");\n" +"DSTY(x,y) = scale *\n" +"mad(100.0f, l4 - u4,\n" +"mad(80.0f, b4 - t4,\n" +"mad(20.0f, bb4 - tt4,\n" +"mad(75.0f, l5 - u5 + l3 - u3,\n" +"mad(60.0f, b5 - t5 + b3 - t3,\n" +"mad(15.0f, bb5 - tt5 + bb3 - tt3,\n" +"mad(30.0f, l6 - u6 + l2 - u2,\n" +"mad(24.0f, b6 - t6 + b2 - t2,\n" +"mad(6.0f, bb6 - tt6 + bb2 - tt2,\n" +"mad(5.0f, l7 - u7 + l1 - u1,\n" +"mad(4.0f, b7 - t7 + b1 - t1, bb7 - tt7 + bb1 - tt1 )\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +")\n" +");\n" +"}\n" +, "97cb1ffd4e7c1bc93caba596bf9c6e55", NULL}; +struct cv::ocl::internal::ProgramEntry filter2D_oclsrc={moduleName, "filter2D", +"#ifdef EXTRA_EXTRAPOLATION\n" +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, minV, maxV)\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), (minV), (maxV)-1); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"if ((x) < (minV)) \\\n" +"(x) += ((maxV) - (minV)); \\\n" +"if ((x) >= (maxV)) \\\n" +"(x) -= ((maxV) - (minV)); \\\n" +"}\n" +"#elif defined BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"if ((maxV) - (minV) == 1) \\\n" +"(x) = (minV); \\\n" +"else \\\n" +"while ((x) >= (maxV) || (x) < (minV)) \\\n" +"{ \\\n" +"if ((x) < (minV)) \\\n" +"(x) = (minV) - ((x) - (minV)) - 1; \\\n" +"else \\\n" +"(x) = (maxV) - 1 - ((x) - (maxV)); \\\n" +"} \\\n" +"}\n" +"#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"if ((maxV) - (minV) == 1) \\\n" +"(x) = (minV); \\\n" +"else \\\n" +"while ((x) >= (maxV) || (x) < (minV)) \\\n" +"{ \\\n" +"if ((x) < (minV)) \\\n" +"(x) = (minV) - ((x) - (minV)); \\\n" +"else \\\n" +"(x) = (maxV) - 1 - ((x) - (maxV)) - 1; \\\n" +"} \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#else\n" +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, minV, maxV)\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), (minV), (maxV)-1); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"if ((x) < (minV)) \\\n" +"(x) += (((minV) - (x)) / ((maxV) - (minV)) + 1) * ((maxV) - (minV)); \\\n" +"if ((x) >= (maxV)) \\\n" +"(x) = ((x) - (minV)) % ((maxV) - (minV)) + (minV); \\\n" +"}\n" +"#elif defined BORDER_REFLECT\n" +"#define 
EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), 2 * (minV) - (x) - 1, 2 * (maxV) - (x) - 1); \\\n" +"}\n" +"#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101\n" +"#define EXTRAPOLATE(x, minV, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), 2 * (minV) - (x), 2 * (maxV) - (x) - 2); \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#endif\n" +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1) * cn\n" +"#define DSTSIZE (int)sizeof(dstT1) * cn\n" +"#endif\n" +"#define UPDATE_COLUMN_SUM(col) \\\n" +"__constant WT1 * k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * col]; \\\n" +"WT tmp_sum = 0; \\\n" +"for (int sy = 0; sy < KERNEL_SIZE_Y; sy++) \\\n" +"tmp_sum += data[sy] * k[sy]; \\\n" +"sumOfCols[local_id] = tmp_sum; \\\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#define UPDATE_TOTAL_SUM(col) \\\n" +"int id = local_id + col - ANCHOR_X; \\\n" +"if (id >= 0 && id < LOCAL_SIZE) \\\n" +"total_sum += sumOfCols[id]; \\\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#define noconvert\n" +"#define DIG(a) a,\n" +"__constant WT1 kernelData[] = { COEFF };\n" +"__kernel void filter2D(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols, float delta)\n" +"{\n" +"int local_id = get_local_id(0);\n" +"int x = local_id + (LOCAL_SIZE - (KERNEL_SIZE_X - 1)) * get_group_id(0) - ANCHOR_X;\n" +"int y = get_global_id(1);\n" +"WT data[KERNEL_SIZE_Y];\n" +"__local WT sumOfCols[LOCAL_SIZE];\n" +"#ifdef BORDER_ISOLATED\n" +"int srcBeginX = srcOffsetX;\n" +"int srcBeginY = srcOffsetY;\n" +"#else\n" +"int srcBeginX = 0;\n" +"int srcBeginY = 0;\n" +"#endif\n" +"int srcX = srcOffsetX + x;\n" +"int srcY = srcOffsetY + y - ANCHOR_Y;\n" +"__global dstT *dst = (__global dstT *)(dstptr + mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset)));\n" +"#ifdef BORDER_CONSTANT\n" +"if (srcX >= srcBeginX && srcX < srcEndX)\n" +"{\n" +"for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)\n" +"{\n" +"if (srcY >= srcBeginY && srcY < srcEndY)\n" +"data[sy + sy_index] = convertToWT(loadpix(srcptr + mad24(srcY, src_step, srcX * SRCSIZE)));\n" +"else\n" +"data[sy + sy_index] = (WT)(0);\n" +"}\n" +"}\n" +"else\n" +"{\n" +"for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)\n" +"{\n" +"data[sy + sy_index] = (WT)(0);\n" +"}\n" +"}\n" +"#else\n" +"EXTRAPOLATE(srcX, srcBeginX, srcEndX);\n" +"for (int sy = 0, sy_index = 0; sy < KERNEL_SIZE_Y; sy++, srcY++)\n" +"{\n" +"int tempY = srcY;\n" +"EXTRAPOLATE(tempY, srcBeginY, srcEndY);\n" +"data[sy + sy_index] = convertToWT(loadpix(srcptr + mad24(tempY, src_step, srcX * SRCSIZE)));\n" +"}\n" +"#endif\n" +"WT total_sum = 0;\n" +"for (int sx = 0; sx < ANCHOR_X; sx++)\n" +"{\n" +"UPDATE_COLUMN_SUM(sx);\n" +"UPDATE_TOTAL_SUM(sx);\n" +"}\n" +"__constant WT1 * k = &kernelData[KERNEL_SIZE_Y2_ALIGNED * ANCHOR_X];\n" +"for (int sy = 0; sy < KERNEL_SIZE_Y; sy++)\n" +"total_sum += data[sy] * 
k[sy];\n" +"for (int sx = ANCHOR_X + 1; sx < KERNEL_SIZE_X; sx++)\n" +"{\n" +"UPDATE_COLUMN_SUM(sx);\n" +"UPDATE_TOTAL_SUM(sx);\n" +"}\n" +"if (local_id >= ANCHOR_X && local_id < LOCAL_SIZE - (KERNEL_SIZE_X - 1 - ANCHOR_X) && x >= 0 && x < cols)\n" +"storepix(convertToDstT(total_sum + (WT)(delta)), dst);\n" +"}\n" +, "77e935928055f243ff9082b1879a0b2c", NULL}; +struct cv::ocl::internal::ProgramEntry filter2DSmall_oclsrc={moduleName, "filter2DSmall", +"#ifdef BORDER_REPLICATE\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (b_edge)-1 :(addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT_101\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_WRAP\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? 
(i)-(b_edge) : (addr))\n" +"#endif\n" +"#ifdef BORDER_ISOLATED\n" +"#define ISOLATED_MIN(VAL) (VAL)\n" +"#else\n" +"#define ISOLATED_MIN(VAL) 0\n" +"#endif\n" +"#ifdef EXTRA_EXTRAPOLATION\n" +"#ifdef BORDER_CONSTANT\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"x = max(min(x, maxX - 1), minX); \\\n" +"y = max(min(y, maxY - 1), minY); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x -= ((x - maxX + 1) / maxX) * maxX; \\\n" +"if (x >= maxX) \\\n" +"x %= maxX; \\\n" +"if (y < minY) \\\n" +"y -= ((y - maxY + 1) / maxY) * maxY; \\\n" +"if (y >= maxY) \\\n" +"y %= maxY; \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, delta) \\\n" +"{ \\\n" +"if (maxX - minX == 1) \\\n" +"x = minX; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x = minX - (x - minX) - 1 + delta; \\\n" +"else \\\n" +"x = maxX - 1 - (x - maxX) - delta; \\\n" +"} \\\n" +"while (x >= maxX || x < minX); \\\n" +"\\\n" +"if (maxY - minY == 1) \\\n" +"y = minY; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (y < minY) \\\n" +"y = minY - (y - minY) - 1 + delta; \\\n" +"else \\\n" +"y = maxY - 1 - (y - maxY) - delta; \\\n" +"} \\\n" +"while (y >= maxY || y < minY); \\\n" +"}\n" +"#ifdef BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 0)\n" +"#elif defined(BORDER_REFLECT_101) || defined(BORDER_REFLECT101)\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 1)\n" +"#endif\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#else\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"int _row = y - ISOLATED_MIN(minY), _col = x - ISOLATED_MIN(minX); \\\n" +"_row = ADDR_H(_row, 0, maxY - ISOLATED_MIN(minY)); \\\n" +"_row = ADDR_B(_row, maxY - ISOLATED_MIN(minY), _row); \\\n" +"y = _row + ISOLATED_MIN(minY); \\\n" +"\\\n" +"_col = ADDR_L(_col, 0, maxX - ISOLATED_MIN(minX)); \\\n" +"_col = ADDR_R(_col, maxX - ISOLATED_MIN(minX), _col); \\\n" +"x = _col + ISOLATED_MIN(minX); \\\n" +"}\n" +"#endif\n" +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1) * cn\n" +"#define DSTSIZE (int)sizeof(dstT1) * cn\n" +"#endif\n" +"#define noconvert\n" +"struct RectCoords\n" +"{\n" +"int x1, y1, x2, y2;\n" +"};\n" +"#ifdef BORDER_ISOLATED\n" +"inline bool isBorder(const struct RectCoords bounds, int2 coord, int numPixels)\n" +"{\n" +"return (coord.x < bounds.x1 || coord.y < bounds.y1 || coord.x + numPixels > bounds.x2 || coord.y >= bounds.y2);\n" +"}\n" +"#else\n" +"inline bool isBorder(const struct RectCoords bounds, int2 coord, int numPixels)\n" +"{\n" +"return (coord.x < 0 || coord.y < 0 || coord.x + numPixels > bounds.x2 || coord.y >= bounds.y2);\n" +"}\n" +"#endif\n" +"inline WT getBorderPixel(const struct RectCoords 
bounds, int2 coord,\n" +"__global const uchar* srcptr, int srcstep)\n" +"{\n" +"#ifdef BORDER_CONSTANT\n" +"return (WT)(0);\n" +"#else\n" +"int selected_col = coord.x;\n" +"int selected_row = coord.y;\n" +"EXTRAPOLATE(selected_col, selected_row,\n" +"bounds.x1, bounds.y1,\n" +"bounds.x2, bounds.y2\n" +");\n" +"coord = (int2)(selected_col, selected_row);\n" +"__global const uchar* ptr = srcptr + mul24(coord.y, srcstep) +\n" +"coord.x * SRCSIZE;\n" +"return convertToWT(loadpix(ptr));\n" +"#endif\n" +"}\n" +"inline WT readSrcPixelSingle(int2 pos, __global const uchar* srcptr,\n" +"int srcstep, const struct RectCoords srcCoords)\n" +"{\n" +"if (!isBorder(srcCoords, pos, 1))\n" +"{\n" +"__global const uchar* ptr = srcptr + mul24(pos.y, srcstep) +\n" +"pos.x * SRCSIZE;\n" +"return convertToWT(loadpix(ptr));\n" +"}\n" +"else\n" +"{\n" +"return getBorderPixel(srcCoords, pos, srcptr, srcstep);\n" +"}\n" +"}\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define vload1(OFFSET, PTR) (*(PTR + OFFSET))\n" +"#define PX_LOAD_VEC_TYPE CAT(srcT1, PX_LOAD_VEC_SIZE)\n" +"#define PX_LOAD_FLOAT_VEC_TYPE CAT(WT1, PX_LOAD_VEC_SIZE)\n" +"#if PX_LOAD_VEC_SIZE == 1\n" +"#define PX_LOAD_FLOAT_VEC_CONV (float)\n" +"#elif PX_LOAD_VEC_SIZE == 2\n" +"#define PX_LOAD_FLOAT_VEC_CONV convert_float2\n" +"#elif PX_LOAD_VEC_SIZE == 3\n" +"#define PX_LOAD_FLOAT_VEC_CONV convert_float3\n" +"#elif PX_LOAD_VEC_SIZE == 4\n" +"#define PX_LOAD_FLOAT_VEC_CONV convert_float4\n" +"#endif\n" +"#define PX_LOAD CAT(vload, PX_LOAD_VEC_SIZE)\n" +"#define float1 float\n" +"inline PX_LOAD_FLOAT_VEC_TYPE readSrcPixelGroup(int2 pos, __global const uchar* srcptr,\n" +"int srcstep, const struct RectCoords srcCoords)\n" +"{\n" +"__global const srcT1* ptr = (__global const srcT1*)\n" +"(srcptr + mul24(pos.y, srcstep) +\n" +"pos.x * SRCSIZE);\n" +"return PX_LOAD_FLOAT_VEC_CONV(PX_LOAD(0, ptr));\n" +"}\n" +"#define LOOP1(VAR, STMT) (STMT); (VAR)++;\n" +"#define LOOP2(VAR, STMT) LOOP1(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP3(VAR, STMT) LOOP2(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP4(VAR, STMT) LOOP3(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP5(VAR, STMT) LOOP4(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP6(VAR, STMT) LOOP5(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP7(VAR, STMT) LOOP6(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP8(VAR, STMT) LOOP7(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP9(VAR, STMT) LOOP8(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP10(VAR, STMT) LOOP9(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP11(VAR, STMT) LOOP10(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP12(VAR, STMT) LOOP11(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP13(VAR, STMT) LOOP12(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP(N, VAR, STMT) CAT(LOOP, N)((VAR), (STMT))\n" +"#define DIG(a) a,\n" +"__constant WT1 kernelData[] = { COEFF };\n" +"__kernel void filter2DSmall(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols, float delta)\n" +"{\n" +"const struct RectCoords srcCoords = { srcOffsetX, srcOffsetY, srcEndX, srcEndY };\n" +"const int startX = get_global_id(0) * PX_PER_WI_X;\n" +"const int startY = get_global_id(1) * PX_PER_WI_Y;\n" +"if ((startX >= cols) || (startY >= rows))\n" +"{\n" +"return;\n" +"}\n" +"WT privateData[PX_PER_WI_Y + KERNEL_SIZE_Y - 1][PRIV_DATA_WIDTH];\n" +"int py = 0;\n" +"LOOP(PX_LOAD_Y_ITERATIONS, py,\n" +"{\n" +"int y = startY + py;\n" +"int px = 0;\n" 
+"LOOP(PX_LOAD_X_ITERATIONS, px,\n" +"{\n" +"int x = startX + (px * PX_LOAD_NUM_PX);\n" +"int2 srcPos = (int2)(srcCoords.x1 + x - ANCHOR_X, srcCoords.y1 + y - ANCHOR_Y);\n" +"if (!isBorder(srcCoords, srcPos, PX_LOAD_NUM_PX))\n" +"{\n" +"PX_LOAD_FLOAT_VEC_TYPE p = readSrcPixelGroup(srcPos, srcptr, src_step, srcCoords);\n" +"*((PX_LOAD_FLOAT_VEC_TYPE*)&privateData[py][px * PX_LOAD_NUM_PX]) = p;\n" +"}\n" +"else\n" +"{\n" +"int lx = 0;\n" +"LOOP(PX_LOAD_NUM_PX, lx,\n" +"{\n" +"WT p = readSrcPixelSingle(srcPos, srcptr, src_step, srcCoords);\n" +"*((WT*)&privateData[py][px * PX_LOAD_NUM_PX + lx]) = p;\n" +"srcPos.x++;\n" +"});\n" +"}\n" +"});\n" +"});\n" +"py = 0;\n" +"LOOP(PX_PER_WI_Y, py,\n" +"{\n" +"int y = startY + py;\n" +"int px = 0;\n" +"LOOP(PX_PER_WI_X, px,\n" +"{\n" +"int x = startX + px;\n" +"WT total_sum = 0;\n" +"int sy = 0;\n" +"int kernelIndex = 0;\n" +"LOOP(KERNEL_SIZE_Y, sy,\n" +"{\n" +"int sx = 0;\n" +"LOOP(KERNEL_SIZE_X, sx,\n" +"{\n" +"total_sum = mad(kernelData[kernelIndex++], privateData[py + sy][px + sx], total_sum);\n" +"});\n" +"});\n" +"__global dstT* dstPtr = (__global dstT*)(dstptr + y * dst_step + dst_offset + x * DSTSIZE);\n" +"storepix(convertToDstT(total_sum + (WT)(delta)), dstPtr);\n" +"});\n" +"});\n" +"}\n" +, "030d23b1d64d51e6485f8941af1e3fc3", NULL}; +struct cv::ocl::internal::ProgramEntry filterSepCol_oclsrc={moduleName, "filterSepCol", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define READ_TIMES_COL ((2*(RADIUSY+LSIZE1)-1)/LSIZE1)\n" +"#define RADIUS 1\n" +"#define noconvert\n" +"#if CN != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1)*3\n" +"#define DSTSIZE (int)sizeof(dstT1)*3\n" +"#endif\n" +"#define DIG(a) a,\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"__constant int mat_kernel[] = { COEFF };\n" +"#else\n" +"__constant srcT1 mat_kernel[] = { COEFF };\n" +"#endif\n" +"__kernel void col_filter(__global const uchar * src, int src_step, int src_offset, int src_whole_rows, int src_whole_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols, float delta)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"int l_x = get_local_id(0);\n" +"int l_y = get_local_id(1);\n" +"int start_addr = mad24(y, src_step, x * SRCSIZE);\n" +"int end_addr = mad24(src_whole_rows - 1, src_step, src_whole_cols * SRCSIZE);\n" +"srcT sum, temp[READ_TIMES_COL];\n" +"__local srcT LDS_DAT[LSIZE1 * READ_TIMES_COL][LSIZE0 + 1];\n" +"for (int i = 0; i < READ_TIMES_COL; ++i)\n" +"{\n" +"int current_addr = mad24(i, LSIZE1 * src_step, start_addr);\n" +"current_addr = current_addr < end_addr ? 
current_addr : 0;\n" +"temp[i] = loadpix(src + current_addr);\n" +"}\n" +"for (int i = 0; i < READ_TIMES_COL; ++i)\n" +"LDS_DAT[mad24(i, LSIZE1, l_y)][l_x] = temp[i];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"sum = LDS_DAT[l_y + RADIUSY][l_x] * mat_kernel[RADIUSY] + (srcT)delta;\n" +"for (int i = 1; i <= RADIUSY; ++i)\n" +"{\n" +"temp[0] = LDS_DAT[l_y + RADIUSY - i][l_x];\n" +"temp[1] = LDS_DAT[l_y + RADIUSY + i][l_x];\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"sum += mad24(temp[0],mat_kernel[RADIUSY - i], temp[1] * mat_kernel[RADIUSY + i]);\n" +"#else\n" +"sum += mad(temp[0], mat_kernel[RADIUSY - i], temp[1] * mat_kernel[RADIUSY + i]);\n" +"#endif\n" +"}\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"#if defined(SHIFT_BITS) && SHIFT_BITS > 0\n" +"dstT result = convertToDstT(convertToFloatT(sum) * (floatT)(1.0f / (1 << SHIFT_BITS)));\n" +"#else\n" +"dstT result = convertToDstT(sum);\n" +"#endif\n" +"start_addr = mad24(y, dst_step, mad24(DSTSIZE, x, dst_offset));\n" +"storepix(result, dst + start_addr);\n" +"}\n" +"}\n" +, "ab9607013c121ef09706d5eddedeebe9", NULL}; +struct cv::ocl::internal::ProgramEntry filterSepRow_oclsrc={moduleName, "filterSepRow", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define READ_TIMES_ROW ((2*(RADIUSX+LSIZE0)-1)/LSIZE0)\n" +"#define RADIUS 1\n" +"#ifdef BORDER_REPLICATE\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT_101\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_WRAP\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr))\n" +"#endif\n" +"#ifdef EXTRA_EXTRAPOLATION\n" +"#ifdef BORDER_CONSTANT\n" +"#define ELEM(i,l_edge,r_edge,elem1,elem2) (i)<(l_edge) | (i) >= (r_edge) ? 
(elem1) : (elem2)\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(t, minT, maxT) \\\n" +"{ \\\n" +"t = max(min(t, (maxT) - 1), (minT)); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, minT, maxT) \\\n" +"{ \\\n" +"if (t < (minT)) \\\n" +"t -= ((t - (maxT) + 1) / (maxT)) * (maxT); \\\n" +"if (t >= (maxT)) \\\n" +"t %= (maxT); \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE_(t, minT, maxT, delta) \\\n" +"{ \\\n" +"if ((maxT) - (minT) == 1) \\\n" +"t = (minT); \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (t < (minT)) \\\n" +"t = (minT) - (t - (minT)) - 1 + delta; \\\n" +"else \\\n" +"t = (maxT) - 1 - (t - (maxT)) - delta; \\\n" +"} \\\n" +"while (t >= (maxT) || t < (minT)); \\\n" +"\\\n" +"}\n" +"#ifdef BORDER_REFLECT\n" +"#define EXTRAPOLATE(t, minT, maxT) EXTRAPOLATE_(t, minT, maxT, 0)\n" +"#elif defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE(t, minT, maxT) EXTRAPOLATE_(t, minT, maxT, 1)\n" +"#endif\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#else\n" +"#ifdef BORDER_CONSTANT\n" +"#define ELEM(i,l_edge,r_edge,elem1,elem2) (i)<(l_edge) | (i) >= (r_edge) ? (elem1) : (elem2)\n" +"#else\n" +"#define EXTRAPOLATE(t, minT, maxT) \\\n" +"{ \\\n" +"int _delta = t - (minT); \\\n" +"_delta = ADDR_L(_delta, 0, (maxT) - (minT)); \\\n" +"_delta = ADDR_R(_delta, (maxT) - (minT), _delta); \\\n" +"t = _delta + (minT); \\\n" +"}\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"#if CN != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1)*3\n" +"#define DSTSIZE (int)sizeof(dstT1)*3\n" +"#endif\n" +"#define DIG(a) a,\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"__constant int mat_kernel[] = { COEFF };\n" +"#else\n" +"__constant dstT1 mat_kernel[] = { COEFF };\n" +"#endif\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"#define dstT4 int4\n" +"#define convertDstVec convert_int4\n" +"#else\n" +"#define dstT4 float4\n" +"#define convertDstVec convert_float4\n" +"#endif\n" +"__kernel void row_filter_C1_D0(__global const uchar * src, int src_step_in_pixel, int src_offset_x, int src_offset_y,\n" +"int src_cols, int src_rows, int src_whole_cols, int src_whole_rows,\n" +"__global float * dst, int dst_step_in_pixel, int dst_cols, int dst_rows,\n" +"int radiusy)\n" +"{\n" +"int x = get_global_id(0)<<2;\n" +"int y = get_global_id(1);\n" +"int l_x = get_local_id(0);\n" +"int l_y = get_local_id(1);\n" +"int start_x = x + src_offset_x - RADIUSX & 0xfffffffc;\n" +"int offset = src_offset_x - RADIUSX & 3;\n" +"int start_y = y + src_offset_y - radiusy;\n" +"int start_addr = mad24(start_y, src_step_in_pixel, start_x);\n" +"dstT4 sum;\n" +"uchar4 temp[READ_TIMES_ROW];\n" +"__local uchar4 LDS_DAT[LSIZE1][READ_TIMES_ROW * LSIZE0 + 1];\n" +"#ifdef BORDER_CONSTANT\n" +"int end_addr = mad24(src_whole_rows - 1, src_step_in_pixel, src_whole_cols);\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"int current_addr = mad24(i, LSIZE0 << 2, start_addr);\n" +"current_addr = current_addr < end_addr && current_addr > 0 ? 
current_addr : 0;\n" +"temp[i] = *(__global const uchar4 *)&src[current_addr];\n" +"}\n" +"#ifdef BORDER_ISOLATED\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"temp[i].x = ELEM(start_x+i*LSIZE0*4, src_offset_x, src_offset_x + src_cols, 0, temp[i].x);\n" +"temp[i].y = ELEM(start_x+i*LSIZE0*4+1, src_offset_x, src_offset_x + src_cols, 0, temp[i].y);\n" +"temp[i].z = ELEM(start_x+i*LSIZE0*4+2, src_offset_x, src_offset_x + src_cols, 0, temp[i].z);\n" +"temp[i].w = ELEM(start_x+i*LSIZE0*4+3, src_offset_x, src_offset_x + src_cols, 0, temp[i].w);\n" +"temp[i] = ELEM(start_y, src_offset_y, src_offset_y + src_rows, (uchar4)0, temp[i]);\n" +"}\n" +"#else\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"temp[i].x = ELEM(start_x+i*LSIZE0*4, 0, src_whole_cols, 0, temp[i].x);\n" +"temp[i].y = ELEM(start_x+i*LSIZE0*4+1, 0, src_whole_cols, 0, temp[i].y);\n" +"temp[i].z = ELEM(start_x+i*LSIZE0*4+2, 0, src_whole_cols, 0, temp[i].z);\n" +"temp[i].w = ELEM(start_x+i*LSIZE0*4+3, 0, src_whole_cols, 0, temp[i].w);\n" +"temp[i] = ELEM(start_y, 0, src_whole_rows, (uchar4)0, temp[i]);\n" +"}\n" +"#endif\n" +"#else\n" +"#ifdef BORDER_ISOLATED\n" +"int not_all_in_range = (start_x<src_offset_x) | (start_x + READ_TIMES_ROW*LSIZE0*4+4>src_offset_x + src_cols)| (start_y<src_offset_y) | (start_y >= src_offset_y + src_rows);\n" +"#else\n" +"int not_all_in_range = (start_x<0) | (start_x + READ_TIMES_ROW*LSIZE0*4+4>src_whole_cols)| (start_y<0) | (start_y >= src_whole_rows);\n" +"#endif\n" +"int4 index[READ_TIMES_ROW], addr;\n" +"int s_y;\n" +"if (not_all_in_range)\n" +"{\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"index[i] = (int4)(mad24(i, LSIZE0 << 2, start_x)) + (int4)(0, 1, 2, 3);\n" +"#ifdef BORDER_ISOLATED\n" +"EXTRAPOLATE(index[i].x, src_offset_x, src_offset_x + src_cols);\n" +"EXTRAPOLATE(index[i].y, src_offset_x, src_offset_x + src_cols);\n" +"EXTRAPOLATE(index[i].z, src_offset_x, src_offset_x + src_cols);\n" +"EXTRAPOLATE(index[i].w, src_offset_x, src_offset_x + src_cols);\n" +"#else\n" +"EXTRAPOLATE(index[i].x, 0, src_whole_cols);\n" +"EXTRAPOLATE(index[i].y, 0, src_whole_cols);\n" +"EXTRAPOLATE(index[i].z, 0, src_whole_cols);\n" +"EXTRAPOLATE(index[i].w, 0, src_whole_cols);\n" +"#endif\n" +"}\n" +"s_y = start_y;\n" +"#ifdef BORDER_ISOLATED\n" +"EXTRAPOLATE(s_y, src_offset_y, src_offset_y + src_rows);\n" +"#else\n" +"EXTRAPOLATE(s_y, 0, src_whole_rows);\n" +"#endif\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"addr = mad24((int4)s_y, (int4)src_step_in_pixel, index[i]);\n" +"temp[i].x = src[addr.x];\n" +"temp[i].y = src[addr.y];\n" +"temp[i].z = src[addr.z];\n" +"temp[i].w = src[addr.w];\n" +"}\n" +"}\n" +"else\n" +"{\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"temp[i] = *(__global uchar4*)&src[mad24(i, LSIZE0 << 2, start_addr)];\n" +"}\n" +"#endif\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"LDS_DAT[l_y][mad24(i, LSIZE0, l_x)] = temp[i];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"sum = convertDstVec(vload4(0,(__local uchar *)&LDS_DAT[l_y][l_x]+RADIUSX+offset)) * mat_kernel[RADIUSX];\n" +"for (int i = 1; i <= RADIUSX; ++i)\n" +"{\n" +"temp[0] = vload4(0, (__local uchar*)&LDS_DAT[l_y][l_x] + RADIUSX + offset - i);\n" +"temp[1] = vload4(0, (__local uchar*)&LDS_DAT[l_y][l_x] + RADIUSX + offset + i);\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"sum += mad24(convertDstVec(temp[0]), mat_kernel[RADIUSX-i], convertDstVec(temp[1]) * mat_kernel[RADIUSX + i]);\n" +"#else\n" +"sum += mad(convertDstVec(temp[0]), mat_kernel[RADIUSX-i], convertDstVec(temp[1]) * mat_kernel[RADIUSX + i]);\n" +"#endif\n" +"}\n" +"start_addr = mad24(y, dst_step_in_pixel, x);\n"
+"if ((x+3<dst_cols) && (y<dst_rows))\n" +"{\n" +"*(__global dstT4*)&dst[start_addr] = sum;\n" +"}\n" +"else if (y < dst_rows)\n" +"{\n" +"if ((x+2) < dst_cols)\n" +"{\n" +"dst[start_addr] = sum.x;\n" +"dst[start_addr+1] = sum.y;\n" +"dst[start_addr+2] = sum.z;\n" +"}\n" +"else if ((x+1) < dst_cols)\n" +"{\n" +"dst[start_addr] = sum.x;\n" +"dst[start_addr+1] = sum.y;\n" +"}\n" +"else if (x < dst_cols)\n" +"{\n" +"dst[start_addr] = sum.x;\n" +"}\n" +"}\n" +"}\n" +"__kernel void row_filter(__global const uchar * src, int src_step, int src_offset_x, int src_offset_y,\n" +"int src_cols, int src_rows, int src_whole_cols, int src_whole_rows,\n" +"__global uchar * dst, int dst_step, int dst_cols, int dst_rows,\n" +"int radiusy)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"int l_x = get_local_id(0);\n" +"int l_y = get_local_id(1);\n" +"int start_x = x + src_offset_x - RADIUSX;\n" +"int start_y = y + src_offset_y - radiusy;\n" +"int start_addr = mad24(start_y, src_step, start_x * SRCSIZE);\n" +"dstT sum;\n" +"srcT temp[READ_TIMES_ROW];\n" +"__local srcT LDS_DAT[LSIZE1][READ_TIMES_ROW * LSIZE0 + 1];\n" +"#ifdef BORDER_CONSTANT\n" +"int end_addr = mad24(src_whole_rows - 1, src_step, src_whole_cols * SRCSIZE);\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"int current_addr = mad24(i, LSIZE0 * SRCSIZE, start_addr);\n" +"current_addr = current_addr < end_addr && current_addr >= 0 ? current_addr : 0;\n" +"temp[i] = loadpix(src + current_addr);\n" +"}\n" +"#ifdef BORDER_ISOLATED\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"temp[i] = ELEM(mad24(i, LSIZE0, start_x), src_offset_x, src_offset_x + src_cols, (srcT)(0), temp[i]);\n" +"temp[i] = ELEM(start_y, src_offset_y, src_offset_y + src_rows, (srcT)(0), temp[i]);\n" +"}\n" +"#else\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"temp[i] = ELEM(mad24(i, LSIZE0, start_x), 0, src_whole_cols, (srcT)(0), temp[i]);\n" +"temp[i] = ELEM(start_y, 0, src_whole_rows, (srcT)(0), temp[i]);\n" +"}\n" +"#endif\n" +"#else\n" +"int index[READ_TIMES_ROW], s_x, s_y;\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"{\n" +"s_x = mad24(i, LSIZE0, start_x);\n" +"s_y = start_y;\n" +"#ifdef BORDER_ISOLATED\n" +"EXTRAPOLATE(s_x, src_offset_x, src_offset_x + src_cols);\n" +"EXTRAPOLATE(s_y, src_offset_y, src_offset_y + src_rows);\n" +"#else\n" +"EXTRAPOLATE(s_x, 0, src_whole_cols);\n" +"EXTRAPOLATE(s_y, 0, src_whole_rows);\n" +"#endif\n" +"index[i] = mad24(s_y, src_step, s_x * SRCSIZE);\n" +"}\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"temp[i] = loadpix(src + index[i]);\n" +"#endif\n" +"for (int i = 0; i < READ_TIMES_ROW; ++i)\n" +"LDS_DAT[l_y][mad24(i, LSIZE0, l_x)] = temp[i];\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"sum = convertToDstT(LDS_DAT[l_y][l_x + RADIUSX]) * mat_kernel[RADIUSX];\n" +"for (int i = 1; i <= RADIUSX; ++i)\n" +"{\n" +"temp[0] = LDS_DAT[l_y][l_x + RADIUSX - i];\n" +"temp[1] = LDS_DAT[l_y][l_x + RADIUSX + i];\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"sum += mad24(convertToDstT(temp[0]), mat_kernel[RADIUSX - i], convertToDstT(temp[1]) * mat_kernel[RADIUSX + i]);\n" +"#else\n" +"sum += mad(convertToDstT(temp[0]), mat_kernel[RADIUSX - i], convertToDstT(temp[1]) * mat_kernel[RADIUSX + i]);\n" +"#endif\n" +"}\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"start_addr = mad24(y, dst_step, x * DSTSIZE);\n" +"storepix(sum, dst + start_addr);\n" +"}\n" +"}\n" +, "048b140c890d74acfb2bc8b5d43a4cbf", NULL}; +struct cv::ocl::internal::ProgramEntry filterSep_singlePass_oclsrc={moduleName, "filterSep_singlePass", +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, maxV)\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), 0, (maxV)-1); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = ( (x) + (maxV) ) % (maxV); \\\n" +"}\n" +"#elif defined BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min(((maxV)-1)*2-(x)+1, max((x),-(x)-1) ); \\\n" +"}\n" +"#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min(((maxV)-1)*2-(x), max((x),-(x)) ); \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#if CN != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1)*3\n" +"#define DSTSIZE (int)sizeof(dstT1)*3\n" +"#endif\n" +"#define SRC(_x,_y) convertToWT(loadpix(Src + mad24(_y, src_step, SRCSIZE * _x)))\n" +"#ifdef BORDER_CONSTANT\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) (_x)<0 | (_x) >= (r_edge) | (_y)<0 | (_y) >= (t_edge) ? 
(const_v) : SRC((_x),(_y))\n" +"#else\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) SRC((_x),(_y))\n" +"#endif\n" +"#define noconvert\n" +"#define DIG(a) a,\n" +"__constant WT1 mat_kernelX[] = { KERNEL_MATRIX_X };\n" +"__constant WT1 mat_kernelY[] = { KERNEL_MATRIX_Y };\n" +"__kernel void sep_filter(__global uchar* Src, int src_step, int srcOffsetX, int srcOffsetY, int height, int width,\n" +"__global uchar* Dst, int dst_step, int dst_offset, int dst_rows, int dst_cols, float delta)\n" +"{\n" +"__local WT lsmem[BLK_Y + 2 * RADIUSY][BLK_X + 2 * RADIUSX];\n" +"__local WT lsmemDy[BLK_Y][BLK_X + 2 * RADIUSX];\n" +"int lix = get_local_id(0);\n" +"int liy = get_local_id(1);\n" +"int x = get_global_id(0);\n" +"int srcX = x + srcOffsetX - RADIUSX;\n" +"int clocY = liy;\n" +"do\n" +"{\n" +"int yb = clocY + srcOffsetY - RADIUSY;\n" +"EXTRAPOLATE(yb, (height));\n" +"int clocX = lix;\n" +"int cSrcX = srcX;\n" +"do\n" +"{\n" +"int xb = cSrcX;\n" +"EXTRAPOLATE(xb,(width));\n" +"lsmem[clocY][clocX] = ELEM(xb, yb, (width), (height), 0 );\n" +"clocX += BLK_X;\n" +"cSrcX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUSX*2));\n" +"clocY += BLK_Y;\n" +"}\n" +"while (clocY < BLK_Y+(RADIUSY*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int y = 0; y < dst_rows; y+=BLK_Y)\n" +"{\n" +"int i, clocX = lix;\n" +"WT sum = (WT) 0;\n" +"do\n" +"{\n" +"sum = (WT) 0;\n" +"for (i=0; i<=2*RADIUSY; i++)\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"sum = mad24(lsmem[liy + i][clocX], mat_kernelY[i], sum);\n" +"#else\n" +"sum = mad(lsmem[liy + i][clocX], mat_kernelY[i], sum);\n" +"#endif\n" +"lsmemDy[liy][clocX] = sum;\n" +"clocX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUSX*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if ((x < dst_cols) && (y + liy < dst_rows))\n" +"{\n" +"sum = (WT)(delta);\n" +"for (i=0; i<=2*RADIUSX; i++)\n" +"#if defined(INTEGER_ARITHMETIC)\n" +"sum = mad24(lsmemDy[liy][lix+i], mat_kernelX[i], sum);\n" +"#else\n" +"sum = mad(lsmemDy[liy][lix+i], mat_kernelX[i], sum);\n" +"#endif\n" +"#if defined(SHIFT_BITS) && SHIFT_BITS > 0\n" +"#if !defined(INTEGER_ARITHMETIC)\n" +"sum = sum * (1.0f / (1 << SHIFT_BITS));\n" +"#else\n" +"sum = (sum + (1 << (SHIFT_BITS-1))) >> SHIFT_BITS;\n" +"#endif\n" +"#endif\n" +"storepix(convertToDstT(sum), Dst + mad24(y + liy, dst_step, mad24(x, DSTSIZE, dst_offset)));\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int i = liy * BLK_X + lix; i < (RADIUSY*2) * (BLK_X+(RADIUSX*2)); i += BLK_X * BLK_Y)\n" +"{\n" +"int clocX = i % (BLK_X+(RADIUSX*2));\n" +"int clocY = i / (BLK_X+(RADIUSX*2));\n" +"lsmem[clocY][clocX] = lsmem[clocY + BLK_Y][clocX];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int yb = y + liy + BLK_Y + srcOffsetY + RADIUSY;\n" +"EXTRAPOLATE(yb, (height));\n" +"clocX = lix;\n" +"int cSrcX = x + srcOffsetX - RADIUSX;\n" +"do\n" +"{\n" +"int xb = cSrcX;\n" +"EXTRAPOLATE(xb,(width));\n" +"lsmem[liy + 2*RADIUSY][clocX] = ELEM(xb, yb, (width), (height), 0 );\n" +"clocX += BLK_X;\n" +"cSrcX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUSX*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"}\n" +, "478db57624f9e361da43e649f0a44c8f", NULL}; +struct cv::ocl::internal::ProgramEntry filterSmall_oclsrc={moduleName, "filterSmall", +"#ifdef BORDER_REPLICATE\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (l_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (r_edge)-1 : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (t_edge) :(i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? 
(b_edge)-1 :(addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i)-1 : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-1+((r_edge)<<1) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i)-1 : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-1+((b_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_REFLECT_101\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? -(i) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? -(i)-2+((r_edge)<<1) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? -(i) : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? -(i)-2+((b_edge)<<1) : (addr))\n" +"#endif\n" +"#ifdef BORDER_WRAP\n" +"#define ADDR_L(i, l_edge, r_edge) ((i) < (l_edge) ? (i)+(r_edge) : (i))\n" +"#define ADDR_R(i, r_edge, addr) ((i) >= (r_edge) ? (i)-(r_edge) : (addr))\n" +"#define ADDR_H(i, t_edge, b_edge) ((i) < (t_edge) ? (i)+(b_edge) : (i))\n" +"#define ADDR_B(i, b_edge, addr) ((i) >= (b_edge) ? (i)-(b_edge) : (addr))\n" +"#endif\n" +"#ifdef BORDER_ISOLATED\n" +"#define ISOLATED_MIN(VAL) (VAL)\n" +"#else\n" +"#define ISOLATED_MIN(VAL) 0\n" +"#endif\n" +"#ifdef EXTRA_EXTRAPOLATION\n" +"#ifdef BORDER_CONSTANT\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"x = max(min(x, maxX - 1), minX); \\\n" +"y = max(min(y, maxY - 1), minY); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x -= ((x - maxX + 1) / maxX) * maxX; \\\n" +"if (x >= maxX) \\\n" +"x %= maxX; \\\n" +"if (y < minY) \\\n" +"y -= ((y - maxY + 1) / maxY) * maxY; \\\n" +"if (y >= maxY) \\\n" +"y %= maxY; \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#define EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, delta) \\\n" +"{ \\\n" +"if (maxX - minX == 1) \\\n" +"x = minX; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (x < minX) \\\n" +"x = minX - (x - minX) - 1 + delta; \\\n" +"else \\\n" +"x = maxX - 1 - (x - maxX) - delta; \\\n" +"} \\\n" +"while (x >= maxX || x < minX); \\\n" +"\\\n" +"if (maxY - minY == 1) \\\n" +"y = minY; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if (y < minY) \\\n" +"y = minY - (y - minY) - 1 + delta; \\\n" +"else \\\n" +"y = maxY - 1 - (y - maxY) - delta; \\\n" +"} \\\n" +"while (y >= maxY || y < minY); \\\n" +"}\n" +"#ifdef BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 0)\n" +"#elif defined(BORDER_REFLECT_101) || defined(BORDER_REFLECT101)\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) EXTRAPOLATE_(x, y, minX, minY, maxX, maxY, 1)\n" +"#endif\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#else\n" +"#define EXTRAPOLATE(x, y, minX, minY, maxX, maxY) \\\n" +"{ \\\n" +"int _row = y - ISOLATED_MIN(minY), _col = x - ISOLATED_MIN(minX); \\\n" +"_row = ADDR_H(_row, 0, maxY - ISOLATED_MIN(minY)); \\\n" +"_row = ADDR_B(_row, maxY - ISOLATED_MIN(minY), _row); \\\n" +"y = _row + ISOLATED_MIN(minY); \\\n" +"\\\n" +"_col = ADDR_L(_col, 0, maxX - ISOLATED_MIN(minX)); \\\n" +"_col = ADDR_R(_col, maxX - ISOLATED_MIN(minX), _col); \\\n" +"x = _col + ISOLATED_MIN(minX); \\\n" +"}\n" +"#endif\n" +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define 
loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1) * cn\n" +"#define DSTSIZE (int)sizeof(dstT1) * cn\n" +"#endif\n" +"#define noconvert\n" +"struct RectCoords\n" +"{\n" +"int x1, y1, x2, y2;\n" +"};\n" +"#ifdef BORDER_ISOLATED\n" +"inline bool isBorder(const struct RectCoords bounds, int2 coord, int numPixels)\n" +"{\n" +"return coord.x < bounds.x1 || coord.y < bounds.y1 || coord.x + numPixels > bounds.x2 || coord.y >= bounds.y2;\n" +"}\n" +"#else\n" +"inline bool isBorder(const struct RectCoords bounds, int2 coord, int numPixels)\n" +"{\n" +"return coord.x < 0 || coord.y < 0 || coord.x + numPixels > bounds.x2 || coord.y >= bounds.y2;\n" +"}\n" +"#endif\n" +"#define float1 float\n" +"#define double1 double\n" +"#define uchar1 uchar\n" +"#define int1 int\n" +"#define uint1 unit\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define vload1(OFFSET, PTR) (*(PTR + OFFSET))\n" +"#define PX_LOAD_VEC_TYPE CAT(srcT1, PX_LOAD_VEC_SIZE)\n" +"#define PX_LOAD_FLOAT_VEC_TYPE CAT(WT1, PX_LOAD_VEC_SIZE)\n" +"#define PX_LOAD CAT(vload, PX_LOAD_VEC_SIZE)\n" +"inline PX_LOAD_FLOAT_VEC_TYPE readSrcPixelGroup(int2 pos, __global const uchar * srcptr,\n" +"int srcstep, const struct RectCoords srcCoords)\n" +"{\n" +"__global const srcT1 * ptr = (__global const srcT1 *)\n" +"(srcptr + mad24(pos.y, srcstep, pos.x * SRCSIZE));\n" +"return PX_LOAD_FLOAT_VEC_CONV(PX_LOAD(0, ptr));\n" +"}\n" +"#define LOOP1(VAR, STMT) (STMT); (VAR)++;\n" +"#define LOOP2(VAR, STMT) LOOP1(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP3(VAR, STMT) LOOP2(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP4(VAR, STMT) LOOP3(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP5(VAR, STMT) LOOP4(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP6(VAR, STMT) LOOP5(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP7(VAR, STMT) LOOP6(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP8(VAR, STMT) LOOP7(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP9(VAR, STMT) LOOP8(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP10(VAR, STMT) LOOP9(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP11(VAR, STMT) LOOP10(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP12(VAR, STMT) LOOP11(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP13(VAR, STMT) LOOP12(VAR, STMT); (STMT); (VAR)++;\n" +"#define LOOP(N, VAR, STMT) CAT(LOOP, N)((VAR), (STMT))\n" +"#ifdef OP_BOX_FILTER\n" +"#define PROCESS_ELEM \\\n" +"WT total_sum = (WT)(0); \\\n" +"int sy = 0; \\\n" +"LOOP(KERNEL_SIZE_Y, sy, \\\n" +"{ \\\n" +"int sx = 0; \\\n" +"LOOP(KERNEL_SIZE_X, sx, \\\n" +"{ \\\n" +"total_sum += privateData[py + sy][px + sx]; \\\n" +"}); \\\n" +"})\n" +"#elif defined OP_FILTER2D\n" +"#define DIG(a) a,\n" +"__constant WT1 kernelData[] = { COEFF };\n" +"#define PROCESS_ELEM \\\n" +"WT total_sum = 0; \\\n" +"int sy = 0; \\\n" +"int kernelIndex = 0; \\\n" +"LOOP(KERNEL_SIZE_Y, sy, \\\n" +"{ \\\n" +"int sx = 0; \\\n" +"LOOP(KERNEL_SIZE_X, sx, \\\n" +"{ \\\n" +"total_sum = fma(kernelData[kernelIndex++], privateData[py + sy][px + sx], total_sum); \\\n" +"}); \\\n" +"})\n" +"#elif defined OP_ERODE || defined OP_DILATE\n" +"#ifdef DEPTH_0\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL UCHAR_MAX\n" +"#elif defined DEPTH_1\n" +"#define MIN_VAL SCHAR_MIN\n" +"#define MAX_VAL SCHAR_MAX\n" 
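+"// DEPTH_0..DEPTH_6 mirror CV_8U..CV_64F; MIN_VAL/MAX_VAL give each type's range and act as the erode/dilate identity values selected below\n"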
+"#elif defined DEPTH_2\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL USHRT_MAX\n" +"#elif defined DEPTH_3\n" +"#define MIN_VAL SHRT_MIN\n" +"#define MAX_VAL SHRT_MAX\n" +"#elif defined DEPTH_4\n" +"#define MIN_VAL INT_MIN\n" +"#define MAX_VAL INT_MAX\n" +"#elif defined DEPTH_5\n" +"#define MIN_VAL (-FLT_MAX)\n" +"#define MAX_VAL FLT_MAX\n" +"#elif defined DEPTH_6\n" +"#define MIN_VAL (-DBL_MAX)\n" +"#define MAX_VAL DBL_MAX\n" +"#endif\n" +"#ifdef OP_ERODE\n" +"#define VAL (WT)MAX_VAL\n" +"#elif defined OP_DILATE\n" +"#define VAL (WT)MIN_VAL\n" +"#else\n" +"#error \"Unknown operation\"\n" +"#endif\n" +"#define convert_float1 convert_float\n" +"#define convert_uchar1 convert_uchar\n" +"#define convert_int1 convert_int\n" +"#define convert_uint1 convert_uint\n" +"#ifdef OP_ERODE\n" +"#if defined INTEL_DEVICE && defined DEPTH_0\n" +"#define WA_CONVERT_1 CAT(convert_uint, cn)\n" +"#define WA_CONVERT_2 CAT(convert_, srcT)\n" +"#define MORPH_OP(A, B) ((A) < (B) ? (A) : (B))\n" +"#else\n" +"#define MORPH_OP(A, B) min((A), (B))\n" +"#endif\n" +"#endif\n" +"#ifdef OP_DILATE\n" +"#define MORPH_OP(A, B) max((A), (B))\n" +"#endif\n" +"#define PROCESS(_y, _x) \\\n" +"total_sum = convertToWT(MORPH_OP(convertToWT(total_sum), convertToWT(privateData[py + _y][px + _x])));\n" +"#define PROCESS_ELEM \\\n" +"WT total_sum = convertToWT(VAL); \\\n" +"PROCESS_ELEM_\n" +"#else\n" +"#error \"No processing is specified\"\n" +"#endif\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"#define EXTRA_PARAMS , __global const uchar * matptr, int mat_step, int mat_offset\n" +"#else\n" +"#define EXTRA_PARAMS\n" +"#endif\n" +"inline WT getBorderPixel(const struct RectCoords bounds, int2 coord,\n" +"__global const uchar * srcptr, int srcstep)\n" +"{\n" +"#ifdef BORDER_CONSTANT\n" +"#ifdef OP_ERODE\n" +"return (WT)(MAX_VAL);\n" +"#elif defined OP_DILATE\n" +"return (WT)(MIN_VAL);\n" +"#else\n" +"return (WT)(0);\n" +"#endif\n" +"#else\n" +"int selected_col = coord.x;\n" +"int selected_row = coord.y;\n" +"EXTRAPOLATE(selected_col, selected_row,\n" +"bounds.x1, bounds.y1,\n" +"bounds.x2, bounds.y2);\n" +"__global const uchar* ptr = srcptr + mad24(selected_row, srcstep, selected_col * SRCSIZE);\n" +"return convertToWT(loadpix(ptr));\n" +"#endif\n" +"}\n" +"inline WT readSrcPixelSingle(int2 pos, __global const uchar * srcptr,\n" +"int srcstep, const struct RectCoords srcCoords)\n" +"{\n" +"if (!isBorder(srcCoords, pos, 1))\n" +"{\n" +"__global const uchar * ptr = srcptr + mad24(pos.y, srcstep, pos.x * SRCSIZE);\n" +"return convertToWT(loadpix(ptr));\n" +"}\n" +"else\n" +"return getBorderPixel(srcCoords, pos, srcptr, srcstep);\n" +"}\n" +"__kernel void filterSmall(__global const uchar * srcptr, int src_step, int srcOffsetX, int srcOffsetY, int srcEndX, int srcEndY,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols\n" +"#ifdef NORMALIZE\n" +", float alpha\n" +"#endif\n" +"EXTRA_PARAMS )\n" +"{\n" +"const struct RectCoords srcCoords = { srcOffsetX, srcOffsetY, srcEndX, srcEndY };\n" +"const int startX = get_global_id(0) * PX_PER_WI_X;\n" +"const int startY = get_global_id(1) * PX_PER_WI_Y;\n" +"if (startX >= cols || startY >= rows)\n" +"return;\n" +"WT privateData[PX_PER_WI_Y + KERNEL_SIZE_Y - 1][PRIV_DATA_WIDTH];\n" +"int py = 0;\n" +"LOOP(PX_LOAD_Y_ITERATIONS, py,\n" +"{\n" +"int y = startY + py;\n" +"int px = 0;\n" +"LOOP(PX_LOAD_X_ITERATIONS, px,\n" +"{\n" +"int x = startX + (px * PX_LOAD_NUM_PX);\n" +"int2 srcPos = (int2)(srcCoords.x1 + x - ANCHOR_X, srcCoords.y1 + y - 
ANCHOR_Y);\n" +"if (!isBorder(srcCoords, srcPos, PX_LOAD_NUM_PX))\n" +"{\n" +"PX_LOAD_FLOAT_VEC_TYPE p = readSrcPixelGroup(srcPos, srcptr, src_step, srcCoords);\n" +"#ifdef SQR\n" +"*((PX_LOAD_FLOAT_VEC_TYPE *)&privateData[py][px * PX_LOAD_NUM_PX]) = p * p;\n" +"#else\n" +"*((PX_LOAD_FLOAT_VEC_TYPE *)&privateData[py][px * PX_LOAD_NUM_PX]) = p;\n" +"#endif\n" +"}\n" +"else\n" +"{\n" +"int lx = 0;\n" +"LOOP(PX_LOAD_NUM_PX, lx,\n" +"{\n" +"WT p = readSrcPixelSingle(srcPos, srcptr, src_step, srcCoords);\n" +"#ifdef SQR\n" +"*((WT*)&privateData[py][px * PX_LOAD_NUM_PX + lx]) = p * p;\n" +"#else\n" +"*((WT*)&privateData[py][px * PX_LOAD_NUM_PX + lx]) = p;\n" +"#endif\n" +"srcPos.x++;\n" +"});\n" +"}\n" +"});\n" +"});\n" +"py = 0;\n" +"LOOP(PX_PER_WI_Y, py,\n" +"{\n" +"int y = startY + py;\n" +"int px = 0;\n" +"LOOP(PX_PER_WI_X, px,\n" +"{\n" +"int x = startX + px;\n" +"PROCESS_ELEM;\n" +"int dst_index = mad24(y, dst_step, mad24(x, DSTSIZE, dst_offset));\n" +"__global dstT * dstPtr = (__global dstT *)(dstptr + dst_index);\n" +"#ifdef NORMALIZE\n" +"total_sum *= (WT)(alpha);\n" +"#endif\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"int mat_index = mad24(y, mat_step, mad24(x, SRCSIZE, mat_offset));\n" +"WT value = convertToWT(loadpix(matptr + mat_index));\n" +"#ifdef OP_GRADIENT\n" +"storepix(convertToDstT(convertToWT(total_sum) - convertToWT(value)), dstPtr );\n" +"#elif defined OP_TOPHAT\n" +"storepix(convertToDstT(convertToWT(value) - convertToWT(total_sum)), dstPtr );\n" +"#elif defined OP_BLACKHAT\n" +"storepix(convertToDstT(convertToWT(total_sum) - convertToWT(value)), dstPtr );\n" +"#endif\n" +"#else\n" +"storepix(convertToDstT(total_sum), dstPtr);\n" +"#endif\n" +"});\n" +"});\n" +"}\n" +, "791281c9ee1a2ff7234c85b09b6af6bf", NULL}; +struct cv::ocl::internal::ProgramEntry gaussianBlur3x3_oclsrc={moduleName, "gaussianBlur3x3", +"#define DIG(a) a,\n" +"__constant float kx[] = { KERNEL_MATRIX_X };\n" +"__constant float ky[] = { KERNEL_MATRIX_Y };\n" +"#define OP(delta, y, x) (convert_float16(arr[(y + delta) * 3 + x]) * ky[y] * kx[x])\n" +"__kernel void gaussianBlur3x3_8UC1_cols16_rows2(__global const uint* src, int src_step,\n" +"__global uint* dst, int dst_step, int rows, int cols)\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"int ssx, dsx;\n" +"if ((block_x * 16) >= cols || y >= rows) return;\n" +"uint4 line[4];\n" +"uint4 line_out[2];\n" +"uchar a; uchar16 b; uchar c;\n" +"uchar d; uchar16 e; uchar f;\n" +"uchar g; uchar16 h; uchar i;\n" +"uchar j; uchar16 k; uchar l;\n" +"ssx = dsx = 1;\n" +"int src_index = block_x * 4 * ssx + (y - 1) * (src_step / 4);\n" +"line[1] = vload4(0, src + src_index + (src_step / 4));\n" +"line[2] = vload4(0, src + src_index + 2 * (src_step / 4));\n" +"#ifdef BORDER_CONSTANT\n" +"line[0] = (y == 0) ? (uint4)0 : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? (uint4)0 : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined BORDER_REFLECT_101\n" +"line[0] = (y == 0) ? line[2] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[1] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"line[0] = (y == 0) ? line[1] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? 
line[2] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#endif\n" +"__global uchar *src_p = (__global uchar *)src;\n" +"src_index = block_x * 16 * ssx + (y - 1) * src_step;\n" +"bool line_end = ((block_x + 1) * 16 == cols);\n" +"b = as_uchar16(line[0]);\n" +"e = as_uchar16(line[1]);\n" +"h = as_uchar16(line[2]);\n" +"k = as_uchar16(line[3]);\n" +"#ifdef BORDER_CONSTANT\n" +"a = (block_x == 0 || y == 0) ? 0 : src_p[src_index - 1];\n" +"c = (line_end || y == 0) ? 0 : src_p[src_index + 16];\n" +"d = (block_x == 0) ? 0 : src_p[src_index + src_step - 1];\n" +"f = line_end ? 0 : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? 0 : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? 0 : src_p[src_index + 2 * src_step + 16];\n" +"j = (block_x == 0 || y == (rows - 2)) ? 0 : src_p[src_index + 3 * src_step - 1];\n" +"l = (line_end || y == (rows - 2))? 0 : src_p[src_index + 3 * src_step + 16];\n" +"#elif defined BORDER_REFLECT_101\n" +"int offset;\n" +"offset = (y == 0) ? (2 * src_step) : 0;\n" +"a = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step + 1] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 14] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step + 1] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 14] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (1 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"l = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"int offset;\n" +"offset = (y == 0) ? (1 * src_step) : 0;\n" +"a = (block_x == 0) ? src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 15] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 15] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (2 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"l = line_end ? 
src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"#endif\n" +"uchar16 arr[12];\n" +"float16 sum[2];\n" +"arr[0] = (uchar16)(a, b.s0123, b.s456789ab, b.scde);\n" +"arr[1] = b;\n" +"arr[2] = (uchar16)(b.s123, b.s4567, b.s89abcdef, c);\n" +"arr[3] = (uchar16)(d, e.s0123, e.s456789ab, e.scde);\n" +"arr[4] = e;\n" +"arr[5] = (uchar16)(e.s123, e.s4567, e.s89abcdef, f);\n" +"arr[6] = (uchar16)(g, h.s0123, h.s456789ab, h.scde);\n" +"arr[7] = h;\n" +"arr[8] = (uchar16)(h.s123, h.s4567, h.s89abcdef, i);\n" +"arr[9] = (uchar16)(j, k.s0123, k.s456789ab, k.scde);\n" +"arr[10] = k;\n" +"arr[11] = (uchar16)(k.s123, k.s4567, k.s89abcdef, l);\n" +"sum[0] = OP(0, 0, 0) + OP(0, 0, 1) + OP(0, 0, 2) +\n" +"OP(0, 1, 0) + OP(0, 1, 1) + OP(0, 1, 2) +\n" +"OP(0, 2, 0) + OP(0, 2, 1) + OP(0, 2, 2);\n" +"sum[1] = OP(1, 0, 0) + OP(1, 0, 1) + OP(1, 0, 2) +\n" +"OP(1, 1, 0) + OP(1, 1, 1) + OP(1, 1, 2) +\n" +"OP(1, 2, 0) + OP(1, 2, 1) + OP(1, 2, 2);\n" +"line_out[0] = as_uint4(convert_uchar16_sat_rte(sum[0]));\n" +"line_out[1] = as_uint4(convert_uchar16_sat_rte(sum[1]));\n" +"int dst_index = block_x * 4 * dsx + y * (dst_step / 4);\n" +"vstore4(line_out[0], 0, dst + dst_index);\n" +"vstore4(line_out[1], 0, dst + dst_index + (dst_step / 4));\n" +"}\n" +, "dbbc069449fa882036a00c7734ead4c1", NULL}; +struct cv::ocl::internal::ProgramEntry gaussianBlur5x5_oclsrc={moduleName, "gaussianBlur5x5", +"#define DIG(a) a,\n" +"__constant float kx[] = { KERNEL_MATRIX_X };\n" +"__constant float ky[] = { KERNEL_MATRIX_Y };\n" +"#define OP(y, x) (convert_float4(arr[y * 5 + x]) * ky[y] * kx[x])\n" +"#define FILL_ARR(s1, s2, n, e1, e2) \\\n" +"arr[5 * n + 0] = row_s ? (uchar4)(s1, s2, line[n].s23) : (uchar4)(line[n].s0123); \\\n" +"arr[5 * n + 1] = row_s ? (uchar4)(s2, line[n].s234) : (uchar4)(line[n].s1234); \\\n" +"arr[5 * n + 2] = (uchar4)(line[n].s2345); \\\n" +"arr[5 * n + 3] = row_e ? (uchar4)(line[n].s345, e1) : (uchar4)(line[n].s3456); \\\n" +"arr[5 * n + 4] = row_e ? (uchar4)(line[n].s45, e1, e2) : (uchar4)(line[n].s4567);\n" +"__kernel void gaussianBlur5x5_8UC1_cols4(__global const uchar* src, int src_step,\n" +"__global uint* dst, int dst_step, int rows, int cols)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"if (x >= cols || y >= rows) return;\n" +"uchar8 line[5];\n" +"int offset, src_index;\n" +"src_index = x + (y - 2) * src_step - 2;\n" +"offset = max(0, src_index + 2 * src_step);\n" +"line[2] = vload8(0, src + offset);\n" +"if (offset == 0) line[2] = (uchar8)(0, 0, line[2].s0123, line[2].s45);\n" +"#if defined BORDER_CONSTANT || defined BORDER_REPLICATE\n" +"uchar8 tmp;\n" +"#ifdef BORDER_CONSTANT\n" +"tmp = (uchar8)0;\n" +"#elif defined BORDER_REPLICATE\n" +"tmp = line[2];\n" +"#endif\n" +"line[0] = line[1] = tmp;\n" +"if (y > 1)\n" +"{\n" +"offset = max(0, src_index);\n" +"line[0] = vload8(0, src + offset);\n" +"if (offset == 0) line[0] = (uchar8)(0, 0, line[0].s0123, line[0].s45);\n" +"}\n" +"if (y > 0)\n" +"{\n" +"offset = max(0, src_index + src_step);\n" +"line[1] = vload8(0, src + offset);\n" +"if (offset == 0) line[1] = (uchar8)(0, 0, line[1].s0123, line[1].s45);\n" +"}\n" +"line[3] = (y == (rows - 1)) ? tmp : vload8(0, src + src_index + 3 * src_step);\n" +"line[4] = (y >= (rows - 2)) ? tmp : vload8(0, src + src_index + 4 * src_step);\n" +"#elif BORDER_REFLECT\n" +"int t;\n" +"t = (y <= 1) ? 
(abs(y - 1) - y + 2) : 0;\n" +"offset = max(0, src_index + t * src_step);\n" +"line[0] = vload8(0, src + offset);\n" +"if (offset == 0) line[0] = (uchar8)(0, 0, line[0].s0123, line[0].s45);\n" +"if (y == 0)\n" +"line[1] = line[2];\n" +"else\n" +"{\n" +"offset = max(0, src_index + 1 * src_step);\n" +"line[1] = vload8(0, src + offset);\n" +"if (offset == 0) line[1] = (uchar8)(0, 0, line[1].s0123, line[0].s45);\n" +"}\n" +"line[3] = (y == (rows - 1)) ? line[2] : vload8(0, src + src_index + 3 * src_step);\n" +"t = (y >= (rows - 2)) ? (abs(y - (rows - 1)) - (y - (rows - 2)) + 2) : 4;\n" +"line[4] = vload8(0, src + src_index + t * src_step);\n" +"#elif BORDER_REFLECT_101\n" +"if (y == 1)\n" +"line[0] = line[2];\n" +"else\n" +"{\n" +"offset = (y == 0) ? (src_index + 4 * src_step) : max(0, src_index);\n" +"line[0] = vload8(0, src + offset);\n" +"if (offset == 0) line[0] = (uchar8)(0, 0, line[0].s0123, line[0].s45);\n" +"}\n" +"offset = (y == 0) ? (src_index + 3 * src_step) : max(0, src_index + 1 * src_step);\n" +"line[1] = vload8(0, src + offset);\n" +"if (offset == 0) line[1] = (uchar8)(0, 0, line[1].s0123, line[1].s45);\n" +"line[3] = vload8(0, src + src_index + ((y == (rows - 1)) ? 1 : 3) * src_step);\n" +"if (y == (rows - 2))\n" +"line[4] = line[2];\n" +"else\n" +"{\n" +"line[4] = vload8(0, src + src_index + ((y == (rows - 1)) ? 1 : 4) * src_step);\n" +"}\n" +"#endif\n" +"bool row_s = (x == 0);\n" +"bool row_e = ((x + 4) == cols);\n" +"uchar4 arr[25];\n" +"uchar s, e;\n" +"#ifdef BORDER_CONSTANT\n" +"s = e = 0;\n" +"FILL_ARR(s, s, 0, e, e);\n" +"FILL_ARR(s, s, 1, e, e);\n" +"FILL_ARR(s, s, 2, e, e);\n" +"FILL_ARR(s, s, 3, e, e);\n" +"FILL_ARR(s, s, 4, e, e);\n" +"#elif defined BORDER_REPLICATE\n" +"s = line[0].s2;\n" +"e = line[0].s5;\n" +"FILL_ARR(s, s, 0, e, e);\n" +"s = line[1].s2;\n" +"e = line[1].s5;\n" +"FILL_ARR(s, s, 1, e, e);\n" +"s = line[2].s2;\n" +"e = line[2].s5;\n" +"FILL_ARR(s, s, 2, e, e);\n" +"s = line[3].s2;\n" +"e = line[3].s5;\n" +"FILL_ARR(s, s, 3, e, e);\n" +"s = line[4].s2;\n" +"e = line[4].s5;\n" +"FILL_ARR(s, s, 4, e, e);\n" +"#elif BORDER_REFLECT\n" +"uchar s1, s2;\n" +"uchar e1, e2;\n" +"s1 = line[0].s3;\n" +"s2 = line[0].s2;\n" +"e1 = line[0].s5;\n" +"e2 = line[0].s4;\n" +"FILL_ARR(s1, s2, 0, e1, e2);\n" +"s1 = line[1].s3;\n" +"s2 = line[1].s2;\n" +"e1 = line[1].s5;\n" +"e2 = line[1].s4;\n" +"FILL_ARR(s1, s2, 1, e1, e2);\n" +"s1 = line[2].s3;\n" +"s2 = line[2].s2;\n" +"e1 = line[2].s5;\n" +"e2 = line[2].s4;\n" +"FILL_ARR(s1, s2, 2, e1, e2);\n" +"s1 = line[3].s3;\n" +"s2 = line[3].s2;\n" +"e1 = line[3].s5;\n" +"e2 = line[3].s4;\n" +"FILL_ARR(s1, s2, 3, e1, e2);\n" +"s1 = line[4].s3;\n" +"s2 = line[4].s2;\n" +"e1 = line[4].s5;\n" +"e2 = line[4].s4;\n" +"FILL_ARR(s1, s2, 4, e1, e2);\n" +"#elif BORDER_REFLECT_101\n" +"s = line[0].s4;\n" +"e = line[0].s3;\n" +"FILL_ARR(s, e, 0, s, e);\n" +"s = line[1].s4;\n" +"e = line[1].s3;\n" +"FILL_ARR(s, e, 1, s, e);\n" +"s = line[2].s4;\n" +"e = line[2].s3;\n" +"FILL_ARR(s, e, 2, s, e);\n" +"s = line[3].s4;\n" +"e = line[3].s3;\n" +"FILL_ARR(s, e, 3, s, e);\n" +"s = line[4].s4;\n" +"e = line[4].s3;\n" +"FILL_ARR(s, e, 4, s, e);\n" +"#endif\n" +"float4 sum;\n" +"sum = OP(0, 0) + OP(0, 1) + OP(0, 2) + OP(0, 3) + OP(0, 4) +\n" +"OP(1, 0) + OP(1, 1) + OP(1, 2) + OP(1, 3) + OP(1, 4) +\n" +"OP(2, 0) + OP(2, 1) + OP(2, 2) + OP(2, 3) + OP(2, 4) +\n" +"OP(3, 0) + OP(3, 1) + OP(3, 2) + OP(3, 3) + OP(3, 4) +\n" +"OP(4, 0) + OP(4, 1) + OP(4, 2) + OP(4, 3) + OP(4, 4);\n" +"int dst_index = (x / 4) + y * (dst_step / 4);\n" +"dst[dst_index] = 
as_uint(convert_uchar4_sat_rte(sum));\n" +"}\n" +, "ece5030c9920436f23ce25e35dc12303", NULL}; +struct cv::ocl::internal::ProgramEntry gftt_oclsrc={moduleName, "gftt", +"#ifdef OP_MAX_EIGEN_VAL\n" +"__kernel void maxEigenVal(__global const uchar * srcptr, int src_step, int src_offset, int cols,\n" +"int total, __global uchar * dstptr\n" +"#ifdef HAVE_MASK\n" +", __global const uchar * maskptr, int mask_step, int mask_offset\n" +"#endif\n" +")\n" +"{\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int id = get_global_id(0);\n" +"__local float localmem_max[WGS2_ALIGNED];\n" +"float maxval = -FLT_MAX;\n" +"for (int grain = groupnum * WGS; id < total; id += grain)\n" +"{\n" +"int src_index = mad24(id / cols, src_step, mad24((id % cols), (int)sizeof(float), src_offset));\n" +"#ifdef HAVE_MASK\n" +"int mask_index = mad24(id / cols, mask_step, id % cols + mask_offset);\n" +"if (maskptr[mask_index])\n" +"#endif\n" +"maxval = max(maxval, *(__global const float *)(srcptr + src_index));\n" +"}\n" +"if (lid < WGS2_ALIGNED)\n" +"localmem_max[lid] = maxval;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid >= WGS2_ALIGNED && total >= WGS2_ALIGNED)\n" +"localmem_max[lid - WGS2_ALIGNED] = max(maxval, localmem_max[lid - WGS2_ALIGNED]);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int lsize = WGS2_ALIGNED >> 1; lsize > 0; lsize >>= 1)\n" +"{\n" +"if (lid < lsize)\n" +"{\n" +"int lid2 = lsize + lid;\n" +"localmem_max[lid] = max(localmem_max[lid], localmem_max[lid2]);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"if (lid == 0)\n" +"*(__global float *)(dstptr + (int)sizeof(float) * gid) = localmem_max[0];\n" +"}\n" +"__kernel void maxEigenValTask(__global float * dst, float qualityLevel,\n" +"__global int * cornersptr)\n" +"{\n" +"float maxval = -FLT_MAX;\n" +"#pragma unroll\n" +"for (int x = 0; x < groupnum; ++x)\n" +"maxval = max(maxval, dst[x]);\n" +"dst[0] = maxval * qualityLevel;\n" +"cornersptr[0] = 0;\n" +"}\n" +"#elif OP_FIND_CORNERS\n" +"#define GET_SRC_32F(_y, _x) *(__global const float *)(eigptr + (_y) * eig_step + (_x) * (int)sizeof(float) )\n" +"__kernel void findCorners(__global const uchar * eigptr, int eig_step, int eig_offset,\n" +"#ifdef HAVE_MASK\n" +"__global const uchar * mask, int mask_step, int mask_offset,\n" +"#endif\n" +"__global uchar * cornersptr, int rows, int cols,\n" +"__constant float * threshold, int max_corners)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"__global int* counter = (__global int*) cornersptr;\n" +"__global float2 * corners = (__global float2 *)(cornersptr + (int)sizeof(float2));\n" +"if (y < rows && x < cols\n" +"#ifdef HAVE_MASK\n" +"&& mask[mad24(y, mask_step, x + mask_offset)]\n" +"#endif\n" +")\n" +"{\n" +"++x, ++y;\n" +"float val = GET_SRC_32F(y, x);\n" +"if (val > threshold[0])\n" +"{\n" +"float maxVal = val;\n" +"maxVal = max(GET_SRC_32F(y - 1, x - 1), maxVal);\n" +"maxVal = max(GET_SRC_32F(y - 1, x ), maxVal);\n" +"maxVal = max(GET_SRC_32F(y - 1, x + 1), maxVal);\n" +"maxVal = max(GET_SRC_32F(y , x - 1), maxVal);\n" +"maxVal = max(GET_SRC_32F(y , x + 1), maxVal);\n" +"maxVal = max(GET_SRC_32F(y + 1, x - 1), maxVal);\n" +"maxVal = max(GET_SRC_32F(y + 1, x ), maxVal);\n" +"maxVal = max(GET_SRC_32F(y + 1, x + 1), maxVal);\n" +"if (val == maxVal)\n" +"{\n" +"int ind = atomic_inc(counter);\n" +"if (ind < max_corners)\n" +"{\n" +"corners[ind].x = val;\n" +"corners[ind].y = as_float(y | (x << 16));\n" +"}\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "cb2cfd26f04e14ae047e2f5eb28c8e11", NULL}; +struct 
cv::ocl::internal::ProgramEntry histogram_oclsrc={moduleName, "histogram", +"#ifndef kercn\n" +"#define kercn 1\n" +"#endif\n" +"#ifndef T\n" +"#define T uchar\n" +"#endif\n" +"#define noconvert\n" +"__kernel void calculate_histogram(__global const uchar * src_ptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * histptr, int total)\n" +"{\n" +"int lid = get_local_id(0);\n" +"int id = get_global_id(0) * kercn;\n" +"int gid = get_group_id(0);\n" +"__local int localhist[BINS];\n" +"#pragma unroll\n" +"for (int i = lid; i < BINS; i += WGS)\n" +"localhist[i] = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"__global const uchar * src = src_ptr + src_offset;\n" +"int src_index;\n" +"for (int grain = HISTS_COUNT * WGS * kercn; id < total; id += grain)\n" +"{\n" +"#ifdef HAVE_SRC_CONT\n" +"src_index = id;\n" +"#else\n" +"src_index = mad24(id / src_cols, src_step, id % src_cols);\n" +"#endif\n" +"#if kercn == 1\n" +"atomic_inc(localhist + convert_int(src[src_index]));\n" +"#elif kercn == 4\n" +"int value = *(__global const int *)(src + src_index);\n" +"atomic_inc(localhist + (value & 0xff));\n" +"atomic_inc(localhist + ((value >> 8) & 0xff));\n" +"atomic_inc(localhist + ((value >> 16) & 0xff));\n" +"atomic_inc(localhist + ((value >> 24) & 0xff));\n" +"#elif kercn >= 2\n" +"T value = *(__global const T *)(src + src_index);\n" +"atomic_inc(localhist + value.s0);\n" +"atomic_inc(localhist + value.s1);\n" +"#if kercn >= 4\n" +"atomic_inc(localhist + value.s2);\n" +"atomic_inc(localhist + value.s3);\n" +"#if kercn >= 8\n" +"atomic_inc(localhist + value.s4);\n" +"atomic_inc(localhist + value.s5);\n" +"atomic_inc(localhist + value.s6);\n" +"atomic_inc(localhist + value.s7);\n" +"#if kercn == 16\n" +"atomic_inc(localhist + value.s8);\n" +"atomic_inc(localhist + value.s9);\n" +"atomic_inc(localhist + value.sA);\n" +"atomic_inc(localhist + value.sB);\n" +"atomic_inc(localhist + value.sC);\n" +"atomic_inc(localhist + value.sD);\n" +"atomic_inc(localhist + value.sE);\n" +"atomic_inc(localhist + value.sF);\n" +"#endif\n" +"#endif\n" +"#endif\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"__global int * hist = (__global int *)(histptr + gid * BINS * (int)sizeof(int));\n" +"#pragma unroll\n" +"for (int i = lid; i < BINS; i += WGS)\n" +"hist[i] = localhist[i];\n" +"}\n" +"#ifndef HT\n" +"#define HT int\n" +"#endif\n" +"#ifndef convertToHT\n" +"#define convertToHT noconvert\n" +"#endif\n" +"__kernel void merge_histogram(__global const int * ghist, __global uchar * histptr, int hist_step, int hist_offset)\n" +"{\n" +"int lid = get_local_id(0);\n" +"__global HT * hist = (__global HT *)(histptr + hist_offset);\n" +"#if WGS >= BINS\n" +"HT res = (HT)(0);\n" +"#else\n" +"#pragma unroll\n" +"for (int i = lid; i < BINS; i += WGS)\n" +"hist[i] = (HT)(0);\n" +"#endif\n" +"#pragma unroll\n" +"for (int i = 0; i < HISTS_COUNT; ++i)\n" +"{\n" +"#pragma unroll\n" +"for (int j = lid; j < BINS; j += WGS)\n" +"#if WGS >= BINS\n" +"res += convertToHT(ghist[j]);\n" +"#else\n" +"hist[j] += convertToHT(ghist[j]);\n" +"#endif\n" +"ghist += BINS;\n" +"}\n" +"#if WGS >= BINS\n" +"if (lid < BINS)\n" +"*(__global HT *)(histptr + mad24(lid, hist_step, hist_offset)) = res;\n" +"#endif\n" +"}\n" +"__kernel void calcLUT(__global uchar * dst, __global const int * ghist, int total)\n" +"{\n" +"int lid = get_local_id(0);\n" +"__local int sumhist[BINS];\n" +"__local float scale;\n" +"#if WGS >= BINS\n" +"int res = 0;\n" +"#else\n" +"#pragma unroll\n" +"for (int i = lid; i < BINS; i += WGS)\n" +"sumhist[i] = 
0;\n" +"#endif\n" +"#pragma unroll\n" +"for (int i = 0; i < HISTS_COUNT; ++i)\n" +"{\n" +"#pragma unroll\n" +"for (int j = lid; j < BINS; j += WGS)\n" +"#if WGS >= BINS\n" +"res += ghist[j];\n" +"#else\n" +"sumhist[j] += ghist[j];\n" +"#endif\n" +"ghist += BINS;\n" +"}\n" +"#if WGS >= BINS\n" +"if (lid < BINS)\n" +"sumhist[lid] = res;\n" +"#endif\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid == 0)\n" +"{\n" +"int sum = 0, i = 0;\n" +"while (!sumhist[i])\n" +"++i;\n" +"if (total == sumhist[i])\n" +"{\n" +"scale = 1;\n" +"for (int j = 0; j < BINS; ++j)\n" +"sumhist[i] = i;\n" +"}\n" +"else\n" +"{\n" +"scale = 255.f / (total - sumhist[i]);\n" +"for (sumhist[i++] = 0; i < BINS; i++)\n" +"{\n" +"sum += sumhist[i];\n" +"sumhist[i] = sum;\n" +"}\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#pragma unroll\n" +"for (int i = lid; i < BINS; i += WGS)\n" +"dst[i]= convert_uchar_sat_rte(convert_float(sumhist[i]) * scale);\n" +"}\n" +, "3bfd6703e639c8a36eb7cdd5f3eefda6", NULL}; +struct cv::ocl::internal::ProgramEntry hough_lines_oclsrc={moduleName, "hough_lines", +"#define ACCUM(ptr) *((__global int*)(ptr))\n" +"#ifdef MAKE_POINTS_LIST\n" +"__kernel void make_point_list(__global const uchar * src_ptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * list_ptr, int list_step, int list_offset, __global int* global_offset)\n" +"{\n" +"int x = get_local_id(0);\n" +"int y = get_group_id(1);\n" +"__local int l_index, l_offset;\n" +"__local int l_points[LOCAL_SIZE];\n" +"__global const uchar * src = src_ptr + mad24(y, src_step, src_offset);\n" +"__global int * list = (__global int*)(list_ptr + list_offset);\n" +"if (x == 0)\n" +"l_index = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (y < src_rows)\n" +"{\n" +"y <<= 16;\n" +"for (int i=x; i < src_cols; i+=GROUP_SIZE)\n" +"{\n" +"if (src[i])\n" +"{\n" +"int val = y | i;\n" +"int index = atomic_inc(&l_index);\n" +"l_points[index] = val;\n" +"}\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (x == 0)\n" +"l_offset = atomic_add(global_offset, l_index);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"list += l_offset;\n" +"for (int i=x; i < l_index; i+=GROUP_SIZE)\n" +"{\n" +"list[i] = l_points[i];\n" +"}\n" +"}\n" +"#elif defined FILL_ACCUM_GLOBAL\n" +"__kernel void fill_accum_global(__global const uchar * list_ptr, int list_step, int list_offset,\n" +"__global uchar * accum_ptr, int accum_step, int accum_offset,\n" +"int total_points, float irho, float theta, int numrho, int numangle)\n" +"{\n" +"int theta_idx = get_global_id(1);\n" +"int count_idx = get_global_id(0);\n" +"int glob_size = get_global_size(0);\n" +"float cosVal;\n" +"float sinVal = sincos(theta * ((float)theta_idx), &cosVal);\n" +"sinVal *= irho;\n" +"cosVal *= irho;\n" +"__global const int * list = (__global const int*)(list_ptr + list_offset);\n" +"__global int* accum = (__global int*)(accum_ptr + mad24(theta_idx + 1, accum_step, accum_offset));\n" +"const int shift = (numrho - 1) / 2;\n" +"if (theta_idx < numangle)\n" +"{\n" +"for (int i = count_idx; i < total_points; i += glob_size)\n" +"{\n" +"const int val = list[i];\n" +"const int x = (val & 0xFFFF);\n" +"const int y = (val >> 16) & 0xFFFF;\n" +"int r = convert_int_rte(mad((float)x, cosVal, y * sinVal)) + shift;\n" +"atomic_inc(accum + r + 1);\n" +"}\n" +"}\n" +"}\n" +"#elif defined FILL_ACCUM_LOCAL\n" +"__kernel void fill_accum_local(__global const uchar * list_ptr, int list_step, int list_offset,\n" +"__global uchar * accum_ptr, int accum_step, int accum_offset,\n" +"int total_points, float irho, 
float theta, int numrho, int numangle)\n" +"{\n" +"int theta_idx = get_group_id(1);\n" +"int count_idx = get_local_id(0);\n" +"__local int l_accum[BUFFER_SIZE];\n" +"if (theta_idx > 0 && theta_idx < numangle + 1)\n" +"{\n" +"float cosVal;\n" +"float sinVal = sincos(theta * (float) (theta_idx-1), &cosVal);\n" +"sinVal *= irho;\n" +"cosVal *= irho;\n" +"for (int i=count_idx; i> 16;\n" +"int r = convert_int_rte(mad((float)x, cosVal, y * sinVal)) + shift;\n" +"atomic_inc(l_accum + r + 1);\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"__global int* accum = (__global int*)(accum_ptr + mad24(theta_idx, accum_step, accum_offset));\n" +"for (int i=count_idx; i threshold && curVote > ACCUM(accum - sizeof(int)) && curVote >= ACCUM(accum + sizeof(int)) &&\n" +"curVote > ACCUM(accum - accum_step) && curVote >= ACCUM(accum + accum_step))\n" +"{\n" +"int index = atomic_inc(lines_index);\n" +"if (index < linesMax)\n" +"{\n" +"float radius = (x - (accum_cols - 3) * 0.5f) * rho;\n" +"float angle = y * theta;\n" +"lines[index] = (float2)(radius, angle);\n" +"}\n" +"}\n" +"accum += glob_size * (int) sizeof(int);\n" +"}\n" +"}\n" +"}\n" +"#elif GET_LINES_PROBABOLISTIC\n" +"__kernel void get_lines(__global const uchar * accum_ptr, int accum_step, int accum_offset, int accum_rows, int accum_cols,\n" +"__global const uchar * src_ptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * lines_ptr, int lines_step, int lines_offset, __global int* lines_index_ptr,\n" +"int linesMax, int threshold, int lineLength, int lineGap, float rho, float theta)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (y < accum_rows-2)\n" +"{\n" +"__global const uchar* accum = accum_ptr + mad24(y+1, accum_step, mad24(x+1, (int) sizeof(int), accum_offset));\n" +"__global int4* lines = (__global int4*)(lines_ptr + lines_offset);\n" +"__global int* lines_index = lines_index_ptr + 1;\n" +"int curVote = ACCUM(accum);\n" +"if (curVote >= threshold &&\n" +"curVote > ACCUM(accum - accum_step - sizeof(int)) &&\n" +"curVote > ACCUM(accum - accum_step) &&\n" +"curVote > ACCUM(accum - accum_step + sizeof(int)) &&\n" +"curVote > ACCUM(accum - sizeof(int)) &&\n" +"curVote > ACCUM(accum + sizeof(int)) &&\n" +"curVote > ACCUM(accum + accum_step - sizeof(int)) &&\n" +"curVote > ACCUM(accum + accum_step) &&\n" +"curVote > ACCUM(accum + accum_step + sizeof(int)))\n" +"{\n" +"const float radius = (x - (accum_cols - 2 - 1) * 0.5f) * rho;\n" +"const float angle = y * theta;\n" +"float cosa;\n" +"float sina = sincos(angle, &cosa);\n" +"float2 p0 = (float2)(cosa * radius, sina * radius);\n" +"float2 dir = (float2)(-sina, cosa);\n" +"float2 pb[4] = { (float2)(-1, -1), (float2)(-1, -1), (float2)(-1, -1), (float2)(-1, -1) };\n" +"float a;\n" +"if (dir.x != 0)\n" +"{\n" +"a = -p0.x / dir.x;\n" +"pb[0].x = 0;\n" +"pb[0].y = p0.y + a * dir.y;\n" +"a = (src_cols - 1 - p0.x) / dir.x;\n" +"pb[1].x = src_cols - 1;\n" +"pb[1].y = p0.y + a * dir.y;\n" +"}\n" +"if (dir.y != 0)\n" +"{\n" +"a = -p0.y / dir.y;\n" +"pb[2].x = p0.x + a * dir.x;\n" +"pb[2].y = 0;\n" +"a = (src_rows - 1 - p0.y) / dir.y;\n" +"pb[3].x = p0.x + a * dir.x;\n" +"pb[3].y = src_rows - 1;\n" +"}\n" +"if (pb[0].x == 0 && (pb[0].y >= 0 && pb[0].y < src_rows))\n" +"{\n" +"p0 = pb[0];\n" +"if (dir.x < 0)\n" +"dir = -dir;\n" +"}\n" +"else if (pb[1].x == src_cols - 1 && (pb[1].y >= 0 && pb[1].y < src_rows))\n" +"{\n" +"p0 = pb[1];\n" +"if (dir.x > 0)\n" +"dir = -dir;\n" +"}\n" +"else if (pb[2].y == 0 && (pb[2].x >= 0 && pb[2].x < src_cols))\n" +"{\n" +"p0 
= pb[2];\n" +"if (dir.y < 0)\n" +"dir = -dir;\n" +"}\n" +"else if (pb[3].y == src_rows - 1 && (pb[3].x >= 0 && pb[3].x < src_cols))\n" +"{\n" +"p0 = pb[3];\n" +"if (dir.y > 0)\n" +"dir = -dir;\n" +"}\n" +"dir /= max(fabs(dir.x), fabs(dir.y));\n" +"float2 line_end[2];\n" +"int gap;\n" +"bool inLine = false;\n" +"if (p0.x < 0 || p0.x >= src_cols || p0.y < 0 || p0.y >= src_rows)\n" +"return;\n" +"for (;;)\n" +"{\n" +"if (*(src_ptr + mad24(p0.y, src_step, p0.x + src_offset)))\n" +"{\n" +"gap = 0;\n" +"if (!inLine)\n" +"{\n" +"line_end[0] = p0;\n" +"line_end[1] = p0;\n" +"inLine = true;\n" +"}\n" +"else\n" +"{\n" +"line_end[1] = p0;\n" +"}\n" +"}\n" +"else if (inLine)\n" +"{\n" +"if (++gap > lineGap)\n" +"{\n" +"bool good_line = fabs(line_end[1].x - line_end[0].x) >= lineLength ||\n" +"fabs(line_end[1].y - line_end[0].y) >= lineLength;\n" +"if (good_line)\n" +"{\n" +"int index = atomic_inc(lines_index);\n" +"if (index < linesMax)\n" +"lines[index] = (int4)(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);\n" +"}\n" +"gap = 0;\n" +"inLine = false;\n" +"}\n" +"}\n" +"p0 = p0 + dir;\n" +"if (p0.x < 0 || p0.x >= src_cols || p0.y < 0 || p0.y >= src_rows)\n" +"{\n" +"if (inLine)\n" +"{\n" +"bool good_line = fabs(line_end[1].x - line_end[0].x) >= lineLength ||\n" +"fabs(line_end[1].y - line_end[0].y) >= lineLength;\n" +"if (good_line)\n" +"{\n" +"int index = atomic_inc(lines_index);\n" +"if (index < linesMax)\n" +"lines[index] = (int4)(line_end[0].x, line_end[0].y, line_end[1].x, line_end[1].y);\n" +"}\n" +"}\n" +"break;\n" +"}\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "e6ccdb4eda8b85085da0ea6fa9ce4569", NULL}; +struct cv::ocl::internal::ProgramEntry integral_sum_oclsrc={moduleName, "integral_sum", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#ifndef LOCAL_SUM_SIZE\n" +"#define LOCAL_SUM_SIZE 16\n" +"#endif\n" +"#define LOCAL_SUM_STRIDE (LOCAL_SUM_SIZE + 1)\n" +"kernel void integral_sum_cols(__global const uchar *src_ptr, int src_step, int src_offset, int rows, int cols,\n" +"__global uchar *buf_ptr, int buf_step, int buf_offset\n" +"#ifdef SUM_SQUARE\n" +",__global uchar *buf_sq_ptr, int buf_sq_step, int buf_sq_offset\n" +"#endif\n" +")\n" +"{\n" +"__local sumT lm_sum[LOCAL_SUM_STRIDE * LOCAL_SUM_SIZE];\n" +"#ifdef SUM_SQUARE\n" +"__local sumSQT lm_sum_sq[LOCAL_SUM_STRIDE * LOCAL_SUM_SIZE];\n" +"#endif\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int x = get_global_id(0);\n" +"int src_index = x + src_offset;\n" +"sumT accum = 0;\n" +"#ifdef SUM_SQUARE\n" +"sumSQT accum_sq = 0;\n" +"#endif\n" +"for (int y = 0; y < rows; y += LOCAL_SUM_SIZE)\n" +"{\n" +"int lsum_index = lid;\n" +"#pragma unroll\n" +"for (int yin = 0; yin < LOCAL_SUM_SIZE; yin++, src_index+=src_step, lsum_index += LOCAL_SUM_STRIDE)\n" +"{\n" +"if ((x < cols) && (y + yin < rows))\n" +"{\n" +"__global const uchar *src = src_ptr + src_index;\n" +"accum += src[0];\n" +"#ifdef SUM_SQUARE\n" +"sumSQT temp = src[0] * src[0];\n" +"accum_sq += temp;\n" +"#endif\n" +"}\n" +"lm_sum[lsum_index] = accum;\n" +"#ifdef SUM_SQUARE\n" +"lm_sum_sq[lsum_index] = accum_sq;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int buf_index = mad24(buf_step, LOCAL_SUM_SIZE * gid, mad24((int)sizeof(sumT), y + lid, buf_offset));\n" +"#ifdef SUM_SQUARE\n" +"int buf_sq_index = mad24(buf_sq_step, LOCAL_SUM_SIZE * gid, mad24((int)sizeof(sumSQT), y + lid, 
buf_sq_offset));\n" +"#endif\n" +"lsum_index = LOCAL_SUM_STRIDE * lid;\n" +"#pragma unroll\n" +"for (int yin = 0; yin < LOCAL_SUM_SIZE; yin++, lsum_index ++)\n" +"{\n" +"__global sumT *buf = (__global sumT *)(buf_ptr + buf_index);\n" +"buf[0] = lm_sum[lsum_index];\n" +"buf_index += buf_step;\n" +"#ifdef SUM_SQUARE\n" +"__global sumSQT *bufsq = (__global sumSQT *)(buf_sq_ptr + buf_sq_index);\n" +"bufsq[0] = lm_sum_sq[lsum_index];\n" +"buf_sq_index += buf_sq_step;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"}\n" +"kernel void integral_sum_rows(__global const uchar *buf_ptr, int buf_step, int buf_offset,\n" +"#ifdef SUM_SQUARE\n" +"__global uchar *buf_sq_ptr, int buf_sq_step, int buf_sq_offset,\n" +"#endif\n" +"__global uchar *dst_ptr, int dst_step, int dst_offset, int rows, int cols\n" +"#ifdef SUM_SQUARE\n" +",__global uchar *dst_sq_ptr, int dst_sq_step, int dst_sq_offset\n" +"#endif\n" +")\n" +"{\n" +"__local sumT lm_sum[LOCAL_SUM_STRIDE * LOCAL_SUM_SIZE];\n" +"#ifdef SUM_SQUARE\n" +"__local sumSQT lm_sum_sq[LOCAL_SUM_STRIDE * LOCAL_SUM_SIZE];\n" +"#endif\n" +"int lid = get_local_id(0);\n" +"int gid = get_group_id(0);\n" +"int gs = get_global_size(0);\n" +"int x = get_global_id(0);\n" +"__global sumT *dst = (__global sumT *)(dst_ptr + dst_offset);\n" +"for (int xin = x; xin < cols; xin += gs)\n" +"{\n" +"dst[xin] = 0;\n" +"}\n" +"dst_offset += dst_step;\n" +"if (x < rows - 1)\n" +"{\n" +"dst = (__global sumT *)(dst_ptr + mad24(x, dst_step, dst_offset));\n" +"dst[0] = 0;\n" +"}\n" +"int buf_index = mad24((int)sizeof(sumT), x, buf_offset);\n" +"sumT accum = 0;\n" +"#ifdef SUM_SQUARE\n" +"__global sumSQT *dst_sq = (__global sumSQT *)(dst_sq_ptr + dst_sq_offset);\n" +"for (int xin = x; xin < cols; xin += gs)\n" +"{\n" +"dst_sq[xin] = 0;\n" +"}\n" +"dst_sq_offset += dst_sq_step;\n" +"if (x < rows - 1)\n" +"{\n" +"dst_sq = (__global sumSQT *)(dst_sq_ptr + mad24(x, dst_sq_step, dst_sq_offset));\n" +"dst_sq[0] = 0;\n" +"}\n" +"int buf_sq_index = mad24((int)sizeof(sumSQT), x, buf_sq_offset);\n" +"sumSQT accum_sq = 0;\n" +"#endif\n" +"for (int y = 1; y < cols; y += LOCAL_SUM_SIZE)\n" +"{\n" +"int lsum_index = lid;\n" +"#pragma unroll\n" +"for (int yin = 0; yin < LOCAL_SUM_SIZE; yin++, lsum_index += LOCAL_SUM_STRIDE)\n" +"{\n" +"__global const sumT *buf = (__global const sumT *)(buf_ptr + buf_index);\n" +"accum += buf[0];\n" +"lm_sum[lsum_index] = accum;\n" +"buf_index += buf_step;\n" +"#ifdef SUM_SQUARE\n" +"__global const sumSQT *buf_sq = (__global const sumSQT *)(buf_sq_ptr + buf_sq_index);\n" +"accum_sq += buf_sq[0];\n" +"lm_sum_sq[lsum_index] = accum_sq;\n" +"buf_sq_index += buf_sq_step;\n" +"#endif\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (y + lid < cols)\n" +"{\n" +"int dst_index = mad24(dst_step, LOCAL_SUM_SIZE * gid, mad24((int)sizeof(sumT), y + lid, dst_offset));\n" +"#ifdef SUM_SQUARE\n" +"int dst_sq_index = mad24(dst_sq_step, LOCAL_SUM_SIZE * gid, mad24((int)sizeof(sumSQT), y + lid, dst_sq_offset));\n" +"#endif\n" +"lsum_index = LOCAL_SUM_STRIDE * lid;\n" +"int yin_max = min(rows - 1 - LOCAL_SUM_SIZE * gid, LOCAL_SUM_SIZE);\n" +"#pragma unroll\n" +"for (int yin = 0; yin < yin_max; yin++, lsum_index++)\n" +"{\n" +"dst = (__global sumT *)(dst_ptr + dst_index);\n" +"dst[0] = lm_sum[lsum_index];\n" +"dst_index += dst_step;\n" +"#ifdef SUM_SQUARE\n" +"dst_sq = (__global sumSQT *)(dst_sq_ptr + dst_sq_index);\n" +"dst_sq[0] = lm_sum_sq[lsum_index];\n" +"dst_sq_index += dst_sq_step;\n" +"#endif\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"}\n" +, 
"ed75bf92c46b18f3cbb1b0b85ed3d46d", NULL}; +struct cv::ocl::internal::ProgramEntry laplacian3_oclsrc={moduleName, "laplacian3", +"#define DIG(a) a,\n" +"__constant float kx[] = { KERNEL_MATRIX };\n" +"#define OP(delta, x) (convert_float16(arr[delta + x]) * kx[x])\n" +"__kernel void laplacian3_8UC1_cols16_rows2(__global const uint* src, int src_step,\n" +"__global uint* dst, int dst_step,\n" +"int rows, int cols, float delta)\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"int ssx, dsx;\n" +"if ((block_x * 16) >= cols || y >= rows) return;\n" +"uint4 line[4];\n" +"uint4 line_out[2];\n" +"uchar a; uchar16 b; uchar c;\n" +"uchar d; uchar16 e; uchar f;\n" +"uchar g; uchar16 h; uchar i;\n" +"uchar j; uchar16 k; uchar l;\n" +"ssx = dsx = 1;\n" +"int src_index = block_x * 4 * ssx + (y - 1) * (src_step / 4);\n" +"line[1] = vload4(0, src + src_index + (src_step / 4));\n" +"line[2] = vload4(0, src + src_index + 2 * (src_step / 4));\n" +"#ifdef BORDER_CONSTANT\n" +"line[0] = (y == 0) ? (uint4)0 : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? (uint4)0 : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined BORDER_REFLECT_101\n" +"line[0] = (y == 0) ? line[2] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[1] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"line[0] = (y == 0) ? line[1] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[2] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#endif\n" +"__global uchar *src_p = (__global uchar *)src;\n" +"src_index = block_x * 16 * ssx + (y - 1) * src_step;\n" +"bool line_end = ((block_x + 1) * 16 == cols);\n" +"b = as_uchar16(line[0]);\n" +"e = as_uchar16(line[1]);\n" +"h = as_uchar16(line[2]);\n" +"k = as_uchar16(line[3]);\n" +"#ifdef BORDER_CONSTANT\n" +"a = (block_x == 0 || y == 0) ? 0 : src_p[src_index - 1];\n" +"c = (line_end || y == 0) ? 0 : src_p[src_index + 16];\n" +"d = (block_x == 0) ? 0 : src_p[src_index + src_step - 1];\n" +"f = line_end ? 0 : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? 0 : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? 0 : src_p[src_index + 2 * src_step + 16];\n" +"j = (block_x == 0 || y == (rows - 2)) ? 0 : src_p[src_index + 3 * src_step - 1];\n" +"l = (line_end || y == (rows - 2))? 0 : src_p[src_index + 3 * src_step + 16];\n" +"#elif defined BORDER_REFLECT_101\n" +"int offset;\n" +"offset = (y == 0) ? (2 * src_step) : 0;\n" +"a = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step + 1] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 14] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step + 1] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 14] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (1 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"l = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"int offset;\n" +"offset = (y == 0) ? (1 * src_step) : 0;\n" +"a = (block_x == 0) ? 
src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 15] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 15] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (2 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"l = line_end ? src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"#endif\n" +"uchar16 arr[12];\n" +"float16 sum[2];\n" +"arr[0] = (uchar16)(a, b.s0123, b.s456789ab, b.scde);\n" +"arr[1] = b;\n" +"arr[2] = (uchar16)(b.s123, b.s4567, b.s89abcdef, c);\n" +"arr[3] = (uchar16)(d, e.s0123, e.s456789ab, e.scde);\n" +"arr[4] = e;\n" +"arr[5] = (uchar16)(e.s123, e.s4567, e.s89abcdef, f);\n" +"arr[6] = (uchar16)(g, h.s0123, h.s456789ab, h.scde);\n" +"arr[7] = h;\n" +"arr[8] = (uchar16)(h.s123, h.s4567, h.s89abcdef, i);\n" +"arr[9] = (uchar16)(j, k.s0123, k.s456789ab, k.scde);\n" +"arr[10] = k;\n" +"arr[11] = (uchar16)(k.s123, k.s4567, k.s89abcdef, l);\n" +"sum[0] = OP(0, 0) + OP(0, 1) + OP(0, 2) +\n" +"OP(0, 3) + OP(0, 4) + OP(0, 5) +\n" +"OP(0, 6) + OP(0, 7) + OP(0, 8);\n" +"sum[1] = OP(3, 0) + OP(3, 1) + OP(3, 2) +\n" +"OP(3, 3) + OP(3, 4) + OP(3, 5) +\n" +"OP(3, 6) + OP(3, 7) + OP(3, 8);\n" +"line_out[0] = as_uint4(convert_uchar16_sat_rte(sum[0] + delta));\n" +"line_out[1] = as_uint4(convert_uchar16_sat_rte(sum[1] + delta));\n" +"int dst_index = block_x * 4 * dsx + y * (dst_step / 4);\n" +"vstore4(line_out[0], 0, dst + dst_index);\n" +"vstore4(line_out[1], 0, dst + dst_index + (dst_step / 4));\n" +"}\n" +, "c7b7eb4f034bd214e42f2dccc9ffd970", NULL}; +struct cv::ocl::internal::ProgramEntry laplacian5_oclsrc={moduleName, "laplacian5", +"#define noconvert\n" +"#ifdef ONLY_SUM_CONVERT\n" +"__kernel void sumConvert(__global const uchar * src1ptr, int src1_step, int src1_offset,\n" +"__global const uchar * src2ptr, int src2_step, int src2_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"coeffT scale, coeffT delta)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (y < dst_rows && x < dst_cols)\n" +"{\n" +"int src1_index = mad24(y, src1_step, mad24(x, (int)sizeof(srcT), src1_offset));\n" +"int src2_index = mad24(y, src2_step, mad24(x, (int)sizeof(srcT), src2_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, (int)sizeof(dstT), dst_offset));\n" +"__global const srcT * src1 = (__global const srcT *)(src1ptr + src1_index);\n" +"__global const srcT * src2 = (__global const srcT *)(src2ptr + src2_index);\n" +"__global dstT * dst = (__global dstT *)(dstptr + dst_index);\n" +"#if wdepth <= 4\n" +"dst[0] = convertToDT( mad24((WT)(scale), convertToWT(src1[0]) + convertToWT(src2[0]), (WT)(delta)) );\n" +"#else\n" +"dst[0] = convertToDT( mad((WT)(scale), convertToWT(src1[0]) + convertToWT(src2[0]), (WT)(delta)) );\n" +"#endif\n" +"}\n" +"}\n" +"#else\n" +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(x, maxV)\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = clamp((x), 0, (maxV)-1); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = ( (x) + (maxV) ) % (maxV); 
\\\n" +"}\n" +"#elif defined BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min(((maxV)-1)*2-(x)+1, max((x),-(x)-1) ); \\\n" +"}\n" +"#elif defined BORDER_REFLECT_101\n" +"#define EXTRAPOLATE(x, maxV) \\\n" +"{ \\\n" +"(x) = min(((maxV)-1)*2-(x), max((x),-(x)) ); \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#if CN != 3\n" +"#define loadpix(addr) *(__global const srcT *)(addr)\n" +"#define storepix(val, addr) *(__global dstT *)(addr) = val\n" +"#define SRCSIZE (int)sizeof(srcT)\n" +"#define DSTSIZE (int)sizeof(dstT)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const srcT1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global dstT1 *)(addr))\n" +"#define SRCSIZE (int)sizeof(srcT1)*3\n" +"#define DSTSIZE (int)sizeof(dstT1)*3\n" +"#endif\n" +"#define SRC(_x,_y) convertToWT(loadpix(Src + mad24(_y, src_step, SRCSIZE * _x)))\n" +"#ifdef BORDER_CONSTANT\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) (_x)<0 | (_x) >= (r_edge) | (_y)<0 | (_y) >= (t_edge) ? (const_v) : SRC((_x),(_y))\n" +"#else\n" +"#define ELEM(_x,_y,r_edge,t_edge,const_v) SRC((_x),(_y))\n" +"#endif\n" +"#define DIG(a) a,\n" +"__constant WT1 mat_kernelX[] = { KERNEL_MATRIX_X };\n" +"__constant WT1 mat_kernelY[] = { KERNEL_MATRIX_Y };\n" +"__kernel void laplacian(__global uchar* Src, int src_step, int srcOffsetX, int srcOffsetY, int height, int width,\n" +"__global uchar* Dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"WT1 scale, WT1 delta)\n" +"{\n" +"__local WT lsmem[BLK_Y + 2 * RADIUS][BLK_X + 2 * RADIUS];\n" +"__local WT lsmemDy1[BLK_Y][BLK_X + 2 * RADIUS];\n" +"__local WT lsmemDy2[BLK_Y][BLK_X + 2 * RADIUS];\n" +"int lix = get_local_id(0);\n" +"int liy = get_local_id(1);\n" +"int x = get_global_id(0);\n" +"int srcX = x + srcOffsetX - RADIUS;\n" +"int clocY = liy;\n" +"do\n" +"{\n" +"int yb = clocY + srcOffsetY - RADIUS;\n" +"EXTRAPOLATE(yb, (height));\n" +"int clocX = lix;\n" +"int cSrcX = srcX;\n" +"do\n" +"{\n" +"int xb = cSrcX;\n" +"EXTRAPOLATE(xb,(width));\n" +"lsmem[clocY][clocX] = ELEM(xb, yb, (width), (height), 0 );\n" +"clocX += BLK_X;\n" +"cSrcX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUS*2));\n" +"clocY += BLK_Y;\n" +"}\n" +"while (clocY < BLK_Y+(RADIUS*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"WT scale_v = (WT)scale;\n" +"WT delta_v = (WT)delta;\n" +"for (int y = 0; y < dst_rows; y+=BLK_Y)\n" +"{\n" +"int i, clocX = lix;\n" +"WT sum1 = (WT) 0;\n" +"WT sum2 = (WT) 0;\n" +"do\n" +"{\n" +"sum1 = (WT) 0;\n" +"sum2 = (WT) 0;\n" +"for (i=0; i<=2*RADIUS; i++)\n" +"{\n" +"sum1 = mad(lsmem[liy + i][clocX], mat_kernelY[i], sum1);\n" +"sum2 = mad(lsmem[liy + i][clocX], mat_kernelX[i], sum2);\n" +"}\n" +"lsmemDy1[liy][clocX] = sum1;\n" +"lsmemDy2[liy][clocX] = sum2;\n" +"clocX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUS*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if ((x < dst_cols) && (y + liy < dst_rows))\n" +"{\n" +"sum1 = (WT) 0;\n" +"sum2 = (WT) 0;\n" +"for (i=0; i<=2*RADIUS; i++)\n" +"{\n" +"sum1 = mad(lsmemDy1[liy][lix+i], mat_kernelX[i], sum1);\n" +"sum2 = mad(lsmemDy2[liy][lix+i], mat_kernelY[i], sum2);\n" +"}\n" +"WT sum = mad(scale_v, (sum1 + sum2), delta_v);\n" +"storepix(convertToDT(sum), Dst + mad24(y + liy, dst_step, mad24(x, DSTSIZE, dst_offset)));\n" +"}\n" +"for (int i = liy * BLK_X + lix; i < (RADIUS*2) * (BLK_X+(RADIUS*2)); i += BLK_X * BLK_Y)\n" +"{\n" +"int clocX = i % (BLK_X+(RADIUS*2));\n" +"int clocY = i / (BLK_X+(RADIUS*2));\n" +"lsmem[clocY][clocX] = lsmem[clocY + BLK_Y][clocX];\n" 
+"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int yb = y + liy + BLK_Y + srcOffsetY + RADIUS;\n" +"EXTRAPOLATE(yb, (height));\n" +"clocX = lix;\n" +"int cSrcX = x + srcOffsetX - RADIUS;\n" +"do\n" +"{\n" +"int xb = cSrcX;\n" +"EXTRAPOLATE(xb,(width));\n" +"lsmem[liy + 2*RADIUS][clocX] = ELEM(xb, yb, (width), (height), 0 );\n" +"clocX += BLK_X;\n" +"cSrcX += BLK_X;\n" +"}\n" +"while(clocX < BLK_X+(RADIUS*2));\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"}\n" +"#endif\n" +, "3ce3fc1a1c2e6be3a8fd0d2f51afeaf1", NULL}; +struct cv::ocl::internal::ProgramEntry linearPolar_oclsrc={moduleName, "linearPolar", +"#define CV_2PI 6.283185307179586476925286766559\n" +"#ifdef ForwardMap\n" +"__kernel void computeAngleRadius(__global float2* cp_sp, __global float* r, float maxRadius_width, float PI2_height, unsigned width, unsigned height)\n" +"{\n" +"unsigned gid = get_global_id(0);\n" +"if (gid < height)\n" +"{\n" +"float angle = gid * PI2_height;\n" +"float2 angle_tri=(float2)(cos(angle), sin(angle));\n" +"cp_sp[gid] = angle_tri;\n" +"}\n" +"if (gid < width)\n" +"{\n" +"r[gid] = maxRadius_width*gid;\n" +"}\n" +"}\n" +"__kernel void linearPolar(__global float* mx, __global float* my, __global float2* cp_sp, __global float* r, float cx, float cy, unsigned width, unsigned height)\n" +"{\n" +"__local float l_r[MEM_SIZE];\n" +"__local float2 l_double[MEM_SIZE];\n" +"unsigned rho = get_global_id(0);\n" +"unsigned phi = get_global_id(1);\n" +"unsigned local_0 = get_local_id(0);\n" +"unsigned local_1 = get_local_id(1);\n" +"if (local_1 == 0)\n" +"{\n" +"unsigned temp_phi=phi + local_0;\n" +"if (temp_phi < height)\n" +"{\n" +"l_double[local_0] = cp_sp[temp_phi];\n" +"}\n" +"}\n" +"if (local_1 == 1 )\n" +"{\n" +"if (rho < width)\n" +"{\n" +"l_r[local_0 ] = r[rho];\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (rho 0 ? 1 : -1;\n" +"return 0;\n" +"}\n" +"inline float normAcc_SQDIFF(float num, float denum)\n" +"{\n" +"if (fabs(num) < denum)\n" +"return num / denum;\n" +"if (fabs(num) < denum * 1.125f)\n" +"return num > 0 ? 
1 : -1;\n" +"return 1;\n" +"}\n" +"#define noconvert\n" +"#if cn == 1\n" +"#define convertToDT(value) (float)(value)\n" +"#elif cn == 2\n" +"#define convertToDT(value) (float)(value.x + value.y)\n" +"#elif cn == 3\n" +"#define convertToDT(value) (float)(value.x + value.y + value.z)\n" +"#elif cn == 4\n" +"#define convertToDT(value) (float)(value.x + value.y + value.z + value.w)\n" +"#else\n" +"#error \"cn should be 1-4\"\n" +"#endif\n" +"#ifdef CALC_SUM\n" +"__kernel void calcSum(__global const uchar * srcptr, int src_step, int src_offset,\n" +"int cols, int total, __global float * dst)\n" +"{\n" +"int lid = get_local_id(0), id = get_global_id(0);\n" +"__local WT localmem[WGS2_ALIGNED];\n" +"WT accumulator = (WT)(0), tmp;\n" +"for ( ; id < total; id += WGS)\n" +"{\n" +"int src_index = mad24(id / cols, src_step, mad24(id % cols, TSIZE, src_offset));\n" +"T src = loadpix(srcptr + src_index);\n" +"tmp = convertToWT(src);\n" +"accumulator = mad(tmp, tmp, accumulator);\n" +"}\n" +"if (lid < WGS2_ALIGNED)\n" +"localmem[lid] = accumulator;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (lid >= WGS2_ALIGNED && total >= WGS2_ALIGNED)\n" +"localmem[lid - WGS2_ALIGNED] += accumulator;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"for (int lsize = WGS2_ALIGNED >> 1; lsize > 0; lsize >>= 1)\n" +"{\n" +"if (lid < lsize)\n" +"{\n" +"int lid2 = lsize + lid;\n" +"localmem[lid] += localmem[lid2];\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"}\n" +"if (lid == 0)\n" +"dst[0] = convertToDT(localmem[0]);\n" +"}\n" +"#elif defined FIRST_CHANNEL\n" +"__kernel void extractFirstChannel( const __global uchar* img, int img_step, int img_offset,\n" +"__global uchar* res, int res_step, int res_offset, int rows, int cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1)*PIX_PER_WI_Y;\n" +"if(x < cols )\n" +"{\n" +"#pragma unroll\n" +"for (int cy=0; cy < PIX_PER_WI_Y && y < rows; ++cy, ++y)\n" +"{\n" +"T1 image = *(__global const T1*)(img + mad24(y, img_step, mad24(x, (int)sizeof(T1)*cn, img_offset)));;\n" +"int res_idx = mad24(y, res_step, mad24(x, (int)sizeof(float), res_offset));\n" +"*(__global float *)(res + res_idx) = image;\n" +"}\n" +"}\n" +"}\n" +"#elif defined CCORR\n" +"#if cn==1 && PIX_PER_WI_X==4\n" +"__kernel void matchTemplate_Naive_CCORR(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * templateptr, int template_step, int template_offset, int template_rows, int template_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x0 = get_global_id(0)*PIX_PER_WI_X;\n" +"int y = get_global_id(1);\n" +"if (y < dst_rows)\n" +"{\n" +"if (x0 + PIX_PER_WI_X <= dst_cols)\n" +"{\n" +"WT sum = (WT)(0);\n" +"int ind = mad24(y, src_step, mad24(x0, (int)sizeof(T1), src_offset));\n" +"__global const T1 * template = (__global const T1*)(templateptr + template_offset);\n" +"for (int i = 0; i < template_rows; ++i)\n" +"{\n" +"for (int j = 0; j < template_cols; ++j)\n" +"{\n" +"T temp = (T)(template[j]);\n" +"T src = vload4(0, (__global const T1*)(srcptr + ind + j*(int)sizeof(T1)));\n" +"sum = mad(convertToWT(src), convertToWT(temp), sum);\n" +"}\n" +"ind += src_step;\n" +"template = (__global const T1 *)((__global const uchar *)template + template_step);\n" +"}\n" +"T temp = (T)(template[0]);\n" +"int dst_idx = mad24(y, dst_step, mad24(x0, (int)sizeof(float), dst_offset));\n" +"*(__global float4 *)(dst + dst_idx) = convert_float4(sum);\n" +"}\n" +"else\n" +"{\n" +"WT1 sum [PIX_PER_WI_X];\n" +"#pragma unroll\n" +"for (int i=0; 
i < PIX_PER_WI_X; i++) sum[i] = 0;\n" +"__global const T1 * src = (__global const T1 *)(srcptr + mad24(y, src_step, mad24(x0, (int)sizeof(T1), src_offset)));\n" +"__global const T1 * template = (__global const T1 *)(templateptr + template_offset);\n" +"for (int i = 0; i < template_rows; ++i)\n" +"{\n" +"for (int j = 0; j < template_cols; ++j)\n" +"{\n" +"#pragma unroll\n" +"for (int cx=0, x = x0; cx < PIX_PER_WI_X && x < dst_cols; ++cx, ++x)\n" +"{\n" +"sum[cx] = mad(convertToWT1(src[j+cx]), convertToWT1(template[j]), sum[cx]);\n" +"}\n" +"}\n" +"src = (__global const T1 *)((__global const uchar *)src + src_step);\n" +"template = (__global const T1 *)((__global const uchar *)template + template_step);\n" +"}\n" +"#pragma unroll\n" +"for (int cx=0; cx < PIX_PER_WI_X && x0 < dst_cols; ++cx, ++x0)\n" +"{\n" +"int dst_idx = mad24(y, dst_step, mad24(x0, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) = convertToDT(sum[cx]);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#else\n" +"__kernel void matchTemplate_Naive_CCORR(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * templateptr, int template_step, int template_offset, int template_rows, int template_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"WT sum = (WT)(0);\n" +"for (int i = 0; i < template_rows; ++i)\n" +"{\n" +"for (int j = 0; j < template_cols; ++j)\n" +"{\n" +"T src = loadpix(srcptr + mad24(y+i, src_step, mad24(x+j, TSIZE, src_offset)));\n" +"T template = loadpix(templateptr + mad24(i, template_step, mad24(j, TSIZE, template_offset)));\n" +"sum = mad(convertToWT(src), convertToWT(template), sum);\n" +"}\n" +"}\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) = convertToDT(sum);\n" +"}\n" +"}\n" +"#endif\n" +"#elif defined CCORR_NORMED\n" +"__kernel void matchTemplate_CCORR_NORMED(__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, __global const float * template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"__global const float * sqsum = (__global const float *)(src_sqsums);\n" +"src_sqsums_step /= sizeof(float);\n" +"src_sqsums_offset /= sizeof(float);\n" +"float image_sqsum_ = (float)(sqsum[SQSUMS_PTR(template_cols, template_rows)] - sqsum[SQSUMS_PTR(template_cols, 0)] -\n" +"sqsum[SQSUMS_PTR(0, template_rows)] + sqsum[SQSUMS_PTR(0, 0)]);\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst + dst_idx);\n" +"*dstult = normAcc(*dstult, sqrt(image_sqsum_ * template_sqsum[0]));\n" +"}\n" +"}\n" +"#elif defined SQDIFF\n" +"__kernel void matchTemplate_Naive_SQDIFF(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global const uchar * templateptr, int template_step, int template_offset, int template_rows, int template_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"WT sum = (WT)(0), value;\n" +"for (int i = 0; i < template_rows; ++i)\n" +"{\n" +"for (int j = 0; j < 
template_cols; ++j)\n" +"{\n" +"T src = loadpix(srcptr + mad24(y+i, src_step, mad24(x+j, TSIZE, src_offset)));\n" +"T template = loadpix(templateptr + mad24(i, template_step, mad24(j, TSIZE, template_offset)));\n" +"value = convertToWT(src) - convertToWT(template);\n" +"sum = mad(value, value, sum);\n" +"}\n" +"}\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) = convertToDT(sum);\n" +"}\n" +"}\n" +"#elif defined SQDIFF_PREPARED\n" +"__kernel void matchTemplate_Prepared_SQDIFF(__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, __global const float * template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"src_sqsums_step /= sizeof(float);\n" +"src_sqsums_offset /= sizeof(float);\n" +"__global const float * sqsum = (__global const float *)(src_sqsums);\n" +"float image_sqsum_ = (float)(\n" +"(sqsum[SQSUMS_PTR(template_cols, template_rows)] - sqsum[SQSUMS_PTR(template_cols, 0)]) -\n" +"(sqsum[SQSUMS_PTR(0, template_rows)] - sqsum[SQSUMS_PTR(0, 0)]));\n" +"float template_sqsum_value = template_sqsum[0];\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst + dst_idx);\n" +"*dstult = image_sqsum_ - 2.0f * dstult[0] + template_sqsum_value;\n" +"}\n" +"}\n" +"#elif defined SQDIFF_NORMED\n" +"__kernel void matchTemplate_SQDIFF_NORMED(__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, __global const float * template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"src_sqsums_step /= sizeof(float);\n" +"src_sqsums_offset /= sizeof(float);\n" +"__global const float * sqsum = (__global const float *)(src_sqsums);\n" +"float image_sqsum_ = (float)(\n" +"(sqsum[SQSUMS_PTR(template_cols, template_rows)] - sqsum[SQSUMS_PTR(template_cols, 0)]) -\n" +"(sqsum[SQSUMS_PTR(0, template_rows)] - sqsum[SQSUMS_PTR(0, 0)]));\n" +"float template_sqsum_value = template_sqsum[0];\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst + dst_idx);\n" +"*dstult = normAcc_SQDIFF(image_sqsum_ - 2.0f * dstult[0] + template_sqsum_value, sqrt(image_sqsum_ * template_sqsum_value));\n" +"}\n" +"}\n" +"#elif defined CCOEFF\n" +"#if cn == 1\n" +"__kernel void matchTemplate_Prepared_CCOEFF(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, float template_sum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"__global const T* sum = (__global const T*)(src_sums + mad24(y, src_sums_step, mad24(x, (int)sizeof(T), src_sums_offset)));\n" +"int step = src_sums_step/(int)sizeof(T);\n" +"T image_sum = (T)(0), value;\n" +"value = (T)(sum[mad24(template_rows, step, template_cols)] - sum[mad24(template_rows, step, 0)] - sum[template_cols] + sum[0]);\n" +"image_sum = mad(value, template_sum , image_sum);\n" +"int dst_idx = mad24(y, 
dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) -= convertToDT(image_sum);\n" +"}\n" +"}\n" +"#elif cn==3\n" +"__kernel void matchTemplate_Prepared_CCOEFF(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, float4 template_sum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"T image_sum = (T)(0), value, temp_sum;\n" +"temp_sum.x = template_sum.x;\n" +"temp_sum.y = template_sum.y;\n" +"temp_sum.z = template_sum.z;\n" +"value = vload3(0, (__global const T1 *)(src_sums + SUMS(template_cols, template_rows)));\n" +"value -= vload3(0, (__global const T1 *)(src_sums + SUMS(0, template_rows)));\n" +"value -= vload3(0, (__global const T1 *)(src_sums + SUMS(template_cols, 0)));\n" +"value += vload3(0, (__global const T1 *)(src_sums + SUMS(0, 0)));\n" +"image_sum = mad(value, temp_sum , 0);\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) -= convertToDT(image_sum);\n" +"}\n" +"}\n" +"#elif (cn==2 || cn==4)\n" +"__kernel void matchTemplate_Prepared_CCOEFF(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int template_rows, int template_cols, float4 template_sum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"__global const T* sum = (__global const T*)(src_sums + mad24(y, src_sums_step, mad24(x, (int)sizeof(T), src_sums_offset)));\n" +"int step = src_sums_step/(int)sizeof(T);\n" +"T image_sum = (T)(0), value, temp_sum;\n" +"#if cn==2\n" +"temp_sum.x = template_sum.x;\n" +"temp_sum.y = template_sum.y;\n" +"#else\n" +"temp_sum = template_sum;\n" +"#endif\n" +"value = (sum[mad24(template_rows, step, template_cols)] - sum[mad24(template_rows, step, 0)] - sum[template_cols] + sum[0]);\n" +"image_sum = mad(value, temp_sum , image_sum);\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"*(__global float *)(dst + dst_idx) -= convertToDT(image_sum);\n" +"}\n" +"}\n" +"#else\n" +"#error \"cn should be 1-4\"\n" +"#endif\n" +"#elif defined CCOEFF_NORMED\n" +"#if cn == 1\n" +"__kernel void matchTemplate_CCOEFF_NORMED(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int t_rows, int t_cols, float weight, float template_sum, float template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"float sum_[2];\n" +"float sqsum_[2];\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int step = src_sums_step/(int)sizeof(T);\n" +"__global const T* sum = (__global const T*)(src_sums + mad24(y, src_sums_step, mad24(x, (int)sizeof(T), src_sums_offset)));\n" +"__global const T* sqsum = (__global const T*)(src_sqsums + mad24(y, src_sqsums_step, mad24(x, (int)sizeof(T), src_sqsums_offset)));\n" +"T value_sum = sum[mad24(t_rows, step, t_cols)] - sum[mad24(t_rows, step, 0)] - sum[t_cols] + sum[0];\n" +"T value_sqsum = sqsum[mad24(t_rows, step, t_cols)] - sqsum[mad24(t_rows, step, 0)] - sqsum[t_cols] + sqsum[0];\n" +"float num = convertToDT(mad(value_sum, 
template_sum, (float)0));\n" +"value_sqsum -= weight * value_sum * value_sum;\n" +"float denum = sqrt(mad(template_sqsum, convertToDT(value_sqsum), (float)0));\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst+dst_idx);\n" +"*dstult = normAcc((*dstult) - num, denum);\n" +"}\n" +"}\n" +"#elif cn==3\n" +"__kernel void matchTemplate_CCOEFF_NORMED(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int t_rows, int t_cols, float weight, float4 template_sum, float template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int step = src_sums_step/(int)sizeof(T);\n" +"T temp_sum, value_sum, value_sqsum;\n" +"temp_sum.x = template_sum.x;\n" +"temp_sum.y = template_sum.y;\n" +"temp_sum.z = template_sum.z;\n" +"value_sum = vload3(0, (__global const T1 *)(src_sums + SUMS(t_cols, t_rows)));\n" +"value_sum -= vload3(0, (__global const T1 *)(src_sums + SUMS(0, t_rows)));\n" +"value_sum -= vload3(0, (__global const T1 *)(src_sums + SUMS(t_cols, 0)));\n" +"value_sum += vload3(0, (__global const T1 *)(src_sums + SUMS(0, 0)));\n" +"value_sqsum = vload3(0, (__global const T1 *)(src_sqsums + SQ_SUMS(t_cols, t_rows)));\n" +"value_sqsum -= vload3(0, (__global const T1 *)(src_sqsums + SQ_SUMS(0, t_rows)));\n" +"value_sqsum -= vload3(0, (__global const T1 *)(src_sqsums + SQ_SUMS(t_cols, 0)));\n" +"value_sqsum += vload3(0, (__global const T1 *)(src_sqsums + SQ_SUMS(0, 0)));\n" +"float num = convertToDT(mad(value_sum, temp_sum, 0));\n" +"value_sqsum -= weight * value_sum * value_sum;\n" +"float denum = sqrt(mad(template_sqsum, convertToDT(value_sqsum), (float)0));\n" +"int dst_idx = mad24(y, dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst+dst_idx);\n" +"*dstult = normAcc((*dstult) - num, denum);\n" +"}\n" +"}\n" +"#elif (cn==2 || cn==4)\n" +"__kernel void matchTemplate_CCOEFF_NORMED(__global const uchar * src_sums, int src_sums_step, int src_sums_offset,\n" +"__global const uchar * src_sqsums, int src_sqsums_step, int src_sqsums_offset,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"int t_rows, int t_cols, float weight, float4 template_sum, float template_sqsum)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int step = src_sums_step/(int)sizeof(T);\n" +"T temp_sum;\n" +"__global const T* sum = (__global const T*)(src_sums + mad24(y, src_sums_step, mad24(x, (int)sizeof(T), src_sums_offset)));\n" +"__global const T* sqsum = (__global const T*)(src_sqsums + mad24(y, src_sqsums_step, mad24(x, (int)sizeof(T), src_sqsums_offset)));\n" +"T value_sum = sum[mad24(t_rows, step, t_cols)] - sum[mad24(t_rows, step, 0)] - sum[t_cols] + sum[0];\n" +"T value_sqsum = sqsum[mad24(t_rows, step, t_cols)] - sqsum[mad24(t_rows, step, 0)] - sqsum[t_cols] + sqsum[0];\n" +"#if cn==2\n" +"temp_sum.x = template_sum.x;\n" +"temp_sum.y = template_sum.y;\n" +"#else\n" +"temp_sum = template_sum;\n" +"#endif\n" +"float num = convertToDT(mad(value_sum, temp_sum, 0));\n" +"value_sqsum -= weight * value_sum * value_sum;\n" +"float denum = sqrt(mad(template_sqsum, convertToDT(value_sqsum), (float)0));\n" +"int dst_idx = mad24(y, 
dst_step, mad24(x, (int)sizeof(float), dst_offset));\n" +"__global float * dstult = (__global float *)(dst+dst_idx);\n" +"*dstult = normAcc((*dstult) - num, denum);\n" +"}\n" +"}\n" +"#else\n" +"#error \"cn should be 1-4\"\n" +"#endif\n" +"#endif\n" +, "b1f65f1ba4717552e461b432297031d6", NULL}; +struct cv::ocl::internal::ProgramEntry medianFilter_oclsrc={moduleName, "medianFilter", +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T *)(addr)\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE (int)sizeof(T1) * cn\n" +"#endif\n" +"#define OP(a,b) { mid=a; a=min(a,b); b=max(mid,b);}\n" +"#ifdef USE_4OPT\n" +"#if cn == 1\n" +"#define LOAD4(val, offs) (val) = vload4(0, (__global T1 *)(srcptr + src_index + (offs)))\n" +"#define STORE4(val, offs) vstore4((val), 0, (__global T1 *)(dstptr + (offs)))\n" +"#define SHUFFLE4_3(src0, src1, src2, dst0, dst1, dst2) { dst1 = src1; \\\n" +"dst0 = (T4)(src0, dst1.xyz); \\\n" +"dst2 = (T4)(dst1.yzw, src2); }\n" +"#define SHUFFLE4_5(src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, dst4) { dst2 = src2; \\\n" +"dst0 = (T4)(src0, src1, dst2.xy); \\\n" +"dst1 = (T4)(src1, dst2.xyz); \\\n" +"dst3 = (T4)(dst2.yzw, src3); \\\n" +"dst4 = (T4)(dst2.zw, src3, src4); }\n" +"#elif cn == 2\n" +"#define LOAD4(val, offs) (val) = vload8(0, (__global T1 *)(srcptr + src_index + (offs)))\n" +"#define STORE4(val, offs) vstore8((val), 0, (__global T1 *)(dstptr + (offs)))\n" +"#define SHUFFLE4_3(src0, src1, src2, dst0, dst1, dst2) { dst1 = src1; \\\n" +"dst0 = (T4)(src0, dst1.s012345); \\\n" +"dst2 = (T4)(dst1.s234567, src2); }\n" +"#define SHUFFLE4_5(src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, dst4) { dst2 = src2; \\\n" +"dst0 = (T4)(src0, src1, dst2.s0123); \\\n" +"dst1 = (T4)(src1, dst2.s012345); \\\n" +"dst3 = (T4)(dst2.s234567, src3); \\\n" +"dst4 = (T4)(dst2.s4567, src3, src4); }\n" +"#elif cn == 4\n" +"#define LOAD4(val, offs) (val) = vload16(0, (__global T1 *)(srcptr + src_index + (offs)))\n" +"#define STORE4(val, offs) vstore16((val), 0, (__global T1 *)(dstptr + (offs)))\n" +"#define SHUFFLE4_3(src0, src1, src2, dst0, dst1, dst2) { dst1 = src1; \\\n" +"dst0 = (T4)(src0, dst1.s0123456789ab ); \\\n" +"dst2 = (T4)(dst1.s456789abcdef, src2); }\n" +"#define SHUFFLE4_5(src0, src1, src2, src3, src4, dst0, dst1, dst2, dst3, dst4) { dst2 = src2; \\\n" +"dst0 = (T4)(src0, src1, dst2.s01234567); \\\n" +"dst1 = (T4)(src1, dst2.s0123456789ab); \\\n" +"dst3 = (T4)(dst2.s456789abcdef, src3); \\\n" +"dst4 = (T4)(dst2.s89abcdef, src3, src4); }\n" +"#endif\n" +"__kernel void medianFilter3_u(__global const uchar* srcptr, int srcStep, int srcOffset,\n" +"__global uchar* dstptr, int dstStep, int dstOffset,\n" +"int rows, int cols)\n" +"{\n" +"int gx= get_global_id(0) << 2;\n" +"int gy= get_global_id(1) << 2;\n" +"if( gy >= rows || gx >= cols)\n" +"return;\n" +"T c0; T4 c1; T c2;\n" +"T c3; T4 c4; T c5;\n" +"T c6; T4 c7; T c8;\n" +"int x_left = mad24(max(gx-1, 0), TSIZE, srcOffset);\n" +"int x_central = mad24(gx, TSIZE, srcOffset);\n" +"int x_right = mad24(min(gx+4, cols-1), TSIZE, srcOffset);\n" +"int xdst = mad24(gx, TSIZE, dstOffset);\n" +"int src_index = max(gy-1, 0)*srcStep;\n" +"c0 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c1, x_central);\n" +"c2 = *(__global T *)(srcptr + src_index + x_right);\n" +"src_index = gy*srcStep;\n" +"c3 = 
*(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c4, x_central);\n" +"c5 = *(__global T *)(srcptr + src_index + x_right);\n" +"#define ITER3(k) { \\\n" +"src_index = min(gy+k+1, rows-1)*srcStep; \\\n" +"c6 = *(__global T *)(srcptr + src_index + x_left); \\\n" +"LOAD4(c7, x_central); \\\n" +"c8 = *(__global T *)(srcptr + src_index + x_right); \\\n" +"T4 p0, p1, p2, p3, p4, p5, p6, p7, p8; \\\n" +"SHUFFLE4_3(c0, c1, c2, p0, p1, p2); \\\n" +"SHUFFLE4_3(c3, c4, c5, p3, p4, p5); \\\n" +"SHUFFLE4_3(c6, c7, c8, p6, p7, p8); \\\n" +"T4 mid; \\\n" +"OP(p1, p2); OP(p4, p5); OP(p7, p8); OP(p0, p1); \\\n" +"OP(p3, p4); OP(p6, p7); OP(p1, p2); OP(p4, p5); \\\n" +"OP(p7, p8); OP(p0, p3); OP(p5, p8); OP(p4, p7); \\\n" +"OP(p3, p6); OP(p1, p4); OP(p2, p5); OP(p4, p7); \\\n" +"OP(p4, p2); OP(p6, p4); OP(p4, p2); \\\n" +"int dst_index = mad24( gy+k, dstStep, xdst); \\\n" +"STORE4(p4, dst_index); \\\n" +"c0 = c3; c1 = c4; c2 = c5; \\\n" +"c3 = c6; c4 = c7; c5 = c8; \\\n" +"}\n" +"ITER3(0);\n" +"ITER3(1);\n" +"ITER3(2);\n" +"ITER3(3);\n" +"}\n" +"__kernel void medianFilter5_u(__global const uchar* srcptr, int srcStep, int srcOffset,\n" +"__global uchar* dstptr, int dstStep, int dstOffset,\n" +"int rows, int cols)\n" +"{\n" +"int gx= get_global_id(0) << 2;\n" +"int gy= get_global_id(1) << 2;\n" +"if( gy >= rows || gx >= cols)\n" +"return;\n" +"T c0; T c1; T4 c2; T c3; T c4;\n" +"T c5; T c6; T4 c7; T c8; T c9;\n" +"T c10; T c11; T4 c12; T c13; T c14;\n" +"T c15; T c16; T4 c17; T c18; T c19;\n" +"T c20; T c21; T4 c22; T c23; T c24;\n" +"int x_leftmost = mad24(max(gx-2, 0), TSIZE, srcOffset);\n" +"int x_left = mad24(max(gx-1, 0), TSIZE, srcOffset);\n" +"int x_central = mad24(gx, TSIZE, srcOffset);\n" +"int x_right = mad24(min(gx+4, cols-1), TSIZE, srcOffset);\n" +"int x_rightmost= mad24(min(gx+5, cols-1), TSIZE, srcOffset);\n" +"int xdst = mad24(gx, TSIZE, dstOffset);\n" +"int src_index = max(gy-2, 0)*srcStep;\n" +"c0 = *(__global T *)(srcptr + src_index + x_leftmost);\n" +"c1 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c2, x_central);\n" +"c3 = *(__global T *)(srcptr + src_index + x_right);\n" +"c4 = *(__global T *)(srcptr + src_index + x_rightmost);\n" +"src_index = max(gy-1, 0)*srcStep;\n" +"c5 = *(__global T *)(srcptr + src_index + x_leftmost);\n" +"c6 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c7, x_central);\n" +"c8 = *(__global T *)(srcptr + src_index + x_right);\n" +"c9 = *(__global T *)(srcptr + src_index + x_rightmost);\n" +"src_index = gy*srcStep;\n" +"c10 = *(__global T *)(srcptr + src_index + x_leftmost);\n" +"c11 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c12, x_central);\n" +"c13 = *(__global T *)(srcptr + src_index + x_right);\n" +"c14 = *(__global T *)(srcptr + src_index + x_rightmost);\n" +"src_index = (gy+1)*srcStep;\n" +"c15 = *(__global T *)(srcptr + src_index + x_leftmost);\n" +"c16 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c17, x_central);\n" +"c18 = *(__global T *)(srcptr + src_index + x_right);\n" +"c19 = *(__global T *)(srcptr + src_index + x_rightmost);\n" +"for(int k = 0; k < 4; k++)\n" +"{\n" +"src_index = min(gy+k+2, rows-1) * srcStep;\n" +"c20 = *(__global T *)(srcptr + src_index + x_leftmost);\n" +"c21 = *(__global T *)(srcptr + src_index + x_left);\n" +"LOAD4(c22, x_central);\n" +"c23 = *(__global T *)(srcptr + src_index + x_right);\n" +"c24 = *(__global T *)(srcptr + src_index + x_rightmost);\n" +"T4 p0, p1, p2, p3, p4,\n" +"p5, p6, p7, p8, p9,\n" +"p10, p11, p12, p13, p14,\n" +"p15, p16, p17, p18, p19,\n" 
+"p20, p21, p22, p23, p24;\n" +"SHUFFLE4_5(c0, c1, c2, c3, c4, p0, p1, p2, p3, p4);\n" +"SHUFFLE4_5(c5, c6, c7, c8, c9, p5, p6, p7, p8, p9);\n" +"SHUFFLE4_5(c10, c11, c12, c13, c14, p10, p11, p12, p13, p14);\n" +"SHUFFLE4_5(c15, c16, c17, c18, c19, p15, p16, p17, p18, p19);\n" +"SHUFFLE4_5(c20, c21, c22, c23, c24, p20, p21, p22, p23, p24);\n" +"T4 mid;\n" +"OP(p1, p2); OP(p0, p1); OP(p1, p2); OP(p4, p5); OP(p3, p4);\n" +"OP(p4, p5); OP(p0, p3); OP(p2, p5); OP(p2, p3); OP(p1, p4);\n" +"OP(p1, p2); OP(p3, p4); OP(p7, p8); OP(p6, p7); OP(p7, p8);\n" +"OP(p10, p11); OP(p9, p10); OP(p10, p11); OP(p6, p9); OP(p8, p11);\n" +"OP(p8, p9); OP(p7, p10); OP(p7, p8); OP(p9, p10); OP(p0, p6);\n" +"OP(p4, p10); OP(p4, p6); OP(p2, p8); OP(p2, p4); OP(p6, p8);\n" +"OP(p1, p7); OP(p5, p11); OP(p5, p7); OP(p3, p9); OP(p3, p5);\n" +"OP(p7, p9); OP(p1, p2); OP(p3, p4); OP(p5, p6); OP(p7, p8);\n" +"OP(p9, p10); OP(p13, p14); OP(p12, p13); OP(p13, p14); OP(p16, p17);\n" +"OP(p15, p16); OP(p16, p17); OP(p12, p15); OP(p14, p17); OP(p14, p15);\n" +"OP(p13, p16); OP(p13, p14); OP(p15, p16); OP(p19, p20); OP(p18, p19);\n" +"OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p21, p23); OP(p22, p24);\n" +"OP(p22, p23); OP(p18, p21); OP(p20, p23); OP(p20, p21); OP(p19, p22);\n" +"OP(p22, p24); OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p12, p18);\n" +"OP(p16, p22); OP(p16, p18); OP(p14, p20); OP(p20, p24); OP(p14, p16);\n" +"OP(p18, p20); OP(p22, p24); OP(p13, p19); OP(p17, p23); OP(p17, p19);\n" +"OP(p15, p21); OP(p15, p17); OP(p19, p21); OP(p13, p14); OP(p15, p16);\n" +"OP(p17, p18); OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p0, p12);\n" +"OP(p8, p20); OP(p8, p12); OP(p4, p16); OP(p16, p24); OP(p12, p16);\n" +"OP(p2, p14); OP(p10, p22); OP(p10, p14); OP(p6, p18); OP(p6, p10);\n" +"OP(p10, p12); OP(p1, p13); OP(p9, p21); OP(p9, p13); OP(p5, p17);\n" +"OP(p13, p17); OP(p3, p15); OP(p11, p23); OP(p11, p15); OP(p7, p19);\n" +"OP(p7, p11); OP(p11, p13); OP(p11, p12);\n" +"int dst_index = mad24( gy+k, dstStep, xdst);\n" +"STORE4(p12, dst_index);\n" +"c0=c5; c1=c6; c2=c7; c3=c8; c4=c9;\n" +"c5=c10; c6=c11; c7=c12; c8=c13; c9=c14;\n" +"c10=c15; c11=c16; c12=c17; c13=c18; c14=c19;\n" +"c15=c20; c16=c21; c17=c22; c18=c23; c19=c24;\n" +"}\n" +"}\n" +"#endif\n" +"__kernel void medianFilter3(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"__local T data[18][18];\n" +"int x = get_local_id(0);\n" +"int y = get_local_id(1);\n" +"int gx = get_global_id(0);\n" +"int gy = get_global_id(1);\n" +"int dx = gx - x - 1;\n" +"int dy = gy - y - 1;\n" +"int id = min(mad24(x, 16, y), 9*18-1);\n" +"int dr = id / 18;\n" +"int dc = id % 18;\n" +"int c = clamp(dx + dc, 0, dst_cols - 1);\n" +"int r = clamp(dy + dr, 0, dst_rows - 1);\n" +"int index1 = mad24(r, src_step, mad24(c, TSIZE, src_offset));\n" +"r = clamp(dy + dr + 9, 0, dst_rows - 1);\n" +"int index9 = mad24(r, src_step, mad24(c, TSIZE, src_offset));\n" +"data[dr][dc] = loadpix(srcptr + index1);\n" +"data[dr+9][dc] = loadpix(srcptr + index9);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"T p0 = data[y][x], p1 = data[y][(x+1)], p2 = data[y][(x+2)];\n" +"T p3 = data[y+1][x], p4 = data[y+1][(x+1)], p5 = data[y+1][(x+2)];\n" +"T p6 = data[y+2][x], p7 = data[y+2][(x+1)], p8 = data[y+2][(x+2)];\n" +"T mid;\n" +"OP(p1, p2); OP(p4, p5); OP(p7, p8); OP(p0, p1);\n" +"OP(p3, p4); OP(p6, p7); OP(p1, p2); OP(p4, p5);\n" +"OP(p7, p8); OP(p0, p3); OP(p5, p8); OP(p4, p7);\n" +"OP(p3, p6); OP(p1, p4); 
OP(p2, p5); OP(p4, p7);\n" +"OP(p4, p2); OP(p6, p4); OP(p4, p2);\n" +"int dst_index = mad24( gy, dst_step, mad24(gx, TSIZE, dst_offset));\n" +"if (gy < dst_rows && gx < dst_cols)\n" +"storepix(p4, dstptr + dst_index);\n" +"}\n" +"__kernel void medianFilter5(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"__local T data[20][20];\n" +"int x = get_local_id(0);\n" +"int y = get_local_id(1);\n" +"int gx = get_global_id(0);\n" +"int gy = get_global_id(1);\n" +"int dx = gx - x - 2;\n" +"int dy = gy - y - 2;\n" +"int id = min(mad24(x, 16, y), 10*20-1);\n" +"int dr = id / 20;\n" +"int dc = id % 20;\n" +"int c = clamp(dx + dc, 0, dst_cols - 1);\n" +"int r = clamp(dy + dr, 0, dst_rows - 1);\n" +"int index1 = mad24(r, src_step, mad24(c, TSIZE, src_offset));\n" +"r = clamp(dy + dr + 10, 0, dst_rows - 1);\n" +"int index10 = mad24(r, src_step, mad24(c, TSIZE, src_offset));\n" +"data[dr][dc] = loadpix(srcptr + index1);\n" +"data[dr+10][dc] = loadpix(srcptr + index10);\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"T p0 = data[y][x], p1 = data[y][x+1], p2 = data[y][x+2], p3 = data[y][x+3], p4 = data[y][x+4];\n" +"T p5 = data[y+1][x], p6 = data[y+1][x+1], p7 = data[y+1][x+2], p8 = data[y+1][x+3], p9 = data[y+1][x+4];\n" +"T p10 = data[y+2][x], p11 = data[y+2][x+1], p12 = data[y+2][x+2], p13 = data[y+2][x+3], p14 = data[y+2][x+4];\n" +"T p15 = data[y+3][x], p16 = data[y+3][x+1], p17 = data[y+3][x+2], p18 = data[y+3][x+3], p19 = data[y+3][x+4];\n" +"T p20 = data[y+4][x], p21 = data[y+4][x+1], p22 = data[y+4][x+2], p23 = data[y+4][x+3], p24 = data[y+4][x+4];\n" +"T mid;\n" +"OP(p1, p2); OP(p0, p1); OP(p1, p2); OP(p4, p5); OP(p3, p4);\n" +"OP(p4, p5); OP(p0, p3); OP(p2, p5); OP(p2, p3); OP(p1, p4);\n" +"OP(p1, p2); OP(p3, p4); OP(p7, p8); OP(p6, p7); OP(p7, p8);\n" +"OP(p10, p11); OP(p9, p10); OP(p10, p11); OP(p6, p9); OP(p8, p11);\n" +"OP(p8, p9); OP(p7, p10); OP(p7, p8); OP(p9, p10); OP(p0, p6);\n" +"OP(p4, p10); OP(p4, p6); OP(p2, p8); OP(p2, p4); OP(p6, p8);\n" +"OP(p1, p7); OP(p5, p11); OP(p5, p7); OP(p3, p9); OP(p3, p5);\n" +"OP(p7, p9); OP(p1, p2); OP(p3, p4); OP(p5, p6); OP(p7, p8);\n" +"OP(p9, p10); OP(p13, p14); OP(p12, p13); OP(p13, p14); OP(p16, p17);\n" +"OP(p15, p16); OP(p16, p17); OP(p12, p15); OP(p14, p17); OP(p14, p15);\n" +"OP(p13, p16); OP(p13, p14); OP(p15, p16); OP(p19, p20); OP(p18, p19);\n" +"OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p21, p23); OP(p22, p24);\n" +"OP(p22, p23); OP(p18, p21); OP(p20, p23); OP(p20, p21); OP(p19, p22);\n" +"OP(p22, p24); OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p12, p18);\n" +"OP(p16, p22); OP(p16, p18); OP(p14, p20); OP(p20, p24); OP(p14, p16);\n" +"OP(p18, p20); OP(p22, p24); OP(p13, p19); OP(p17, p23); OP(p17, p19);\n" +"OP(p15, p21); OP(p15, p17); OP(p19, p21); OP(p13, p14); OP(p15, p16);\n" +"OP(p17, p18); OP(p19, p20); OP(p21, p22); OP(p23, p24); OP(p0, p12);\n" +"OP(p8, p20); OP(p8, p12); OP(p4, p16); OP(p16, p24); OP(p12, p16);\n" +"OP(p2, p14); OP(p10, p22); OP(p10, p14); OP(p6, p18); OP(p6, p10);\n" +"OP(p10, p12); OP(p1, p13); OP(p9, p21); OP(p9, p13); OP(p5, p17);\n" +"OP(p13, p17); OP(p3, p15); OP(p11, p23); OP(p11, p15); OP(p7, p19);\n" +"OP(p7, p11); OP(p11, p13); OP(p11, p12);\n" +"int dst_index = mad24(gy, dst_step, mad24(gx, TSIZE, dst_offset));\n" +"if (gy < dst_rows && gx < dst_cols)\n" +"storepix(p12, dstptr + dst_index);\n" +"}\n" +, "f082457348bfbcb2e2de3014f46093a8", NULL}; +struct cv::ocl::internal::ProgramEntry 
moments_oclsrc={moduleName, "moments", +"#if TILE_SIZE != 32\n" +"#error \"TILE SIZE should be 32\"\n" +"#endif\n" +"__kernel void moments(__global const uchar* src, int src_step, int src_offset,\n" +"int src_rows, int src_cols, __global int* mom0, int xtiles)\n" +"{\n" +"int x0 = get_global_id(0);\n" +"int y0 = get_group_id(1);\n" +"int x, y = get_local_id(1);\n" +"int x_min = x0*TILE_SIZE;\n" +"int ypix = y0*TILE_SIZE + y;\n" +"__local int mom[TILE_SIZE][10];\n" +"if (x_min < src_cols && y0*TILE_SIZE < src_rows)\n" +"{\n" +"if (ypix < src_rows)\n" +"{\n" +"int x_max = min(src_cols - x_min, TILE_SIZE);\n" +"__global const uchar* ptr = src + src_offset + ypix*src_step + x_min;\n" +"int4 S = (int4)(0, 0, 0, 0), p;\n" +"#define SUM_ELEM(elem, ofs) \\\n" +"(int4)(1, (ofs), (ofs)*(ofs), (ofs)*(ofs)*(ofs))*elem\n" +"x = x_max & -4;\n" +"if (x_max >= 4)\n" +"{\n" +"p = convert_int4(vload4(0, ptr));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, 0, 0, 0) + (int4)(p.s1, p.s1, p.s1, p.s1) +\n" +"(int4)(p.s2, p.s2 * 2, p.s2 * 4, p.s2 * 8) + (int4)(p.s3, p.s3 * 3, p.s3 * 9, p.s3 * 27);\n" +"if (x_max >= 8)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 4));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 4, p.s0 * 16, p.s0 * 64) + (int4)(p.s1, p.s1 * 5, p.s1 * 25, p.s1 * 125) +\n" +"(int4)(p.s2, p.s2 * 6, p.s2 * 36, p.s2 * 216) + (int4)(p.s3, p.s3 * 7, p.s3 * 49, p.s3 * 343);\n" +"if (x_max >= 12)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 8));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 8, p.s0 * 64, p.s0 * 512) + (int4)(p.s1, p.s1 * 9, p.s1 * 81, p.s1 * 729) +\n" +"(int4)(p.s2, p.s2 * 10, p.s2 * 100, p.s2 * 1000) + (int4)(p.s3, p.s3 * 11, p.s3 * 121, p.s3 * 1331);\n" +"if (x_max >= 16)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 12));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 12, p.s0 * 144, p.s0 * 1728) + (int4)(p.s1, p.s1 * 13, p.s1 * 169, p.s1 * 2197) +\n" +"(int4)(p.s2, p.s2 * 14, p.s2 * 196, p.s2 * 2744) + (int4)(p.s3, p.s3 * 15, p.s3 * 225, p.s3 * 3375);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"if (x_max >= 20)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 16));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 16, p.s0 * 256, p.s0 * 4096) + (int4)(p.s1, p.s1 * 17, p.s1 * 289, p.s1 * 4913) +\n" +"(int4)(p.s2, p.s2 * 18, p.s2 * 324, p.s2 * 5832) + (int4)(p.s3, p.s3 * 19, p.s3 * 361, p.s3 * 6859);\n" +"if (x_max >= 24)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 20));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 20, p.s0 * 400, p.s0 * 8000) + (int4)(p.s1, p.s1 * 21, p.s1 * 441, p.s1 * 9261) +\n" +"(int4)(p.s2, p.s2 * 22, p.s2 * 484, p.s2 * 10648) + (int4)(p.s3, p.s3 * 23, p.s3 * 529, p.s3 * 12167);\n" +"if (x_max >= 28)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 24));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 24, p.s0 * 576, p.s0 * 13824) + (int4)(p.s1, p.s1 * 25, p.s1 * 625, p.s1 * 15625) +\n" +"(int4)(p.s2, p.s2 * 26, p.s2 * 676, p.s2 * 17576) + (int4)(p.s3, p.s3 * 27, p.s3 * 729, p.s3 * 19683);\n" +"if (x_max >= 32)\n" +"{\n" +"p = convert_int4(vload4(0, ptr + 28));\n" +"#ifdef OP_MOMENTS_BINARY\n" +"p = min(p, 1);\n" +"#endif\n" +"S += (int4)(p.s0, p.s0 * 28, p.s0 * 784, p.s0 * 21952) + (int4)(p.s1, p.s1 * 29, p.s1 * 841, p.s1 * 24389) +\n" +"(int4)(p.s2, p.s2 * 30, p.s2 * 900, p.s2 
* 27000) + (int4)(p.s3, p.s3 * 31, p.s3 * 961, p.s3 * 29791);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"if (x < x_max)\n" +"{\n" +"int ps = ptr[x];\n" +"#ifdef OP_MOMENTS_BINARY\n" +"ps = min(ps, 1);\n" +"#endif\n" +"S += SUM_ELEM(ps, x);\n" +"if (x + 1 < x_max)\n" +"{\n" +"ps = ptr[x + 1];\n" +"#ifdef OP_MOMENTS_BINARY\n" +"ps = min(ps, 1);\n" +"#endif\n" +"S += SUM_ELEM(ps, x + 1);\n" +"if (x + 2 < x_max)\n" +"{\n" +"ps = ptr[x + 2];\n" +"#ifdef OP_MOMENTS_BINARY\n" +"ps = min(ps, 1);\n" +"#endif\n" +"S += SUM_ELEM(ps, x + 2);\n" +"}\n" +"}\n" +"}\n" +"int sy = y*y;\n" +"mom[y][0] = S.s0;\n" +"mom[y][1] = S.s1;\n" +"mom[y][2] = y*S.s0;\n" +"mom[y][3] = S.s2;\n" +"mom[y][4] = y*S.s1;\n" +"mom[y][5] = sy*S.s0;\n" +"mom[y][6] = S.s3;\n" +"mom[y][7] = y*S.s2;\n" +"mom[y][8] = sy*S.s1;\n" +"mom[y][9] = y*sy*S.s0;\n" +"}\n" +"else\n" +"mom[y][0] = mom[y][1] = mom[y][2] = mom[y][3] = mom[y][4] =\n" +"mom[y][5] = mom[y][6] = mom[y][7] = mom[y][8] = mom[y][9] = 0;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#define REDUCE(d) \\\n" +"if (y < d) \\\n" +"{ \\\n" +"mom[y][0] += mom[y + d][0]; \\\n" +"mom[y][1] += mom[y + d][1]; \\\n" +"mom[y][2] += mom[y + d][2]; \\\n" +"mom[y][3] += mom[y + d][3]; \\\n" +"mom[y][4] += mom[y + d][4]; \\\n" +"mom[y][5] += mom[y + d][5]; \\\n" +"mom[y][6] += mom[y + d][6]; \\\n" +"mom[y][7] += mom[y + d][7]; \\\n" +"mom[y][8] += mom[y + d][8]; \\\n" +"mom[y][9] += mom[y + d][9]; \\\n" +"} \\\n" +"barrier(CLK_LOCAL_MEM_FENCE)\n" +"REDUCE(16);\n" +"REDUCE(8);\n" +"REDUCE(4);\n" +"REDUCE(2);\n" +"if (y < 10)\n" +"{\n" +"__global int* momout = mom0 + (y0*xtiles + x0) * 10;\n" +"momout[y] = mom[0][y] + mom[1][y];\n" +"}\n" +"}\n" +"}\n" +, "1d0545282b5860ed7eeeb6860fa9edc3", NULL}; +struct cv::ocl::internal::ProgramEntry morph_oclsrc={moduleName, "morph", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T *)(addr)\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#endif\n" +"#ifdef DEPTH_0\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL UCHAR_MAX\n" +"#elif defined DEPTH_1\n" +"#define MIN_VAL SCHAR_MIN\n" +"#define MAX_VAL SCHAR_MAX\n" +"#elif defined DEPTH_2\n" +"#define MIN_VAL 0\n" +"#define MAX_VAL USHRT_MAX\n" +"#elif defined DEPTH_3\n" +"#define MIN_VAL SHRT_MIN\n" +"#define MAX_VAL SHRT_MAX\n" +"#elif defined DEPTH_4\n" +"#define MIN_VAL INT_MIN\n" +"#define MAX_VAL INT_MAX\n" +"#elif defined DEPTH_5\n" +"#define MIN_VAL (-FLT_MAX)\n" +"#define MAX_VAL FLT_MAX\n" +"#elif defined DEPTH_6\n" +"#define MIN_VAL (-DBL_MAX)\n" +"#define MAX_VAL DBL_MAX\n" +"#endif\n" +"#ifdef OP_ERODE\n" +"#define VAL MAX_VAL\n" +"#elif defined OP_DILATE\n" +"#define VAL MIN_VAL\n" +"#else\n" +"#error \"Unknown operation\"\n" +"#endif\n" +"#ifdef OP_ERODE\n" +"#if defined INTEL_DEVICE && defined DEPTH_0\n" +"#define MORPH_OP(A, B) ((A) < (B) ? 
(A) : (B))\n" +"#else\n" +"#define MORPH_OP(A, B) min((A), (B))\n" +"#endif\n" +"#endif\n" +"#ifdef OP_DILATE\n" +"#define MORPH_OP(A, B) max((A), (B))\n" +"#endif\n" +"#define PROCESS(y, x) \\\n" +"temp = LDS_DAT[mad24(l_y + y, width, l_x + x)]; \\\n" +"res = MORPH_OP(res, temp);\n" +"#define ELEM(i, l_edge, r_edge, elem1, elem2) (i) < (l_edge) | (i) >= (r_edge) ? (elem1) : (elem2)\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"#define EXTRA_PARAMS , __global const uchar * matptr, int mat_step, int mat_offset\n" +"#else\n" +"#define EXTRA_PARAMS\n" +"#endif\n" +"__kernel void morph(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int src_offset_x, int src_offset_y, int cols, int rows,\n" +"int src_whole_cols, int src_whole_rows EXTRA_PARAMS)\n" +"{\n" +"int gidx = get_global_id(0), gidy = get_global_id(1);\n" +"int l_x = get_local_id(0), l_y = get_local_id(1);\n" +"int x = get_group_id(0) * LSIZE0, y = get_group_id(1) * LSIZE1;\n" +"int start_x = x + src_offset_x - RADIUSX;\n" +"int width = mad24(RADIUSX, 2, LSIZE0 + 1);\n" +"int start_y = y + src_offset_y - RADIUSY;\n" +"int point1 = mad24(l_y, LSIZE0, l_x);\n" +"int point2 = point1 + LSIZE0 * LSIZE1;\n" +"int tl_x = point1 % width, tl_y = point1 / width;\n" +"int tl_x2 = point2 % width, tl_y2 = point2 / width;\n" +"int cur_x = start_x + tl_x, cur_y = start_y + tl_y;\n" +"int cur_x2 = start_x + tl_x2, cur_y2 = start_y + tl_y2;\n" +"int start_addr = mad24(cur_y, src_step, cur_x * TSIZE);\n" +"int start_addr2 = mad24(cur_y2, src_step, cur_x2 * TSIZE);\n" +"__local T LDS_DAT[2 * LSIZE1 * LSIZE0];\n" +"int end_addr = mad24(src_whole_rows - 1, src_step, src_whole_cols * TSIZE);\n" +"start_addr = start_addr < end_addr && start_addr > 0 ? start_addr : 0;\n" +"start_addr2 = start_addr2 < end_addr && start_addr2 > 0 ? 
start_addr2 : 0;\n" +"T temp0 = loadpix(srcptr + start_addr);\n" +"T temp1 = loadpix(srcptr + start_addr2);\n" +"temp0 = ELEM(cur_x, 0, src_whole_cols, (T)(VAL), temp0);\n" +"temp0 = ELEM(cur_y, 0, src_whole_rows, (T)(VAL), temp0);\n" +"temp1 = ELEM(cur_x2, 0, src_whole_cols, (T)(VAL), temp1);\n" +"temp1 = ELEM(cur_y2, 0, src_whole_rows, (T)(VAL), temp1);\n" +"LDS_DAT[point1] = temp0;\n" +"LDS_DAT[point2] = temp1;\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"if (gidx < cols && gidy < rows)\n" +"{\n" +"T res = (T)(VAL), temp;\n" +"PROCESS_ELEMS;\n" +"int dst_index = mad24(gidy, dst_step, mad24(gidx, TSIZE, dst_offset));\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"int mat_index = mad24(gidy, mat_step, mad24(gidx, TSIZE, mat_offset));\n" +"T value = loadpix(matptr + mat_index);\n" +"#ifdef OP_GRADIENT\n" +"storepix(convertToT(convertToWT(res) - convertToWT(value)), dstptr + dst_index);\n" +"#elif defined OP_TOPHAT\n" +"storepix(convertToT(convertToWT(value) - convertToWT(res)), dstptr + dst_index);\n" +"#elif defined OP_BLACKHAT\n" +"storepix(convertToT(convertToWT(res) - convertToWT(value)), dstptr + dst_index);\n" +"#endif\n" +"#else\n" +"storepix(res, dstptr + dst_index);\n" +"#endif\n" +"}\n" +"}\n" +, "232e712bff362e53c55027da6e1e1584", NULL}; +struct cv::ocl::internal::ProgramEntry morph3x3_oclsrc={moduleName, "morph3x3", +"#ifdef OP_ERODE\n" +"#define OP(m1, m2) min(m1, m2)\n" +"#define VAL UCHAR_MAX\n" +"#endif\n" +"#ifdef OP_DILATE\n" +"#define OP(m1, m2) max(m1, m2)\n" +"#define VAL 0\n" +"#endif\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"#define EXTRA_PARAMS , __global const uchar * matptr, int mat_step, int mat_offset\n" +"#else\n" +"#define EXTRA_PARAMS\n" +"#endif\n" +"#define PROCESS(_y, _x) \\\n" +"line_out[0] = OP(line_out[0], arr[_x + 3 * _y]); \\\n" +"line_out[1] = OP(line_out[1], arr[_x + 3 * (_y + 1)]);\n" +"#define PROCESS_ELEM \\\n" +"line_out[0] = (uchar16)VAL; \\\n" +"line_out[1] = (uchar16)VAL; \\\n" +"PROCESS_ELEM_\n" +"__kernel void morph3x3_8UC1_cols16_rows2(__global const uint* src, int src_step,\n" +"__global uint* dst, int dst_step,\n" +"int rows, int cols\n" +"EXTRA_PARAMS)\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"int ssx = 1, dsx = 1;\n" +"if ((block_x * 16) >= cols || y >= rows) return;\n" +"uchar a; uchar16 b; uchar c;\n" +"uchar d; uchar16 e; uchar f;\n" +"uchar g; uchar16 h; uchar i;\n" +"uchar j; uchar16 k; uchar l;\n" +"uchar16 line[4];\n" +"uchar16 line_out[2];\n" +"int src_index = block_x * 4 * ssx + (y - 1) * (src_step / 4);\n" +"line[0] = (y == 0) ? (uchar16)VAL: as_uchar16(vload4(0, src + src_index));\n" +"line[1] = as_uchar16(vload4(0, src + src_index + (src_step / 4)));\n" +"line[2] = as_uchar16(vload4(0, src + src_index + 2 * (src_step / 4)));\n" +"line[3] = (y == (rows - 2)) ? (uchar16)VAL: as_uchar16(vload4(0, src + src_index + 3 * (src_step / 4)));\n" +"__global uchar *src_p = (__global uchar *)src;\n" +"bool line_end = ((block_x + 1) * 16 == cols);\n" +"src_index = block_x * 16 * ssx + (y - 1) * src_step;\n" +"a = (block_x == 0 || y == 0) ? VAL : src_p[src_index - 1];\n" +"b = line[0];\n" +"c = (line_end || y == 0) ? VAL : src_p[src_index + 16];\n" +"d = (block_x == 0) ? VAL : src_p[src_index + src_step - 1];\n" +"e = line[1];\n" +"f = line_end ? VAL : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? VAL : src_p[src_index + 2 * src_step - 1];\n" +"h = line[2];\n" +"i = line_end ? 
VAL : src_p[src_index + 2 * src_step + 16];\n" +"j = (block_x == 0 || y == (rows - 2)) ? VAL : src_p[src_index + 3 * src_step - 1];\n" +"k = line[3];\n" +"l = (line_end || y == (rows - 2)) ? VAL : src_p[src_index + 3 * src_step + 16];\n" +"uchar16 arr[12];\n" +"arr[0] = (uchar16)(a, b.s01234567, b.s89ab, b.scde);\n" +"arr[1] = b;\n" +"arr[2] = (uchar16)(b.s12345678, b.s9abc, b.sdef, c);\n" +"arr[3] = (uchar16)(d, e.s01234567, e.s89ab, e.scde);\n" +"arr[4] = e;\n" +"arr[5] = (uchar16)(e.s12345678, e.s9abc, e.sdef, f);\n" +"arr[6] = (uchar16)(g, h.s01234567, h.s89ab, h.scde);\n" +"arr[7] = h;\n" +"arr[8] = (uchar16)(h.s12345678, h.s9abc, h.sdef, i);\n" +"arr[9] = (uchar16)(j, k.s01234567, k.s89ab, k.scde);\n" +"arr[10] = k;\n" +"arr[11] = (uchar16)(k.s12345678, k.s9abc, k.sdef, l);\n" +"PROCESS_ELEM;\n" +"int dst_index = block_x * 4 * dsx + y * (dst_step / 4);\n" +"#if defined OP_GRADIENT || defined OP_TOPHAT || defined OP_BLACKHAT\n" +"int mat_index = y * mat_step + block_x * 16 * ssx + mat_offset;\n" +"uchar16 val0 = vload16(0, matptr + mat_index);\n" +"uchar16 val1 = vload16(0, matptr + mat_index + mat_step);\n" +"#ifdef OP_GRADIENT\n" +"line_out[0] = convert_uchar16_sat(convert_int16(line_out[0]) - convert_int16(val0));\n" +"line_out[1] = convert_uchar16_sat(convert_int16(line_out[1]) - convert_int16(val1));\n" +"vstore4(as_uint4(line_out[0]), 0, dst + dst_index);\n" +"vstore4(as_uint4(line_out[1]), 0, dst + dst_index + (dst_step / 4));\n" +"#elif defined OP_TOPHAT\n" +"line_out[0] = convert_uchar16_sat(convert_int16(val0) - convert_int16(line_out[0]));\n" +"line_out[1] = convert_uchar16_sat(convert_int16(val1) - convert_int16(line_out[1]));\n" +"vstore4(as_uint4(line_out[0]), 0, dst + dst_index);\n" +"vstore4(as_uint4(line_out[1]), 0, dst + dst_index + (dst_step / 4));\n" +"#elif defined OP_BLACKHAT\n" +"line_out[0] = convert_uchar16_sat(convert_int16(line_out[0]) - convert_int16(val0));\n" +"line_out[1] = convert_uchar16_sat(convert_int16(line_out[1]) - convert_int16(val1));\n" +"vstore4(as_uint4(line_out[0]), 0, dst + dst_index);\n" +"vstore4(as_uint4(line_out[1]), 0, dst + dst_index + (dst_step / 4));\n" +"#endif\n" +"#else\n" +"vstore4(as_uint4(line_out[0]), 0, dst + dst_index);\n" +"vstore4(as_uint4(line_out[1]), 0, dst + dst_index + (dst_step / 4));\n" +"#endif\n" +"}\n" +, "69fd431a8819a531ceca227a115c0a07", NULL}; +struct cv::ocl::internal::ProgramEntry precornerdetect_oclsrc={moduleName, "precornerdetect", +"__kernel void preCornerDetect(__global const uchar * Dxptr, int dx_step, int dx_offset,\n" +"__global const uchar * Dyptr, int dy_step, int dy_offset,\n" +"__global const uchar * D2xptr, int d2x_step, int d2x_offset,\n" +"__global const uchar * D2yptr, int d2y_step, int d2y_offset,\n" +"__global const uchar * Dxyptr, int dxy_step, int dxy_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset,\n" +"int dst_rows, int dst_cols, float factor)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1);\n" +"if (x < dst_cols && y < dst_rows)\n" +"{\n" +"int dx_index = mad24(dx_step, y, (int)sizeof(float) * x + dx_offset);\n" +"int dy_index = mad24(dy_step, y, (int)sizeof(float) * x + dy_offset);\n" +"int d2x_index = mad24(d2x_step, y, (int)sizeof(float) * x + d2x_offset);\n" +"int d2y_index = mad24(d2y_step, y, (int)sizeof(float) * x + d2y_offset);\n" +"int dxy_index = mad24(dxy_step, y, (int)sizeof(float) * x + dxy_offset);\n" +"int dst_index = mad24(dst_step, y, (int)sizeof(float) * x + dst_offset);\n" +"float dx = *(__global const float *)(Dxptr + 
dx_index);\n" +"float dy = *(__global const float *)(Dyptr + dy_index);\n" +"float d2x = *(__global const float *)(D2xptr + d2x_index);\n" +"float d2y = *(__global const float *)(D2yptr + d2y_index);\n" +"float dxy = *(__global const float *)(Dxyptr + dxy_index);\n" +"__global float * dst = (__global float *)(dstptr + dst_index);\n" +"dst[0] = factor * (dx*dx*d2y + dy*dy*d2x - 2*dx*dy*dxy);\n" +"}\n" +"}\n" +, "14a94db70b88aa76ff8840f03f3ad556", NULL}; +struct cv::ocl::internal::ProgramEntry pyr_down_oclsrc={moduleName, "pyr_down", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(x, maxV) clamp((x), 0, (maxV)-1)\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(x, maxV) ( (x) + (maxV) ) % (maxV)\n" +"#elif defined BORDER_REFLECT\n" +"#define EXTRAPOLATE(x, maxV) clamp(min(((maxV)-1)*2-(x)+1, max((x),-(x)-1) ), 0, (maxV)-1)\n" +"#elif defined BORDER_REFLECT_101 || defined BORDER_REFLECT101\n" +"#define EXTRAPOLATE(x, maxV) clamp(min(((maxV)-1)*2-(x), max((x),-(x)) ), 0, (maxV)-1)\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = (val)\n" +"#define PIXSIZE ((int)sizeof(T))\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3((val), 0, (__global T1*)(addr))\n" +"#define PIXSIZE ((int)sizeof(T1)*3)\n" +"#endif\n" +"#define SRC(_x,_y) convertToFT(loadpix(srcData + mad24(_y, src_step, PIXSIZE * _x)))\n" +"#if kercn == 4\n" +"#define SRC4(_x,_y) convert_float4(vload4(0, srcData + mad24(_y, src_step, PIXSIZE * _x)))\n" +"#endif\n" +"#ifdef INTEL_DEVICE\n" +"#define MAD(x,y,z) fma((x),(y),(z))\n" +"#else\n" +"#define MAD(x,y,z) mad((x),(y),(z))\n" +"#endif\n" +"#define LOAD_LOCAL(col_gl, col_lcl) \\\n" +"sum0 = co3* SRC(col_gl, EXTRAPOLATE_(src_y - 2, src_rows)); \\\n" +"sum0 = MAD(co2, SRC(col_gl, EXTRAPOLATE_(src_y - 1, src_rows)), sum0); \\\n" +"temp = SRC(col_gl, EXTRAPOLATE_(src_y, src_rows)); \\\n" +"sum0 = MAD(co1, temp, sum0); \\\n" +"sum1 = co3 * temp; \\\n" +"temp = SRC(col_gl, EXTRAPOLATE_(src_y + 1, src_rows)); \\\n" +"sum0 = MAD(co2, temp, sum0); \\\n" +"sum1 = MAD(co2, temp, sum1); \\\n" +"temp = SRC(col_gl, EXTRAPOLATE_(src_y + 2, src_rows)); \\\n" +"sum0 = MAD(co3, temp, sum0); \\\n" +"sum1 = MAD(co1, temp, sum1); \\\n" +"smem[0][col_lcl] = sum0; \\\n" +"sum1 = MAD(co2, SRC(col_gl, EXTRAPOLATE_(src_y + 3, src_rows)), sum1); \\\n" +"sum1 = MAD(co3, SRC(col_gl, EXTRAPOLATE_(src_y + 4, src_rows)), sum1); \\\n" +"smem[1][col_lcl] = sum1;\n" +"#if kercn == 4\n" +"#define LOAD_LOCAL4(col_gl, col_lcl) \\\n" +"sum40 = co3* SRC4(col_gl, EXTRAPOLATE_(src_y - 2, src_rows)); \\\n" +"sum40 = MAD(co2, SRC4(col_gl, EXTRAPOLATE_(src_y - 1, src_rows)), sum40); \\\n" +"temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y, src_rows)); \\\n" +"sum40 = MAD(co1, temp4, sum40); \\\n" +"sum41 = co3 * temp4; \\\n" +"temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y + 1, src_rows)); \\\n" +"sum40 = MAD(co2, temp4, sum40); \\\n" +"sum41 = MAD(co2, temp4, sum41); \\\n" +"temp4 = SRC4(col_gl, EXTRAPOLATE_(src_y + 2, src_rows)); \\\n" +"sum40 = MAD(co3, temp4, sum40); \\\n" +"sum41 = MAD(co1, temp4, sum41); \\\n" +"vstore4(sum40, col_lcl, (__local float*) &smem[0][2]); \\\n" +"sum41 = MAD(co2, SRC4(col_gl, 
EXTRAPOLATE_(src_y + 3, src_rows)), sum41); \\\n" +"sum41 = MAD(co3, SRC4(col_gl, EXTRAPOLATE_(src_y + 4, src_rows)), sum41); \\\n" +"vstore4(sum41, col_lcl, (__local float*) &smem[1][2]);\n" +"#endif\n" +"#define noconvert\n" +"__kernel void pyrDown(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"const int x = get_global_id(0)*kercn;\n" +"const int y = 2*get_global_id(1);\n" +"__local FT smem[2][LOCAL_SIZE + 4];\n" +"__global uchar * dstData = dst + dst_offset;\n" +"__global const uchar * srcData = src + src_offset;\n" +"FT sum0, sum1, temp;\n" +"FT co1 = 0.375f;\n" +"FT co2 = 0.25f;\n" +"FT co3 = 0.0625f;\n" +"const int src_y = 2*y;\n" +"int col;\n" +"if (src_y >= 2 && src_y < src_rows - 4)\n" +"{\n" +"#undef EXTRAPOLATE_\n" +"#define EXTRAPOLATE_(val, maxVal) val\n" +"#if kercn == 1\n" +"col = EXTRAPOLATE(x, src_cols);\n" +"LOAD_LOCAL(col, 2 + get_local_id(0))\n" +"#else\n" +"if (x < src_cols-4)\n" +"{\n" +"float4 sum40, sum41, temp4;\n" +"LOAD_LOCAL4(x, get_local_id(0))\n" +"}\n" +"else\n" +"{\n" +"for (int i=0; i<4; i++)\n" +"{\n" +"col = EXTRAPOLATE(x+i, src_cols);\n" +"LOAD_LOCAL(col, 2 + 4 * get_local_id(0) + i)\n" +"}\n" +"}\n" +"#endif\n" +"if (get_local_id(0) < 2)\n" +"{\n" +"col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);\n" +"LOAD_LOCAL(col, get_local_id(0))\n" +"}\n" +"else if (get_local_id(0) < 4)\n" +"{\n" +"col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);\n" +"LOAD_LOCAL(col, LOCAL_SIZE + get_local_id(0))\n" +"}\n" +"}\n" +"else\n" +"{\n" +"#undef EXTRAPOLATE_\n" +"#define EXTRAPOLATE_(val, maxVal) EXTRAPOLATE(val, maxVal)\n" +"#if kercn == 1\n" +"col = EXTRAPOLATE(x, src_cols);\n" +"LOAD_LOCAL(col, 2 + get_local_id(0))\n" +"#else\n" +"if (x < src_cols-4)\n" +"{\n" +"float4 sum40, sum41, temp4;\n" +"LOAD_LOCAL4(x, get_local_id(0))\n" +"}\n" +"else\n" +"{\n" +"for (int i=0; i<4; i++)\n" +"{\n" +"col = EXTRAPOLATE(x+i, src_cols);\n" +"LOAD_LOCAL(col, 2 + 4*get_local_id(0) + i)\n" +"}\n" +"}\n" +"#endif\n" +"if (get_local_id(0) < 2)\n" +"{\n" +"col = EXTRAPOLATE((int)(get_group_id(0)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);\n" +"LOAD_LOCAL(col, get_local_id(0))\n" +"}\n" +"else if (get_local_id(0) < 4)\n" +"{\n" +"col = EXTRAPOLATE((int)((get_group_id(0)+1)*LOCAL_SIZE + get_local_id(0) - 2), src_cols);\n" +"LOAD_LOCAL(col, LOCAL_SIZE + get_local_id(0))\n" +"}\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"#if kercn == 1\n" +"if (get_local_id(0) < LOCAL_SIZE / 2)\n" +"{\n" +"const int tid2 = get_local_id(0) * 2;\n" +"const int dst_x = (get_group_id(0) * get_local_size(0) + tid2) / 2;\n" +"if (dst_x < dst_cols)\n" +"{\n" +"for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)\n" +"{\n" +"#if cn == 1\n" +"#if fdepth <= 5\n" +"FT sum = dot(vload4(0, (__local float*) (&smem) + tid2 + (yin - y) * (LOCAL_SIZE + 4)), (float4)(co3, co2, co1, co2));\n" +"#else\n" +"FT sum = dot(vload4(0, (__local double*) (&smem) + tid2 + (yin - y) * (LOCAL_SIZE + 4)), (double4)(co3, co2, co1, co2));\n" +"#endif\n" +"#else\n" +"FT sum = co3 * smem[yin - y][2 + tid2 - 2];\n" +"sum = MAD(co2, smem[yin - y][2 + tid2 - 1], sum);\n" +"sum = MAD(co1, smem[yin - y][2 + tid2 ], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid2 + 1], sum);\n" +"#endif\n" +"sum = MAD(co3, smem[yin - y][2 + tid2 + 2], sum);\n" +"storepix(convertToT(sum), dstData + yin * dst_step + dst_x * PIXSIZE);\n" +"}\n" 
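+// (kercn == 4 path below: each work-item has staged four vertically-filtered columns in smem and emits two adjacent destination pixels per row)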
+"}\n" +"}\n" +"#else\n" +"int tid4 = get_local_id(0) * 4;\n" +"int dst_x = (get_group_id(0) * LOCAL_SIZE + tid4) / 2;\n" +"if (dst_x < dst_cols - 1)\n" +"{\n" +"for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)\n" +"{\n" +"FT sum = co3* smem[yin - y][2 + tid4 + 2];\n" +"sum = MAD(co3, smem[yin - y][2 + tid4 - 2], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 - 1], sum);\n" +"sum = MAD(co1, smem[yin - y][2 + tid4 ], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);\n" +"storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));\n" +"dst_x ++;\n" +"sum = co3* smem[yin - y][2 + tid4 + 4];\n" +"sum = MAD(co3, smem[yin - y][2 + tid4 ], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);\n" +"sum = MAD(co1, smem[yin - y][2 + tid4 + 2], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 + 3], sum);\n" +"storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));\n" +"dst_x --;\n" +"}\n" +"}\n" +"else if (dst_x < dst_cols)\n" +"{\n" +"for (int yin = y, y1 = min(dst_rows, y + 2); yin < y1; yin++)\n" +"{\n" +"FT sum = co3* smem[yin - y][2 + tid4 + 2];\n" +"sum = MAD(co3, smem[yin - y][2 + tid4 - 2], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 - 1], sum);\n" +"sum = MAD(co1, smem[yin - y][2 + tid4 ], sum);\n" +"sum = MAD(co2, smem[yin - y][2 + tid4 + 1], sum);\n" +"storepix(convertToT(sum), dstData + mad24(yin, dst_step, dst_x * PIXSIZE));\n" +"}\n" +"}\n" +"#endif\n" +"}\n" +, "a5f2dccf982eb988b2ed0e11453d00b9", NULL}; +struct cv::ocl::internal::ProgramEntry pyr_up_oclsrc={moduleName, "pyr_up", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = (val)\n" +"#define PIXSIZE ((int)sizeof(T))\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3((val), 0, (__global T1*)(addr))\n" +"#define PIXSIZE ((int)sizeof(T1)*3)\n" +"#endif\n" +"#define EXTRAPOLATE(x, maxV) min(maxV - 1, (int) abs(x))\n" +"#define noconvert\n" +"__kernel void pyrUp(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"const int x = get_global_id(0);\n" +"const int y = get_global_id(1);\n" +"const int tidx = get_local_id(0);\n" +"const int tidy = get_local_id(1);\n" +"__local FT s_srcPatch[LOCAL_SIZE/2 + 2][LOCAL_SIZE/2 + 2];\n" +"__local FT s_dstPatch[LOCAL_SIZE/2 + 2][LOCAL_SIZE];\n" +"__global uchar * dstData = dst + dst_offset;\n" +"__global const uchar * srcData = src + src_offset;\n" +"if( tidx < (LOCAL_SIZE/2 + 2) && tidy < LOCAL_SIZE/2 + 2 )\n" +"{\n" +"int srcx = EXTRAPOLATE(mad24((int)get_group_id(0), LOCAL_SIZE/2, tidx) - 1, src_cols);\n" +"int srcy = EXTRAPOLATE(mad24((int)get_group_id(1), LOCAL_SIZE/2, tidy) - 1, src_rows);\n" +"s_srcPatch[tidy][tidx] = convertToFT(loadpix(srcData + srcy * src_step + srcx * PIXSIZE));\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"FT sum = 0.f;\n" +"const FT co1 = 0.75f;\n" +"const FT co2 = 0.5f;\n" +"const FT co3 = 0.125f;\n" +"const FT coef1 = (tidx & 1) == 0 ? co1 : (FT) 0;\n" +"const FT coef2 = (tidx & 1) == 0 ? co3 : co2;\n" +"const FT coefy1 = (tidy & 1) == 0 ? co1 : (FT) 0;\n" +"const FT coefy2 = (tidy & 1) == 0 ? 
co3 : co2;\n" +"if(tidy < LOCAL_SIZE/2 + 2)\n" +"{\n" +"sum = coef2* s_srcPatch[tidy][1 + ((tidx - 1) >> 1)];\n" +"sum = mad(coef1, s_srcPatch[tidy][1 + ((tidx ) >> 1)], sum);\n" +"sum = mad(coef2, s_srcPatch[tidy][1 + ((tidx + 2) >> 1)], sum);\n" +"s_dstPatch[tidy][tidx] = sum;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"sum = coefy2* s_dstPatch[1 + ((tidy - 1) >> 1)][tidx];\n" +"sum = mad(coefy1, s_dstPatch[1 + ((tidy ) >> 1)][tidx], sum);\n" +"sum = mad(coefy2, s_dstPatch[1 + ((tidy + 2) >> 1)][tidx], sum);\n" +"if ((x < dst_cols) && (y < dst_rows))\n" +"storepix(convertToT(sum), dstData + y * dst_step + x * PIXSIZE);\n" +"}\n" +"__kernel void pyrUp_unrolled(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"const int lx = 2*get_local_id(0);\n" +"const int ly = 2*get_local_id(1);\n" +"__local FT s_srcPatch[LOCAL_SIZE+2][LOCAL_SIZE+2];\n" +"__local FT s_dstPatch[LOCAL_SIZE+2][2*LOCAL_SIZE];\n" +"__global uchar * dstData = dst + dst_offset;\n" +"__global const uchar * srcData = src + src_offset;\n" +"if( lx < (LOCAL_SIZE+2) && ly < (LOCAL_SIZE+2) )\n" +"{\n" +"int srcx = mad24((int)get_group_id(0), LOCAL_SIZE, lx) - 1;\n" +"int srcy = mad24((int)get_group_id(1), LOCAL_SIZE, ly) - 1;\n" +"int srcx1 = EXTRAPOLATE(srcx, src_cols);\n" +"int srcx2 = EXTRAPOLATE(srcx+1, src_cols);\n" +"int srcy1 = EXTRAPOLATE(srcy, src_rows);\n" +"int srcy2 = EXTRAPOLATE(srcy+1, src_rows);\n" +"s_srcPatch[ly][lx] = convertToFT(loadpix(srcData + srcy1 * src_step + srcx1 * PIXSIZE));\n" +"s_srcPatch[ly+1][lx] = convertToFT(loadpix(srcData + srcy2 * src_step + srcx1 * PIXSIZE));\n" +"s_srcPatch[ly][lx+1] = convertToFT(loadpix(srcData + srcy1 * src_step + srcx2 * PIXSIZE));\n" +"s_srcPatch[ly+1][lx+1] = convertToFT(loadpix(srcData + srcy2 * src_step + srcx2 * PIXSIZE));\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"FT sum;\n" +"const FT co1 = 0.75f;\n" +"const FT co2 = 0.5f;\n" +"const FT co3 = 0.125f;\n" +"sum = co3 * s_srcPatch[1 + (ly >> 1)][1 + ((lx - 2) >> 1)];\n" +"sum = mad(co1, s_srcPatch[1 + (ly >> 1)][1 + ((lx ) >> 1)], sum);\n" +"sum = mad(co3, s_srcPatch[1 + (ly >> 1)][1 + ((lx + 2) >> 1)], sum);\n" +"s_dstPatch[1 + get_local_id(1)][lx] = sum;\n" +"sum = co2 * s_srcPatch[1 + (ly >> 1)][1 + ((lx + 1 - 1) >> 1)];\n" +"sum = mad(co2, s_srcPatch[1 + (ly >> 1)][1 + ((lx + 1 + 1) >> 1)], sum);\n" +"s_dstPatch[1 + get_local_id(1)][lx+1] = sum;\n" +"if (ly < 1)\n" +"{\n" +"sum = co3 * s_srcPatch[0][1 + ((lx - 2) >> 1)];\n" +"sum = mad(co1, s_srcPatch[0][1 + ((lx ) >> 1)], sum);\n" +"sum = mad(co3, s_srcPatch[0][1 + ((lx + 2) >> 1)], sum);\n" +"s_dstPatch[0][lx] = sum;\n" +"sum = co2 * s_srcPatch[0][1 + ((lx + 1 - 1) >> 1)];\n" +"sum = mad(co2, s_srcPatch[0][1 + ((lx + 1 + 1) >> 1)], sum);\n" +"s_dstPatch[0][lx+1] = sum;\n" +"}\n" +"if (ly > 2*LOCAL_SIZE-3)\n" +"{\n" +"sum = co3 * s_srcPatch[LOCAL_SIZE+1][1 + ((lx - 2) >> 1)];\n" +"sum = mad(co1, s_srcPatch[LOCAL_SIZE+1][1 + ((lx ) >> 1)], sum);\n" +"sum = mad(co3, s_srcPatch[LOCAL_SIZE+1][1 + ((lx + 2) >> 1)], sum);\n" +"s_dstPatch[LOCAL_SIZE+1][lx] = sum;\n" +"sum = co2 * s_srcPatch[LOCAL_SIZE+1][1 + ((lx + 1 - 1) >> 1)];\n" +"sum = mad(co2, s_srcPatch[LOCAL_SIZE+1][1 + ((lx + 1 + 1) >> 1)], sum);\n" +"s_dstPatch[LOCAL_SIZE+1][lx+1] = sum;\n" +"}\n" +"barrier(CLK_LOCAL_MEM_FENCE);\n" +"int dst_x = 2*get_global_id(0);\n" +"int dst_y = 2*get_global_id(1);\n" +"if ((dst_x < dst_cols) && (dst_y < dst_rows))\n" +"{\n" +"sum = co3 * 
s_dstPatch[1 + get_local_id(1) - 1][lx];\n" +"sum = mad(co1, s_dstPatch[1 + get_local_id(1) ][lx], sum);\n" +"sum = mad(co3, s_dstPatch[1 + get_local_id(1) + 1][lx], sum);\n" +"storepix(convertToT(sum), dstData + dst_y * dst_step + dst_x * PIXSIZE);\n" +"sum = co3 * s_dstPatch[1 + get_local_id(1) - 1][lx+1];\n" +"sum = mad(co1, s_dstPatch[1 + get_local_id(1) ][lx+1], sum);\n" +"sum = mad(co3, s_dstPatch[1 + get_local_id(1) + 1][lx+1], sum);\n" +"storepix(convertToT(sum), dstData + dst_y * dst_step + (dst_x+1) * PIXSIZE);\n" +"sum = co2 * s_dstPatch[1 + get_local_id(1) ][lx];\n" +"sum = mad(co2, s_dstPatch[1 + get_local_id(1) + 1][lx], sum);\n" +"storepix(convertToT(sum), dstData + (dst_y+1) * dst_step + dst_x * PIXSIZE);\n" +"sum = co2 * s_dstPatch[1 + get_local_id(1) ][lx+1];\n" +"sum = mad(co2, s_dstPatch[1 + get_local_id(1) + 1][lx+1], sum);\n" +"storepix(convertToT(sum), dstData + (dst_y+1) * dst_step + (dst_x+1) * PIXSIZE);\n" +"}\n" +"}\n" +, "e48abb0036bd5e090ad06600b018eec9", NULL}; +struct cv::ocl::internal::ProgramEntry pyramid_up_oclsrc={moduleName, "pyramid_up", +"__constant float kx[] = { 0.125, 0.5, 0.75, 0.5, 0.125 };\n" +"__constant float ky[] = { 0.125, 0.5, 0.75, 0.5, 0.125 };\n" +"#define OP(delta, y, x) (convert_float4(arr[(y + delta) * 5 + x]) * ky[y] * kx[x])\n" +"__kernel void pyrUp_cols2(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"if ((block_x * 4) >= dst_cols || y >= dst_rows) return;\n" +"uchar8 line[6];\n" +"uchar4 line_out;\n" +"int offset, src_index;\n" +"src_index = block_x * 2 + (y / 2 - 1) * src_step - 1 + src_offset;\n" +"uchar4 tmp;\n" +"line[0] = line[2] = line[4] = (uchar8)0;\n" +"line[1] = line[3] = line[5] = (uchar8)0;\n" +"offset = max(0, src_index + 1 * src_step);\n" +"tmp = vload4(0, src + offset);\n" +"if (offset == 0) tmp = (uchar4)(0, tmp.s012);\n" +"line[2].even = tmp;\n" +"offset = max(0, src_index + ((y == 0) ? 2 : 0) * src_step);\n" +"tmp = vload4(0, src + offset);\n" +"if (offset == 0) tmp = (uchar4)(0, tmp.s012);\n" +"line[0].even = tmp;\n" +"if (y == (dst_rows - 2))\n" +"line[4] = line[2];\n" +"else\n" +"line[4].even = vload4(0, src + src_index + 2 * src_step);\n" +"bool row_s = (block_x == 0);\n" +"bool row_e = ((block_x + 1) * 4 == dst_cols);\n" +"uchar4 arr[30];\n" +"uchar s, e;\n" +"s = line[0].s4;\n" +"e = line[0].s3;\n" +"arr[0] = row_s ? (uchar4)(s, e, line[0].s23) : (uchar4)(line[0].s0123);\n" +"arr[1] = row_s ? (uchar4)(e, line[0].s234) : (uchar4)(line[0].s1234);\n" +"arr[2] = (uchar4)(line[0].s2345);\n" +"arr[3] = row_e ? (uchar4)(line[0].s345, s) : (uchar4)(line[0].s3456);\n" +"arr[4] = row_e ? (uchar4)(line[0].s45, s, e) : (uchar4)(line[0].s4567);\n" +"s = line[1].s4;\n" +"e = line[1].s3;\n" +"arr[5] = row_s ? (uchar4)(s, e, line[1].s23) : (uchar4)(line[1].s0123);\n" +"arr[6] = row_s ? (uchar4)(e, line[1].s234) : (uchar4)(line[1].s1234);\n" +"arr[7] = (uchar4)(line[1].s2345);\n" +"arr[8] = row_e ? (uchar4)(line[1].s345, s) : (uchar4)(line[1].s3456);\n" +"arr[9] = row_e ? (uchar4)(line[1].s45, s, e) : (uchar4)(line[1].s4567);\n" +"s = line[2].s4;\n" +"e = line[2].s3;\n" +"arr[10] = row_s ? (uchar4)(s, e, line[2].s23) : (uchar4)(line[2].s0123);\n" +"arr[11] = row_s ? (uchar4)(e, line[2].s234) : (uchar4)(line[2].s1234);\n" +"arr[12] = (uchar4)(line[2].s2345);\n" +"arr[13] = row_e ? 
(uchar4)(line[2].s345, s) : (uchar4)(line[2].s3456);\n" +"arr[14] = row_e ? (uchar4)(line[2].s45, s, e) : (uchar4)(line[2].s4567);\n" +"s = line[3].s4;\n" +"e = line[3].s3;\n" +"arr[15] = row_s ? (uchar4)(s, e, line[3].s23) : (uchar4)(line[3].s0123);\n" +"arr[16] = row_s ? (uchar4)(e, line[3].s234) : (uchar4)(line[3].s1234);\n" +"arr[17] = (uchar4)(line[3].s2345);\n" +"arr[18] = row_e ? (uchar4)(line[3].s345, s) : (uchar4)(line[3].s3456);\n" +"arr[19] = row_e ? (uchar4)(line[3].s45, s, e) : (uchar4)(line[3].s4567);\n" +"s = line[4].s4;\n" +"e = line[4].s3;\n" +"arr[20] = row_s ? (uchar4)(s, e, line[4].s23) : (uchar4)(line[4].s0123);\n" +"arr[21] = row_s ? (uchar4)(e, line[4].s234) : (uchar4)(line[4].s1234);\n" +"arr[22] = (uchar4)(line[4].s2345);\n" +"arr[23] = row_e ? (uchar4)(line[4].s345, s) : (uchar4)(line[4].s3456);\n" +"arr[24] = row_e ? (uchar4)(line[4].s45, s, e) : (uchar4)(line[4].s4567);\n" +"s = line[5].s4;\n" +"e = line[5].s3;\n" +"arr[25] = row_s ? (uchar4)(s, e, line[5].s23) : (uchar4)(line[5].s0123);\n" +"arr[26] = row_s ? (uchar4)(e, line[5].s234) : (uchar4)(line[5].s1234);\n" +"arr[27] = (uchar4)(line[5].s2345);\n" +"arr[28] = row_e ? (uchar4)(line[5].s345, s) : (uchar4)(line[5].s3456);\n" +"arr[29] = row_e ? (uchar4)(line[5].s45, s, e) : (uchar4)(line[5].s4567);\n" +"float4 sum[2];\n" +"sum[0] = OP(0, 0, 0) + OP(0, 0, 1) + OP(0, 0, 2) + OP(0, 0, 3) + OP(0, 0, 4) +\n" +"OP(0, 1, 0) + OP(0, 1, 1) + OP(0, 1, 2) + OP(0, 1, 3) + OP(0, 1, 4) +\n" +"OP(0, 2, 0) + OP(0, 2, 1) + OP(0, 2, 2) + OP(0, 2, 3) + OP(0, 2, 4) +\n" +"OP(0, 3, 0) + OP(0, 3, 1) + OP(0, 3, 2) + OP(0, 3, 3) + OP(0, 3, 4) +\n" +"OP(0, 4, 0) + OP(0, 4, 1) + OP(0, 4, 2) + OP(0, 4, 3) + OP(0, 4, 4);\n" +"sum[1] = OP(1, 0, 0) + OP(1, 0, 1) + OP(1, 0, 2) + OP(1, 0, 3) + OP(1, 0, 4) +\n" +"OP(1, 1, 0) + OP(1, 1, 1) + OP(1, 1, 2) + OP(1, 1, 3) + OP(1, 1, 4) +\n" +"OP(1, 2, 0) + OP(1, 2, 1) + OP(1, 2, 2) + OP(1, 2, 3) + OP(1, 2, 4) +\n" +"OP(1, 3, 0) + OP(1, 3, 1) + OP(1, 3, 2) + OP(1, 3, 3) + OP(1, 3, 4) +\n" +"OP(1, 4, 0) + OP(1, 4, 1) + OP(1, 4, 2) + OP(1, 4, 3) + OP(1, 4, 4);\n" +"int dst_index = block_x * 4 + y * dst_step + dst_offset;\n" +"vstore4(convert_uchar4_sat_rte(sum[0]), 0, dst + dst_index);\n" +"vstore4(convert_uchar4_sat_rte(sum[1]), 0, dst + dst_index + dst_step);\n" +"}\n" +, "46bae9fe0213f767045e3c63f9ffc6a0", NULL}; +struct cv::ocl::internal::ProgramEntry remap_oclsrc={moduleName, "remap", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define noconvert\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = val\n" +"#define TSIZE ((int)sizeof(T))\n" +"#define convertScalar(a) (a)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1*)(addr))\n" +"#define TSIZE ((int)sizeof(T1)*3)\n" +"#define convertScalar(a) (T)(a.x, a.y, a.z)\n" +"#endif\n" +"enum\n" +"{\n" +"INTER_BITS = 5,\n" +"INTER_TAB_SIZE = 1 << INTER_BITS,\n" +"INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE\n" +"};\n" +"#ifdef INTER_NEAREST\n" +"#define convertToWT\n" +"#endif\n" +"#ifdef BORDER_CONSTANT\n" +"#define EXTRAPOLATE(v2, v) v = scalar;\n" +"#elif defined BORDER_REPLICATE\n" +"#define EXTRAPOLATE(v2, v) \\\n" +"{ \\\n" +"v2 = max(min(v2, (int2)(src_cols - 1, src_rows - 1)), (int2)(0)); \\\n" +"v = 
convertToWT(loadpix((__global const T*)(srcptr + mad24(v2.y, src_step, v2.x * TSIZE + src_offset)))); \\\n" +"}\n" +"#elif defined BORDER_WRAP\n" +"#define EXTRAPOLATE(v2, v) \\\n" +"{ \\\n" +"if (v2.x < 0) \\\n" +"v2.x -= ((v2.x - src_cols + 1) / src_cols) * src_cols; \\\n" +"if (v2.x >= src_cols) \\\n" +"v2.x %= src_cols; \\\n" +"\\\n" +"if (v2.y < 0) \\\n" +"v2.y -= ((v2.y - src_rows + 1) / src_rows) * src_rows; \\\n" +"if( v2.y >= src_rows ) \\\n" +"v2.y %= src_rows; \\\n" +"v = convertToWT(loadpix((__global const T*)(srcptr + mad24(v2.y, src_step, v2.x * TSIZE + src_offset)))); \\\n" +"}\n" +"#elif defined(BORDER_REFLECT) || defined(BORDER_REFLECT_101)\n" +"#ifdef BORDER_REFLECT\n" +"#define DELTA int delta = 0\n" +"#else\n" +"#define DELTA int delta = 1\n" +"#endif\n" +"#define EXTRAPOLATE(v2, v) \\\n" +"{ \\\n" +"DELTA; \\\n" +"if (src_cols == 1) \\\n" +"v2.x = 0; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if( v2.x < 0 ) \\\n" +"v2.x = -v2.x - 1 + delta; \\\n" +"else \\\n" +"v2.x = src_cols - 1 - (v2.x - src_cols) - delta; \\\n" +"} \\\n" +"while (v2.x >= src_cols || v2.x < 0); \\\n" +"\\\n" +"if (src_rows == 1) \\\n" +"v2.y = 0; \\\n" +"else \\\n" +"do \\\n" +"{ \\\n" +"if( v2.y < 0 ) \\\n" +"v2.y = -v2.y - 1 + delta; \\\n" +"else \\\n" +"v2.y = src_rows - 1 - (v2.y - src_rows) - delta; \\\n" +"} \\\n" +"while (v2.y >= src_rows || v2.y < 0); \\\n" +"v = convertToWT(loadpix((__global const T*)(srcptr + mad24(v2.y, src_step, v2.x * TSIZE + src_offset)))); \\\n" +"}\n" +"#else\n" +"#error No extrapolation method\n" +"#endif\n" +"#define NEED_EXTRAPOLATION(gx, gy) (gx >= src_cols || gy >= src_rows || gx < 0 || gy < 0)\n" +"#ifdef INTER_NEAREST\n" +"__kernel void remap_2_32FC1(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * map1ptr, int map1_step, int map1_offset,\n" +"__global const uchar * map2ptr, int map2_step, int map2_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"T scalar = convertScalar(nVal);\n" +"int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(float), map1_offset));\n" +"int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(float), map2_offset));\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map1_index += map1_step, map2_index += map2_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const float * map1 = (__global const float *)(map1ptr + map1_index);\n" +"__global const float * map2 = (__global const float *)(map2ptr + map2_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"int gx = convert_int_sat_rte(map1[0]);\n" +"int gy = convert_int_sat_rte(map2[0]);\n" +"if (NEED_EXTRAPOLATION(gx, gy))\n" +"{\n" +"#ifndef BORDER_CONSTANT\n" +"int2 gxy = (int2)(gx, gy);\n" +"#endif\n" +"T v;\n" +"EXTRAPOLATE(gxy, v)\n" +"storepix(v, dst);\n" +"}\n" +"else\n" +"{\n" +"int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset));\n" +"storepix(loadpix((__global const T*)(srcptr + src_index)), dst);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void remap_32FC2(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * mapptr, int map_step, int 
map_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"T scalar = convertScalar(nVal);\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map_index = mad24(y, map_step, mad24(x, (int)sizeof(float2), map_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map_index += map_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const float2 * map = (__global const float2 *)(mapptr + map_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"int2 gxy = convert_int2_sat_rte(map[0]);\n" +"int gx = gxy.x, gy = gxy.y;\n" +"if (NEED_EXTRAPOLATION(gx, gy))\n" +"{\n" +"T v;\n" +"EXTRAPOLATE(gxy, v)\n" +"storepix(v, dst);\n" +"}\n" +"else\n" +"{\n" +"int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset));\n" +"storepix(loadpix((__global const T *)(srcptr + src_index)), dst);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void remap_16SC2(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * mapptr, int map_step, int map_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"T scalar = convertScalar(nVal);\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map_index = mad24(y, map_step, mad24(x, (int)sizeof(short2), map_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map_index += map_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const short2 * map = (__global const short2 *)(mapptr + map_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"int2 gxy = convert_int2(map[0]);\n" +"int gx = gxy.x, gy = gxy.y;\n" +"if (NEED_EXTRAPOLATION(gx, gy))\n" +"{\n" +"T v;\n" +"EXTRAPOLATE(gxy, v)\n" +"storepix(v, dst);\n" +"}\n" +"else\n" +"{\n" +"int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset));\n" +"storepix(loadpix((__global const T *)(srcptr + src_index)), dst);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"__kernel void remap_16SC2_16UC1(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * map1ptr, int map1_step, int map1_offset,\n" +"__global const uchar * map2ptr, int map2_step, int map2_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"T scalar = convertScalar(nVal);\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(short2), map1_offset));\n" +"int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(ushort), map2_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map1_index += map1_step, map2_index += map2_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index);\n" +"__global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"int map2Value = convert_int(map2[0]) & (INTER_TAB_SIZE2 - 1);\n" +"int dx = (map2Value & (INTER_TAB_SIZE - 1)) < (INTER_TAB_SIZE >> 1) ? 
1 : 0;\n" +"int dy = (map2Value >> INTER_BITS) < (INTER_TAB_SIZE >> 1) ? 1 : 0;\n" +"int2 gxy = convert_int2(map1[0]) + (int2)(dx, dy);\n" +"int gx = gxy.x, gy = gxy.y;\n" +"if (NEED_EXTRAPOLATION(gx, gy))\n" +"{\n" +"T v;\n" +"EXTRAPOLATE(gxy, v)\n" +"storepix(v, dst);\n" +"}\n" +"else\n" +"{\n" +"int src_index = mad24(gy, src_step, mad24(gx, TSIZE, src_offset));\n" +"storepix(loadpix((__global const T *)(srcptr + src_index)), dst);\n" +"}\n" +"}\n" +"}\n" +"}\n" +"#elif defined INTER_LINEAR\n" +"__constant float coeffs[64] =\n" +"{ 1.000000f, 0.000000f, 0.968750f, 0.031250f, 0.937500f, 0.062500f, 0.906250f, 0.093750f, 0.875000f, 0.125000f, 0.843750f, 0.156250f,\n" +"0.812500f, 0.187500f, 0.781250f, 0.218750f, 0.750000f, 0.250000f, 0.718750f, 0.281250f, 0.687500f, 0.312500f, 0.656250f, 0.343750f,\n" +"0.625000f, 0.375000f, 0.593750f, 0.406250f, 0.562500f, 0.437500f, 0.531250f, 0.468750f, 0.500000f, 0.500000f, 0.468750f, 0.531250f,\n" +"0.437500f, 0.562500f, 0.406250f, 0.593750f, 0.375000f, 0.625000f, 0.343750f, 0.656250f, 0.312500f, 0.687500f, 0.281250f, 0.718750f,\n" +"0.250000f, 0.750000f, 0.218750f, 0.781250f, 0.187500f, 0.812500f, 0.156250f, 0.843750f, 0.125000f, 0.875000f, 0.093750f, 0.906250f,\n" +"0.062500f, 0.937500f, 0.031250f, 0.968750f };\n" +"__kernel void remap_16SC2_16UC1(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * map1ptr, int map1_step, int map1_offset,\n" +"__global const uchar * map2ptr, int map2_step, int map2_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"WT scalar = convertToWT(convertScalar(nVal));\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(short2), map1_offset));\n" +"int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(ushort), map2_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map1_index += map1_step, map2_index += map2_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const short2 * map1 = (__global const short2 *)(map1ptr + map1_index);\n" +"__global const ushort * map2 = (__global const ushort *)(map2ptr + map2_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"int2 map_dataA = convert_int2(map1[0]);\n" +"int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y);\n" +"int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1);\n" +"int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1);\n" +"ushort map2Value = (ushort)(map2[0] & (INTER_TAB_SIZE2 - 1));\n" +"WT2 u = (WT2)(map2Value & (INTER_TAB_SIZE - 1), map2Value >> INTER_BITS) / (WT2)(INTER_TAB_SIZE);\n" +"WT a = scalar, b = scalar, c = scalar, d = scalar;\n" +"if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y))\n" +"a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataA, a);\n" +"if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y))\n" +"b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataB, b);\n" +"if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y))\n" +"c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset))));\n" +"else\n" 
+"EXTRAPOLATE(map_dataC, c);\n" +"if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y))\n" +"d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataD, d);\n" +"WT dst_data = a * (1 - u.x) * (1 - u.y) +\n" +"b * (u.x) * (1 - u.y) +\n" +"c * (1 - u.x) * (u.y) +\n" +"d * (u.x) * (u.y);\n" +"storepix(convertToT(dst_data), dst);\n" +"}\n" +"}\n" +"}\n" +"__kernel void remap_2_32FC1(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * map1ptr, int map1_step, int map1_offset,\n" +"__global const uchar * map2ptr, int map2_step, int map2_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"WT scalar = convertToWT(convertScalar(nVal));\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map1_index = mad24(y, map1_step, mad24(x, (int)sizeof(float), map1_offset));\n" +"int map2_index = mad24(y, map2_step, mad24(x, (int)sizeof(float), map2_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map1_index += map1_step, map2_index += map2_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const float * map1 = (__global const float *)(map1ptr + map1_index);\n" +"__global const float * map2 = (__global const float *)(map2ptr + map2_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"#if defined BORDER_CONSTANT\n" +"float xf = map1[0], yf = map2[0];\n" +"int sx = convert_int_sat_rtz(mad(xf, (float)INTER_TAB_SIZE, 0.5f)) >> INTER_BITS;\n" +"int sy = convert_int_sat_rtz(mad(yf, (float)INTER_TAB_SIZE, 0.5f)) >> INTER_BITS;\n" +"__constant float * coeffs_x = coeffs + ((convert_int_rte(xf * INTER_TAB_SIZE) & (INTER_TAB_SIZE - 1)) << 1);\n" +"__constant float * coeffs_y = coeffs + ((convert_int_rte(yf * INTER_TAB_SIZE) & (INTER_TAB_SIZE - 1)) << 1);\n" +"WT sum = (WT)(0), xsum;\n" +"int src_index = mad24(sy, src_step, mad24(sx, TSIZE, src_offset));\n" +"#pragma unroll\n" +"for (int yp = 0; yp < 2; ++yp, src_index += src_step)\n" +"{\n" +"if (sy + yp >= 0 && sy + yp < src_rows)\n" +"{\n" +"xsum = (WT)(0);\n" +"if (sx >= 0 && sx + 2 < src_cols)\n" +"{\n" +"#if depth == 0 && cn == 1\n" +"uchar2 value = vload2(0, srcptr + src_index);\n" +"xsum = dot(convert_float2(value), (float2)(coeffs_x[0], coeffs_x[1]));\n" +"#else\n" +"#pragma unroll\n" +"for (int xp = 0; xp < 2; ++xp)\n" +"xsum = fma(convertToWT(loadpix(srcptr + mad24(xp, TSIZE, src_index))), coeffs_x[xp], xsum);\n" +"#endif\n" +"}\n" +"else\n" +"{\n" +"#pragma unroll\n" +"for (int xp = 0; xp < 2; ++xp)\n" +"xsum = fma(sx + xp >= 0 && sx + xp < src_cols ?\n" +"convertToWT(loadpix(srcptr + mad24(xp, TSIZE, src_index))) : scalar, coeffs_x[xp], xsum);\n" +"}\n" +"sum = fma(xsum, coeffs_y[yp], sum);\n" +"}\n" +"else\n" +"sum = fma(scalar, coeffs_y[yp], sum);\n" +"}\n" +"storepix(convertToT(sum), dst);\n" +"#else\n" +"float2 map_data = (float2)(map1[0], map2[0]);\n" +"int2 map_dataA = convert_int2_sat_rtn(map_data);\n" +"int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y);\n" +"int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1);\n" +"int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1);\n" +"float2 _u = map_data - convert_float2(map_dataA);\n" +"WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / 
(WT2)INTER_TAB_SIZE;\n" +"WT scalar = convertToWT(convertScalar(nVal));\n" +"WT a = scalar, b = scalar, c = scalar, d = scalar;\n" +"if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y))\n" +"a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataA, a);\n" +"if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y))\n" +"b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataB, b);\n" +"if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y))\n" +"c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataC, c);\n" +"if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y))\n" +"d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataD, d);\n" +"WT dst_data = a * (1 - u.x) * (1 - u.y) +\n" +"b * (u.x) * (1 - u.y) +\n" +"c * (1 - u.x) * (u.y) +\n" +"d * (u.x) * (u.y);\n" +"storepix(convertToT(dst_data), dst);\n" +"#endif\n" +"}\n" +"}\n" +"}\n" +"__kernel void remap_32FC2(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * mapptr, int map_step, int map_offset,\n" +"ST nVal)\n" +"{\n" +"int x = get_global_id(0);\n" +"int y = get_global_id(1) * rowsPerWI;\n" +"if (x < dst_cols)\n" +"{\n" +"WT scalar = convertToWT(convertScalar(nVal));\n" +"int dst_index = mad24(y, dst_step, mad24(x, TSIZE, dst_offset));\n" +"int map_index = mad24(y, map_step, mad24(x, (int)sizeof(float2), map_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < rowsPerWI; ++i, ++y,\n" +"map_index += map_step, dst_index += dst_step)\n" +"if (y < dst_rows)\n" +"{\n" +"__global const float2 * map = (__global const float2 *)(mapptr + map_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"float2 map_data = map[0];\n" +"int2 map_dataA = convert_int2_sat_rtn(map_data);\n" +"int2 map_dataB = (int2)(map_dataA.x + 1, map_dataA.y);\n" +"int2 map_dataC = (int2)(map_dataA.x, map_dataA.y + 1);\n" +"int2 map_dataD = (int2)(map_dataA.x + 1, map_dataA.y + 1);\n" +"float2 _u = map_data - convert_float2(map_dataA);\n" +"WT2 u = convertToWT2(convert_int2_rte(convertToWT2(_u) * (WT2)INTER_TAB_SIZE)) / (WT2)INTER_TAB_SIZE;\n" +"WT a = scalar, b = scalar, c = scalar, d = scalar;\n" +"if (!NEED_EXTRAPOLATION(map_dataA.x, map_dataA.y))\n" +"a = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataA.y, src_step, map_dataA.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataA, a);\n" +"if (!NEED_EXTRAPOLATION(map_dataB.x, map_dataB.y))\n" +"b = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataB.y, src_step, map_dataB.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataB, b);\n" +"if (!NEED_EXTRAPOLATION(map_dataC.x, map_dataC.y))\n" +"c = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataC.y, src_step, map_dataC.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataC, c);\n" +"if (!NEED_EXTRAPOLATION(map_dataD.x, map_dataD.y))\n" +"d = convertToWT(loadpix((__global const T *)(srcptr + mad24(map_dataD.y, src_step, map_dataD.x * TSIZE + src_offset))));\n" +"else\n" +"EXTRAPOLATE(map_dataD, d);\n" +"WT dst_data = a * (1 - u.x) * (1 - u.y) +\n" 
+"b * (u.x) * (1 - u.y) +\n" +"c * (1 - u.x) * (u.y) +\n" +"d * (u.x) * (u.y);\n" +"storepix(convertToT(dst_data), dst);\n" +"}\n" +"}\n" +"}\n" +"#endif\n" +, "d71b990c30eb48e9063ba2446a0278c6", NULL}; +struct cv::ocl::internal::ProgramEntry resize_oclsrc={moduleName, "resize", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS)\n" +"#define CAST_BITS (INTER_RESIZE_COEF_BITS << 1)\n" +"#define INC(x,l) min(x+1,l-1)\n" +"#define noconvert\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T *)(addr)\n" +"#define storepix(val, addr) *(__global T *)(addr) = val\n" +"#define TSIZE (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1 *)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1 *)(addr))\n" +"#define TSIZE (int)sizeof(T1)*cn\n" +"#endif\n" +"#if defined USE_SAMPLER\n" +"#if cn == 1\n" +"#define READ_IMAGE(X,Y,Z) read_imagef(X,Y,Z).x\n" +"#define INTERMEDIATE_TYPE float\n" +"#elif cn == 2\n" +"#define READ_IMAGE(X,Y,Z) read_imagef(X,Y,Z).xy\n" +"#define INTERMEDIATE_TYPE float2\n" +"#elif cn == 3\n" +"#define READ_IMAGE(X,Y,Z) read_imagef(X,Y,Z).xyz\n" +"#define INTERMEDIATE_TYPE float3\n" +"#elif cn == 4\n" +"#define READ_IMAGE(X,Y,Z) read_imagef(X,Y,Z)\n" +"#define INTERMEDIATE_TYPE float4\n" +"#endif\n" +"#define __CAT(x, y) x##y\n" +"#define CAT(x, y) __CAT(x, y)\n" +"#define float1 float\n" +"#if depth == 0\n" +"#define RESULT_SCALE 255.0f\n" +"#elif depth == 1\n" +"#define RESULT_SCALE 127.0f\n" +"#elif depth == 2\n" +"#define RESULT_SCALE 65535.0f\n" +"#elif depth == 3\n" +"#define RESULT_SCALE 32767.0f\n" +"#else\n" +"#define RESULT_SCALE 1.0f\n" +"#endif\n" +"__kernel void resizeSampler(__read_only image2d_t srcImage,\n" +"__global uchar* dstptr, int dststep, int dstoffset,\n" +"int dstrows, int dstcols,\n" +"float ifx, float ify)\n" +"{\n" +"const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE |\n" +"CLK_ADDRESS_CLAMP_TO_EDGE |\n" +"CLK_FILTER_LINEAR;\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"float sx = ((dx+0.5f) * ifx), sy = ((dy+0.5f) * ify);\n" +"INTERMEDIATE_TYPE intermediate = READ_IMAGE(srcImage, sampler, (float2)(sx, sy));\n" +"#if depth <= 4\n" +"T uval = convertToDT(round(intermediate * RESULT_SCALE));\n" +"#else\n" +"T uval = convertToDT(intermediate * RESULT_SCALE);\n" +"#endif\n" +"if(dx < dstcols && dy < dstrows)\n" +"{\n" +"storepix(uval, dstptr + mad24(dy, dststep, dstoffset + dx*TSIZE));\n" +"}\n" +"}\n" +"#elif defined INTER_LINEAR_INTEGER\n" +"__kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__global const uchar * buffer)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"__global const int * xofs = (__global const int *)(buffer), * yofs = xofs + dst_cols;\n" +"__global const short * ialpha = (__global const short *)(yofs + dst_rows);\n" +"__global const short * ibeta = ialpha + ((dst_cols + dy) << 1);\n" +"ialpha += dx << 1;\n" +"int sx0 = xofs[dx], sy0 = clamp(yofs[dy], 0, src_rows - 1),\n" +"sy1 = clamp(yofs[dy] + 1, 0, src_rows - 1);\n" +"short a0 = ialpha[0], a1 = ialpha[1];\n" +"short b0 = ibeta[0], b1 = ibeta[1];\n" +"int 
src_index0 = mad24(sy0, src_step, mad24(sx0, TSIZE, src_offset)),\n" +"src_index1 = mad24(sy1, src_step, mad24(sx0, TSIZE, src_offset));\n" +"WT data0 = convertToWT(loadpix(srcptr + src_index0));\n" +"WT data1 = convertToWT(loadpix(srcptr + src_index0 + TSIZE));\n" +"WT data2 = convertToWT(loadpix(srcptr + src_index1));\n" +"WT data3 = convertToWT(loadpix(srcptr + src_index1 + TSIZE));\n" +"WT val = ( (((data0 * a0 + data1 * a1) >> 4) * b0) >> 16) +\n" +"( (((data2 * a0 + data3 * a1) >> 4) * b1) >> 16);\n" +"storepix(convertToDT((val + 2) >> 2),\n" +"dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));\n" +"}\n" +"}\n" +"#elif defined INTER_LINEAR\n" +"__kernel void resizeLN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"float ifx, float ify)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"float sx = ((dx+0.5f) * ifx - 0.5f), sy = ((dy+0.5f) * ify - 0.5f);\n" +"int x = floor(sx), y = floor(sy);\n" +"float u = sx - x, v = sy - y;\n" +"if ( x<0 ) x=0,u=0;\n" +"if ( x>=src_cols ) x=src_cols-1,u=0;\n" +"if ( y<0 ) y=0,v=0;\n" +"if ( y>=src_rows ) y=src_rows-1,v=0;\n" +"int y_ = INC(y, src_rows);\n" +"int x_ = INC(x, src_cols);\n" +"#if depth <= 4\n" +"u = u * INTER_RESIZE_COEF_SCALE;\n" +"v = v * INTER_RESIZE_COEF_SCALE;\n" +"int U = rint(u);\n" +"int V = rint(v);\n" +"int U1 = rint(INTER_RESIZE_COEF_SCALE - u);\n" +"int V1 = rint(INTER_RESIZE_COEF_SCALE - v);\n" +"WT data0 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x, TSIZE, src_offset))));\n" +"WT data1 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x_, TSIZE, src_offset))));\n" +"WT data2 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x, TSIZE, src_offset))));\n" +"WT data3 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x_, TSIZE, src_offset))));\n" +"WT val = mul24((WT)mul24(U1, V1), data0) + mul24((WT)mul24(U, V1), data1) +\n" +"mul24((WT)mul24(U1, V), data2) + mul24((WT)mul24(U, V), data3);\n" +"T uval = convertToDT((val + (1<<(CAST_BITS-1)))>>CAST_BITS);\n" +"#else\n" +"float u1 = 1.f - u;\n" +"float v1 = 1.f - v;\n" +"WT data0 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x, TSIZE, src_offset))));\n" +"WT data1 = convertToWT(loadpix(srcptr + mad24(y, src_step, mad24(x_, TSIZE, src_offset))));\n" +"WT data2 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x, TSIZE, src_offset))));\n" +"WT data3 = convertToWT(loadpix(srcptr + mad24(y_, src_step, mad24(x_, TSIZE, src_offset))));\n" +"T uval = u1 * v1 * data0 + u * v1 * data1 + u1 * v *data2 + u * v *data3;\n" +"#endif\n" +"storepix(uval, dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));\n" +"}\n" +"}\n" +"#elif defined INTER_NEAREST\n" +"__kernel void resizeNN(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"float ifx, float ify)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"float s1 = dx * ifx;\n" +"float s2 = dy * ify;\n" +"int sx = min(convert_int_rtz(s1), src_cols - 1);\n" +"int sy = min(convert_int_rtz(s2), src_rows - 1);\n" +"storepix(loadpix(srcptr + mad24(sy, src_step, mad24(sx, TSIZE, src_offset))),\n" +"dstptr + mad24(dy, dst_step, mad24(dx, TSIZE, dst_offset)));\n" +"}\n" +"}\n" +"#elif 
defined INTER_AREA\n" +"#ifdef INTER_AREA_FAST\n" +"__kernel void resizeAREA_FAST(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"int dst_index = mad24(dy, dst_step, dst_offset);\n" +"int sx = XSCALE * dx;\n" +"int sy = YSCALE * dy;\n" +"WTV sum = (WTV)(0);\n" +"#pragma unroll\n" +"for (int py = 0; py < YSCALE; ++py)\n" +"{\n" +"int y = min(sy + py, src_rows - 1);\n" +"int src_index = mad24(y, src_step, src_offset);\n" +"#pragma unroll\n" +"for (int px = 0; px < XSCALE; ++px)\n" +"{\n" +"int x = min(sx + px, src_cols - 1);\n" +"sum += convertToWTV(loadpix(src + src_index + x*TSIZE));\n" +"}\n" +"}\n" +"storepix(convertToT(convertToWT2V(sum) * (WT2V)(SCALE)), dst + mad24(dx, TSIZE, dst_index));\n" +"}\n" +"}\n" +"#else\n" +"__kernel void resizeAREA(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"float ifx, float ify, __global const int * ofs_tab,\n" +"__global const int * map_tab, __global const float * alpha_tab)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"int dst_index = mad24(dy, dst_step, dst_offset);\n" +"__global const int * xmap_tab = map_tab;\n" +"__global const int * ymap_tab = (__global const int *)(map_tab + (src_cols << 1));\n" +"__global const float * xalpha_tab = alpha_tab;\n" +"__global const float * yalpha_tab = (__global const float *)(alpha_tab + (src_cols << 1));\n" +"__global const int * xofs_tab = ofs_tab;\n" +"__global const int * yofs_tab = (__global const int *)(ofs_tab + dst_cols + 1);\n" +"int xk0 = xofs_tab[dx], xk1 = xofs_tab[dx + 1];\n" +"int yk0 = yofs_tab[dy], yk1 = yofs_tab[dy + 1];\n" +"int sy0 = ymap_tab[yk0], sy1 = ymap_tab[yk1 - 1];\n" +"int sx0 = xmap_tab[xk0], sx1 = xmap_tab[xk1 - 1];\n" +"WTV sum = (WTV)(0), buf;\n" +"int src_index = mad24(sy0, src_step, src_offset);\n" +"for (int sy = sy0, yk = yk0; sy <= sy1; ++sy, src_index += src_step, ++yk)\n" +"{\n" +"WTV beta = (WTV)(yalpha_tab[yk]);\n" +"buf = (WTV)(0);\n" +"for (int sx = sx0, xk = xk0; sx <= sx1; ++sx, ++xk)\n" +"{\n" +"WTV alpha = (WTV)(xalpha_tab[xk]);\n" +"buf += convertToWTV(loadpix(src + mad24(sx, TSIZE, src_index))) * alpha;\n" +"}\n" +"sum += buf * beta;\n" +"}\n" +"storepix(convertToT(sum), dst + mad24(dx, TSIZE, dst_index));\n" +"}\n" +"}\n" +"#endif\n" +"#endif\n" +, "3e1ea3c21fc70a7a9166d5cc66b7ff80", NULL}; +struct cv::ocl::internal::ProgramEntry sepFilter3x3_oclsrc={moduleName, "sepFilter3x3", +"#define DIG(a) a,\n" +"__constant float kx[] = { KERNEL_MATRIX_X };\n" +"__constant float ky[] = { KERNEL_MATRIX_Y };\n" +"#define OP(delta, y, x) (convert_float16(arr[(y + delta) * 3 + x]) * ky[y] * kx[x])\n" +"__kernel void sepFilter3x3_8UC1_cols16_rows2(__global const uint* src, int src_step,\n" +"__global uint* dst, int dst_step,\n" +"int rows, int cols, float delta)\n" +"{\n" +"int block_x = get_global_id(0);\n" +"int y = get_global_id(1) * 2;\n" +"int ssx, dsx;\n" +"if ((block_x * 16) >= cols || y >= rows) return;\n" +"uint4 line[4];\n" +"uint4 line_out[2];\n" +"uchar a; uchar16 b; uchar c;\n" +"uchar d; uchar16 e; uchar f;\n" +"uchar g; uchar16 h; uchar i;\n" +"uchar j; uchar16 k; uchar l;\n" +"ssx = dsx = 1;\n" +"int src_index = 
block_x * 4 * ssx + (y - 1) * (src_step / 4);\n" +"line[1] = vload4(0, src + src_index + (src_step / 4));\n" +"line[2] = vload4(0, src + src_index + 2 * (src_step / 4));\n" +"#ifdef BORDER_CONSTANT\n" +"line[0] = (y == 0) ? (uint4)0 : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? (uint4)0 : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined BORDER_REFLECT_101\n" +"line[0] = (y == 0) ? line[2] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[1] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"line[0] = (y == 0) ? line[1] : vload4(0, src + src_index);\n" +"line[3] = (y == (rows - 2)) ? line[2] : vload4(0, src + src_index + 3 * (src_step / 4));\n" +"#endif\n" +"__global uchar *src_p = (__global uchar *)src;\n" +"src_index = block_x * 16 * ssx + (y - 1) * src_step;\n" +"bool line_end = ((block_x + 1) * 16 == cols);\n" +"b = as_uchar16(line[0]);\n" +"e = as_uchar16(line[1]);\n" +"h = as_uchar16(line[2]);\n" +"k = as_uchar16(line[3]);\n" +"#ifdef BORDER_CONSTANT\n" +"a = (block_x == 0 || y == 0) ? 0 : src_p[src_index - 1];\n" +"c = (line_end || y == 0) ? 0 : src_p[src_index + 16];\n" +"d = (block_x == 0) ? 0 : src_p[src_index + src_step - 1];\n" +"f = line_end ? 0 : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? 0 : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? 0 : src_p[src_index + 2 * src_step + 16];\n" +"j = (block_x == 0 || y == (rows - 2)) ? 0 : src_p[src_index + 3 * src_step - 1];\n" +"l = (line_end || y == (rows - 2))? 0 : src_p[src_index + 3 * src_step + 16];\n" +"#elif defined BORDER_REFLECT_101\n" +"int offset;\n" +"offset = (y == 0) ? (2 * src_step) : 0;\n" +"a = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step + 1] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 14] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step + 1] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 14] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (1 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset + 1] : src_p[src_index + offset - 1];\n" +"l = line_end ? src_p[src_index + offset + 14] : src_p[src_index + offset + 16];\n" +"#elif defined (BORDER_REPLICATE) || defined(BORDER_REFLECT)\n" +"int offset;\n" +"offset = (y == 0) ? (1 * src_step) : 0;\n" +"a = (block_x == 0) ? src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"c = line_end ? src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"d = (block_x == 0) ? src_p[src_index + src_step] : src_p[src_index + src_step - 1];\n" +"f = line_end ? src_p[src_index + src_step + 15] : src_p[src_index + src_step + 16];\n" +"g = (block_x == 0) ? src_p[src_index + 2 * src_step] : src_p[src_index + 2 * src_step - 1];\n" +"i = line_end ? src_p[src_index + 2 * src_step + 15] : src_p[src_index + 2 * src_step + 16];\n" +"offset = (y == (rows - 2)) ? (2 * src_step) : (3 * src_step);\n" +"j = (block_x == 0) ? src_p[src_index + offset] : src_p[src_index + offset - 1];\n" +"l = line_end ? 
src_p[src_index + offset + 15] : src_p[src_index + offset + 16];\n" +"#endif\n" +"uchar16 arr[12];\n" +"float16 sum[2];\n" +"arr[0] = (uchar16)(a, b.s0123, b.s456789ab, b.scde);\n" +"arr[1] = b;\n" +"arr[2] = (uchar16)(b.s123, b.s4567, b.s89abcdef, c);\n" +"arr[3] = (uchar16)(d, e.s0123, e.s456789ab, e.scde);\n" +"arr[4] = e;\n" +"arr[5] = (uchar16)(e.s123, e.s4567, e.s89abcdef, f);\n" +"arr[6] = (uchar16)(g, h.s0123, h.s456789ab, h.scde);\n" +"arr[7] = h;\n" +"arr[8] = (uchar16)(h.s123, h.s4567, h.s89abcdef, i);\n" +"arr[9] = (uchar16)(j, k.s0123, k.s456789ab, k.scde);\n" +"arr[10] = k;\n" +"arr[11] = (uchar16)(k.s123, k.s4567, k.s89abcdef, l);\n" +"sum[0] = OP(0, 0, 0) + OP(0, 0, 1) + OP(0, 0, 2) +\n" +"OP(0, 1, 0) + OP(0, 1, 1) + OP(0, 1, 2) +\n" +"OP(0, 2, 0) + OP(0, 2, 1) + OP(0, 2, 2);\n" +"sum[1] = OP(1, 0, 0) + OP(1, 0, 1) + OP(1, 0, 2) +\n" +"OP(1, 1, 0) + OP(1, 1, 1) + OP(1, 1, 2) +\n" +"OP(1, 2, 0) + OP(1, 2, 1) + OP(1, 2, 2);\n" +"line_out[0] = as_uint4(convert_uchar16_sat_rte(sum[0] + delta));\n" +"line_out[1] = as_uint4(convert_uchar16_sat_rte(sum[1] + delta));\n" +"int dst_index = block_x * 4 * dsx + y * (dst_step / 4);\n" +"vstore4(line_out[0], 0, dst + dst_index);\n" +"vstore4(line_out[1], 0, dst + dst_index + (dst_step / 4));\n" +"}\n" +, "a8fb0c872c2ccd041ab86148ae012c62", NULL}; +struct cv::ocl::internal::ProgramEntry threshold_oclsrc={moduleName, "threshold", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"__kernel void threshold(__global const uchar * srcptr, int src_step, int src_offset,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int rows, int cols,\n" +"T1 thresh, T1 max_val, T1 min_val)\n" +"{\n" +"int gx = get_global_id(0);\n" +"int gy = get_global_id(1) * STRIDE_SIZE;\n" +"if (gx < cols)\n" +"{\n" +"int src_index = mad24(gy, src_step, mad24(gx, (int)sizeof(T), src_offset));\n" +"int dst_index = mad24(gy, dst_step, mad24(gx, (int)sizeof(T), dst_offset));\n" +"#pragma unroll\n" +"for (int i = 0; i < STRIDE_SIZE; i++)\n" +"{\n" +"if (gy < rows)\n" +"{\n" +"T sdata = *(__global const T *)(srcptr + src_index);\n" +"__global T * dst = (__global T *)(dstptr + dst_index);\n" +"#ifdef THRESH_BINARY\n" +"dst[0] = sdata > (thresh) ? (T)(max_val) : (T)(0);\n" +"#elif defined THRESH_BINARY_INV\n" +"dst[0] = sdata > (thresh) ? (T)(0) : (T)(max_val);\n" +"#elif defined THRESH_TRUNC\n" +"dst[0] = clamp(sdata, (T)min_val, (T)(thresh));\n" +"#elif defined THRESH_TOZERO\n" +"dst[0] = sdata > (thresh) ? sdata : (T)(0);\n" +"#elif defined THRESH_TOZERO_INV\n" +"dst[0] = sdata > (thresh) ? 
(T)(0) : sdata;\n" +"#endif\n" +"gy++;\n" +"src_index += src_step;\n" +"dst_index += dst_step;\n" +"}\n" +"}\n" +"}\n" +"}\n" +, "f464151682565a20de380a62e09ae458", NULL}; +struct cv::ocl::internal::ProgramEntry warp_affine_oclsrc={moduleName, "warp_affine", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define INTER_BITS 5\n" +"#define INTER_TAB_SIZE (1 << INTER_BITS)\n" +"#define INTER_SCALE 1.f/INTER_TAB_SIZE\n" +"#define AB_BITS max(10, (int)INTER_BITS)\n" +"#define AB_SCALE (1 << AB_BITS)\n" +"#define INTER_REMAP_COEF_BITS 15\n" +"#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS)\n" +"#define ROUND_DELTA (1 << (AB_BITS - INTER_BITS - 1))\n" +"#define noconvert\n" +"#ifndef ST\n" +"#define ST T\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = val\n" +"#define scalar scalar_\n" +"#define pixsize (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1*)(addr))\n" +"#ifdef INTER_NEAREST\n" +"#define scalar (T)(scalar_.x, scalar_.y, scalar_.z)\n" +"#else\n" +"#define scalar (WT)(scalar_.x, scalar_.y, scalar_.z)\n" +"#endif\n" +"#define pixsize ((int)sizeof(T1)*3)\n" +"#endif\n" +"#ifdef INTER_NEAREST\n" +"__kernel void warpAffine(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy0 = get_global_id(1) * rowsPerWI;\n" +"if (dx < dst_cols)\n" +"{\n" +"int round_delta = (AB_SCALE >> 1);\n" +"int X0_ = rint(M[0] * dx * AB_SCALE);\n" +"int Y0_ = rint(M[3] * dx * AB_SCALE);\n" +"int dst_index = mad24(dy0, dst_step, mad24(dx, pixsize, dst_offset));\n" +"for (int dy = dy0, dy1 = min(dst_rows, dy0 + rowsPerWI); dy < dy1; ++dy, dst_index += dst_step)\n" +"{\n" +"int X0 = X0_ + rint(fma(M[1], (CT)dy, M[2]) * AB_SCALE) + round_delta;\n" +"int Y0 = Y0_ + rint(fma(M[4], (CT)dy, M[5]) * AB_SCALE) + round_delta;\n" +"short sx = convert_short_sat(X0 >> AB_BITS);\n" +"short sy = convert_short_sat(Y0 >> AB_BITS);\n" +"if (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows)\n" +"{\n" +"int src_index = mad24(sy, src_step, mad24(sx, pixsize, src_offset));\n" +"storepix(loadpix(srcptr + src_index), dstptr + dst_index);\n" +"}\n" +"else\n" +"storepix(scalar, dstptr + dst_index);\n" +"}\n" +"}\n" +"}\n" +"#elif defined INTER_LINEAR\n" +"__constant float coeffs[64] =\n" +"{ 1.000000f, 0.000000f, 0.968750f, 0.031250f, 0.937500f, 0.062500f, 0.906250f, 0.093750f, 0.875000f, 0.125000f, 0.843750f, 0.156250f,\n" +"0.812500f, 0.187500f, 0.781250f, 0.218750f, 0.750000f, 0.250000f, 0.718750f, 0.281250f, 0.687500f, 0.312500f, 0.656250f, 0.343750f,\n" +"0.625000f, 0.375000f, 0.593750f, 0.406250f, 0.562500f, 0.437500f, 0.531250f, 0.468750f, 0.500000f, 0.500000f, 0.468750f, 0.531250f,\n" +"0.437500f, 0.562500f, 0.406250f, 0.593750f, 0.375000f, 0.625000f, 0.343750f, 0.656250f, 0.312500f, 0.687500f, 0.281250f, 0.718750f,\n" +"0.250000f, 0.750000f, 0.218750f, 0.781250f, 0.187500f, 0.812500f, 0.156250f, 0.843750f, 0.125000f, 0.875000f, 0.093750f, 0.906250f,\n" +"0.062500f, 0.937500f, 0.031250f, 0.968750f };\n" +"__kernel void warpAffine(__global const uchar * srcptr, int 
src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy0 = get_global_id(1) * rowsPerWI;\n" +"if (dx < dst_cols)\n" +"{\n" +"int tmp = dx << AB_BITS;\n" +"int X0_ = rint(M[0] * tmp);\n" +"int Y0_ = rint(M[3] * tmp);\n" +"for (int dy = dy0, dy1 = min(dst_rows, dy0 + rowsPerWI); dy < dy1; ++dy)\n" +"{\n" +"int X0 = X0_ + rint(fma(M[1], (CT)dy, M[2]) * AB_SCALE) + ROUND_DELTA;\n" +"int Y0 = Y0_ + rint(fma(M[4], (CT)dy, M[5]) * AB_SCALE) + ROUND_DELTA;\n" +"X0 = X0 >> (AB_BITS - INTER_BITS);\n" +"Y0 = Y0 >> (AB_BITS - INTER_BITS);\n" +"short sx = convert_short_sat(X0 >> INTER_BITS), sy = convert_short_sat(Y0 >> INTER_BITS);\n" +"short ax = convert_short(X0 & (INTER_TAB_SIZE-1)), ay = convert_short(Y0 & (INTER_TAB_SIZE-1));\n" +"#if defined AMD_DEVICE || depth > 4\n" +"WT v0 = scalar, v1 = scalar, v2 = scalar, v3 = scalar;\n" +"if (sx >= 0 && sx < src_cols)\n" +"{\n" +"if (sy >= 0 && sy < src_rows)\n" +"v0 = convertToWT(loadpix(srcptr + mad24(sy, src_step, mad24(sx, pixsize, src_offset))));\n" +"if (sy+1 >= 0 && sy+1 < src_rows)\n" +"v2 = convertToWT(loadpix(srcptr + mad24(sy+1, src_step, mad24(sx, pixsize, src_offset))));\n" +"}\n" +"if (sx+1 >= 0 && sx+1 < src_cols)\n" +"{\n" +"if (sy >= 0 && sy < src_rows)\n" +"v1 = convertToWT(loadpix(srcptr + mad24(sy, src_step, mad24(sx+1, pixsize, src_offset))));\n" +"if (sy+1 >= 0 && sy+1 < src_rows)\n" +"v3 = convertToWT(loadpix(srcptr + mad24(sy+1, src_step, mad24(sx+1, pixsize, src_offset))));\n" +"}\n" +"float taby = 1.f/INTER_TAB_SIZE*ay;\n" +"float tabx = 1.f/INTER_TAB_SIZE*ax;\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, pixsize, dst_offset));\n" +"#if depth <= 4\n" +"int itab0 = convert_short_sat_rte( (1.0f-taby)*(1.0f-tabx) * INTER_REMAP_COEF_SCALE );\n" +"int itab1 = convert_short_sat_rte( (1.0f-taby)*tabx * INTER_REMAP_COEF_SCALE );\n" +"int itab2 = convert_short_sat_rte( taby*(1.0f-tabx) * INTER_REMAP_COEF_SCALE );\n" +"int itab3 = convert_short_sat_rte( taby*tabx * INTER_REMAP_COEF_SCALE );\n" +"WT val = mad24(v0, itab0, mad24(v1, itab1, mad24(v2, itab2, v3 * itab3)));\n" +"storepix(convertToT((val + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS), dstptr + dst_index);\n" +"#else\n" +"float tabx2 = 1.0f - tabx, taby2 = 1.0f - taby;\n" +"WT val = fma(tabx2, fma(v0, taby2, v2 * taby), tabx * fma(v1, taby2, v3 * taby));\n" +"storepix(convertToT(val), dstptr + dst_index);\n" +"#endif\n" +"#else\n" +"__constant float * coeffs_y = coeffs + (ay << 1), * coeffs_x = coeffs + (ax << 1);\n" +"int src_index0 = mad24(sy, src_step, mad24(sx, pixsize, src_offset)), src_index;\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, pixsize, dst_offset));\n" +"WT sum = (WT)(0), xsum;\n" +"#pragma unroll\n" +"for (int y = 0; y < 2; y++)\n" +"{\n" +"src_index = mad24(y, src_step, src_index0);\n" +"if (sy + y >= 0 && sy + y < src_rows)\n" +"{\n" +"xsum = (WT)(0);\n" +"if (sx >= 0 && sx + 2 < src_cols)\n" +"{\n" +"#if depth == 0 && cn == 1\n" +"uchar2 value = vload2(0, srcptr + src_index);\n" +"xsum = dot(convert_float2(value), (float2)(coeffs_x[0], coeffs_x[1]));\n" +"#else\n" +"#pragma unroll\n" +"for (int x = 0; x < 2; x++)\n" +"xsum = fma(convertToWT(loadpix(srcptr + mad24(x, pixsize, src_index))), coeffs_x[x], xsum);\n" +"#endif\n" +"}\n" +"else\n" +"{\n" +"#pragma unroll\n" +"for (int x = 0; x < 2; x++)\n" +"xsum = fma(sx + x >= 0 && sx + x < src_cols ?\n" 
+"convertToWT(loadpix(srcptr + mad24(x, pixsize, src_index))) : scalar, coeffs_x[x], xsum);\n" +"}\n" +"sum = fma(xsum, coeffs_y[y], sum);\n" +"}\n" +"else\n" +"sum = fma(scalar, coeffs_y[y], sum);\n" +"}\n" +"storepix(convertToT(sum), dstptr + dst_index);\n" +"#endif\n" +"}\n" +"}\n" +"}\n" +"#elif defined INTER_CUBIC\n" +"#ifdef AMD_DEVICE\n" +"inline void interpolateCubic( float x, float* coeffs )\n" +"{\n" +"const float A = -0.75f;\n" +"coeffs[0] = fma(fma(fma(A, (x + 1.f), - 5.0f*A), (x + 1.f), 8.0f*A), x + 1.f, - 4.0f*A);\n" +"coeffs[1] = fma(fma(A + 2.f, x, - (A + 3.f)), x*x, 1.f);\n" +"coeffs[2] = fma(fma(A + 2.f, 1.f - x, - (A + 3.f)), (1.f - x)*(1.f - x), 1.f);\n" +"coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];\n" +"}\n" +"#else\n" +"__constant float coeffs[128] =\n" +"{ 0.000000f, 1.000000f, 0.000000f, 0.000000f, -0.021996f, 0.997841f, 0.024864f, -0.000710f, -0.041199f, 0.991516f, 0.052429f, -0.002747f,\n" +"-0.057747f, 0.981255f, 0.082466f, -0.005974f, -0.071777f, 0.967285f, 0.114746f, -0.010254f, -0.083427f, 0.949837f, 0.149040f, -0.015450f,\n" +"-0.092834f, 0.929138f, 0.185120f, -0.021423f, -0.100136f, 0.905418f, 0.222755f, -0.028038f, -0.105469f, 0.878906f, 0.261719f, -0.035156f,\n" +"-0.108971f, 0.849831f, 0.301781f, -0.042641f, -0.110779f, 0.818420f, 0.342712f, -0.050354f, -0.111031f, 0.784904f, 0.384285f, -0.058159f,\n" +"-0.109863f, 0.749512f, 0.426270f, -0.065918f, -0.107414f, 0.712471f, 0.468437f, -0.073494f, -0.103821f, 0.674011f, 0.510559f, -0.080750f,\n" +"-0.099220f, 0.634361f, 0.552406f, -0.087547f, -0.093750f, 0.593750f, 0.593750f, -0.093750f, -0.087547f, 0.552406f, 0.634361f, -0.099220f,\n" +"-0.080750f, 0.510559f, 0.674011f, -0.103821f, -0.073494f, 0.468437f, 0.712471f, -0.107414f, -0.065918f, 0.426270f, 0.749512f, -0.109863f,\n" +"-0.058159f, 0.384285f, 0.784904f, -0.111031f, -0.050354f, 0.342712f, 0.818420f, -0.110779f, -0.042641f, 0.301781f, 0.849831f, -0.108971f,\n" +"-0.035156f, 0.261719f, 0.878906f, -0.105469f, -0.028038f, 0.222755f, 0.905418f, -0.100136f, -0.021423f, 0.185120f, 0.929138f, -0.092834f,\n" +"-0.015450f, 0.149040f, 0.949837f, -0.083427f, -0.010254f, 0.114746f, 0.967285f, -0.071777f, -0.005974f, 0.082466f, 0.981255f, -0.057747f,\n" +"-0.002747f, 0.052429f, 0.991516f, -0.041199f, -0.000710f, 0.024864f, 0.997841f, -0.021996f };\n" +"#endif\n" +"__kernel void warpAffine(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"int tmp = (dx << AB_BITS);\n" +"int X0 = rint(M[0] * tmp) + rint(fma(M[1], (CT)dy, M[2]) * AB_SCALE) + ROUND_DELTA;\n" +"int Y0 = rint(M[3] * tmp) + rint(fma(M[4], (CT)dy, M[5]) * AB_SCALE) + ROUND_DELTA;\n" +"X0 = X0 >> (AB_BITS - INTER_BITS);\n" +"Y0 = Y0 >> (AB_BITS - INTER_BITS);\n" +"int sx = (short)(X0 >> INTER_BITS) - 1, sy = (short)(Y0 >> INTER_BITS) - 1;\n" +"int ay = (short)(Y0 & (INTER_TAB_SIZE - 1)), ax = (short)(X0 & (INTER_TAB_SIZE - 1));\n" +"#ifdef AMD_DEVICE\n" +"WT v[16];\n" +"#pragma unroll\n" +"for (int y = 0; y < 4; y++)\n" +"{\n" +"if (sy+y >= 0 && sy+y < src_rows)\n" +"{\n" +"#pragma unroll\n" +"for (int x = 0; x < 4; x++)\n" +"v[mad24(y, 4, x)] = sx+x >= 0 && sx+x < src_cols ?\n" +"convertToWT(loadpix(srcptr + mad24(sy+y, src_step, mad24(sx+x, pixsize, src_offset)))) : scalar;\n" +"}\n" +"else\n" +"{\n" +"#pragma 
unroll\n" +"for (int x = 0; x < 4; x++)\n" +"v[mad24(y, 4, x)] = scalar;\n" +"}\n" +"}\n" +"float tab1y[4], tab1x[4];\n" +"float ayy = INTER_SCALE * ay;\n" +"float axx = INTER_SCALE * ax;\n" +"interpolateCubic(ayy, tab1y);\n" +"interpolateCubic(axx, tab1x);\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, pixsize, dst_offset));\n" +"WT sum = (WT)(0);\n" +"#if depth <= 4\n" +"int itab[16];\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"itab[i] = rint(tab1y[(i>>2)] * tab1x[(i&3)] * INTER_REMAP_COEF_SCALE);\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"sum = mad24(v[i], itab[i], sum);\n" +"storepix(convertToT( (sum + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ), dstptr + dst_index);\n" +"#else\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"sum = fma(v[i], tab1y[(i>>2)] * tab1x[(i&3)], sum);\n" +"storepix(convertToT( sum ), dstptr + dst_index);\n" +"#endif\n" +"#else\n" +"__constant float * coeffs_y = coeffs + (ay << 2), * coeffs_x = coeffs + (ax << 2);\n" +"int src_index0 = mad24(sy, src_step, mad24(sx, pixsize, src_offset)), src_index;\n" +"int dst_index = mad24(dy, dst_step, mad24(dx, pixsize, dst_offset));\n" +"WT sum = (WT)(0), xsum;\n" +"#pragma unroll\n" +"for (int y = 0; y < 4; y++)\n" +"{\n" +"src_index = mad24(y, src_step, src_index0);\n" +"if (sy + y >= 0 && sy + y < src_rows)\n" +"{\n" +"xsum = (WT)(0);\n" +"if (sx >= 0 && sx + 4 < src_cols)\n" +"{\n" +"#if depth == 0 && cn == 1\n" +"uchar4 value = vload4(0, srcptr + src_index);\n" +"xsum = dot(convert_float4(value), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"#else\n" +"#pragma unroll\n" +"for (int x = 0; x < 4; x++)\n" +"xsum = fma(convertToWT(loadpix(srcptr + mad24(x, pixsize, src_index))), coeffs_x[x], xsum);\n" +"#endif\n" +"}\n" +"else\n" +"{\n" +"#pragma unroll\n" +"for (int x = 0; x < 4; x++)\n" +"xsum = fma(sx + x >= 0 && sx + x < src_cols ?\n" +"convertToWT(loadpix(srcptr + mad24(x, pixsize, src_index))) : scalar, coeffs_x[x], xsum);\n" +"}\n" +"sum = fma(xsum, coeffs_y[y], sum);\n" +"}\n" +"else\n" +"sum = fma(scalar, coeffs_y[y], sum);\n" +"}\n" +"storepix(convertToT(sum), dstptr + dst_index);\n" +"#endif\n" +"}\n" +"}\n" +"#endif\n" +, "ad75e82949ec363a8853f25adb4b808e", NULL}; +struct cv::ocl::internal::ProgramEntry warp_perspective_oclsrc={moduleName, "warp_perspective", +"#ifdef DOUBLE_SUPPORT\n" +"#ifdef cl_amd_fp64\n" +"#pragma OPENCL EXTENSION cl_amd_fp64:enable\n" +"#elif defined (cl_khr_fp64)\n" +"#pragma OPENCL EXTENSION cl_khr_fp64:enable\n" +"#endif\n" +"#endif\n" +"#define INTER_BITS 5\n" +"#define INTER_TAB_SIZE (1 << INTER_BITS)\n" +"#define INTER_SCALE 1.f / INTER_TAB_SIZE\n" +"#define AB_BITS max(10, (int)INTER_BITS)\n" +"#define AB_SCALE (1 << AB_BITS)\n" +"#define INTER_REMAP_COEF_BITS 15\n" +"#define INTER_REMAP_COEF_SCALE (1 << INTER_REMAP_COEF_BITS)\n" +"#define noconvert\n" +"#ifndef ST\n" +"#define ST T\n" +"#endif\n" +"#if cn != 3\n" +"#define loadpix(addr) *(__global const T*)(addr)\n" +"#define storepix(val, addr) *(__global T*)(addr) = val\n" +"#define scalar scalar_\n" +"#define pixsize (int)sizeof(T)\n" +"#else\n" +"#define loadpix(addr) vload3(0, (__global const T1*)(addr))\n" +"#define storepix(val, addr) vstore3(val, 0, (__global T1*)(addr))\n" +"#ifdef INTER_NEAREST\n" +"#define scalar (T)(scalar_.x, scalar_.y, scalar_.z)\n" +"#else\n" +"#define scalar (WT)(scalar_.x, scalar_.y, scalar_.z)\n" +"#endif\n" +"#define pixsize ((int)sizeof(T1)*3)\n" +"#endif\n" +"#ifdef INTER_NEAREST\n" +"__kernel void 
warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"CT X0 = M[0] * dx + M[1] * dy + M[2];\n" +"CT Y0 = M[3] * dx + M[4] * dy + M[5];\n" +"CT W = M[6] * dx + M[7] * dy + M[8];\n" +"W = W != 0.0f ? 1.f / W : 0.0f;\n" +"short sx = convert_short_sat_rte(X0*W);\n" +"short sy = convert_short_sat_rte(Y0*W);\n" +"int dst_index = mad24(dy, dst_step, dx * pixsize + dst_offset);\n" +"if (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows)\n" +"{\n" +"int src_index = mad24(sy, src_step, sx * pixsize + src_offset);\n" +"storepix(loadpix(srcptr + src_index), dstptr + dst_index);\n" +"}\n" +"else\n" +"storepix(scalar, dstptr + dst_index);\n" +"}\n" +"}\n" +"#elif defined INTER_LINEAR\n" +"__kernel void warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"CT X0 = M[0] * dx + M[1] * dy + M[2];\n" +"CT Y0 = M[3] * dx + M[4] * dy + M[5];\n" +"CT W = M[6] * dx + M[7] * dy + M[8];\n" +"W = W != 0.0f ? INTER_TAB_SIZE / W : 0.0f;\n" +"int X = rint(X0 * W), Y = rint(Y0 * W);\n" +"short sx = convert_short_sat(X >> INTER_BITS);\n" +"short sy = convert_short_sat(Y >> INTER_BITS);\n" +"short ay = (short)(Y & (INTER_TAB_SIZE - 1));\n" +"short ax = (short)(X & (INTER_TAB_SIZE - 1));\n" +"WT v0 = (sx >= 0 && sx < src_cols && sy >= 0 && sy < src_rows) ?\n" +"convertToWT(loadpix(srcptr + mad24(sy, src_step, src_offset + sx * pixsize))) : scalar;\n" +"WT v1 = (sx+1 >= 0 && sx+1 < src_cols && sy >= 0 && sy < src_rows) ?\n" +"convertToWT(loadpix(srcptr + mad24(sy, src_step, src_offset + (sx+1) * pixsize))) : scalar;\n" +"WT v2 = (sx >= 0 && sx < src_cols && sy+1 >= 0 && sy+1 < src_rows) ?\n" +"convertToWT(loadpix(srcptr + mad24(sy+1, src_step, src_offset + sx * pixsize))) : scalar;\n" +"WT v3 = (sx+1 >= 0 && sx+1 < src_cols && sy+1 >= 0 && sy+1 < src_rows) ?\n" +"convertToWT(loadpix(srcptr + mad24(sy+1, src_step, src_offset + (sx+1) * pixsize))) : scalar;\n" +"float taby = 1.f/INTER_TAB_SIZE*ay;\n" +"float tabx = 1.f/INTER_TAB_SIZE*ax;\n" +"int dst_index = mad24(dy, dst_step, dst_offset + dx * pixsize);\n" +"#if depth <= 4\n" +"int itab0 = convert_short_sat_rte( (1.0f-taby)*(1.0f-tabx) * INTER_REMAP_COEF_SCALE );\n" +"int itab1 = convert_short_sat_rte( (1.0f-taby)*tabx * INTER_REMAP_COEF_SCALE );\n" +"int itab2 = convert_short_sat_rte( taby*(1.0f-tabx) * INTER_REMAP_COEF_SCALE );\n" +"int itab3 = convert_short_sat_rte( taby*tabx * INTER_REMAP_COEF_SCALE );\n" +"WT val = v0 * itab0 + v1 * itab1 + v2 * itab2 + v3 * itab3;\n" +"storepix(convertToT((val + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS), dstptr + dst_index);\n" +"#else\n" +"float tabx2 = 1.0f - tabx, taby2 = 1.0f - taby;\n" +"WT val = v0 * tabx2 * taby2 + v1 * tabx * taby2 + v2 * tabx2 * taby + v3 * tabx * taby;\n" +"storepix(convertToT(val), dstptr + dst_index);\n" +"#endif\n" +"}\n" +"}\n" +"#elif defined INTER_CUBIC\n" +"inline void interpolateCubic( float x, float* coeffs )\n" +"{\n" +"const float A = -0.75f;\n" +"coeffs[0] = ((A*(x + 1.f) - 5.0f*A)*(x + 
1.f) + 8.0f*A)*(x + 1.f) - 4.0f*A;\n" +"coeffs[1] = ((A + 2.f)*x - (A + 3.f))*x*x + 1.f;\n" +"coeffs[2] = ((A + 2.f)*(1.f - x) - (A + 3.f))*(1.f - x)*(1.f - x) + 1.f;\n" +"coeffs[3] = 1.f - coeffs[0] - coeffs[1] - coeffs[2];\n" +"}\n" +"__kernel void warpPerspective(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dstptr, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant CT * M, ST scalar_)\n" +"{\n" +"int dx = get_global_id(0);\n" +"int dy = get_global_id(1);\n" +"if (dx < dst_cols && dy < dst_rows)\n" +"{\n" +"CT X0 = M[0] * dx + M[1] * dy + M[2];\n" +"CT Y0 = M[3] * dx + M[4] * dy + M[5];\n" +"CT W = M[6] * dx + M[7] * dy + M[8];\n" +"W = W != 0.0f ? INTER_TAB_SIZE / W : 0.0f;\n" +"int X = rint(X0 * W), Y = rint(Y0 * W);\n" +"short sx = convert_short_sat(X >> INTER_BITS) - 1;\n" +"short sy = convert_short_sat(Y >> INTER_BITS) - 1;\n" +"short ay = (short)(Y & (INTER_TAB_SIZE-1));\n" +"short ax = (short)(X & (INTER_TAB_SIZE-1));\n" +"WT v[16];\n" +"#pragma unroll\n" +"for (int y = 0; y < 4; y++)\n" +"#pragma unroll\n" +"for (int x = 0; x < 4; x++)\n" +"v[mad24(y, 4, x)] = (sx+x >= 0 && sx+x < src_cols && sy+y >= 0 && sy+y < src_rows) ?\n" +"convertToWT(loadpix(srcptr + mad24(sy+y, src_step, src_offset + (sx+x) * pixsize))) : scalar;\n" +"float tab1y[4], tab1x[4];\n" +"float ayy = INTER_SCALE * ay;\n" +"float axx = INTER_SCALE * ax;\n" +"interpolateCubic(ayy, tab1y);\n" +"interpolateCubic(axx, tab1x);\n" +"int dst_index = mad24(dy, dst_step, dst_offset + dx * pixsize);\n" +"WT sum = (WT)(0);\n" +"#if depth <= 4\n" +"int itab[16];\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"itab[i] = rint(tab1y[(i>>2)] * tab1x[(i&3)] * INTER_REMAP_COEF_SCALE);\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"sum += v[i] * itab[i];\n" +"storepix(convertToT( (sum + (1 << (INTER_REMAP_COEF_BITS-1))) >> INTER_REMAP_COEF_BITS ), dstptr + dst_index);\n" +"#else\n" +"#pragma unroll\n" +"for (int i = 0; i < 16; i++)\n" +"sum += v[i] * tab1y[(i>>2)] * tab1x[(i&3)];\n" +"storepix(convertToT( sum ), dstptr + dst_index);\n" +"#endif\n" +"}\n" +"}\n" +"#endif\n" +, "a369fbc4026cb59e20aead1fb3f896f0", NULL}; +struct cv::ocl::internal::ProgramEntry warp_transform_oclsrc={moduleName, "warp_transform", +"__constant short4 vec_offset = (short4)(0, 1, 2, 3);\n" +"#define GET_VAL(x, y) ((x) < 0 || (x) >= src_cols || (y) < 0 || (y) >= src_rows) ? 
scalar : src[src_offset + y * src_step + x]\n" +"__kernel void warpAffine_nearest_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"short4 new_x, new_y;\n" +"new_x = convert_short4_sat_rte(M[0] * convert_float4(vec_offset + (short4)(x)) +\n" +"M[1] * convert_float4((short4)y) + M[2]);\n" +"new_y = convert_short4_sat_rte(M[3] * convert_float4(vec_offset + (short4)(x)) +\n" +"M[4] * convert_float4((short4)y) + M[5]);\n" +"uchar4 pix = (uchar4)scalar;\n" +"pix.s0 = GET_VAL(new_x.s0, new_y.s0);\n" +"pix.s1 = GET_VAL(new_x.s1, new_y.s1);\n" +"pix.s2 = GET_VAL(new_x.s2, new_y.s2);\n" +"pix.s3 = GET_VAL(new_x.s3, new_y.s3);\n" +"int dst_index = x + y * dst_step + dst_offset;\n" +"vstore4(pix, 0, dst + dst_index);\n" +"}\n" +"uchar4 read_pixels(__global const uchar * src, short tx, short ty,\n" +"int src_offset, int src_step, int src_cols, int\n" +"src_rows, uchar scalar)\n" +"{\n" +"uchar2 pt, pb;\n" +"short bx, by;\n" +"bx = tx + 1;\n" +"by = ty + 1;\n" +"if (tx >= 0 && (tx + 1) < src_cols && ty >= 0 && ty < src_rows)\n" +"{\n" +"pt = vload2(0, src + src_offset + ty * src_step + tx);\n" +"}\n" +"else\n" +"{\n" +"pt.s0 = GET_VAL(tx, ty);\n" +"pt.s1 = GET_VAL(bx, ty);\n" +"}\n" +"if (tx >= 0 && (tx + 1) < src_cols && by >= 0 && by < src_rows)\n" +"{\n" +"pb = vload2(0, src + src_offset + by * src_step + tx);\n" +"}\n" +"else\n" +"{\n" +"pb.s0 = GET_VAL(tx, by);\n" +"pb.s1 = GET_VAL(bx, by);\n" +"}\n" +"return (uchar4)(pt, pb);\n" +"}\n" +"__kernel void warpAffine_linear_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"float4 nx, ny;\n" +"nx = M[0] * convert_float4((vec_offset + (short4)x)) + M[1] * convert_float4((short4)y) + M[2];\n" +"ny = M[3] * convert_float4((vec_offset + (short4)x)) + M[4] * convert_float4((short4)y) + M[5];\n" +"float4 s, t;\n" +"s = round((nx - floor(nx)) * 32.0f) / 32.0f;\n" +"t = round((ny - floor(ny)) * 32.0f) / 32.0f;\n" +"short4 tx, ty;\n" +"tx = convert_short4_sat_rtn(nx);\n" +"ty = convert_short4_sat_rtn(ny);\n" +"uchar4 pix[4];\n" +"pix[0] = read_pixels(src, tx.s0, ty.s0, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[1] = read_pixels(src, tx.s1, ty.s1, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[2] = read_pixels(src, tx.s2, ty.s2, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[3] = read_pixels(src, tx.s3, ty.s3, src_offset, src_step, src_cols, src_rows, scalar);\n" +"float4 tl, tr, bl, br;\n" +"tl = convert_float4((uchar4)(pix[0].s0, pix[1].s0, pix[2].s0, pix[3].s0));\n" +"tr = convert_float4((uchar4)(pix[0].s1, pix[1].s1, pix[2].s1, pix[3].s1));\n" +"bl = convert_float4((uchar4)(pix[0].s2, pix[1].s2, pix[2].s2, pix[3].s2));\n" +"br = convert_float4((uchar4)(pix[0].s3, pix[1].s3, pix[2].s3, pix[3].s3));\n" +"float4 pixel;\n" +"pixel = tl * (1 - s) * (1 - t) + tr * s * (1 - t) + bl * (1 - s) * t + br * s * t;\n" +"int dst_index = x + y * dst_step + 
dst_offset;\n" +"vstore4(convert_uchar4_sat_rte(pixel), 0, dst + dst_index);\n" +"}\n" +"__constant float coeffs[128] =\n" +"{ 0.000000f, 1.000000f, 0.000000f, 0.000000f, -0.021996f, 0.997841f, 0.024864f, -0.000710f, -0.041199f, 0.991516f, 0.052429f, -0.002747f,\n" +"-0.057747f, 0.981255f, 0.082466f, -0.005974f, -0.071777f, 0.967285f, 0.114746f, -0.010254f, -0.083427f, 0.949837f, 0.149040f, -0.015450f,\n" +"-0.092834f, 0.929138f, 0.185120f, -0.021423f, -0.100136f, 0.905418f, 0.222755f, -0.028038f, -0.105469f, 0.878906f, 0.261719f, -0.035156f,\n" +"-0.108971f, 0.849831f, 0.301781f, -0.042641f, -0.110779f, 0.818420f, 0.342712f, -0.050354f, -0.111031f, 0.784904f, 0.384285f, -0.058159f,\n" +"-0.109863f, 0.749512f, 0.426270f, -0.065918f, -0.107414f, 0.712471f, 0.468437f, -0.073494f, -0.103821f, 0.674011f, 0.510559f, -0.080750f,\n" +"-0.099220f, 0.634361f, 0.552406f, -0.087547f, -0.093750f, 0.593750f, 0.593750f, -0.093750f, -0.087547f, 0.552406f, 0.634361f, -0.099220f,\n" +"-0.080750f, 0.510559f, 0.674011f, -0.103821f, -0.073494f, 0.468437f, 0.712471f, -0.107414f, -0.065918f, 0.426270f, 0.749512f, -0.109863f,\n" +"-0.058159f, 0.384285f, 0.784904f, -0.111031f, -0.050354f, 0.342712f, 0.818420f, -0.110779f, -0.042641f, 0.301781f, 0.849831f, -0.108971f,\n" +"-0.035156f, 0.261719f, 0.878906f, -0.105469f, -0.028038f, 0.222755f, 0.905418f, -0.100136f, -0.021423f, 0.185120f, 0.929138f, -0.092834f,\n" +"-0.015450f, 0.149040f, 0.949837f, -0.083427f, -0.010254f, 0.114746f, 0.967285f, -0.071777f, -0.005974f, 0.082466f, 0.981255f, -0.057747f,\n" +"-0.002747f, 0.052429f, 0.991516f, -0.041199f, -0.000710f, 0.024864f, 0.997841f, -0.021996f };\n" +"uchar4 read_pixels_cubic(__global const uchar * src, int tx, int ty,\n" +"int src_offset, int src_step, int src_cols, int src_rows, uchar scalar)\n" +"{\n" +"uchar4 pix;\n" +"if (tx >= 0 && (tx + 3) < src_cols && ty >= 0 && ty < src_rows)\n" +"{\n" +"pix = vload4(0, src + src_offset + ty * src_step + tx);\n" +"}\n" +"else\n" +"{\n" +"pix.s0 = GET_VAL((tx + 0), ty);\n" +"pix.s1 = GET_VAL((tx + 1), ty);\n" +"pix.s2 = GET_VAL((tx + 2), ty);\n" +"pix.s3 = GET_VAL((tx + 3), ty);\n" +"}\n" +"return pix;\n" +"}\n" +"__kernel void warpAffine_cubic_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"float4 nx, ny;\n" +"nx = M[0] * convert_float4((vec_offset + (short4)x)) + M[1] * convert_float4((short4)y) + M[2];\n" +"ny = M[3] * convert_float4((vec_offset + (short4)x)) + M[4] * convert_float4((short4)y) + M[5];\n" +"int4 ax, ay;\n" +"ax = convert_int4_sat_rte((nx - floor(nx)) * 32.0f) & 31;\n" +"ay = convert_int4_sat_rte((ny - floor(ny)) * 32.0f) & 31;\n" +"int4 tx, ty;\n" +"int4 delta_x, delta_y;\n" +"delta_x = select((int4)1, (int4)0, ((nx - floor(nx))) * 64 > 63);\n" +"delta_y = select((int4)1, (int4)0, ((ny - floor(ny))) * 64 > 63);\n" +"tx = convert_int4_sat_rtn(nx) - delta_x;\n" +"ty = convert_int4_sat_rtn(ny) - delta_y;\n" +"__constant float * coeffs_x, * coeffs_y;\n" +"float4 sum = (float4)0.0f;\n" +"uchar4 pix;\n" +"float xsum;\n" +"coeffs_x = coeffs + (ax.s0 << 2);\n" +"coeffs_y = coeffs + (ay.s0 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s0, ty.s0 + i, src_offset, src_step, src_cols, 
src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s0 = fma(xsum, coeffs_y[i], sum.s0);\n" +"}\n" +"coeffs_x = coeffs + (ax.s1 << 2);\n" +"coeffs_y = coeffs + (ay.s1 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s1, ty.s1 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s1 = fma(xsum, coeffs_y[i], sum.s1);\n" +"}\n" +"coeffs_x = coeffs + (ax.s2 << 2);\n" +"coeffs_y = coeffs + (ay.s2 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s2, ty.s2 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s2 = fma(xsum, coeffs_y[i], sum.s2);\n" +"}\n" +"coeffs_x = coeffs + (ax.s3 << 2);\n" +"coeffs_y = coeffs + (ay.s3 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s3, ty.s3 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s3 = fma(xsum, coeffs_y[i], sum.s3);\n" +"}\n" +"int dst_index = x + y * dst_step + dst_offset;\n" +"vstore4(convert_uchar4_sat_rte(sum), 0, dst + dst_index);\n" +"}\n" +"__kernel void warpPerspective_nearest_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"float4 nx, ny, nz;\n" +"nx = M[0] * convert_float4(vec_offset + (short4)(x)) +\n" +"M[1] * convert_float4((short4)y) + M[2];\n" +"ny = M[3] * convert_float4(vec_offset + (short4)(x)) +\n" +"M[4] * convert_float4((short4)y) + M[5];\n" +"nz = M[6] * convert_float4(vec_offset + (short4)(x)) +\n" +"M[7] * convert_float4((short4)y) + M[8];\n" +"short4 new_x, new_y;\n" +"float4 fz = select((float4)(0.0f), (float4)(1.0f / nz), nz != 0.0f);\n" +"new_x = convert_short4_sat_rte(nx * fz);\n" +"new_y = convert_short4_sat_rte(ny * fz);\n" +"uchar4 pix = (uchar4)scalar;\n" +"pix.s0 = GET_VAL(new_x.s0, new_y.s0);\n" +"pix.s1 = GET_VAL(new_x.s1, new_y.s1);\n" +"pix.s2 = GET_VAL(new_x.s2, new_y.s2);\n" +"pix.s3 = GET_VAL(new_x.s3, new_y.s3);\n" +"int dst_index = x + y * dst_step + dst_offset;\n" +"vstore4(pix, 0, dst + dst_index);\n" +"}\n" +"__kernel void warpPerspective_linear_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"float4 nx, ny, nz;\n" +"nx = M[0] * convert_float4(vec_offset + (short4)(x)) + M[1] * convert_float4((short4)y) + M[2];\n" +"ny = M[3] * convert_float4(vec_offset + (short4)(x)) + M[4] * convert_float4((short4)y) + M[5];\n" +"nz = M[6] * convert_float4(vec_offset + (short4)(x)) + M[7] * convert_float4((short4)y) + M[8];\n" +"float4 fz = select((float4)(0.0f), (float4)(1.0f / nz), nz != 0.0f);\n" +"nx = nx * fz;\n" +"ny = ny * fz;\n" 
+"float4 s, t;\n" +"s = round((nx - floor(nx)) * 32.0f) / (float4)32.0f;\n" +"t = round((ny - floor(ny)) * 32.0f) / (float4)32.0f;\n" +"short4 tx, ty;\n" +"tx = convert_short4_sat_rtn(nx);\n" +"ty = convert_short4_sat_rtn(ny);\n" +"uchar4 pix[4];\n" +"pix[0] = read_pixels(src, tx.s0, ty.s0, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[1] = read_pixels(src, tx.s1, ty.s1, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[2] = read_pixels(src, tx.s2, ty.s2, src_offset, src_step, src_cols, src_rows, scalar);\n" +"pix[3] = read_pixels(src, tx.s3, ty.s3, src_offset, src_step, src_cols, src_rows, scalar);\n" +"float4 tl, tr, bl, br;\n" +"tl = convert_float4((uchar4)(pix[0].s0, pix[1].s0, pix[2].s0, pix[3].s0));\n" +"tr = convert_float4((uchar4)(pix[0].s1, pix[1].s1, pix[2].s1, pix[3].s1));\n" +"bl = convert_float4((uchar4)(pix[0].s2, pix[1].s2, pix[2].s2, pix[3].s2));\n" +"br = convert_float4((uchar4)(pix[0].s3, pix[1].s3, pix[2].s3, pix[3].s3));\n" +"float4 pixel;\n" +"pixel = tl * (1 - s) * (1 - t) + tr * s * (1 - t) + bl * (1 - s) * t + br * s * t;\n" +"int dst_index = x + y * dst_step + dst_offset;\n" +"vstore4(convert_uchar4_sat_rte(pixel), 0, dst + dst_index);\n" +"}\n" +"__kernel void warpPerspective_cubic_8u(__global const uchar * src, int src_step, int src_offset, int src_rows, int src_cols,\n" +"__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,\n" +"__constant float * M, ST scalar_)\n" +"{\n" +"int x = get_global_id(0) * 4;\n" +"int y = get_global_id(1);\n" +"uchar scalar = convert_uchar_sat_rte(scalar_);\n" +"if (x >= dst_cols || y >= dst_rows) return;\n" +"float4 nx, ny, nz;\n" +"nx = M[0] * convert_float4(vec_offset + (short4)(x)) + M[1] * convert_float4((short4)y) + M[2];\n" +"ny = M[3] * convert_float4(vec_offset + (short4)(x)) + M[4] * convert_float4((short4)y) + M[5];\n" +"nz = M[6] * convert_float4(vec_offset + (short4)(x)) + M[7] * convert_float4((short4)y) + M[8];\n" +"float4 fz = select((float4)(0.0f), (float4)(1.0f / nz), nz != 0.0f);\n" +"nx = nx * fz;\n" +"ny = ny * fz;\n" +"int4 ax, ay;\n" +"ax = convert_int4_sat_rte((nx - floor(nx)) * 32.0f) & 31;\n" +"ay = convert_int4_sat_rte((ny - floor(ny)) * 32.0f) & 31;\n" +"int4 tx, ty;\n" +"int4 delta_x, delta_y;\n" +"delta_x = select((int4)1, (int4)0, ((nx - floor(nx))) * 64 > 63);\n" +"delta_y = select((int4)1, (int4)0, ((ny - floor(ny))) * 64 > 63);\n" +"tx = convert_int4_sat_rtn(nx) - delta_x;\n" +"ty = convert_int4_sat_rtn(ny) - delta_y;\n" +"__constant float * coeffs_x, * coeffs_y;\n" +"float4 sum = (float4)0.0f;\n" +"uchar4 pix;\n" +"float xsum;\n" +"coeffs_x = coeffs + (ax.s0 << 2);\n" +"coeffs_y = coeffs + (ay.s0 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s0, ty.s0 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s0 = fma(xsum, coeffs_y[i], sum.s0);\n" +"}\n" +"coeffs_x = coeffs + (ax.s1 << 2);\n" +"coeffs_y = coeffs + (ay.s1 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s1, ty.s1 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s1 = fma(xsum, coeffs_y[i], sum.s1);\n" +"}\n" +"coeffs_x = coeffs + (ax.s2 << 2);\n" +"coeffs_y = coeffs + (ay.s2 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s2, ty.s2 + i, src_offset, src_step, 
src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s2 = fma(xsum, coeffs_y[i], sum.s2);\n" +"}\n" +"coeffs_x = coeffs + (ax.s3 << 2);\n" +"coeffs_y = coeffs + (ay.s3 << 2);\n" +"for (int i = 0; i < 4; i++)\n" +"{\n" +"pix = read_pixels_cubic(src, tx.s3, ty.s3 + i, src_offset, src_step, src_cols, src_rows, scalar);\n" +"xsum = dot(convert_float4(pix), (float4)(coeffs_x[0], coeffs_x[1], coeffs_x[2], coeffs_x[3]));\n" +"sum.s3 = fma(xsum, coeffs_y[i], sum.s3);\n" +"}\n" +"int dst_index = x + y * dst_step + dst_offset;\n" +"vstore4(convert_uchar4_sat_rte(sum), 0, dst + dst_index);\n" +"}\n" +, "14b9aef8e98f5e09295cf556e35dbf81", NULL}; + +}}} +#endif diff --git a/generated/modules/imgproc/opencl_kernels_imgproc.hpp b/generated/modules/imgproc/opencl_kernels_imgproc.hpp new file mode 100644 index 0000000..4e66e5c --- /dev/null +++ b/generated/modules/imgproc/opencl_kernels_imgproc.hpp @@ -0,0 +1,64 @@ +// This file is auto-generated. Do not edit! + +#include "opencv2/core/ocl.hpp" +#include "opencv2/core/ocl_genbase.hpp" +#include "opencv2/core/opencl/ocl_defs.hpp" + +#ifdef HAVE_OPENCL + +namespace cv +{ +namespace ocl +{ +namespace imgproc +{ + +extern struct cv::ocl::internal::ProgramEntry accumulate_oclsrc; +extern struct cv::ocl::internal::ProgramEntry bilateral_oclsrc; +extern struct cv::ocl::internal::ProgramEntry blend_linear_oclsrc; +extern struct cv::ocl::internal::ProgramEntry boxFilter_oclsrc; +extern struct cv::ocl::internal::ProgramEntry boxFilter3x3_oclsrc; +extern struct cv::ocl::internal::ProgramEntry calc_back_project_oclsrc; +extern struct cv::ocl::internal::ProgramEntry canny_oclsrc; +extern struct cv::ocl::internal::ProgramEntry clahe_oclsrc; +extern struct cv::ocl::internal::ProgramEntry color_hsv_oclsrc; +extern struct cv::ocl::internal::ProgramEntry color_lab_oclsrc; +extern struct cv::ocl::internal::ProgramEntry color_rgb_oclsrc; +extern struct cv::ocl::internal::ProgramEntry color_yuv_oclsrc; +extern struct cv::ocl::internal::ProgramEntry corner_oclsrc; +extern struct cv::ocl::internal::ProgramEntry covardata_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filter2D_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filter2DSmall_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filterSepCol_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filterSepRow_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filterSep_singlePass_oclsrc; +extern struct cv::ocl::internal::ProgramEntry filterSmall_oclsrc; +extern struct cv::ocl::internal::ProgramEntry gaussianBlur3x3_oclsrc; +extern struct cv::ocl::internal::ProgramEntry gaussianBlur5x5_oclsrc; +extern struct cv::ocl::internal::ProgramEntry gftt_oclsrc; +extern struct cv::ocl::internal::ProgramEntry histogram_oclsrc; +extern struct cv::ocl::internal::ProgramEntry hough_lines_oclsrc; +extern struct cv::ocl::internal::ProgramEntry integral_sum_oclsrc; +extern struct cv::ocl::internal::ProgramEntry laplacian3_oclsrc; +extern struct cv::ocl::internal::ProgramEntry laplacian5_oclsrc; +extern struct cv::ocl::internal::ProgramEntry linearPolar_oclsrc; +extern struct cv::ocl::internal::ProgramEntry logPolar_oclsrc; +extern struct cv::ocl::internal::ProgramEntry match_template_oclsrc; +extern struct cv::ocl::internal::ProgramEntry medianFilter_oclsrc; +extern struct cv::ocl::internal::ProgramEntry moments_oclsrc; +extern struct cv::ocl::internal::ProgramEntry morph_oclsrc; +extern struct cv::ocl::internal::ProgramEntry 
morph3x3_oclsrc; +extern struct cv::ocl::internal::ProgramEntry precornerdetect_oclsrc; +extern struct cv::ocl::internal::ProgramEntry pyr_down_oclsrc; +extern struct cv::ocl::internal::ProgramEntry pyr_up_oclsrc; +extern struct cv::ocl::internal::ProgramEntry pyramid_up_oclsrc; +extern struct cv::ocl::internal::ProgramEntry remap_oclsrc; +extern struct cv::ocl::internal::ProgramEntry resize_oclsrc; +extern struct cv::ocl::internal::ProgramEntry sepFilter3x3_oclsrc; +extern struct cv::ocl::internal::ProgramEntry threshold_oclsrc; +extern struct cv::ocl::internal::ProgramEntry warp_affine_oclsrc; +extern struct cv::ocl::internal::ProgramEntry warp_perspective_oclsrc; +extern struct cv::ocl::internal::ProgramEntry warp_transform_oclsrc; + +}}} +#endif diff --git a/generated/modules/imgproc/smooth.simd_declarations.hpp b/generated/modules/imgproc/smooth.simd_declarations.hpp new file mode 100644 index 0000000..6ba054e --- /dev/null +++ b/generated/modules/imgproc/smooth.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/smooth.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/imgproc/sumpixels.simd_declarations.hpp b/generated/modules/imgproc/sumpixels.simd_declarations.hpp new file mode 100644 index 0000000..100778b --- /dev/null +++ b/generated/modules/imgproc/sumpixels.simd_declarations.hpp @@ -0,0 +1,4 @@ +#define CV_CPU_SIMD_FILENAME "/root/opencv/modules/imgproc/src/sumpixels.simd.hpp" +#define CV_CPU_DISPATCH_MODES_ALL BASELINE + +#undef CV_CPU_SIMD_FILENAME diff --git a/generated/modules/opencv2/cvconfig.h b/generated/modules/opencv2/cvconfig.h new file mode 100644 index 0000000..6e37976 --- /dev/null +++ b/generated/modules/opencv2/cvconfig.h @@ -0,0 +1,149 @@ +#ifndef OPENCV_CVCONFIG_H_INCLUDED +#define OPENCV_CVCONFIG_H_INCLUDED + +/* OpenCV compiled as static or dynamic libs */ +#define BUILD_SHARED_LIBS + +/* OpenCV intrinsics optimized code */ +#define CV_ENABLE_INTRINSICS + +/* OpenCV additional optimized code */ +/* #undef CV_DISABLE_OPTIMIZATION */ + +/* Compile for 'real' NVIDIA GPU architectures */ +#define CUDA_ARCH_BIN "" + +/* NVIDIA GPU features are used */ +#define CUDA_ARCH_FEATURES "" + +/* Compile for 'virtual' NVIDIA PTX architectures */ +#define CUDA_ARCH_PTX "" + +/* AMD's Basic Linear Algebra Subprograms Library*/ +/* #undef HAVE_CLAMDBLAS */ + +/* AMD's OpenCL Fast Fourier Transform Library*/ +/* #undef HAVE_CLAMDFFT */ + +/* Clp support */ +/* #undef HAVE_CLP */ + +/* NVIDIA CUDA Runtime API*/ +/* #undef HAVE_CUDA */ + +/* NVIDIA CUDA Basic Linear Algebra Subprograms (BLAS) API*/ +/* #undef HAVE_CUBLAS */ + +/* NVIDIA CUDA Deep Neural Network (cuDNN) API*/ +/* #undef HAVE_CUDNN */ + +/* NVIDIA CUDA Fast Fourier Transform (FFT) API*/ +/* #undef HAVE_CUFFT */ + +/* DirectX */ +/* #undef HAVE_DIRECTX */ +/* #undef HAVE_DIRECTX_NV12 */ +/* #undef HAVE_D3D11 */ +/* #undef HAVE_D3D10 */ +/* #undef HAVE_D3D9 */ + +/* Eigen Matrix & Linear Algebra Library */ +/* #undef HAVE_EIGEN */ + +/* Geospatial Data Abstraction Library */ +/* #undef HAVE_GDAL */ + +/* Halide support */ +/* #undef HAVE_HALIDE */ + +/* Vulkan support */ +/* #undef HAVE_VULKAN */ + +/* Define to 1 if you have the <inttypes.h> header file. 
*/ +#define HAVE_INTTYPES_H 1 + +/* Intel Integrated Performance Primitives */ +/* #undef HAVE_IPP */ +/* #undef HAVE_IPP_ICV */ +/* #undef HAVE_IPP_IW */ +/* #undef HAVE_IPP_IW_LL */ + +/* JPEG-2000 codec */ +#define HAVE_OPENJPEG +/* #undef HAVE_JASPER */ + +/* IJG JPEG codec */ +#define HAVE_JPEG + +/* libpng/png.h needs to be included */ +/* #undef HAVE_LIBPNG_PNG_H */ + +/* GDCM DICOM codec */ +/* #undef HAVE_GDCM */ + +/* NVIDIA Video Decoding API*/ +/* #undef HAVE_NVCUVID */ +/* #undef HAVE_NVCUVID_HEADER */ +/* #undef HAVE_DYNLINK_NVCUVID_HEADER */ + +/* NVIDIA Video Encoding API*/ +/* #undef HAVE_NVCUVENC */ + +/* OpenCL Support */ +#define HAVE_OPENCL +/* #undef HAVE_OPENCL_STATIC */ +/* #undef HAVE_OPENCL_SVM */ + +/* NVIDIA OpenCL D3D Extensions support */ +/* #undef HAVE_OPENCL_D3D11_NV */ + +/* OpenEXR codec */ +/* #undef HAVE_OPENEXR */ + +/* OpenGL support*/ +/* #undef HAVE_OPENGL */ + +/* PNG codec */ +#define HAVE_PNG + +/* Posix threads (pthreads) */ +#define HAVE_PTHREAD + +/* parallel_for with pthreads */ +#define HAVE_PTHREADS_PF + +/* Intel Threading Building Blocks */ +/* #undef HAVE_TBB */ + +/* Ste||ar Group High Performance ParallelX */ +/* #undef HAVE_HPX */ + +/* TIFF codec */ +/* #undef HAVE_TIFF */ + +/* Define if your processor stores words with the most significant byte + first (like Motorola and SPARC, unlike Intel and VAX). */ +/* #undef WORDS_BIGENDIAN */ + +/* VA library (libva) */ +/* #undef HAVE_VA */ + +/* Intel VA-API/OpenCL */ +/* #undef HAVE_VA_INTEL */ + +/* Lapack */ +/* #undef HAVE_LAPACK */ + +/* Library was compiled with functions instrumentation */ +/* #undef ENABLE_INSTRUMENTATION */ + +/* OpenVX */ +/* #undef HAVE_OPENVX */ + +/* OpenCV trace utilities */ +#define OPENCV_TRACE + +/* Library QR-code decoding */ +#define HAVE_QUIRC + +#endif // OPENCV_CVCONFIG_H_INCLUDED diff --git a/generated/modules/opencv2/opencv_modules.hpp b/generated/modules/opencv2/opencv_modules.hpp new file mode 100644 index 0000000..82dcdb3 --- /dev/null +++ b/generated/modules/opencv2/opencv_modules.hpp @@ -0,0 +1,18 @@ +/* + * ** File generated automatically, do not modify ** + * + * This file defines the list of modules available in current build configuration + * + * +*/ + +// This definition means that OpenCV is built with enabled non-free code. +// For example, patented algorithms for non-profit/non-commercial use only. 
+/* #undef OPENCV_ENABLE_NONFREE */ + +#define HAVE_OPENCV_CORE +#define HAVE_OPENCV_IMGCODECS +#define HAVE_OPENCV_IMGPROC +#define HAVE_OPENCV_VIDEOIO + + diff --git a/generated/modules/python_bindings_generator/pyopencv_custom_headers.h b/generated/modules/python_bindings_generator/pyopencv_custom_headers.h new file mode 100644 index 0000000..21bc3ce --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_custom_headers.h @@ -0,0 +1,5 @@ +//user-defined headers +#include "core/misc/python/pyopencv_async.hpp" +#include "core/misc/python/pyopencv_cuda.hpp" +#include "core/misc/python/pyopencv_umat.hpp" +#include "videoio/misc/python/pyopencv_videoio.hpp" diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_enums.h b/generated/modules/python_bindings_generator/pyopencv_generated_enums.h new file mode 100644 index 0000000..400d391 --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_enums.h @@ -0,0 +1,249 @@ +CV_PY_FROM_ENUM(AccessFlag); +CV_PY_TO_ENUM(AccessFlag); + +CV_PY_FROM_ENUM(AdaptiveThresholdTypes); +CV_PY_TO_ENUM(AdaptiveThresholdTypes); + +CV_PY_FROM_ENUM(BorderTypes); +CV_PY_TO_ENUM(BorderTypes); + +CV_PY_FROM_ENUM(CmpTypes); +CV_PY_TO_ENUM(CmpTypes); + +CV_PY_FROM_ENUM(ColorConversionCodes); +CV_PY_TO_ENUM(ColorConversionCodes); + +CV_PY_FROM_ENUM(ColormapTypes); +CV_PY_TO_ENUM(ColormapTypes); + +CV_PY_FROM_ENUM(ConnectedComponentsAlgorithmsTypes); +CV_PY_TO_ENUM(ConnectedComponentsAlgorithmsTypes); + +CV_PY_FROM_ENUM(ConnectedComponentsTypes); +CV_PY_TO_ENUM(ConnectedComponentsTypes); + +CV_PY_FROM_ENUM(ContourApproximationModes); +CV_PY_TO_ENUM(ContourApproximationModes); + +CV_PY_FROM_ENUM(CovarFlags); +CV_PY_TO_ENUM(CovarFlags); + +CV_PY_FROM_ENUM(DecompTypes); +CV_PY_TO_ENUM(DecompTypes); + +CV_PY_FROM_ENUM(DftFlags); +CV_PY_TO_ENUM(DftFlags); + +CV_PY_FROM_ENUM(DistanceTransformLabelTypes); +CV_PY_TO_ENUM(DistanceTransformLabelTypes); + +CV_PY_FROM_ENUM(DistanceTransformMasks); +CV_PY_TO_ENUM(DistanceTransformMasks); + +CV_PY_FROM_ENUM(DistanceTypes); +CV_PY_TO_ENUM(DistanceTypes); + +typedef cv::Error::Code Error_Code; +CV_PY_FROM_ENUM(Error_Code); +CV_PY_TO_ENUM(Error_Code); + +typedef cv::FileStorage::Mode FileStorage_Mode; +CV_PY_FROM_ENUM(FileStorage_Mode); +CV_PY_TO_ENUM(FileStorage_Mode); + +typedef cv::FileStorage::State FileStorage_State; +CV_PY_FROM_ENUM(FileStorage_State); +CV_PY_TO_ENUM(FileStorage_State); + +CV_PY_FROM_ENUM(FloodFillFlags); +CV_PY_TO_ENUM(FloodFillFlags); + +typedef cv::Formatter::FormatType Formatter_FormatType; +CV_PY_FROM_ENUM(Formatter_FormatType); +CV_PY_TO_ENUM(Formatter_FormatType); + +CV_PY_FROM_ENUM(GemmFlags); +CV_PY_TO_ENUM(GemmFlags); + +CV_PY_FROM_ENUM(GrabCutClasses); +CV_PY_TO_ENUM(GrabCutClasses); + +CV_PY_FROM_ENUM(GrabCutModes); +CV_PY_TO_ENUM(GrabCutModes); + +CV_PY_FROM_ENUM(HersheyFonts); +CV_PY_TO_ENUM(HersheyFonts); + +CV_PY_FROM_ENUM(HistCompMethods); +CV_PY_TO_ENUM(HistCompMethods); + +CV_PY_FROM_ENUM(HoughModes); +CV_PY_TO_ENUM(HoughModes); + +CV_PY_FROM_ENUM(ImreadModes); +CV_PY_TO_ENUM(ImreadModes); + +CV_PY_FROM_ENUM(ImwriteEXRCompressionFlags); +CV_PY_TO_ENUM(ImwriteEXRCompressionFlags); + +CV_PY_FROM_ENUM(ImwriteEXRTypeFlags); +CV_PY_TO_ENUM(ImwriteEXRTypeFlags); + +CV_PY_FROM_ENUM(ImwriteFlags); +CV_PY_TO_ENUM(ImwriteFlags); + +CV_PY_FROM_ENUM(ImwritePAMFlags); +CV_PY_TO_ENUM(ImwritePAMFlags); + +CV_PY_FROM_ENUM(ImwritePNGFlags); +CV_PY_TO_ENUM(ImwritePNGFlags); + +CV_PY_FROM_ENUM(InterpolationFlags); +CV_PY_TO_ENUM(InterpolationFlags); + 
+CV_PY_FROM_ENUM(InterpolationMasks); +CV_PY_TO_ENUM(InterpolationMasks); + +CV_PY_FROM_ENUM(KmeansFlags); +CV_PY_TO_ENUM(KmeansFlags); + +CV_PY_FROM_ENUM(LineSegmentDetectorModes); +CV_PY_TO_ENUM(LineSegmentDetectorModes); + +CV_PY_FROM_ENUM(LineTypes); +CV_PY_TO_ENUM(LineTypes); + +CV_PY_FROM_ENUM(MarkerTypes); +CV_PY_TO_ENUM(MarkerTypes); + +CV_PY_FROM_ENUM(MorphShapes); +CV_PY_TO_ENUM(MorphShapes); + +CV_PY_FROM_ENUM(MorphTypes); +CV_PY_TO_ENUM(MorphTypes); + +CV_PY_FROM_ENUM(NormTypes); +CV_PY_TO_ENUM(NormTypes); + +typedef cv::PCA::Flags PCA_Flags; +CV_PY_FROM_ENUM(PCA_Flags); +CV_PY_TO_ENUM(PCA_Flags); + +CV_PY_FROM_ENUM(Param); +CV_PY_TO_ENUM(Param); + +CV_PY_FROM_ENUM(QuatAssumeType); +CV_PY_TO_ENUM(QuatAssumeType); + +typedef cv::QuatEnum::EulerAnglesType QuatEnum_EulerAnglesType; +CV_PY_FROM_ENUM(QuatEnum_EulerAnglesType); +CV_PY_TO_ENUM(QuatEnum_EulerAnglesType); + +CV_PY_FROM_ENUM(RectanglesIntersectTypes); +CV_PY_TO_ENUM(RectanglesIntersectTypes); + +CV_PY_FROM_ENUM(ReduceTypes); +CV_PY_TO_ENUM(ReduceTypes); + +CV_PY_FROM_ENUM(RetrievalModes); +CV_PY_TO_ENUM(RetrievalModes); + +CV_PY_FROM_ENUM(RotateFlags); +CV_PY_TO_ENUM(RotateFlags); + +typedef cv::SVD::Flags SVD_Flags; +CV_PY_FROM_ENUM(SVD_Flags); +CV_PY_TO_ENUM(SVD_Flags); + +CV_PY_FROM_ENUM(ShapeMatchModes); +CV_PY_TO_ENUM(ShapeMatchModes); + +CV_PY_FROM_ENUM(SolveLPResult); +CV_PY_TO_ENUM(SolveLPResult); + +CV_PY_FROM_ENUM(SortFlags); +CV_PY_TO_ENUM(SortFlags); + +CV_PY_FROM_ENUM(SpecialFilter); +CV_PY_TO_ENUM(SpecialFilter); + +CV_PY_FROM_ENUM(TemplateMatchModes); +CV_PY_TO_ENUM(TemplateMatchModes); + +typedef cv::TermCriteria::Type TermCriteria_Type; +CV_PY_FROM_ENUM(TermCriteria_Type); +CV_PY_TO_ENUM(TermCriteria_Type); + +CV_PY_FROM_ENUM(ThresholdTypes); +CV_PY_TO_ENUM(ThresholdTypes); + +typedef cv::UMatData::MemoryFlag UMatData_MemoryFlag; +CV_PY_FROM_ENUM(UMatData_MemoryFlag); +CV_PY_TO_ENUM(UMatData_MemoryFlag); + +CV_PY_FROM_ENUM(UMatUsageFlags); +CV_PY_TO_ENUM(UMatUsageFlags); + +CV_PY_FROM_ENUM(VideoAccelerationType); +CV_PY_TO_ENUM(VideoAccelerationType); + +CV_PY_FROM_ENUM(VideoCaptureAPIs); +CV_PY_TO_ENUM(VideoCaptureAPIs); + +CV_PY_FROM_ENUM(VideoCaptureProperties); +CV_PY_TO_ENUM(VideoCaptureProperties); + +CV_PY_FROM_ENUM(VideoWriterProperties); +CV_PY_TO_ENUM(VideoWriterProperties); + +CV_PY_FROM_ENUM(WarpPolarMode); +CV_PY_TO_ENUM(WarpPolarMode); + +typedef cv::_InputArray::KindFlag _InputArray_KindFlag; +CV_PY_FROM_ENUM(_InputArray_KindFlag); +CV_PY_TO_ENUM(_InputArray_KindFlag); + +typedef cv::_OutputArray::DepthMask _OutputArray_DepthMask; +CV_PY_FROM_ENUM(_OutputArray_DepthMask); +CV_PY_TO_ENUM(_OutputArray_DepthMask); + +typedef cv::cuda::DeviceInfo::ComputeMode cuda_DeviceInfo_ComputeMode; +CV_PY_FROM_ENUM(cuda_DeviceInfo_ComputeMode); +CV_PY_TO_ENUM(cuda_DeviceInfo_ComputeMode); + +typedef cv::cuda::Event::CreateFlags cuda_Event_CreateFlags; +CV_PY_FROM_ENUM(cuda_Event_CreateFlags); +CV_PY_TO_ENUM(cuda_Event_CreateFlags); + +typedef cv::cuda::FeatureSet cuda_FeatureSet; +CV_PY_FROM_ENUM(cuda_FeatureSet); +CV_PY_TO_ENUM(cuda_FeatureSet); + +typedef cv::cuda::HostMem::AllocType cuda_HostMem_AllocType; +CV_PY_FROM_ENUM(cuda_HostMem_AllocType); +CV_PY_TO_ENUM(cuda_HostMem_AllocType); + +typedef cv::detail::TestOp detail_TestOp; +CV_PY_FROM_ENUM(detail_TestOp); +CV_PY_TO_ENUM(detail_TestOp); + +typedef cv::ocl::OclVectorStrategy ocl_OclVectorStrategy; +CV_PY_FROM_ENUM(ocl_OclVectorStrategy); +CV_PY_TO_ENUM(ocl_OclVectorStrategy); + +typedef cv::ogl::Buffer::Access ogl_Buffer_Access; 
+CV_PY_FROM_ENUM(ogl_Buffer_Access); +CV_PY_TO_ENUM(ogl_Buffer_Access); + +typedef cv::ogl::Buffer::Target ogl_Buffer_Target; +CV_PY_FROM_ENUM(ogl_Buffer_Target); +CV_PY_TO_ENUM(ogl_Buffer_Target); + +typedef cv::ogl::RenderModes ogl_RenderModes; +CV_PY_FROM_ENUM(ogl_RenderModes); +CV_PY_TO_ENUM(ogl_RenderModes); + +typedef cv::ogl::Texture2D::Format ogl_Texture2D_Format; +CV_PY_FROM_ENUM(ogl_Texture2D_Format); +CV_PY_TO_ENUM(ogl_Texture2D_Format); + diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_funcs.h b/generated/modules/python_bindings_generator/pyopencv_generated_funcs.h new file mode 100644 index 0000000..4196168 --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_funcs.h @@ -0,0 +1,16085 @@ +static PyObject* pyopencv_cv_Canny(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_edges = NULL; + Mat edges; + PyObject* pyobj_threshold1 = NULL; + double threshold1=0; + PyObject* pyobj_threshold2 = NULL; + double threshold2=0; + PyObject* pyobj_apertureSize = NULL; + int apertureSize=3; + PyObject* pyobj_L2gradient = NULL; + bool L2gradient=false; + + const char* keywords[] = { "image", "threshold1", "threshold2", "edges", "apertureSize", "L2gradient", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:Canny", (char**)keywords, &pyobj_image, &pyobj_threshold1, &pyobj_threshold2, &pyobj_edges, &pyobj_apertureSize, &pyobj_L2gradient) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 1)) && + pyopencv_to_safe(pyobj_threshold1, threshold1, ArgInfo("threshold1", 0)) && + pyopencv_to_safe(pyobj_threshold2, threshold2, ArgInfo("threshold2", 0)) && + pyopencv_to_safe(pyobj_apertureSize, apertureSize, ArgInfo("apertureSize", 0)) && + pyopencv_to_safe(pyobj_L2gradient, L2gradient, ArgInfo("L2gradient", 0)) ) + { + ERRWRAP2(cv::Canny(image, edges, threshold1, threshold2, apertureSize, L2gradient)); + return pyopencv_from(edges); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_edges = NULL; + UMat edges; + PyObject* pyobj_threshold1 = NULL; + double threshold1=0; + PyObject* pyobj_threshold2 = NULL; + double threshold2=0; + PyObject* pyobj_apertureSize = NULL; + int apertureSize=3; + PyObject* pyobj_L2gradient = NULL; + bool L2gradient=false; + + const char* keywords[] = { "image", "threshold1", "threshold2", "edges", "apertureSize", "L2gradient", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:Canny", (char**)keywords, &pyobj_image, &pyobj_threshold1, &pyobj_threshold2, &pyobj_edges, &pyobj_apertureSize, &pyobj_L2gradient) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 1)) && + pyopencv_to_safe(pyobj_threshold1, threshold1, ArgInfo("threshold1", 0)) && + pyopencv_to_safe(pyobj_threshold2, threshold2, ArgInfo("threshold2", 0)) && + pyopencv_to_safe(pyobj_apertureSize, apertureSize, ArgInfo("apertureSize", 0)) && + pyopencv_to_safe(pyobj_L2gradient, L2gradient, ArgInfo("L2gradient", 0)) ) + { + ERRWRAP2(cv::Canny(image, edges, threshold1, threshold2, apertureSize, L2gradient)); + return pyopencv_from(edges); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dx = NULL; + Mat dx; + PyObject* pyobj_dy = NULL; + Mat dy; + PyObject* 
pyobj_edges = NULL; + Mat edges; + PyObject* pyobj_threshold1 = NULL; + double threshold1=0; + PyObject* pyobj_threshold2 = NULL; + double threshold2=0; + PyObject* pyobj_L2gradient = NULL; + bool L2gradient=false; + + const char* keywords[] = { "dx", "dy", "threshold1", "threshold2", "edges", "L2gradient", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:Canny", (char**)keywords, &pyobj_dx, &pyobj_dy, &pyobj_threshold1, &pyobj_threshold2, &pyobj_edges, &pyobj_L2gradient) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 1)) && + pyopencv_to_safe(pyobj_threshold1, threshold1, ArgInfo("threshold1", 0)) && + pyopencv_to_safe(pyobj_threshold2, threshold2, ArgInfo("threshold2", 0)) && + pyopencv_to_safe(pyobj_L2gradient, L2gradient, ArgInfo("L2gradient", 0)) ) + { + ERRWRAP2(cv::Canny(dx, dy, edges, threshold1, threshold2, L2gradient)); + return pyopencv_from(edges); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dx = NULL; + UMat dx; + PyObject* pyobj_dy = NULL; + UMat dy; + PyObject* pyobj_edges = NULL; + UMat edges; + PyObject* pyobj_threshold1 = NULL; + double threshold1=0; + PyObject* pyobj_threshold2 = NULL; + double threshold2=0; + PyObject* pyobj_L2gradient = NULL; + bool L2gradient=false; + + const char* keywords[] = { "dx", "dy", "threshold1", "threshold2", "edges", "L2gradient", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:Canny", (char**)keywords, &pyobj_dx, &pyobj_dy, &pyobj_threshold1, &pyobj_threshold2, &pyobj_edges, &pyobj_L2gradient) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 1)) && + pyopencv_to_safe(pyobj_threshold1, threshold1, ArgInfo("threshold1", 0)) && + pyopencv_to_safe(pyobj_threshold2, threshold2, ArgInfo("threshold2", 0)) && + pyopencv_to_safe(pyobj_L2gradient, L2gradient, ArgInfo("L2gradient", 0)) ) + { + ERRWRAP2(cv::Canny(dx, dy, edges, threshold1, threshold2, L2gradient)); + return pyopencv_from(edges); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Canny"); + + return NULL; +} + +static PyObject* pyopencv_cv_EMD(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_signature1 = NULL; + Mat signature1; + PyObject* pyobj_signature2 = NULL; + Mat signature2; + PyObject* pyobj_distType = NULL; + int distType=0; + PyObject* pyobj_cost = NULL; + Mat cost; + PyObject* pyobj_lowerBound = NULL; + Ptr<float> lowerBound; + PyObject* pyobj_flow = NULL; + Mat flow; + float retval; + + const char* keywords[] = { "signature1", "signature2", "distType", "cost", "lowerBound", "flow", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:EMD", (char**)keywords, &pyobj_signature1, &pyobj_signature2, &pyobj_distType, &pyobj_cost, &pyobj_lowerBound, &pyobj_flow) && + pyopencv_to_safe(pyobj_signature1, signature1, ArgInfo("signature1", 0)) && + pyopencv_to_safe(pyobj_signature2, signature2, ArgInfo("signature2", 0)) && + pyopencv_to_safe(pyobj_distType, distType, ArgInfo("distType", 0)) && + pyopencv_to_safe(pyobj_cost, cost, ArgInfo("cost", 0)) && + pyopencv_to_safe(pyobj_lowerBound, lowerBound, ArgInfo("lowerBound", 1)) && + pyopencv_to_safe(pyobj_flow, flow, ArgInfo("flow", 1)) ) + { + ERRWRAP2(retval = cv::wrapperEMD(signature1, signature2, distType, 
cost, lowerBound, flow)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(lowerBound), pyopencv_from(flow)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_signature1 = NULL; + UMat signature1; + PyObject* pyobj_signature2 = NULL; + UMat signature2; + PyObject* pyobj_distType = NULL; + int distType=0; + PyObject* pyobj_cost = NULL; + UMat cost; + PyObject* pyobj_lowerBound = NULL; + Ptr<float> lowerBound; + PyObject* pyobj_flow = NULL; + UMat flow; + float retval; + + const char* keywords[] = { "signature1", "signature2", "distType", "cost", "lowerBound", "flow", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:EMD", (char**)keywords, &pyobj_signature1, &pyobj_signature2, &pyobj_distType, &pyobj_cost, &pyobj_lowerBound, &pyobj_flow) && + pyopencv_to_safe(pyobj_signature1, signature1, ArgInfo("signature1", 0)) && + pyopencv_to_safe(pyobj_signature2, signature2, ArgInfo("signature2", 0)) && + pyopencv_to_safe(pyobj_distType, distType, ArgInfo("distType", 0)) && + pyopencv_to_safe(pyobj_cost, cost, ArgInfo("cost", 0)) && + pyopencv_to_safe(pyobj_lowerBound, lowerBound, ArgInfo("lowerBound", 1)) && + pyopencv_to_safe(pyobj_flow, flow, ArgInfo("flow", 1)) ) + { + ERRWRAP2(retval = cv::wrapperEMD(signature1, signature2, distType, cost, lowerBound, flow)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(lowerBound), pyopencv_from(flow)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("EMD"); + + return NULL; +} + +static PyObject* pyopencv_cv_GaussianBlur(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_sigmaX = NULL; + double sigmaX=0; + PyObject* pyobj_sigmaY = NULL; + double sigmaY=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "sigmaX", "dst", "sigmaY", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:GaussianBlur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_sigmaX, &pyobj_dst, &pyobj_sigmaY, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_sigmaX, sigmaX, ArgInfo("sigmaX", 0)) && + pyopencv_to_safe(pyobj_sigmaY, sigmaY, ArgInfo("sigmaY", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::GaussianBlur(src, dst, ksize, sigmaX, sigmaY, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_sigmaX = NULL; + double sigmaX=0; + PyObject* pyobj_sigmaY = NULL; + double sigmaY=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "sigmaX", "dst", "sigmaY", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:GaussianBlur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_sigmaX, &pyobj_dst, &pyobj_sigmaY, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && 
pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_sigmaX, sigmaX, ArgInfo("sigmaX", 0)) && + pyopencv_to_safe(pyobj_sigmaY, sigmaY, ArgInfo("sigmaY", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::GaussianBlur(src, dst, ksize, sigmaX, sigmaY, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("GaussianBlur"); + + return NULL; +} + +static PyObject* pyopencv_cv_HoughCircles(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_circles = NULL; + Mat circles; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_dp = NULL; + double dp=0; + PyObject* pyobj_minDist = NULL; + double minDist=0; + PyObject* pyobj_param1 = NULL; + double param1=100; + PyObject* pyobj_param2 = NULL; + double param2=100; + PyObject* pyobj_minRadius = NULL; + int minRadius=0; + PyObject* pyobj_maxRadius = NULL; + int maxRadius=0; + + const char* keywords[] = { "image", "method", "dp", "minDist", "circles", "param1", "param2", "minRadius", "maxRadius", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughCircles", (char**)keywords, &pyobj_image, &pyobj_method, &pyobj_dp, &pyobj_minDist, &pyobj_circles, &pyobj_param1, &pyobj_param2, &pyobj_minRadius, &pyobj_maxRadius) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_circles, circles, ArgInfo("circles", 1)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_dp, dp, ArgInfo("dp", 0)) && + pyopencv_to_safe(pyobj_minDist, minDist, ArgInfo("minDist", 0)) && + pyopencv_to_safe(pyobj_param1, param1, ArgInfo("param1", 0)) && + pyopencv_to_safe(pyobj_param2, param2, ArgInfo("param2", 0)) && + pyopencv_to_safe(pyobj_minRadius, minRadius, ArgInfo("minRadius", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) ) + { + ERRWRAP2(cv::HoughCircles(image, circles, method, dp, minDist, param1, param2, minRadius, maxRadius)); + return pyopencv_from(circles); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_circles = NULL; + UMat circles; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_dp = NULL; + double dp=0; + PyObject* pyobj_minDist = NULL; + double minDist=0; + PyObject* pyobj_param1 = NULL; + double param1=100; + PyObject* pyobj_param2 = NULL; + double param2=100; + PyObject* pyobj_minRadius = NULL; + int minRadius=0; + PyObject* pyobj_maxRadius = NULL; + int maxRadius=0; + + const char* keywords[] = { "image", "method", "dp", "minDist", "circles", "param1", "param2", "minRadius", "maxRadius", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughCircles", (char**)keywords, &pyobj_image, &pyobj_method, &pyobj_dp, &pyobj_minDist, &pyobj_circles, &pyobj_param1, &pyobj_param2, &pyobj_minRadius, &pyobj_maxRadius) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_circles, circles, ArgInfo("circles", 1)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_dp, dp, ArgInfo("dp", 0)) && + pyopencv_to_safe(pyobj_minDist, minDist, ArgInfo("minDist", 0)) && + pyopencv_to_safe(pyobj_param1, param1, ArgInfo("param1", 0)) && + pyopencv_to_safe(pyobj_param2, param2, 
ArgInfo("param2", 0)) && + pyopencv_to_safe(pyobj_minRadius, minRadius, ArgInfo("minRadius", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) ) + { + ERRWRAP2(cv::HoughCircles(image, circles, method, dp, minDist, param1, param2, minRadius, maxRadius)); + return pyopencv_from(circles); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HoughCircles"); + + return NULL; +} + +static PyObject* pyopencv_cv_HoughLines(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_lines = NULL; + Mat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_srn = NULL; + double srn=0; + PyObject* pyobj_stn = NULL; + double stn=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=CV_PI; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "srn", "stn", "min_theta", "max_theta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughLines", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_srn, &pyobj_stn, &pyobj_min_theta, &pyobj_max_theta) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_srn, srn, ArgInfo("srn", 0)) && + pyopencv_to_safe(pyobj_stn, stn, ArgInfo("stn", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, ArgInfo("max_theta", 0)) ) + { + ERRWRAP2(cv::HoughLines(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_lines = NULL; + UMat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_srn = NULL; + double srn=0; + PyObject* pyobj_stn = NULL; + double stn=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=CV_PI; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "srn", "stn", "min_theta", "max_theta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughLines", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_srn, &pyobj_stn, &pyobj_min_theta, &pyobj_max_theta) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_srn, srn, ArgInfo("srn", 0)) && + pyopencv_to_safe(pyobj_stn, stn, ArgInfo("stn", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, 
ArgInfo("max_theta", 0)) ) + { + ERRWRAP2(cv::HoughLines(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HoughLines"); + + return NULL; +} + +static PyObject* pyopencv_cv_HoughLinesP(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_lines = NULL; + Mat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_minLineLength = NULL; + double minLineLength=0; + PyObject* pyobj_maxLineGap = NULL; + double maxLineGap=0; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "minLineLength", "maxLineGap", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:HoughLinesP", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_minLineLength, &pyobj_maxLineGap) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_minLineLength, minLineLength, ArgInfo("minLineLength", 0)) && + pyopencv_to_safe(pyobj_maxLineGap, maxLineGap, ArgInfo("maxLineGap", 0)) ) + { + ERRWRAP2(cv::HoughLinesP(image, lines, rho, theta, threshold, minLineLength, maxLineGap)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_lines = NULL; + UMat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_minLineLength = NULL; + double minLineLength=0; + PyObject* pyobj_maxLineGap = NULL; + double maxLineGap=0; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "minLineLength", "maxLineGap", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:HoughLinesP", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_minLineLength, &pyobj_maxLineGap) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_minLineLength, minLineLength, ArgInfo("minLineLength", 0)) && + pyopencv_to_safe(pyobj_maxLineGap, maxLineGap, ArgInfo("maxLineGap", 0)) ) + { + ERRWRAP2(cv::HoughLinesP(image, lines, rho, theta, threshold, minLineLength, maxLineGap)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HoughLinesP"); + + return NULL; +} + +static PyObject* pyopencv_cv_HoughLinesPointSet(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_point = NULL; + Mat point; + PyObject* pyobj_lines = NULL; + Mat lines; + PyObject* pyobj_lines_max = NULL; + int lines_max=0; + 
PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_min_rho = NULL; + double min_rho=0; + PyObject* pyobj_max_rho = NULL; + double max_rho=0; + PyObject* pyobj_rho_step = NULL; + double rho_step=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=0; + PyObject* pyobj_theta_step = NULL; + double theta_step=0; + + const char* keywords[] = { "point", "lines_max", "threshold", "min_rho", "max_rho", "rho_step", "min_theta", "max_theta", "theta_step", "lines", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOOOO|O:HoughLinesPointSet", (char**)keywords, &pyobj_point, &pyobj_lines_max, &pyobj_threshold, &pyobj_min_rho, &pyobj_max_rho, &pyobj_rho_step, &pyobj_min_theta, &pyobj_max_theta, &pyobj_theta_step, &pyobj_lines) && + pyopencv_to_safe(pyobj_point, point, ArgInfo("point", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_lines_max, lines_max, ArgInfo("lines_max", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_min_rho, min_rho, ArgInfo("min_rho", 0)) && + pyopencv_to_safe(pyobj_max_rho, max_rho, ArgInfo("max_rho", 0)) && + pyopencv_to_safe(pyobj_rho_step, rho_step, ArgInfo("rho_step", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, ArgInfo("max_theta", 0)) && + pyopencv_to_safe(pyobj_theta_step, theta_step, ArgInfo("theta_step", 0)) ) + { + ERRWRAP2(cv::HoughLinesPointSet(point, lines, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_point = NULL; + UMat point; + PyObject* pyobj_lines = NULL; + UMat lines; + PyObject* pyobj_lines_max = NULL; + int lines_max=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_min_rho = NULL; + double min_rho=0; + PyObject* pyobj_max_rho = NULL; + double max_rho=0; + PyObject* pyobj_rho_step = NULL; + double rho_step=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=0; + PyObject* pyobj_theta_step = NULL; + double theta_step=0; + + const char* keywords[] = { "point", "lines_max", "threshold", "min_rho", "max_rho", "rho_step", "min_theta", "max_theta", "theta_step", "lines", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOOOO|O:HoughLinesPointSet", (char**)keywords, &pyobj_point, &pyobj_lines_max, &pyobj_threshold, &pyobj_min_rho, &pyobj_max_rho, &pyobj_rho_step, &pyobj_min_theta, &pyobj_max_theta, &pyobj_theta_step, &pyobj_lines) && + pyopencv_to_safe(pyobj_point, point, ArgInfo("point", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_lines_max, lines_max, ArgInfo("lines_max", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_min_rho, min_rho, ArgInfo("min_rho", 0)) && + pyopencv_to_safe(pyobj_max_rho, max_rho, ArgInfo("max_rho", 0)) && + pyopencv_to_safe(pyobj_rho_step, rho_step, ArgInfo("rho_step", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, ArgInfo("max_theta", 0)) && + pyopencv_to_safe(pyobj_theta_step, theta_step, ArgInfo("theta_step", 0)) ) + { + ERRWRAP2(cv::HoughLinesPointSet(point, lines, lines_max, threshold, min_rho, max_rho, rho_step, 
min_theta, max_theta, theta_step)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HoughLinesPointSet"); + + return NULL; +} + +static PyObject* pyopencv_cv_HoughLinesWithAccumulator(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_lines = NULL; + Mat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_srn = NULL; + double srn=0; + PyObject* pyobj_stn = NULL; + double stn=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=CV_PI; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "srn", "stn", "min_theta", "max_theta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughLinesWithAccumulator", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_srn, &pyobj_stn, &pyobj_min_theta, &pyobj_max_theta) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_srn, srn, ArgInfo("srn", 0)) && + pyopencv_to_safe(pyobj_stn, stn, ArgInfo("stn", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, ArgInfo("max_theta", 0)) ) + { + ERRWRAP2(cv::HoughLinesWithAccumulator(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta)); + return pyopencv_from(lines); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_lines = NULL; + UMat lines; + PyObject* pyobj_rho = NULL; + double rho=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_threshold = NULL; + int threshold=0; + PyObject* pyobj_srn = NULL; + double srn=0; + PyObject* pyobj_stn = NULL; + double stn=0; + PyObject* pyobj_min_theta = NULL; + double min_theta=0; + PyObject* pyobj_max_theta = NULL; + double max_theta=CV_PI; + + const char* keywords[] = { "image", "rho", "theta", "threshold", "lines", "srn", "stn", "min_theta", "max_theta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:HoughLinesWithAccumulator", (char**)keywords, &pyobj_image, &pyobj_rho, &pyobj_theta, &pyobj_threshold, &pyobj_lines, &pyobj_srn, &pyobj_stn, &pyobj_min_theta, &pyobj_max_theta) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_rho, rho, ArgInfo("rho", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_threshold, threshold, ArgInfo("threshold", 0)) && + pyopencv_to_safe(pyobj_srn, srn, ArgInfo("srn", 0)) && + pyopencv_to_safe(pyobj_stn, stn, ArgInfo("stn", 0)) && + pyopencv_to_safe(pyobj_min_theta, min_theta, ArgInfo("min_theta", 0)) && + pyopencv_to_safe(pyobj_max_theta, max_theta, ArgInfo("max_theta", 0)) ) + { + ERRWRAP2(cv::HoughLinesWithAccumulator(image, lines, rho, theta, threshold, srn, stn, min_theta, max_theta)); + return pyopencv_from(lines); + } + + + 
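+        // reached only when parsing or conversion failed: record the reason for the final overload error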
pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HoughLinesWithAccumulator"); + + return NULL; +} + +static PyObject* pyopencv_cv_HuMoments(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_m = NULL; + Moments m; + PyObject* pyobj_hu = NULL; + Mat hu; + + const char* keywords[] = { "m", "hu", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:HuMoments", (char**)keywords, &pyobj_m, &pyobj_hu) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_hu, hu, ArgInfo("hu", 1)) ) + { + ERRWRAP2(cv::HuMoments(m, hu)); + return pyopencv_from(hu); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + Moments m; + PyObject* pyobj_hu = NULL; + UMat hu; + + const char* keywords[] = { "m", "hu", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:HuMoments", (char**)keywords, &pyobj_m, &pyobj_hu) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_hu, hu, ArgInfo("hu", 1)) ) + { + ERRWRAP2(cv::HuMoments(m, hu)); + return pyopencv_from(hu); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HuMoments"); + + return NULL; +} + +static PyObject* pyopencv_cv_KeyPoint_convert(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_keypoints = NULL; + vector_KeyPoint keypoints; + vector_Point2f points2f; + PyObject* pyobj_keypointIndexes = NULL; + vector_int keypointIndexes=std::vector<int>(); + + const char* keywords[] = { "keypoints", "keypointIndexes", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:KeyPoint_convert", (char**)keywords, &pyobj_keypoints, &pyobj_keypointIndexes) && + pyopencv_to_safe(pyobj_keypoints, keypoints, ArgInfo("keypoints", 0)) && + pyopencv_to_safe(pyobj_keypointIndexes, keypointIndexes, ArgInfo("keypointIndexes", 0)) ) + { + ERRWRAP2(cv::KeyPoint::convert(keypoints, points2f, keypointIndexes)); + return pyopencv_from(points2f); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points2f = NULL; + vector_Point2f points2f; + vector_KeyPoint keypoints; + PyObject* pyobj_size = NULL; + float size=1; + PyObject* pyobj_response = NULL; + float response=1; + PyObject* pyobj_octave = NULL; + int octave=0; + PyObject* pyobj_class_id = NULL; + int class_id=-1; + + const char* keywords[] = { "points2f", "size", "response", "octave", "class_id", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:KeyPoint_convert", (char**)keywords, &pyobj_points2f, &pyobj_size, &pyobj_response, &pyobj_octave, &pyobj_class_id) && + pyopencv_to_safe(pyobj_points2f, points2f, ArgInfo("points2f", 0)) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_response, response, ArgInfo("response", 0)) && + pyopencv_to_safe(pyobj_octave, octave, ArgInfo("octave", 0)) && + pyopencv_to_safe(pyobj_class_id, class_id, ArgInfo("class_id", 0)) ) + { + ERRWRAP2(cv::KeyPoint::convert(points2f, keypoints, size, response, octave, class_id)); + return pyopencv_from(keypoints); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("KeyPoint_convert"); + + return NULL; +} + +static PyObject* pyopencv_cv_KeyPoint_overlap(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_kp1 = NULL; + KeyPoint kp1; + PyObject* pyobj_kp2 = NULL; + KeyPoint kp2; + float retval;
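+    // KeyPoint_overlap has a single signature, so no per-overload error storage is prepared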
+ + const char* keywords[] = { "kp1", "kp2", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:KeyPoint_overlap", (char**)keywords, &pyobj_kp1, &pyobj_kp2) && + pyopencv_to_safe(pyobj_kp1, kp1, ArgInfo("kp1", 0)) && + pyopencv_to_safe(pyobj_kp2, kp2, ArgInfo("kp2", 0)) ) + { + ERRWRAP2(retval = cv::KeyPoint::overlap(kp1, kp2)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_LUT(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_lut = NULL; + Mat lut; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "lut", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:LUT", (char**)keywords, &pyobj_src, &pyobj_lut, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_lut, lut, ArgInfo("lut", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::LUT(src, lut, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_lut = NULL; + UMat lut; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "lut", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:LUT", (char**)keywords, &pyobj_src, &pyobj_lut, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_lut, lut, ArgInfo("lut", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::LUT(src, lut, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("LUT"); + + return NULL; +} + +static PyObject* pyopencv_cv_Laplacian(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + int ksize=1; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dst", "ksize", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:Laplacian", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dst, &pyobj_ksize, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Laplacian(src, dst, ddepth, ksize, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + int ksize=1; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int 
borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dst", "ksize", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:Laplacian", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dst, &pyobj_ksize, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Laplacian(src, dst, ddepth, ksize, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Laplacian"); + + return NULL; +} + +static PyObject* pyopencv_cv_Mahalanobis(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_v1 = NULL; + Mat v1; + PyObject* pyobj_v2 = NULL; + Mat v2; + PyObject* pyobj_icovar = NULL; + Mat icovar; + double retval; + + const char* keywords[] = { "v1", "v2", "icovar", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:Mahalanobis", (char**)keywords, &pyobj_v1, &pyobj_v2, &pyobj_icovar) && + pyopencv_to_safe(pyobj_v1, v1, ArgInfo("v1", 0)) && + pyopencv_to_safe(pyobj_v2, v2, ArgInfo("v2", 0)) && + pyopencv_to_safe(pyobj_icovar, icovar, ArgInfo("icovar", 0)) ) + { + ERRWRAP2(retval = cv::Mahalanobis(v1, v2, icovar)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_v1 = NULL; + UMat v1; + PyObject* pyobj_v2 = NULL; + UMat v2; + PyObject* pyobj_icovar = NULL; + UMat icovar; + double retval; + + const char* keywords[] = { "v1", "v2", "icovar", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:Mahalanobis", (char**)keywords, &pyobj_v1, &pyobj_v2, &pyobj_icovar) && + pyopencv_to_safe(pyobj_v1, v1, ArgInfo("v1", 0)) && + pyopencv_to_safe(pyobj_v2, v2, ArgInfo("v2", 0)) && + pyopencv_to_safe(pyobj_icovar, icovar, ArgInfo("icovar", 0)) ) + { + ERRWRAP2(retval = cv::Mahalanobis(v1, v2, icovar)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Mahalanobis"); + + return NULL; +} + +static PyObject* pyopencv_cv_PCABackProject(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_result = NULL; + Mat result; + + const char* keywords[] = { "data", "mean", "eigenvectors", "result", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCABackProject", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_result) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) ) + { + ERRWRAP2(cv::PCABackProject(data, mean, eigenvectors, result)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + 
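+        // second attempt: the same signature with cv::UMat arguments (OpenCV's transparent API)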
PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_result = NULL; + UMat result; + + const char* keywords[] = { "data", "mean", "eigenvectors", "result", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCABackProject", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_result) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) ) + { + ERRWRAP2(cv::PCABackProject(data, mean, eigenvectors, result)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("PCABackProject"); + + return NULL; +} + +static PyObject* pyopencv_cv_PCACompute(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_maxComponents = NULL; + int maxComponents=0; + + const char* keywords[] = { "data", "mean", "eigenvectors", "maxComponents", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:PCACompute", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_maxComponents) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_maxComponents, maxComponents, ArgInfo("maxComponents", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, maxComponents)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_maxComponents = NULL; + int maxComponents=0; + + const char* keywords[] = { "data", "mean", "eigenvectors", "maxComponents", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:PCACompute", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_maxComponents) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_maxComponents, maxComponents, ArgInfo("maxComponents", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, maxComponents)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_retainedVariance = NULL; + double retainedVariance=0; + + const char* keywords[] = { "data", "mean", "retainedVariance", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCACompute", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_retainedVariance, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + 
pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_retainedVariance, retainedVariance, ArgInfo("retainedVariance", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, retainedVariance)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_retainedVariance = NULL; + double retainedVariance=0; + + const char* keywords[] = { "data", "mean", "retainedVariance", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCACompute", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_retainedVariance, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_retainedVariance, retainedVariance, ArgInfo("retainedVariance", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, retainedVariance)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("PCACompute"); + + return NULL; +} + +static PyObject* pyopencv_cv_PCACompute2(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_eigenvalues = NULL; + Mat eigenvalues; + PyObject* pyobj_maxComponents = NULL; + int maxComponents=0; + + const char* keywords[] = { "data", "mean", "eigenvectors", "eigenvalues", "maxComponents", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:PCACompute2", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_eigenvalues, &pyobj_maxComponents) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_maxComponents, maxComponents, ArgInfo("maxComponents", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, eigenvalues, maxComponents)); + return Py_BuildValue("(NNN)", pyopencv_from(mean), pyopencv_from(eigenvectors), pyopencv_from(eigenvalues)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_eigenvalues = NULL; + UMat eigenvalues; + PyObject* pyobj_maxComponents = NULL; + int maxComponents=0; + + const char* keywords[] = { "data", "mean", "eigenvectors", "eigenvalues", "maxComponents", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:PCACompute2", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_eigenvalues, &pyobj_maxComponents) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, 
eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_maxComponents, maxComponents, ArgInfo("maxComponents", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, eigenvalues, maxComponents)); + return Py_BuildValue("(NNN)", pyopencv_from(mean), pyopencv_from(eigenvectors), pyopencv_from(eigenvalues)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_eigenvalues = NULL; + Mat eigenvalues; + PyObject* pyobj_retainedVariance = NULL; + double retainedVariance=0; + + const char* keywords[] = { "data", "mean", "retainedVariance", "eigenvectors", "eigenvalues", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:PCACompute2", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_retainedVariance, &pyobj_eigenvectors, &pyobj_eigenvalues) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_retainedVariance, retainedVariance, ArgInfo("retainedVariance", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, eigenvalues, retainedVariance)); + return Py_BuildValue("(NNN)", pyopencv_from(mean), pyopencv_from(eigenvectors), pyopencv_from(eigenvalues)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_eigenvalues = NULL; + UMat eigenvalues; + PyObject* pyobj_retainedVariance = NULL; + double retainedVariance=0; + + const char* keywords[] = { "data", "mean", "retainedVariance", "eigenvectors", "eigenvalues", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:PCACompute2", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_retainedVariance, &pyobj_eigenvectors, &pyobj_eigenvalues) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_retainedVariance, retainedVariance, ArgInfo("retainedVariance", 0)) ) + { + ERRWRAP2(cv::PCACompute(data, mean, eigenvectors, eigenvalues, retainedVariance)); + return Py_BuildValue("(NNN)", pyopencv_from(mean), pyopencv_from(eigenvectors), pyopencv_from(eigenvalues)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("PCACompute2"); + + return NULL; +} + +static PyObject* pyopencv_cv_PCAProject(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + PyObject* pyobj_result = NULL; + Mat result; + + const char* keywords[] = { "data", "mean", "eigenvectors", "result", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCAProject", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_result) && + 
pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) ) + { + ERRWRAP2(cv::PCAProject(data, mean, eigenvectors, result)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + PyObject* pyobj_result = NULL; + UMat result; + + const char* keywords[] = { "data", "mean", "eigenvectors", "result", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:PCAProject", (char**)keywords, &pyobj_data, &pyobj_mean, &pyobj_eigenvectors, &pyobj_result) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) ) + { + ERRWRAP2(cv::PCAProject(data, mean, eigenvectors, result)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("PCAProject"); + + return NULL; +} + +static PyObject* pyopencv_cv_PSNR(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_R = NULL; + double R=255.; + double retval; + + const char* keywords[] = { "src1", "src2", "R", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:PSNR", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_R) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_R, R, ArgInfo("R", 0)) ) + { + ERRWRAP2(retval = cv::PSNR(src1, src2, R)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_R = NULL; + double R=255.; + double retval; + + const char* keywords[] = { "src1", "src2", "R", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:PSNR", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_R) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_R, R, ArgInfo("R", 0)) ) + { + ERRWRAP2(retval = cv::PSNR(src1, src2, R)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("PSNR"); + + return NULL; +} + +static PyObject* pyopencv_cv_SVBackSubst(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_w = NULL; + Mat w; + PyObject* pyobj_u = NULL; + Mat u; + PyObject* pyobj_vt = NULL; + Mat vt; + PyObject* pyobj_rhs = NULL; + Mat rhs; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "w", "u", "vt", "rhs", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:SVBackSubst", (char**)keywords, &pyobj_w, &pyobj_u, &pyobj_vt, &pyobj_rhs, &pyobj_dst) && + pyopencv_to_safe(pyobj_w, w, ArgInfo("w", 0)) && + pyopencv_to_safe(pyobj_u, u, ArgInfo("u", 0)) && + pyopencv_to_safe(pyobj_vt, vt, ArgInfo("vt", 0)) 
&& + pyopencv_to_safe(pyobj_rhs, rhs, ArgInfo("rhs", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::SVBackSubst(w, u, vt, rhs, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_w = NULL; + UMat w; + PyObject* pyobj_u = NULL; + UMat u; + PyObject* pyobj_vt = NULL; + UMat vt; + PyObject* pyobj_rhs = NULL; + UMat rhs; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "w", "u", "vt", "rhs", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:SVBackSubst", (char**)keywords, &pyobj_w, &pyobj_u, &pyobj_vt, &pyobj_rhs, &pyobj_dst) && + pyopencv_to_safe(pyobj_w, w, ArgInfo("w", 0)) && + pyopencv_to_safe(pyobj_u, u, ArgInfo("u", 0)) && + pyopencv_to_safe(pyobj_vt, vt, ArgInfo("vt", 0)) && + pyopencv_to_safe(pyobj_rhs, rhs, ArgInfo("rhs", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::SVBackSubst(w, u, vt, rhs, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("SVBackSubst"); + + return NULL; +} + +static PyObject* pyopencv_cv_SVDecomp(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_w = NULL; + Mat w; + PyObject* pyobj_u = NULL; + Mat u; + PyObject* pyobj_vt = NULL; + Mat vt; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "w", "u", "vt", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:SVDecomp", (char**)keywords, &pyobj_src, &pyobj_w, &pyobj_u, &pyobj_vt, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_w, w, ArgInfo("w", 1)) && + pyopencv_to_safe(pyobj_u, u, ArgInfo("u", 1)) && + pyopencv_to_safe(pyobj_vt, vt, ArgInfo("vt", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::SVDecomp(src, w, u, vt, flags)); + return Py_BuildValue("(NNN)", pyopencv_from(w), pyopencv_from(u), pyopencv_from(vt)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_w = NULL; + UMat w; + PyObject* pyobj_u = NULL; + UMat u; + PyObject* pyobj_vt = NULL; + UMat vt; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "w", "u", "vt", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:SVDecomp", (char**)keywords, &pyobj_src, &pyobj_w, &pyobj_u, &pyobj_vt, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_w, w, ArgInfo("w", 1)) && + pyopencv_to_safe(pyobj_u, u, ArgInfo("u", 1)) && + pyopencv_to_safe(pyobj_vt, vt, ArgInfo("vt", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::SVDecomp(src, w, u, vt, flags)); + return Py_BuildValue("(NNN)", pyopencv_from(w), pyopencv_from(u), pyopencv_from(vt)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("SVDecomp"); + + return NULL; +} + +static PyObject* pyopencv_cv_Scharr(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_scale = NULL; 
+ double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dx", "dy", "dst", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:Scharr", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dx, &pyobj_dy, &pyobj_dst, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dx", "dy", "dst", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:Scharr", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dx, &pyobj_dy, &pyobj_dst, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Scharr"); + + return NULL; +} + +static PyObject* pyopencv_cv_Sobel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dx", "dy", "dst", "ksize", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:Sobel", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dx, &pyobj_dy, &pyobj_dst, &pyobj_ksize, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + 
pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Sobel(src, dst, ddepth, dx, dy, ksize, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "dx", "dy", "dst", "ksize", "scale", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:Sobel", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_dx, &pyobj_dy, &pyobj_dst, &pyobj_ksize, &pyobj_scale, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::Sobel(src, dst, ddepth, dx, dy, ksize, scale, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Sobel"); + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_context(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv_UMat_context()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_queue(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv_UMat_queue()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_fourcc(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_c1 = NULL; + char c1; + PyObject* pyobj_c2 = NULL; + char c2; + PyObject* pyobj_c3 = NULL; + char c3; + PyObject* pyobj_c4 = NULL; + char c4; + int retval; + + const char* keywords[] = { "c1", "c2", "c3", "c4", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:VideoWriter_fourcc", (char**)keywords, &pyobj_c1, &pyobj_c2, &pyobj_c3, &pyobj_c4) && + convert_to_char(pyobj_c1, &c1, ArgInfo("c1", 0)) && + convert_to_char(pyobj_c2, &c2, ArgInfo("c2", 0)) && + convert_to_char(pyobj_c3, &c3, ArgInfo("c3", 0)) && + convert_to_char(pyobj_c4, &c4, ArgInfo("c4", 0)) ) + { + ERRWRAP2(retval = cv::VideoWriter::fourcc(c1, c2, c3, c4)); + return 
pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_absdiff(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:absdiff", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::absdiff(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:absdiff", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::absdiff(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("absdiff"); + + return NULL; +} + +static PyObject* pyopencv_cv_accumulate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:accumulate", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulate(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:accumulate", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulate(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("accumulate"); + + return NULL; +} + +static PyObject* pyopencv_cv_accumulateProduct(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:accumulateProduct", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + 
pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulateProduct(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:accumulateProduct", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulateProduct(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("accumulateProduct"); + + return NULL; +} + +static PyObject* pyopencv_cv_accumulateSquare(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:accumulateSquare", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulateSquare(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:accumulateSquare", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulateSquare(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("accumulateSquare"); + + return NULL; +} + +static PyObject* pyopencv_cv_accumulateWeighted(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "dst", "alpha", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:accumulateWeighted", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + 
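+        // all arguments converted; ERRWRAP2 runs the call and translates cv::Exception into a Python error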
ERRWRAP2(cv::accumulateWeighted(src, dst, alpha, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "dst", "alpha", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:accumulateWeighted", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::accumulateWeighted(src, dst, alpha, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("accumulateWeighted"); + + return NULL; +} + +static PyObject* pyopencv_cv_adaptiveThreshold(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_maxValue = NULL; + double maxValue=0; + PyObject* pyobj_adaptiveMethod = NULL; + int adaptiveMethod=0; + PyObject* pyobj_thresholdType = NULL; + int thresholdType=0; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_C = NULL; + double C=0; + + const char* keywords[] = { "src", "maxValue", "adaptiveMethod", "thresholdType", "blockSize", "C", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:adaptiveThreshold", (char**)keywords, &pyobj_src, &pyobj_maxValue, &pyobj_adaptiveMethod, &pyobj_thresholdType, &pyobj_blockSize, &pyobj_C, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_maxValue, maxValue, ArgInfo("maxValue", 0)) && + pyopencv_to_safe(pyobj_adaptiveMethod, adaptiveMethod, ArgInfo("adaptiveMethod", 0)) && + pyopencv_to_safe(pyobj_thresholdType, thresholdType, ArgInfo("thresholdType", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_C, C, ArgInfo("C", 0)) ) + { + ERRWRAP2(cv::adaptiveThreshold(src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_maxValue = NULL; + double maxValue=0; + PyObject* pyobj_adaptiveMethod = NULL; + int adaptiveMethod=0; + PyObject* pyobj_thresholdType = NULL; + int thresholdType=0; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_C = NULL; + double C=0; + + const char* keywords[] = { "src", "maxValue", "adaptiveMethod", "thresholdType", "blockSize", "C", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:adaptiveThreshold", (char**)keywords, &pyobj_src, &pyobj_maxValue, &pyobj_adaptiveMethod, &pyobj_thresholdType, &pyobj_blockSize, &pyobj_C, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_maxValue, maxValue, ArgInfo("maxValue", 0)) && + pyopencv_to_safe(pyobj_adaptiveMethod, adaptiveMethod, ArgInfo("adaptiveMethod", 0)) && + pyopencv_to_safe(pyobj_thresholdType, 
thresholdType, ArgInfo("thresholdType", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_C, C, ArgInfo("C", 0)) ) + { + ERRWRAP2(cv::adaptiveThreshold(src, dst, maxValue, adaptiveMethod, thresholdType, blockSize, C)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("adaptiveThreshold"); + + return NULL; +} + +static PyObject* pyopencv_cv_add(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "mask", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:add", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::add(src1, src2, dst, mask, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "mask", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:add", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::add(src1, src2, dst, mask, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("add"); + + return NULL; +} + +static PyObject* pyopencv_cv_addWeighted(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_gamma = NULL; + double gamma=0; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "alpha", "src2", "beta", "gamma", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:addWeighted", (char**)keywords, &pyobj_src1, &pyobj_alpha, &pyobj_src2, &pyobj_beta, &pyobj_gamma, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_gamma, gamma, ArgInfo("gamma", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + 
pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::addWeighted(src1, alpha, src2, beta, gamma, dst, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_gamma = NULL; + double gamma=0; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "alpha", "src2", "beta", "gamma", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:addWeighted", (char**)keywords, &pyobj_src1, &pyobj_alpha, &pyobj_src2, &pyobj_beta, &pyobj_gamma, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_gamma, gamma, ArgInfo("gamma", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::addWeighted(src1, alpha, src2, beta, gamma, dst, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("addWeighted"); + + return NULL; +} + +static PyObject* pyopencv_cv_applyColorMap(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_colormap = NULL; + int colormap=0; + + const char* keywords[] = { "src", "colormap", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:applyColorMap", (char**)keywords, &pyobj_src, &pyobj_colormap, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_colormap, colormap, ArgInfo("colormap", 0)) ) + { + ERRWRAP2(cv::applyColorMap(src, dst, colormap)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_colormap = NULL; + int colormap=0; + + const char* keywords[] = { "src", "colormap", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:applyColorMap", (char**)keywords, &pyobj_src, &pyobj_colormap, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_colormap, colormap, ArgInfo("colormap", 0)) ) + { + ERRWRAP2(cv::applyColorMap(src, dst, colormap)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_userColor = NULL; + Mat userColor; + + const char* keywords[] = { "src", "userColor", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:applyColorMap", (char**)keywords, &pyobj_src, &pyobj_userColor, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_userColor, userColor, ArgInfo("userColor", 0)) ) + { + ERRWRAP2(cv::applyColorMap(src, dst, userColor)); + 
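+            // this overload applies a caller-supplied 256-entry colormap instead of a built-in COLORMAP_* id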
return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_userColor = NULL; + UMat userColor; + + const char* keywords[] = { "src", "userColor", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:applyColorMap", (char**)keywords, &pyobj_src, &pyobj_userColor, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_userColor, userColor, ArgInfo("userColor", 0)) ) + { + ERRWRAP2(cv::applyColorMap(src, dst, userColor)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("applyColorMap"); + + return NULL; +} + +static PyObject* pyopencv_cv_approxPolyDP(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_curve = NULL; + Mat curve; + PyObject* pyobj_approxCurve = NULL; + Mat approxCurve; + PyObject* pyobj_epsilon = NULL; + double epsilon=0; + PyObject* pyobj_closed = NULL; + bool closed=0; + + const char* keywords[] = { "curve", "epsilon", "closed", "approxCurve", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:approxPolyDP", (char**)keywords, &pyobj_curve, &pyobj_epsilon, &pyobj_closed, &pyobj_approxCurve) && + pyopencv_to_safe(pyobj_curve, curve, ArgInfo("curve", 0)) && + pyopencv_to_safe(pyobj_approxCurve, approxCurve, ArgInfo("approxCurve", 1)) && + pyopencv_to_safe(pyobj_epsilon, epsilon, ArgInfo("epsilon", 0)) && + pyopencv_to_safe(pyobj_closed, closed, ArgInfo("closed", 0)) ) + { + ERRWRAP2(cv::approxPolyDP(curve, approxCurve, epsilon, closed)); + return pyopencv_from(approxCurve); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_curve = NULL; + UMat curve; + PyObject* pyobj_approxCurve = NULL; + UMat approxCurve; + PyObject* pyobj_epsilon = NULL; + double epsilon=0; + PyObject* pyobj_closed = NULL; + bool closed=0; + + const char* keywords[] = { "curve", "epsilon", "closed", "approxCurve", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:approxPolyDP", (char**)keywords, &pyobj_curve, &pyobj_epsilon, &pyobj_closed, &pyobj_approxCurve) && + pyopencv_to_safe(pyobj_curve, curve, ArgInfo("curve", 0)) && + pyopencv_to_safe(pyobj_approxCurve, approxCurve, ArgInfo("approxCurve", 1)) && + pyopencv_to_safe(pyobj_epsilon, epsilon, ArgInfo("epsilon", 0)) && + pyopencv_to_safe(pyobj_closed, closed, ArgInfo("closed", 0)) ) + { + ERRWRAP2(cv::approxPolyDP(curve, approxCurve, epsilon, closed)); + return pyopencv_from(approxCurve); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("approxPolyDP"); + + return NULL; +} + +static PyObject* pyopencv_cv_arcLength(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_curve = NULL; + Mat curve; + PyObject* pyobj_closed = NULL; + bool closed=0; + double retval; + + const char* keywords[] = { "curve", "closed", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:arcLength", (char**)keywords, &pyobj_curve, &pyobj_closed) && + pyopencv_to_safe(pyobj_curve, curve, ArgInfo("curve", 0)) && + pyopencv_to_safe(pyobj_closed, closed, ArgInfo("closed", 0)) ) + { + ERRWRAP2(retval = cv::arcLength(curve, closed)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + 
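/* Second conversion pass: same Python signature, but the arrays are
+ taken as cv::UMat rather than cv::Mat, routing the call through
+ OpenCV's transparent API (OpenCL-backed where available). Every
+ generated wrapper repeats its Mat block in this UMat form. */ +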
PyObject* pyobj_curve = NULL; + UMat curve; + PyObject* pyobj_closed = NULL; + bool closed=0; + double retval; + + const char* keywords[] = { "curve", "closed", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:arcLength", (char**)keywords, &pyobj_curve, &pyobj_closed) && + pyopencv_to_safe(pyobj_curve, curve, ArgInfo("curve", 0)) && + pyopencv_to_safe(pyobj_closed, closed, ArgInfo("closed", 0)) ) + { + ERRWRAP2(retval = cv::arcLength(curve, closed)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("arcLength"); + + return NULL; +} + +static PyObject* pyopencv_cv_arrowedLine(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_line_type = NULL; + int line_type=8; + PyObject* pyobj_shift = NULL; + int shift=0; + PyObject* pyobj_tipLength = NULL; + double tipLength=0.1; + + const char* keywords[] = { "img", "pt1", "pt2", "color", "thickness", "line_type", "shift", "tipLength", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:arrowedLine", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_line_type, &pyobj_shift, &pyobj_tipLength) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_line_type, line_type, ArgInfo("line_type", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) && + pyopencv_to_safe(pyobj_tipLength, tipLength, ArgInfo("tipLength", 0)) ) + { + ERRWRAP2(cv::arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_line_type = NULL; + int line_type=8; + PyObject* pyobj_shift = NULL; + int shift=0; + PyObject* pyobj_tipLength = NULL; + double tipLength=0.1; + + const char* keywords[] = { "img", "pt1", "pt2", "color", "thickness", "line_type", "shift", "tipLength", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:arrowedLine", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_line_type, &pyobj_shift, &pyobj_tipLength) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_line_type, line_type, ArgInfo("line_type", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) && + pyopencv_to_safe(pyobj_tipLength, tipLength, ArgInfo("tipLength", 0)) ) + { + ERRWRAP2(cv::arrowedLine(img, pt1, pt2, color, thickness, line_type, shift, tipLength)); + return 
pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("arrowedLine"); + + return NULL; +} + +static PyObject* pyopencv_cv_batchDistance(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dist = NULL; + Mat dist; + PyObject* pyobj_dtype = NULL; + int dtype=0; + PyObject* pyobj_nidx = NULL; + Mat nidx; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_K = NULL; + int K=0; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_update = NULL; + int update=0; + PyObject* pyobj_crosscheck = NULL; + bool crosscheck=false; + + const char* keywords[] = { "src1", "src2", "dtype", "dist", "nidx", "normType", "K", "mask", "update", "crosscheck", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOOOOO:batchDistance", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dtype, &pyobj_dist, &pyobj_nidx, &pyobj_normType, &pyobj_K, &pyobj_mask, &pyobj_update, &pyobj_crosscheck) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dist, dist, ArgInfo("dist", 1)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) && + pyopencv_to_safe(pyobj_nidx, nidx, ArgInfo("nidx", 1)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_K, K, ArgInfo("K", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_update, update, ArgInfo("update", 0)) && + pyopencv_to_safe(pyobj_crosscheck, crosscheck, ArgInfo("crosscheck", 0)) ) + { + ERRWRAP2(cv::batchDistance(src1, src2, dist, dtype, nidx, normType, K, mask, update, crosscheck)); + return Py_BuildValue("(NN)", pyopencv_from(dist), pyopencv_from(nidx)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dist = NULL; + UMat dist; + PyObject* pyobj_dtype = NULL; + int dtype=0; + PyObject* pyobj_nidx = NULL; + UMat nidx; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_K = NULL; + int K=0; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_update = NULL; + int update=0; + PyObject* pyobj_crosscheck = NULL; + bool crosscheck=false; + + const char* keywords[] = { "src1", "src2", "dtype", "dist", "nidx", "normType", "K", "mask", "update", "crosscheck", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOOOOO:batchDistance", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dtype, &pyobj_dist, &pyobj_nidx, &pyobj_normType, &pyobj_K, &pyobj_mask, &pyobj_update, &pyobj_crosscheck) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dist, dist, ArgInfo("dist", 1)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) && + pyopencv_to_safe(pyobj_nidx, nidx, ArgInfo("nidx", 1)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_K, K, ArgInfo("K", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_update, update, ArgInfo("update", 0)) && + pyopencv_to_safe(pyobj_crosscheck, crosscheck, ArgInfo("crosscheck", 0)) ) + { + ERRWRAP2(cv::batchDistance(src1, src2, dist, dtype, nidx, 
normType, K, mask, update, crosscheck)); + return Py_BuildValue("(NN)", pyopencv_from(dist), pyopencv_from(nidx)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("batchDistance"); + + return NULL; +} + +static PyObject* pyopencv_cv_bilateralFilter(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_d = NULL; + int d=0; + PyObject* pyobj_sigmaColor = NULL; + double sigmaColor=0; + PyObject* pyobj_sigmaSpace = NULL; + double sigmaSpace=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "d", "sigmaColor", "sigmaSpace", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:bilateralFilter", (char**)keywords, &pyobj_src, &pyobj_d, &pyobj_sigmaColor, &pyobj_sigmaSpace, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_d, d, ArgInfo("d", 0)) && + pyopencv_to_safe(pyobj_sigmaColor, sigmaColor, ArgInfo("sigmaColor", 0)) && + pyopencv_to_safe(pyobj_sigmaSpace, sigmaSpace, ArgInfo("sigmaSpace", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_d = NULL; + int d=0; + PyObject* pyobj_sigmaColor = NULL; + double sigmaColor=0; + PyObject* pyobj_sigmaSpace = NULL; + double sigmaSpace=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "d", "sigmaColor", "sigmaSpace", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:bilateralFilter", (char**)keywords, &pyobj_src, &pyobj_d, &pyobj_sigmaColor, &pyobj_sigmaSpace, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_d, d, ArgInfo("d", 0)) && + pyopencv_to_safe(pyobj_sigmaColor, sigmaColor, ArgInfo("sigmaColor", 0)) && + pyopencv_to_safe(pyobj_sigmaSpace, sigmaSpace, ArgInfo("sigmaSpace", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("bilateralFilter"); + + return NULL; +} + +static PyObject* pyopencv_cv_bitwise_and(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_and", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + 
pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_and(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_and", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_and(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("bitwise_and"); + + return NULL; +} + +static PyObject* pyopencv_cv_bitwise_not(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:bitwise_not", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_not(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:bitwise_not", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_not(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("bitwise_not"); + + return NULL; +} + +static PyObject* pyopencv_cv_bitwise_or(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_or", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_or(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; 
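+ /* Each parameter is held twice: a raw PyObject* filled in by
+ PyArg_ParseTupleAndKeywords() and a typed C++ value that
+ pyopencv_to_safe() converts it into. */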
+ PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_or", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_or(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("bitwise_or"); + + return NULL; +} + +static PyObject* pyopencv_cv_bitwise_xor(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_xor", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_xor(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src1", "src2", "dst", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:bitwise_xor", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::bitwise_xor(src1, src2, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("bitwise_xor"); + + return NULL; +} + +static PyObject* pyopencv_cv_blendLinear(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_weights1 = NULL; + Mat weights1; + PyObject* pyobj_weights2 = NULL; + Mat weights2; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src1", "src2", "weights1", "weights2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:blendLinear", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_weights1, &pyobj_weights2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_weights1, weights1, ArgInfo("weights1", 0)) && + pyopencv_to_safe(pyobj_weights2, weights2, ArgInfo("weights2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::blendLinear(src1, src2, weights1, weights2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); 
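+ /* A failed conversion is recorded here rather than raised
+ immediately, so that pyRaiseCVOverloadException() at the end of the
+ wrapper can report every signature that was tried. */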
+ } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_weights1 = NULL; + UMat weights1; + PyObject* pyobj_weights2 = NULL; + UMat weights2; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src1", "src2", "weights1", "weights2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:blendLinear", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_weights1, &pyobj_weights2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_weights1, weights1, ArgInfo("weights1", 0)) && + pyopencv_to_safe(pyobj_weights2, weights2, ArgInfo("weights2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::blendLinear(src1, src2, weights1, weights2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("blendLinear"); + + return NULL; +} + +static PyObject* pyopencv_cv_blur(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "dst", "anchor", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:blur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::blur(src, dst, ksize, anchor, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "dst", "anchor", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:blur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::blur(src, dst, ksize, anchor, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("blur"); + + return NULL; +} + +static PyObject* pyopencv_cv_borderInterpolate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_p = NULL; + int p=0; + PyObject* pyobj_len = NULL; + int len=0; + PyObject* pyobj_borderType = NULL; + int borderType=0; + int retval; + + const char* keywords[] = { "p", "len", "borderType", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:borderInterpolate", (char**)keywords, &pyobj_p, &pyobj_len, &pyobj_borderType) && + pyopencv_to_safe(pyobj_p, p, ArgInfo("p", 0)) && + pyopencv_to_safe(pyobj_len, len, ArgInfo("len", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(retval = cv::borderInterpolate(p, len, borderType)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_boundingRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_array = NULL; + Mat array; + Rect retval; + + const char* keywords[] = { "array", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:boundingRect", (char**)keywords, &pyobj_array) && + pyopencv_to_safe(pyobj_array, array, ArgInfo("array", 0)) ) + { + ERRWRAP2(retval = cv::boundingRect(array)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_array = NULL; + UMat array; + Rect retval; + + const char* keywords[] = { "array", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:boundingRect", (char**)keywords, &pyobj_array) && + pyopencv_to_safe(pyobj_array, array, ArgInfo("array", 0)) ) + { + ERRWRAP2(retval = cv::boundingRect(array)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("boundingRect"); + + return NULL; +} + +static PyObject* pyopencv_cv_boxFilter(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_normalize = NULL; + bool normalize=true; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "ksize", "dst", "anchor", "normalize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:boxFilter", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_normalize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::boxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_normalize = NULL; + bool normalize=true; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "ksize", "dst", "anchor", "normalize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:boxFilter", (char**)keywords, &pyobj_src, 
&pyobj_ddepth, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_normalize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::boxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("boxFilter"); + + return NULL; +} + +static PyObject* pyopencv_cv_boxPoints(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_box = NULL; + RotatedRect box; + PyObject* pyobj_points = NULL; + Mat points; + + const char* keywords[] = { "box", "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:boxPoints", (char**)keywords, &pyobj_box, &pyobj_points) && + pyopencv_to_safe(pyobj_box, box, ArgInfo("box", 0)) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 1)) ) + { + ERRWRAP2(cv::boxPoints(box, points)); + return pyopencv_from(points); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_box = NULL; + RotatedRect box; + PyObject* pyobj_points = NULL; + UMat points; + + const char* keywords[] = { "box", "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:boxPoints", (char**)keywords, &pyobj_box, &pyobj_points) && + pyopencv_to_safe(pyobj_box, box, ArgInfo("box", 0)) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 1)) ) + { + ERRWRAP2(cv::boxPoints(box, points)); + return pyopencv_from(points); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("boxPoints"); + + return NULL; +} + +static PyObject* pyopencv_cv_calcBackProject(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_images = NULL; + vector_Mat images; + PyObject* pyobj_channels = NULL; + vector_int channels; + PyObject* pyobj_hist = NULL; + Mat hist; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ranges = NULL; + vector_float ranges; + PyObject* pyobj_scale = NULL; + double scale=0; + + const char* keywords[] = { "images", "channels", "hist", "ranges", "scale", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:calcBackProject", (char**)keywords, &pyobj_images, &pyobj_channels, &pyobj_hist, &pyobj_ranges, &pyobj_scale, &pyobj_dst) && + pyopencv_to_safe(pyobj_images, images, ArgInfo("images", 0)) && + pyopencv_to_safe(pyobj_channels, channels, ArgInfo("channels", 0)) && + pyopencv_to_safe(pyobj_hist, hist, ArgInfo("hist", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ranges, ranges, ArgInfo("ranges", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) ) + { + ERRWRAP2(cv::calcBackProject(images, channels, hist, dst, ranges, scale)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_images = NULL; + vector_UMat images; + PyObject* pyobj_channels = NULL; + vector_int channels; + PyObject* pyobj_hist = NULL; + UMat hist; + PyObject* pyobj_dst = NULL; + UMat 
dst; + PyObject* pyobj_ranges = NULL; + vector_float ranges; + PyObject* pyobj_scale = NULL; + double scale=0; + + const char* keywords[] = { "images", "channels", "hist", "ranges", "scale", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:calcBackProject", (char**)keywords, &pyobj_images, &pyobj_channels, &pyobj_hist, &pyobj_ranges, &pyobj_scale, &pyobj_dst) && + pyopencv_to_safe(pyobj_images, images, ArgInfo("images", 0)) && + pyopencv_to_safe(pyobj_channels, channels, ArgInfo("channels", 0)) && + pyopencv_to_safe(pyobj_hist, hist, ArgInfo("hist", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ranges, ranges, ArgInfo("ranges", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) ) + { + ERRWRAP2(cv::calcBackProject(images, channels, hist, dst, ranges, scale)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("calcBackProject"); + + return NULL; +} + +static PyObject* pyopencv_cv_calcCovarMatrix(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_samples = NULL; + Mat samples; + PyObject* pyobj_covar = NULL; + Mat covar; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_ctype = NULL; + int ctype=CV_64F; + + const char* keywords[] = { "samples", "mean", "flags", "covar", "ctype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:calcCovarMatrix", (char**)keywords, &pyobj_samples, &pyobj_mean, &pyobj_flags, &pyobj_covar, &pyobj_ctype) && + pyopencv_to_safe(pyobj_samples, samples, ArgInfo("samples", 0)) && + pyopencv_to_safe(pyobj_covar, covar, ArgInfo("covar", 1)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_ctype, ctype, ArgInfo("ctype", 0)) ) + { + ERRWRAP2(cv::calcCovarMatrix(samples, covar, mean, flags, ctype)); + return Py_BuildValue("(NN)", pyopencv_from(covar), pyopencv_from(mean)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_samples = NULL; + UMat samples; + PyObject* pyobj_covar = NULL; + UMat covar; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_ctype = NULL; + int ctype=CV_64F; + + const char* keywords[] = { "samples", "mean", "flags", "covar", "ctype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:calcCovarMatrix", (char**)keywords, &pyobj_samples, &pyobj_mean, &pyobj_flags, &pyobj_covar, &pyobj_ctype) && + pyopencv_to_safe(pyobj_samples, samples, ArgInfo("samples", 0)) && + pyopencv_to_safe(pyobj_covar, covar, ArgInfo("covar", 1)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_ctype, ctype, ArgInfo("ctype", 0)) ) + { + ERRWRAP2(cv::calcCovarMatrix(samples, covar, mean, flags, ctype)); + return Py_BuildValue("(NN)", pyopencv_from(covar), pyopencv_from(mean)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("calcCovarMatrix"); + + return NULL; +} + +static PyObject* pyopencv_cv_calcHist(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_images = NULL; + vector_Mat images; + PyObject* pyobj_channels = NULL; + vector_int channels; + PyObject* 
pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_hist = NULL; + Mat hist; + PyObject* pyobj_histSize = NULL; + vector_int histSize; + PyObject* pyobj_ranges = NULL; + vector_float ranges; + PyObject* pyobj_accumulate = NULL; + bool accumulate=false; + + const char* keywords[] = { "images", "channels", "mask", "histSize", "ranges", "hist", "accumulate", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:calcHist", (char**)keywords, &pyobj_images, &pyobj_channels, &pyobj_mask, &pyobj_histSize, &pyobj_ranges, &pyobj_hist, &pyobj_accumulate) && + pyopencv_to_safe(pyobj_images, images, ArgInfo("images", 0)) && + pyopencv_to_safe(pyobj_channels, channels, ArgInfo("channels", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_hist, hist, ArgInfo("hist", 1)) && + pyopencv_to_safe(pyobj_histSize, histSize, ArgInfo("histSize", 0)) && + pyopencv_to_safe(pyobj_ranges, ranges, ArgInfo("ranges", 0)) && + pyopencv_to_safe(pyobj_accumulate, accumulate, ArgInfo("accumulate", 0)) ) + { + ERRWRAP2(cv::calcHist(images, channels, mask, hist, histSize, ranges, accumulate)); + return pyopencv_from(hist); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_images = NULL; + vector_UMat images; + PyObject* pyobj_channels = NULL; + vector_int channels; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_hist = NULL; + UMat hist; + PyObject* pyobj_histSize = NULL; + vector_int histSize; + PyObject* pyobj_ranges = NULL; + vector_float ranges; + PyObject* pyobj_accumulate = NULL; + bool accumulate=false; + + const char* keywords[] = { "images", "channels", "mask", "histSize", "ranges", "hist", "accumulate", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:calcHist", (char**)keywords, &pyobj_images, &pyobj_channels, &pyobj_mask, &pyobj_histSize, &pyobj_ranges, &pyobj_hist, &pyobj_accumulate) && + pyopencv_to_safe(pyobj_images, images, ArgInfo("images", 0)) && + pyopencv_to_safe(pyobj_channels, channels, ArgInfo("channels", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_hist, hist, ArgInfo("hist", 1)) && + pyopencv_to_safe(pyobj_histSize, histSize, ArgInfo("histSize", 0)) && + pyopencv_to_safe(pyobj_ranges, ranges, ArgInfo("ranges", 0)) && + pyopencv_to_safe(pyobj_accumulate, accumulate, ArgInfo("accumulate", 0)) ) + { + ERRWRAP2(cv::calcHist(images, channels, mask, hist, histSize, ranges, accumulate)); + return pyopencv_from(hist); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("calcHist"); + + return NULL; +} + +static PyObject* pyopencv_cv_cartToPolar(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_x = NULL; + Mat x; + PyObject* pyobj_y = NULL; + Mat y; + PyObject* pyobj_magnitude = NULL; + Mat magnitude; + PyObject* pyobj_angle = NULL; + Mat angle; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "x", "y", "magnitude", "angle", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:cartToPolar", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_magnitude, &pyobj_angle, &pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 1)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, 
angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::cartToPolar(x, y, magnitude, angle, angleInDegrees)); + return Py_BuildValue("(NN)", pyopencv_from(magnitude), pyopencv_from(angle)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_x = NULL; + UMat x; + PyObject* pyobj_y = NULL; + UMat y; + PyObject* pyobj_magnitude = NULL; + UMat magnitude; + PyObject* pyobj_angle = NULL; + UMat angle; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "x", "y", "magnitude", "angle", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:cartToPolar", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_magnitude, &pyobj_angle, &pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 1)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::cartToPolar(x, y, magnitude, angle, angleInDegrees)); + return Py_BuildValue("(NN)", pyopencv_from(magnitude), pyopencv_from(angle)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cartToPolar"); + + return NULL; +} + +static PyObject* pyopencv_cv_checkHardwareSupport(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_feature = NULL; + int feature=0; + bool retval; + + const char* keywords[] = { "feature", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:checkHardwareSupport", (char**)keywords, &pyobj_feature) && + pyopencv_to_safe(pyobj_feature, feature, ArgInfo("feature", 0)) ) + { + ERRWRAP2(retval = cv::checkHardwareSupport(feature)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_checkRange(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_a = NULL; + Mat a; + PyObject* pyobj_quiet = NULL; + bool quiet=true; + Point pos; + PyObject* pyobj_minVal = NULL; + double minVal=-DBL_MAX; + PyObject* pyobj_maxVal = NULL; + double maxVal=DBL_MAX; + bool retval; + + const char* keywords[] = { "a", "quiet", "minVal", "maxVal", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:checkRange", (char**)keywords, &pyobj_a, &pyobj_quiet, &pyobj_minVal, &pyobj_maxVal) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_quiet, quiet, ArgInfo("quiet", 0)) && + pyopencv_to_safe(pyobj_minVal, minVal, ArgInfo("minVal", 0)) && + pyopencv_to_safe(pyobj_maxVal, maxVal, ArgInfo("maxVal", 0)) ) + { + ERRWRAP2(retval = cv::checkRange(a, quiet, &pos, minVal, maxVal)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(pos)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_a = NULL; + UMat a; + PyObject* pyobj_quiet = NULL; + bool quiet=true; + Point pos; + PyObject* pyobj_minVal = NULL; + double minVal=-DBL_MAX; + PyObject* pyobj_maxVal = NULL; + double maxVal=DBL_MAX; + bool retval; + + const char* keywords[] = { "a", "quiet", "minVal", "maxVal", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:checkRange", (char**)keywords, &pyobj_a, &pyobj_quiet, &pyobj_minVal, &pyobj_maxVal) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_quiet, quiet, ArgInfo("quiet", 0)) && + 
pyopencv_to_safe(pyobj_minVal, minVal, ArgInfo("minVal", 0)) && + pyopencv_to_safe(pyobj_maxVal, maxVal, ArgInfo("maxVal", 0)) ) + { + ERRWRAP2(retval = cv::checkRange(a, quiet, &pos, minVal, maxVal)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(pos)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("checkRange"); + + return NULL; +} + +static PyObject* pyopencv_cv_circle(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_center = NULL; + Point center; + PyObject* pyobj_radius = NULL; + int radius=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "center", "radius", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:circle", (char**)keywords, &pyobj_img, &pyobj_center, &pyobj_radius, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_radius, radius, ArgInfo("radius", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::circle(img, center, radius, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_center = NULL; + Point center; + PyObject* pyobj_radius = NULL; + int radius=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "center", "radius", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:circle", (char**)keywords, &pyobj_img, &pyobj_center, &pyobj_radius, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_radius, radius, ArgInfo("radius", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::circle(img, center, radius, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("circle"); + + return NULL; +} + +static PyObject* pyopencv_cv_clipLine(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_imgRect = NULL; + Rect imgRect; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + bool retval; + + const char* keywords[] = { "imgRect", "pt1", "pt2", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OOO:clipLine", (char**)keywords, &pyobj_imgRect, &pyobj_pt1, &pyobj_pt2) && + pyopencv_to_safe(pyobj_imgRect, imgRect, ArgInfo("imgRect", 0)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 1)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 1)) ) + { + ERRWRAP2(retval = cv::clipLine(imgRect, pt1, pt2)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(pt1), pyopencv_from(pt2)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_compare(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_cmpop = NULL; + int cmpop=0; + + const char* keywords[] = { "src1", "src2", "cmpop", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:compare", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_cmpop, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_cmpop, cmpop, ArgInfo("cmpop", 0)) ) + { + ERRWRAP2(cv::compare(src1, src2, dst, cmpop)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_cmpop = NULL; + int cmpop=0; + + const char* keywords[] = { "src1", "src2", "cmpop", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:compare", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_cmpop, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_cmpop, cmpop, ArgInfo("cmpop", 0)) ) + { + ERRWRAP2(cv::compare(src1, src2, dst, cmpop)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("compare"); + + return NULL; +} + +static PyObject* pyopencv_cv_compareHist(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_H1 = NULL; + Mat H1; + PyObject* pyobj_H2 = NULL; + Mat H2; + PyObject* pyobj_method = NULL; + int method=0; + double retval; + + const char* keywords[] = { "H1", "H2", "method", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:compareHist", (char**)keywords, &pyobj_H1, &pyobj_H2, &pyobj_method) && + pyopencv_to_safe(pyobj_H1, H1, ArgInfo("H1", 0)) && + pyopencv_to_safe(pyobj_H2, H2, ArgInfo("H2", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) ) + { + ERRWRAP2(retval = cv::compareHist(H1, H2, method)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_H1 = NULL; + UMat H1; + PyObject* pyobj_H2 = NULL; + UMat H2; + PyObject* pyobj_method = NULL; + int method=0; + double retval; + + const char* keywords[] = { "H1", "H2", "method", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:compareHist", (char**)keywords, &pyobj_H1, &pyobj_H2, &pyobj_method) && + pyopencv_to_safe(pyobj_H1, H1, ArgInfo("H1", 0)) && + pyopencv_to_safe(pyobj_H2, H2, ArgInfo("H2", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) ) + { + ERRWRAP2(retval = cv::compareHist(H1, 
H2, method)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("compareHist"); + + return NULL; +} + +static PyObject* pyopencv_cv_completeSymm(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_m = NULL; + Mat m; + PyObject* pyobj_lowerToUpper = NULL; + bool lowerToUpper=false; + + const char* keywords[] = { "m", "lowerToUpper", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:completeSymm", (char**)keywords, &pyobj_m, &pyobj_lowerToUpper) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 1)) && + pyopencv_to_safe(pyobj_lowerToUpper, lowerToUpper, ArgInfo("lowerToUpper", 0)) ) + { + ERRWRAP2(cv::completeSymm(m, lowerToUpper)); + return pyopencv_from(m); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + PyObject* pyobj_lowerToUpper = NULL; + bool lowerToUpper=false; + + const char* keywords[] = { "m", "lowerToUpper", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:completeSymm", (char**)keywords, &pyobj_m, &pyobj_lowerToUpper) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 1)) && + pyopencv_to_safe(pyobj_lowerToUpper, lowerToUpper, ArgInfo("lowerToUpper", 0)) ) + { + ERRWRAP2(cv::completeSymm(m, lowerToUpper)); + return pyopencv_from(m); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("completeSymm"); + + return NULL; +} + +static PyObject* pyopencv_cv_connectedComponents(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_labels = NULL; + Mat labels; + PyObject* pyobj_connectivity = NULL; + int connectivity=8; + PyObject* pyobj_ltype = NULL; + int ltype=CV_32S; + int retval; + + const char* keywords[] = { "image", "labels", "connectivity", "ltype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:connectedComponents", (char**)keywords, &pyobj_image, &pyobj_labels, &pyobj_connectivity, &pyobj_ltype) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponents(image, labels, connectivity, ltype)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(labels)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_labels = NULL; + UMat labels; + PyObject* pyobj_connectivity = NULL; + int connectivity=8; + PyObject* pyobj_ltype = NULL; + int ltype=CV_32S; + int retval; + + const char* keywords[] = { "image", "labels", "connectivity", "ltype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:connectedComponents", (char**)keywords, &pyobj_image, &pyobj_labels, &pyobj_connectivity, &pyobj_ltype) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponents(image, labels, connectivity, ltype)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(labels)); + } + + + 
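/* In the "O|OOO" format string, arguments after '|' are optional; the
+ int return value and the labels array are packed into a tuple via
+ Py_BuildValue, whose "N" code steals the references produced by
+ pyopencv_from(). */ +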
pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("connectedComponents"); + + return NULL; +} + +static PyObject* pyopencv_cv_connectedComponentsWithAlgorithm(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_labels = NULL; + Mat labels; + PyObject* pyobj_connectivity = NULL; + int connectivity=0; + PyObject* pyobj_ltype = NULL; + int ltype=0; + PyObject* pyobj_ccltype = NULL; + int ccltype=0; + int retval; + + const char* keywords[] = { "image", "connectivity", "ltype", "ccltype", "labels", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:connectedComponentsWithAlgorithm", (char**)keywords, &pyobj_image, &pyobj_connectivity, &pyobj_ltype, &pyobj_ccltype, &pyobj_labels) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) && + pyopencv_to_safe(pyobj_ccltype, ccltype, ArgInfo("ccltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponents(image, labels, connectivity, ltype, ccltype)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(labels)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_labels = NULL; + UMat labels; + PyObject* pyobj_connectivity = NULL; + int connectivity=0; + PyObject* pyobj_ltype = NULL; + int ltype=0; + PyObject* pyobj_ccltype = NULL; + int ccltype=0; + int retval; + + const char* keywords[] = { "image", "connectivity", "ltype", "ccltype", "labels", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:connectedComponentsWithAlgorithm", (char**)keywords, &pyobj_image, &pyobj_connectivity, &pyobj_ltype, &pyobj_ccltype, &pyobj_labels) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) && + pyopencv_to_safe(pyobj_ccltype, ccltype, ArgInfo("ccltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponents(image, labels, connectivity, ltype, ccltype)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(labels)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("connectedComponentsWithAlgorithm"); + + return NULL; +} + +static PyObject* pyopencv_cv_connectedComponentsWithStats(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_labels = NULL; + Mat labels; + PyObject* pyobj_stats = NULL; + Mat stats; + PyObject* pyobj_centroids = NULL; + Mat centroids; + PyObject* pyobj_connectivity = NULL; + int connectivity=8; + PyObject* pyobj_ltype = NULL; + int ltype=CV_32S; + int retval; + + const char* keywords[] = { "image", "labels", "stats", "centroids", "connectivity", "ltype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOOO:connectedComponentsWithStats", (char**)keywords, &pyobj_image, &pyobj_labels, &pyobj_stats, &pyobj_centroids, &pyobj_connectivity, &pyobj_ltype) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, 
ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_stats, stats, ArgInfo("stats", 1)) && + pyopencv_to_safe(pyobj_centroids, centroids, ArgInfo("centroids", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype)); + return Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(labels), pyopencv_from(stats), pyopencv_from(centroids)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_labels = NULL; + UMat labels; + PyObject* pyobj_stats = NULL; + UMat stats; + PyObject* pyobj_centroids = NULL; + UMat centroids; + PyObject* pyobj_connectivity = NULL; + int connectivity=8; + PyObject* pyobj_ltype = NULL; + int ltype=CV_32S; + int retval; + + const char* keywords[] = { "image", "labels", "stats", "centroids", "connectivity", "ltype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOOO:connectedComponentsWithStats", (char**)keywords, &pyobj_image, &pyobj_labels, &pyobj_stats, &pyobj_centroids, &pyobj_connectivity, &pyobj_ltype) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_stats, stats, ArgInfo("stats", 1)) && + pyopencv_to_safe(pyobj_centroids, centroids, ArgInfo("centroids", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype)); + return Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(labels), pyopencv_from(stats), pyopencv_from(centroids)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("connectedComponentsWithStats"); + + return NULL; +} + +static PyObject* pyopencv_cv_connectedComponentsWithStatsWithAlgorithm(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_labels = NULL; + Mat labels; + PyObject* pyobj_stats = NULL; + Mat stats; + PyObject* pyobj_centroids = NULL; + Mat centroids; + PyObject* pyobj_connectivity = NULL; + int connectivity=0; + PyObject* pyobj_ltype = NULL; + int ltype=0; + PyObject* pyobj_ccltype = NULL; + int ccltype=0; + int retval; + + const char* keywords[] = { "image", "connectivity", "ltype", "ccltype", "labels", "stats", "centroids", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:connectedComponentsWithStatsWithAlgorithm", (char**)keywords, &pyobj_image, &pyobj_connectivity, &pyobj_ltype, &pyobj_ccltype, &pyobj_labels, &pyobj_stats, &pyobj_centroids) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_stats, stats, ArgInfo("stats", 1)) && + pyopencv_to_safe(pyobj_centroids, centroids, ArgInfo("centroids", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) && + pyopencv_to_safe(pyobj_ccltype, ccltype, ArgInfo("ccltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype, ccltype)); + return 
Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(labels), pyopencv_from(stats), pyopencv_from(centroids)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_labels = NULL; + UMat labels; + PyObject* pyobj_stats = NULL; + UMat stats; + PyObject* pyobj_centroids = NULL; + UMat centroids; + PyObject* pyobj_connectivity = NULL; + int connectivity=0; + PyObject* pyobj_ltype = NULL; + int ltype=0; + PyObject* pyobj_ccltype = NULL; + int ccltype=0; + int retval; + + const char* keywords[] = { "image", "connectivity", "ltype", "ccltype", "labels", "stats", "centroids", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:connectedComponentsWithStatsWithAlgorithm", (char**)keywords, &pyobj_image, &pyobj_connectivity, &pyobj_ltype, &pyobj_ccltype, &pyobj_labels, &pyobj_stats, &pyobj_centroids) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_stats, stats, ArgInfo("stats", 1)) && + pyopencv_to_safe(pyobj_centroids, centroids, ArgInfo("centroids", 1)) && + pyopencv_to_safe(pyobj_connectivity, connectivity, ArgInfo("connectivity", 0)) && + pyopencv_to_safe(pyobj_ltype, ltype, ArgInfo("ltype", 0)) && + pyopencv_to_safe(pyobj_ccltype, ccltype, ArgInfo("ccltype", 0)) ) + { + ERRWRAP2(retval = cv::connectedComponentsWithStats(image, labels, stats, centroids, connectivity, ltype, ccltype)); + return Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(labels), pyopencv_from(stats), pyopencv_from(centroids)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("connectedComponentsWithStatsWithAlgorithm"); + + return NULL; +} + +static PyObject* pyopencv_cv_contourArea(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_contour = NULL; + Mat contour; + PyObject* pyobj_oriented = NULL; + bool oriented=false; + double retval; + + const char* keywords[] = { "contour", "oriented", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:contourArea", (char**)keywords, &pyobj_contour, &pyobj_oriented) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_oriented, oriented, ArgInfo("oriented", 0)) ) + { + ERRWRAP2(retval = cv::contourArea(contour, oriented)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_contour = NULL; + UMat contour; + PyObject* pyobj_oriented = NULL; + bool oriented=false; + double retval; + + const char* keywords[] = { "contour", "oriented", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:contourArea", (char**)keywords, &pyobj_contour, &pyobj_oriented) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_oriented, oriented, ArgInfo("oriented", 0)) ) + { + ERRWRAP2(retval = cv::contourArea(contour, oriented)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("contourArea"); + + return NULL; +} + +static PyObject* pyopencv_cv_convertFp16(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"O|O:convertFp16", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::convertFp16(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:convertFp16", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::convertFp16(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convertFp16"); + + return NULL; +} + +static PyObject* pyopencv_cv_convertMaps(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_map1 = NULL; + Mat map1; + PyObject* pyobj_map2 = NULL; + Mat map2; + PyObject* pyobj_dstmap1 = NULL; + Mat dstmap1; + PyObject* pyobj_dstmap2 = NULL; + Mat dstmap2; + PyObject* pyobj_dstmap1type = NULL; + int dstmap1type=0; + PyObject* pyobj_nninterpolation = NULL; + bool nninterpolation=false; + + const char* keywords[] = { "map1", "map2", "dstmap1type", "dstmap1", "dstmap2", "nninterpolation", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:convertMaps", (char**)keywords, &pyobj_map1, &pyobj_map2, &pyobj_dstmap1type, &pyobj_dstmap1, &pyobj_dstmap2, &pyobj_nninterpolation) && + pyopencv_to_safe(pyobj_map1, map1, ArgInfo("map1", 0)) && + pyopencv_to_safe(pyobj_map2, map2, ArgInfo("map2", 0)) && + pyopencv_to_safe(pyobj_dstmap1, dstmap1, ArgInfo("dstmap1", 1)) && + pyopencv_to_safe(pyobj_dstmap2, dstmap2, ArgInfo("dstmap2", 1)) && + pyopencv_to_safe(pyobj_dstmap1type, dstmap1type, ArgInfo("dstmap1type", 0)) && + pyopencv_to_safe(pyobj_nninterpolation, nninterpolation, ArgInfo("nninterpolation", 0)) ) + { + ERRWRAP2(cv::convertMaps(map1, map2, dstmap1, dstmap2, dstmap1type, nninterpolation)); + return Py_BuildValue("(NN)", pyopencv_from(dstmap1), pyopencv_from(dstmap2)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_map1 = NULL; + UMat map1; + PyObject* pyobj_map2 = NULL; + UMat map2; + PyObject* pyobj_dstmap1 = NULL; + UMat dstmap1; + PyObject* pyobj_dstmap2 = NULL; + UMat dstmap2; + PyObject* pyobj_dstmap1type = NULL; + int dstmap1type=0; + PyObject* pyobj_nninterpolation = NULL; + bool nninterpolation=false; + + const char* keywords[] = { "map1", "map2", "dstmap1type", "dstmap1", "dstmap2", "nninterpolation", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:convertMaps", (char**)keywords, &pyobj_map1, &pyobj_map2, &pyobj_dstmap1type, &pyobj_dstmap1, &pyobj_dstmap2, &pyobj_nninterpolation) && + pyopencv_to_safe(pyobj_map1, map1, ArgInfo("map1", 0)) && + pyopencv_to_safe(pyobj_map2, map2, ArgInfo("map2", 0)) && + pyopencv_to_safe(pyobj_dstmap1, dstmap1, ArgInfo("dstmap1", 1)) && + pyopencv_to_safe(pyobj_dstmap2, dstmap2, ArgInfo("dstmap2", 1)) && + pyopencv_to_safe(pyobj_dstmap1type, dstmap1type, ArgInfo("dstmap1type", 0)) && + pyopencv_to_safe(pyobj_nninterpolation, nninterpolation, ArgInfo("nninterpolation", 0)) ) + { + ERRWRAP2(cv::convertMaps(map1, map2, dstmap1, dstmap2, dstmap1type, nninterpolation)); + return Py_BuildValue("(NN)", pyopencv_from(dstmap1), pyopencv_from(dstmap2)); + } + + + 
pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convertMaps"); + + return NULL; +} + +static PyObject* pyopencv_cv_convertScaleAbs(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_alpha = NULL; + double alpha=1; + PyObject* pyobj_beta = NULL; + double beta=0; + + const char* keywords[] = { "src", "dst", "alpha", "beta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:convertScaleAbs", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_beta) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) ) + { + ERRWRAP2(cv::convertScaleAbs(src, dst, alpha, beta)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_alpha = NULL; + double alpha=1; + PyObject* pyobj_beta = NULL; + double beta=0; + + const char* keywords[] = { "src", "dst", "alpha", "beta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:convertScaleAbs", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_beta) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) ) + { + ERRWRAP2(cv::convertScaleAbs(src, dst, alpha, beta)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convertScaleAbs"); + + return NULL; +} + +static PyObject* pyopencv_cv_convexHull(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + PyObject* pyobj_hull = NULL; + Mat hull; + PyObject* pyobj_clockwise = NULL; + bool clockwise=false; + PyObject* pyobj_returnPoints = NULL; + bool returnPoints=true; + + const char* keywords[] = { "points", "hull", "clockwise", "returnPoints", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:convexHull", (char**)keywords, &pyobj_points, &pyobj_hull, &pyobj_clockwise, &pyobj_returnPoints) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_hull, hull, ArgInfo("hull", 1)) && + pyopencv_to_safe(pyobj_clockwise, clockwise, ArgInfo("clockwise", 0)) && + pyopencv_to_safe(pyobj_returnPoints, returnPoints, ArgInfo("returnPoints", 0)) ) + { + ERRWRAP2(cv::convexHull(points, hull, clockwise, returnPoints)); + return pyopencv_from(hull); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + PyObject* pyobj_hull = NULL; + UMat hull; + PyObject* pyobj_clockwise = NULL; + bool clockwise=false; + PyObject* pyobj_returnPoints = NULL; + bool returnPoints=true; + + const char* keywords[] = { "points", "hull", "clockwise", "returnPoints", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:convexHull", (char**)keywords, &pyobj_points, &pyobj_hull, &pyobj_clockwise, &pyobj_returnPoints) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_hull, hull, ArgInfo("hull", 1)) && 
+ pyopencv_to_safe(pyobj_clockwise, clockwise, ArgInfo("clockwise", 0)) && + pyopencv_to_safe(pyobj_returnPoints, returnPoints, ArgInfo("returnPoints", 0)) ) + { + ERRWRAP2(cv::convexHull(points, hull, clockwise, returnPoints)); + return pyopencv_from(hull); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convexHull"); + + return NULL; +} + +static PyObject* pyopencv_cv_convexityDefects(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_contour = NULL; + Mat contour; + PyObject* pyobj_convexhull = NULL; + Mat convexhull; + PyObject* pyobj_convexityDefects = NULL; + Mat convexityDefects; + + const char* keywords[] = { "contour", "convexhull", "convexityDefects", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:convexityDefects", (char**)keywords, &pyobj_contour, &pyobj_convexhull, &pyobj_convexityDefects) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_convexhull, convexhull, ArgInfo("convexhull", 0)) && + pyopencv_to_safe(pyobj_convexityDefects, convexityDefects, ArgInfo("convexityDefects", 1)) ) + { + ERRWRAP2(cv::convexityDefects(contour, convexhull, convexityDefects)); + return pyopencv_from(convexityDefects); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_contour = NULL; + UMat contour; + PyObject* pyobj_convexhull = NULL; + UMat convexhull; + PyObject* pyobj_convexityDefects = NULL; + UMat convexityDefects; + + const char* keywords[] = { "contour", "convexhull", "convexityDefects", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:convexityDefects", (char**)keywords, &pyobj_contour, &pyobj_convexhull, &pyobj_convexityDefects) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_convexhull, convexhull, ArgInfo("convexhull", 0)) && + pyopencv_to_safe(pyobj_convexityDefects, convexityDefects, ArgInfo("convexityDefects", 1)) ) + { + ERRWRAP2(cv::convexityDefects(contour, convexhull, convexityDefects)); + return pyopencv_from(convexityDefects); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convexityDefects"); + + return NULL; +} + +static PyObject* pyopencv_cv_copyMakeBorder(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_top = NULL; + int top=0; + PyObject* pyobj_bottom = NULL; + int bottom=0; + PyObject* pyobj_left = NULL; + int left=0; + PyObject* pyobj_right = NULL; + int right=0; + PyObject* pyobj_borderType = NULL; + int borderType=0; + PyObject* pyobj_value = NULL; + Scalar value; + + const char* keywords[] = { "src", "top", "bottom", "left", "right", "borderType", "dst", "value", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|OO:copyMakeBorder", (char**)keywords, &pyobj_src, &pyobj_top, &pyobj_bottom, &pyobj_left, &pyobj_right, &pyobj_borderType, &pyobj_dst, &pyobj_value) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_top, top, ArgInfo("top", 0)) && + pyopencv_to_safe(pyobj_bottom, bottom, ArgInfo("bottom", 0)) && + pyopencv_to_safe(pyobj_left, left, ArgInfo("left", 0)) && + pyopencv_to_safe(pyobj_right, right, ArgInfo("right", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, 
ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_value, value, ArgInfo("value", 0)) ) + { + ERRWRAP2(cv::copyMakeBorder(src, dst, top, bottom, left, right, borderType, value)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_top = NULL; + int top=0; + PyObject* pyobj_bottom = NULL; + int bottom=0; + PyObject* pyobj_left = NULL; + int left=0; + PyObject* pyobj_right = NULL; + int right=0; + PyObject* pyobj_borderType = NULL; + int borderType=0; + PyObject* pyobj_value = NULL; + Scalar value; + + const char* keywords[] = { "src", "top", "bottom", "left", "right", "borderType", "dst", "value", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|OO:copyMakeBorder", (char**)keywords, &pyobj_src, &pyobj_top, &pyobj_bottom, &pyobj_left, &pyobj_right, &pyobj_borderType, &pyobj_dst, &pyobj_value) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_top, top, ArgInfo("top", 0)) && + pyopencv_to_safe(pyobj_bottom, bottom, ArgInfo("bottom", 0)) && + pyopencv_to_safe(pyobj_left, left, ArgInfo("left", 0)) && + pyopencv_to_safe(pyobj_right, right, ArgInfo("right", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_value, value, ArgInfo("value", 0)) ) + { + ERRWRAP2(cv::copyMakeBorder(src, dst, top, bottom, left, right, borderType, value)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("copyMakeBorder"); + + return NULL; +} + +static PyObject* pyopencv_cv_copyTo(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "mask", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:copyTo", (char**)keywords, &pyobj_src, &pyobj_mask, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::copyTo(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "mask", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:copyTo", (char**)keywords, &pyobj_src, &pyobj_mask, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::copyTo(src, dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("copyTo"); + + return NULL; +} + +static PyObject* pyopencv_cv_cornerEigenValsAndVecs(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_borderType = NULL; + int 
borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "ksize", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:cornerEigenValsAndVecs", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_ksize, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerEigenValsAndVecs(src, dst, blockSize, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "ksize", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:cornerEigenValsAndVecs", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_ksize, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerEigenValsAndVecs(src, dst, blockSize, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cornerEigenValsAndVecs"); + + return NULL; +} + +static PyObject* pyopencv_cv_cornerHarris(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_k = NULL; + double k=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "ksize", "k", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:cornerHarris", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_ksize, &pyobj_k, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerHarris(src, dst, blockSize, ksize, k, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_k = NULL; + double k=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "ksize", "k", "dst", "borderType", 
NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OO:cornerHarris", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_ksize, &pyobj_k, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerHarris(src, dst, blockSize, ksize, k, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cornerHarris"); + + return NULL; +} + +static PyObject* pyopencv_cv_cornerMinEigenVal(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "dst", "ksize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:cornerMinEigenVal", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_dst, &pyobj_ksize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerMinEigenVal(src, dst, blockSize, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "blockSize", "dst", "ksize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:cornerMinEigenVal", (char**)keywords, &pyobj_src, &pyobj_blockSize, &pyobj_dst, &pyobj_ksize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::cornerMinEigenVal(src, dst, blockSize, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cornerMinEigenVal"); + + return NULL; +} + +static PyObject* pyopencv_cv_cornerSubPix(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_corners = NULL; + Mat corners; + PyObject* pyobj_winSize = NULL; + Size winSize; + PyObject* pyobj_zeroZone = NULL; + Size zeroZone; + PyObject* pyobj_criteria = NULL; + TermCriteria criteria; + + const char* keywords[] = { "image", "corners", "winSize", "zeroZone", 
"criteria", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO:cornerSubPix", (char**)keywords, &pyobj_image, &pyobj_corners, &pyobj_winSize, &pyobj_zeroZone, &pyobj_criteria) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_winSize, winSize, ArgInfo("winSize", 0)) && + pyopencv_to_safe(pyobj_zeroZone, zeroZone, ArgInfo("zeroZone", 0)) && + pyopencv_to_safe(pyobj_criteria, criteria, ArgInfo("criteria", 0)) ) + { + ERRWRAP2(cv::cornerSubPix(image, corners, winSize, zeroZone, criteria)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_corners = NULL; + UMat corners; + PyObject* pyobj_winSize = NULL; + Size winSize; + PyObject* pyobj_zeroZone = NULL; + Size zeroZone; + PyObject* pyobj_criteria = NULL; + TermCriteria criteria; + + const char* keywords[] = { "image", "corners", "winSize", "zeroZone", "criteria", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO:cornerSubPix", (char**)keywords, &pyobj_image, &pyobj_corners, &pyobj_winSize, &pyobj_zeroZone, &pyobj_criteria) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_winSize, winSize, ArgInfo("winSize", 0)) && + pyopencv_to_safe(pyobj_zeroZone, zeroZone, ArgInfo("zeroZone", 0)) && + pyopencv_to_safe(pyobj_criteria, criteria, ArgInfo("criteria", 0)) ) + { + ERRWRAP2(cv::cornerSubPix(image, corners, winSize, zeroZone, criteria)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cornerSubPix"); + + return NULL; +} + +static PyObject* pyopencv_cv_countNonZero(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + int retval; + + const char* keywords[] = { "src", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:countNonZero", (char**)keywords, &pyobj_src) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) ) + { + ERRWRAP2(retval = cv::countNonZero(src)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + int retval; + + const char* keywords[] = { "src", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:countNonZero", (char**)keywords, &pyobj_src) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) ) + { + ERRWRAP2(retval = cv::countNonZero(src)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("countNonZero"); + + return NULL; +} + +static PyObject* pyopencv_cv_createCLAHE(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_clipLimit = NULL; + double clipLimit=40.0; + PyObject* pyobj_tileGridSize = NULL; + Size tileGridSize=Size(8, 8); + Ptr retval; + + const char* keywords[] = { "clipLimit", "tileGridSize", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|OO:createCLAHE", (char**)keywords, &pyobj_clipLimit, &pyobj_tileGridSize) && + pyopencv_to_safe(pyobj_clipLimit, clipLimit, ArgInfo("clipLimit", 0)) && + pyopencv_to_safe(pyobj_tileGridSize, tileGridSize, ArgInfo("tileGridSize", 0)) ) + { + ERRWRAP2(retval = cv::createCLAHE(clipLimit, tileGridSize)); + return pyopencv_from(retval); + } + + return NULL; 
+} + +static PyObject* pyopencv_cv_createGeneralizedHoughBallard(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + Ptr<GeneralizedHoughBallard> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::createGeneralizedHoughBallard()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_createGeneralizedHoughGuil(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + Ptr<GeneralizedHoughGuil> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::createGeneralizedHoughGuil()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_createHanningWindow(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_winSize = NULL; + Size winSize; + PyObject* pyobj_type = NULL; + int type=0; + + const char* keywords[] = { "winSize", "type", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:createHanningWindow", (char**)keywords, &pyobj_winSize, &pyobj_type, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_winSize, winSize, ArgInfo("winSize", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(cv::createHanningWindow(dst, winSize, type)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_winSize = NULL; + Size winSize; + PyObject* pyobj_type = NULL; + int type=0; + + const char* keywords[] = { "winSize", "type", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:createHanningWindow", (char**)keywords, &pyobj_winSize, &pyobj_type, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_winSize, winSize, ArgInfo("winSize", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(cv::createHanningWindow(dst, winSize, type)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("createHanningWindow"); + + return NULL; +} + +static PyObject* pyopencv_cv_createLineSegmentDetector(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_refine = NULL; + int refine=LSD_REFINE_STD; + PyObject* pyobj_scale = NULL; + double scale=0.8; + PyObject* pyobj_sigma_scale = NULL; + double sigma_scale=0.6; + PyObject* pyobj_quant = NULL; + double quant=2.0; + PyObject* pyobj_ang_th = NULL; + double ang_th=22.5; + PyObject* pyobj_log_eps = NULL; + double log_eps=0; + PyObject* pyobj_density_th = NULL; + double density_th=0.7; + PyObject* pyobj_n_bins = NULL; + int n_bins=1024; + Ptr<LineSegmentDetector> retval; + + const char* keywords[] = { "refine", "scale", "sigma_scale", "quant", "ang_th", "log_eps", "density_th", "n_bins", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|OOOOOOOO:createLineSegmentDetector", (char**)keywords, &pyobj_refine, &pyobj_scale, &pyobj_sigma_scale, &pyobj_quant, &pyobj_ang_th, &pyobj_log_eps, &pyobj_density_th, &pyobj_n_bins) && + pyopencv_to_safe(pyobj_refine, refine, ArgInfo("refine", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_sigma_scale, sigma_scale, ArgInfo("sigma_scale", 0)) && + pyopencv_to_safe(pyobj_quant, quant, ArgInfo("quant", 0)) && + pyopencv_to_safe(pyobj_ang_th, ang_th, ArgInfo("ang_th", 0))
&& + pyopencv_to_safe(pyobj_log_eps, log_eps, ArgInfo("log_eps", 0)) && + pyopencv_to_safe(pyobj_density_th, density_th, ArgInfo("density_th", 0)) && + pyopencv_to_safe(pyobj_n_bins, n_bins, ArgInfo("n_bins", 0)) ) + { + ERRWRAP2(retval = cv::createLineSegmentDetector(refine, scale, sigma_scale, quant, ang_th, log_eps, density_th, n_bins)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cubeRoot(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_val = NULL; + float val=0.f; + float retval; + + const char* keywords[] = { "val", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cubeRoot", (char**)keywords, &pyobj_val) && + pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) ) + { + ERRWRAP2(retval = cv::cubeRoot(val)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cvtColor(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_code = NULL; + int code=0; + PyObject* pyobj_dstCn = NULL; + int dstCn=0; + + const char* keywords[] = { "src", "code", "dst", "dstCn", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:cvtColor", (char**)keywords, &pyobj_src, &pyobj_code, &pyobj_dst, &pyobj_dstCn) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) && + pyopencv_to_safe(pyobj_dstCn, dstCn, ArgInfo("dstCn", 0)) ) + { + ERRWRAP2(cv::cvtColor(src, dst, code, dstCn)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_code = NULL; + int code=0; + PyObject* pyobj_dstCn = NULL; + int dstCn=0; + + const char* keywords[] = { "src", "code", "dst", "dstCn", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:cvtColor", (char**)keywords, &pyobj_src, &pyobj_code, &pyobj_dst, &pyobj_dstCn) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) && + pyopencv_to_safe(pyobj_dstCn, dstCn, ArgInfo("dstCn", 0)) ) + { + ERRWRAP2(cv::cvtColor(src, dst, code, dstCn)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cvtColor"); + + return NULL; +} + +static PyObject* pyopencv_cv_cvtColorTwoPlane(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_code = NULL; + int code=0; + + const char* keywords[] = { "src1", "src2", "code", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:cvtColorTwoPlane", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_code, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) ) + { + ERRWRAP2(cv::cvtColorTwoPlane(src1, src2, dst, code)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + 
+ { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_code = NULL; + int code=0; + + const char* keywords[] = { "src1", "src2", "code", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:cvtColorTwoPlane", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_code, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) ) + { + ERRWRAP2(cv::cvtColorTwoPlane(src1, src2, dst, code)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("cvtColorTwoPlane"); + + return NULL; +} + +static PyObject* pyopencv_cv_dct(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:dct", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::dct(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:dct", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::dct(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dct"); + + return NULL; +} + +static PyObject* pyopencv_cv_demosaicing(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_code = NULL; + int code=0; + PyObject* pyobj_dstCn = NULL; + int dstCn=0; + + const char* keywords[] = { "src", "code", "dst", "dstCn", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:demosaicing", (char**)keywords, &pyobj_src, &pyobj_code, &pyobj_dst, &pyobj_dstCn) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) && + pyopencv_to_safe(pyobj_dstCn, dstCn, ArgInfo("dstCn", 0)) ) + { + ERRWRAP2(cv::demosaicing(src, dst, code, dstCn)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_code = NULL; + int code=0; + PyObject* pyobj_dstCn = NULL; + int dstCn=0; + + const char* keywords[] = { "src", "code", "dst", "dstCn", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:demosaicing", (char**)keywords, 
&pyobj_src, &pyobj_code, &pyobj_dst, &pyobj_dstCn) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_code, code, ArgInfo("code", 0)) && + pyopencv_to_safe(pyobj_dstCn, dstCn, ArgInfo("dstCn", 0)) ) + { + ERRWRAP2(cv::demosaicing(src, dst, code, dstCn)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("demosaicing"); + + return NULL; +} + +static PyObject* pyopencv_cv_determinant(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_mtx = NULL; + Mat mtx; + double retval; + + const char* keywords[] = { "mtx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:determinant", (char**)keywords, &pyobj_mtx) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 0)) ) + { + ERRWRAP2(retval = cv::determinant(mtx)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_mtx = NULL; + UMat mtx; + double retval; + + const char* keywords[] = { "mtx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:determinant", (char**)keywords, &pyobj_mtx) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 0)) ) + { + ERRWRAP2(retval = cv::determinant(mtx)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("determinant"); + + return NULL; +} + +static PyObject* pyopencv_cv_dft(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_nonzeroRows = NULL; + int nonzeroRows=0; + + const char* keywords[] = { "src", "dst", "flags", "nonzeroRows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:dft", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags, &pyobj_nonzeroRows) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_nonzeroRows, nonzeroRows, ArgInfo("nonzeroRows", 0)) ) + { + ERRWRAP2(cv::dft(src, dst, flags, nonzeroRows)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_nonzeroRows = NULL; + int nonzeroRows=0; + + const char* keywords[] = { "src", "dst", "flags", "nonzeroRows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:dft", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags, &pyobj_nonzeroRows) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_nonzeroRows, nonzeroRows, ArgInfo("nonzeroRows", 0)) ) + { + ERRWRAP2(cv::dft(src, dst, flags, nonzeroRows)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dft"); + + return NULL; +} + +static PyObject* pyopencv_cv_dilate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* 
pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_kernel = NULL; + Mat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:dilate", (char**)keywords, &pyobj_src, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::dilate(src, dst, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_kernel = NULL; + UMat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:dilate", (char**)keywords, &pyobj_src, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::dilate(src, dst, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dilate"); + + return NULL; +} + +static PyObject* pyopencv_cv_distanceTransform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_distanceType = NULL; + int distanceType=0; + PyObject* pyobj_maskSize = NULL; + int maskSize=0; + PyObject* pyobj_dstType = NULL; + int dstType=CV_32F; + + const char* keywords[] = { "src", "distanceType", "maskSize", "dst", "dstType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:distanceTransform", (char**)keywords, &pyobj_src, &pyobj_distanceType, &pyobj_maskSize, &pyobj_dst, &pyobj_dstType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + 
pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_distanceType, distanceType, ArgInfo("distanceType", 0)) && + pyopencv_to_safe(pyobj_maskSize, maskSize, ArgInfo("maskSize", 0)) && + pyopencv_to_safe(pyobj_dstType, dstType, ArgInfo("dstType", 0)) ) + { + ERRWRAP2(cv::distanceTransform(src, dst, distanceType, maskSize, dstType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_distanceType = NULL; + int distanceType=0; + PyObject* pyobj_maskSize = NULL; + int maskSize=0; + PyObject* pyobj_dstType = NULL; + int dstType=CV_32F; + + const char* keywords[] = { "src", "distanceType", "maskSize", "dst", "dstType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:distanceTransform", (char**)keywords, &pyobj_src, &pyobj_distanceType, &pyobj_maskSize, &pyobj_dst, &pyobj_dstType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_distanceType, distanceType, ArgInfo("distanceType", 0)) && + pyopencv_to_safe(pyobj_maskSize, maskSize, ArgInfo("maskSize", 0)) && + pyopencv_to_safe(pyobj_dstType, dstType, ArgInfo("dstType", 0)) ) + { + ERRWRAP2(cv::distanceTransform(src, dst, distanceType, maskSize, dstType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("distanceTransform"); + + return NULL; +} + +static PyObject* pyopencv_cv_distanceTransformWithLabels(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_labels = NULL; + Mat labels; + PyObject* pyobj_distanceType = NULL; + int distanceType=0; + PyObject* pyobj_maskSize = NULL; + int maskSize=0; + PyObject* pyobj_labelType = NULL; + int labelType=DIST_LABEL_CCOMP; + + const char* keywords[] = { "src", "distanceType", "maskSize", "dst", "labels", "labelType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:distanceTransformWithLabels", (char**)keywords, &pyobj_src, &pyobj_distanceType, &pyobj_maskSize, &pyobj_dst, &pyobj_labels, &pyobj_labelType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_distanceType, distanceType, ArgInfo("distanceType", 0)) && + pyopencv_to_safe(pyobj_maskSize, maskSize, ArgInfo("maskSize", 0)) && + pyopencv_to_safe(pyobj_labelType, labelType, ArgInfo("labelType", 0)) ) + { + ERRWRAP2(cv::distanceTransform(src, dst, labels, distanceType, maskSize, labelType)); + return Py_BuildValue("(NN)", pyopencv_from(dst), pyopencv_from(labels)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_labels = NULL; + UMat labels; + PyObject* pyobj_distanceType = NULL; + int distanceType=0; + PyObject* pyobj_maskSize = NULL; + int maskSize=0; + PyObject* pyobj_labelType = NULL; + int labelType=DIST_LABEL_CCOMP; + + const char* keywords[] = { "src", "distanceType", "maskSize", "dst", "labels", "labelType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:distanceTransformWithLabels", (char**)keywords, &pyobj_src, &pyobj_distanceType, 
&pyobj_maskSize, &pyobj_dst, &pyobj_labels, &pyobj_labelType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_labels, labels, ArgInfo("labels", 1)) && + pyopencv_to_safe(pyobj_distanceType, distanceType, ArgInfo("distanceType", 0)) && + pyopencv_to_safe(pyobj_maskSize, maskSize, ArgInfo("maskSize", 0)) && + pyopencv_to_safe(pyobj_labelType, labelType, ArgInfo("labelType", 0)) ) + { + ERRWRAP2(cv::distanceTransform(src, dst, labels, distanceType, maskSize, labelType)); + return Py_BuildValue("(NN)", pyopencv_from(dst), pyopencv_from(labels)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("distanceTransformWithLabels"); + + return NULL; +} + +static PyObject* pyopencv_cv_divSpectrums(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_a = NULL; + Mat a; + PyObject* pyobj_b = NULL; + Mat b; + PyObject* pyobj_c = NULL; + Mat c; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_conjB = NULL; + bool conjB=false; + + const char* keywords[] = { "a", "b", "flags", "c", "conjB", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:divSpectrums", (char**)keywords, &pyobj_a, &pyobj_b, &pyobj_flags, &pyobj_c, &pyobj_conjB) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_b, b, ArgInfo("b", 0)) && + pyopencv_to_safe(pyobj_c, c, ArgInfo("c", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_conjB, conjB, ArgInfo("conjB", 0)) ) + { + ERRWRAP2(cv::divSpectrums(a, b, c, flags, conjB)); + return pyopencv_from(c); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_a = NULL; + UMat a; + PyObject* pyobj_b = NULL; + UMat b; + PyObject* pyobj_c = NULL; + UMat c; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_conjB = NULL; + bool conjB=false; + + const char* keywords[] = { "a", "b", "flags", "c", "conjB", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:divSpectrums", (char**)keywords, &pyobj_a, &pyobj_b, &pyobj_flags, &pyobj_c, &pyobj_conjB) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_b, b, ArgInfo("b", 0)) && + pyopencv_to_safe(pyobj_c, c, ArgInfo("c", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_conjB, conjB, ArgInfo("conjB", 0)) ) + { + ERRWRAP2(cv::divSpectrums(a, b, c, flags, conjB)); + return pyopencv_from(c); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("divSpectrums"); + + return NULL; +} + +static PyObject* pyopencv_cv_divide(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:divide", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_scale, scale, 
ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::divide(src1, src2, dst, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:divide", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::divide(src1, src2, dst, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_scale = NULL; + double scale=0; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "scale", "src2", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:divide", (char**)keywords, &pyobj_scale, &pyobj_src2, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::divide(scale, src2, dst, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_scale = NULL; + double scale=0; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "scale", "src2", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:divide", (char**)keywords, &pyobj_scale, &pyobj_src2, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::divide(scale, src2, dst, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("divide"); + + return NULL; +} + +static PyObject* pyopencv_cv_drawContours(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_contours = NULL; + vector_Mat contours; + PyObject* pyobj_contourIdx = NULL; + int contourIdx=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_hierarchy = NULL; + Mat hierarchy; + PyObject* pyobj_maxLevel = NULL; + int maxLevel=INT_MAX; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "image", "contours", "contourIdx", "color", "thickness", "lineType", "hierarchy", "maxLevel", "offset", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:drawContours", (char**)keywords, &pyobj_image, &pyobj_contours, &pyobj_contourIdx, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_hierarchy, &pyobj_maxLevel, &pyobj_offset) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_contours, contours, ArgInfo("contours", 0)) && + pyopencv_to_safe(pyobj_contourIdx, contourIdx, ArgInfo("contourIdx", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_hierarchy, hierarchy, ArgInfo("hierarchy", 0)) && + pyopencv_to_safe(pyobj_maxLevel, maxLevel, ArgInfo("maxLevel", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { + ERRWRAP2(cv::drawContours(image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset)); + return pyopencv_from(image); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_contours = NULL; + vector_UMat contours; + PyObject* pyobj_contourIdx = NULL; + int contourIdx=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_hierarchy = NULL; + UMat hierarchy; + PyObject* pyobj_maxLevel = NULL; + int maxLevel=INT_MAX; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "image", "contours", "contourIdx", "color", "thickness", "lineType", "hierarchy", "maxLevel", "offset", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:drawContours", (char**)keywords, &pyobj_image, &pyobj_contours, &pyobj_contourIdx, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_hierarchy, &pyobj_maxLevel, &pyobj_offset) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_contours, contours, ArgInfo("contours", 0)) && + pyopencv_to_safe(pyobj_contourIdx, contourIdx, ArgInfo("contourIdx", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_hierarchy, hierarchy, ArgInfo("hierarchy", 0)) && + pyopencv_to_safe(pyobj_maxLevel, maxLevel, ArgInfo("maxLevel", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { + ERRWRAP2(cv::drawContours(image, contours, contourIdx, color, thickness, lineType, hierarchy, maxLevel, offset)); + return pyopencv_from(image); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("drawContours"); + + return NULL; +} + +static PyObject* pyopencv_cv_drawMarker(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_position = NULL; + Point position; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_markerType = NULL; + int markerType=MARKER_CROSS; + PyObject* pyobj_markerSize = NULL; + int markerSize=20; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_line_type = NULL; + int line_type=8; + + const char* keywords[] = { "img", "position", "color", "markerType", "markerSize", "thickness", "line_type", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:drawMarker", (char**)keywords, &pyobj_img, &pyobj_position, &pyobj_color, &pyobj_markerType, &pyobj_markerSize, &pyobj_thickness, &pyobj_line_type) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_position, position, ArgInfo("position", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_markerType, markerType, ArgInfo("markerType", 0)) && + pyopencv_to_safe(pyobj_markerSize, markerSize, ArgInfo("markerSize", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_line_type, line_type, ArgInfo("line_type", 0)) ) + { + ERRWRAP2(cv::drawMarker(img, position, color, markerType, markerSize, thickness, line_type)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_position = NULL; + Point position; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_markerType = NULL; + int markerType=MARKER_CROSS; + PyObject* pyobj_markerSize = NULL; + int markerSize=20; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_line_type = NULL; + int line_type=8; + + const char* keywords[] = { "img", "position", "color", "markerType", "markerSize", "thickness", "line_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:drawMarker", (char**)keywords, &pyobj_img, &pyobj_position, &pyobj_color, &pyobj_markerType, &pyobj_markerSize, &pyobj_thickness, &pyobj_line_type) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_position, position, ArgInfo("position", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_markerType, markerType, ArgInfo("markerType", 0)) && + pyopencv_to_safe(pyobj_markerSize, markerSize, ArgInfo("markerSize", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_line_type, line_type, ArgInfo("line_type", 0)) ) + { + ERRWRAP2(cv::drawMarker(img, position, color, markerType, markerSize, thickness, line_type)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("drawMarker"); + + return NULL; +} + +static PyObject* pyopencv_cv_eigen(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_eigenvalues = NULL; + Mat eigenvalues; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + bool retval; + + const char* keywords[] = { "src", "eigenvalues", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:eigen", (char**)keywords, &pyobj_src, &pyobj_eigenvalues, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) ) + { + ERRWRAP2(retval = cv::eigen(src, eigenvalues, eigenvectors)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(eigenvalues), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_eigenvalues = NULL; + UMat eigenvalues; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + bool retval; + + const char* keywords[] = { 
"src", "eigenvalues", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:eigen", (char**)keywords, &pyobj_src, &pyobj_eigenvalues, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) ) + { + ERRWRAP2(retval = cv::eigen(src, eigenvalues, eigenvectors)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(eigenvalues), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("eigen"); + + return NULL; +} + +static PyObject* pyopencv_cv_eigenNonSymmetric(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_eigenvalues = NULL; + Mat eigenvalues; + PyObject* pyobj_eigenvectors = NULL; + Mat eigenvectors; + + const char* keywords[] = { "src", "eigenvalues", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:eigenNonSymmetric", (char**)keywords, &pyobj_src, &pyobj_eigenvalues, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) ) + { + ERRWRAP2(cv::eigenNonSymmetric(src, eigenvalues, eigenvectors)); + return Py_BuildValue("(NN)", pyopencv_from(eigenvalues), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_eigenvalues = NULL; + UMat eigenvalues; + PyObject* pyobj_eigenvectors = NULL; + UMat eigenvectors; + + const char* keywords[] = { "src", "eigenvalues", "eigenvectors", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:eigenNonSymmetric", (char**)keywords, &pyobj_src, &pyobj_eigenvalues, &pyobj_eigenvectors) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_eigenvalues, eigenvalues, ArgInfo("eigenvalues", 1)) && + pyopencv_to_safe(pyobj_eigenvectors, eigenvectors, ArgInfo("eigenvectors", 1)) ) + { + ERRWRAP2(cv::eigenNonSymmetric(src, eigenvalues, eigenvectors)); + return Py_BuildValue("(NN)", pyopencv_from(eigenvalues), pyopencv_from(eigenvectors)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("eigenNonSymmetric"); + + return NULL; +} + +static PyObject* pyopencv_cv_ellipse(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_center = NULL; + Point center; + PyObject* pyobj_axes = NULL; + Size axes; + PyObject* pyobj_angle = NULL; + double angle=0; + PyObject* pyobj_startAngle = NULL; + double startAngle=0; + PyObject* pyobj_endAngle = NULL; + double endAngle=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "center", "axes", "angle", "startAngle", "endAngle", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOO|OOO:ellipse", (char**)keywords, &pyobj_img, &pyobj_center, &pyobj_axes, &pyobj_angle, &pyobj_startAngle, 
&pyobj_endAngle, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_axes, axes, ArgInfo("axes", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_startAngle, startAngle, ArgInfo("startAngle", 0)) && + pyopencv_to_safe(pyobj_endAngle, endAngle, ArgInfo("endAngle", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_center = NULL; + Point center; + PyObject* pyobj_axes = NULL; + Size axes; + PyObject* pyobj_angle = NULL; + double angle=0; + PyObject* pyobj_startAngle = NULL; + double startAngle=0; + PyObject* pyobj_endAngle = NULL; + double endAngle=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "center", "axes", "angle", "startAngle", "endAngle", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOO|OOO:ellipse", (char**)keywords, &pyobj_img, &pyobj_center, &pyobj_axes, &pyobj_angle, &pyobj_startAngle, &pyobj_endAngle, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_axes, axes, ArgInfo("axes", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_startAngle, startAngle, ArgInfo("startAngle", 0)) && + pyopencv_to_safe(pyobj_endAngle, endAngle, ArgInfo("endAngle", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::ellipse(img, center, axes, angle, startAngle, endAngle, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_box = NULL; + RotatedRect box; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + + const char* keywords[] = { "img", "box", "color", "thickness", "lineType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:ellipse", (char**)keywords, &pyobj_img, &pyobj_box, &pyobj_color, &pyobj_thickness, &pyobj_lineType) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_box, box, ArgInfo("box", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) ) + { + 
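+                /* ERRWRAP2 executes the native call inside try/catch; in
+                 * OpenCV's Python binding glue it re-raises a thrown
+                 * cv::Exception as cv2.error rather than letting a C++
+                 * exception cross the CPython API boundary. */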
ERRWRAP2(cv::ellipse(img, box, color, thickness, lineType)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_box = NULL; + RotatedRect box; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + + const char* keywords[] = { "img", "box", "color", "thickness", "lineType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:ellipse", (char**)keywords, &pyobj_img, &pyobj_box, &pyobj_color, &pyobj_thickness, &pyobj_lineType) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_box, box, ArgInfo("box", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) ) + { + ERRWRAP2(cv::ellipse(img, box, color, thickness, lineType)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("ellipse"); + + return NULL; +} + +static PyObject* pyopencv_cv_ellipse2Poly(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_center = NULL; + Point center; + PyObject* pyobj_axes = NULL; + Size axes; + PyObject* pyobj_angle = NULL; + int angle=0; + PyObject* pyobj_arcStart = NULL; + int arcStart=0; + PyObject* pyobj_arcEnd = NULL; + int arcEnd=0; + PyObject* pyobj_delta = NULL; + int delta=0; + vector_Point pts; + + const char* keywords[] = { "center", "axes", "angle", "arcStart", "arcEnd", "delta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO:ellipse2Poly", (char**)keywords, &pyobj_center, &pyobj_axes, &pyobj_angle, &pyobj_arcStart, &pyobj_arcEnd, &pyobj_delta) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_axes, axes, ArgInfo("axes", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_arcStart, arcStart, ArgInfo("arcStart", 0)) && + pyopencv_to_safe(pyobj_arcEnd, arcEnd, ArgInfo("arcEnd", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) ) + { + ERRWRAP2(cv::ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta, pts)); + return pyopencv_from(pts); + } + + return NULL; +} + +static PyObject* pyopencv_cv_equalizeHist(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:equalizeHist", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::equalizeHist(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:equalizeHist", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::equalizeHist(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); 
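+        /* Conversion failures for this overload are recorded by
+         * pyPopulateArgumentConversionErrors(); if no overload matched,
+         * pyRaiseCVOverloadException() below reports them all at once. */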
+ } + pyRaiseCVOverloadException("equalizeHist"); + + return NULL; +} + +static PyObject* pyopencv_cv_erode(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_kernel = NULL; + Mat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:erode", (char**)keywords, &pyobj_src, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::erode(src, dst, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_kernel = NULL; + UMat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:erode", (char**)keywords, &pyobj_src, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::erode(src, dst, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("erode"); + + return NULL; +} + +static PyObject* pyopencv_cv_exp(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:exp", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, 
ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::exp(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:exp", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::exp(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("exp"); + + return NULL; +} + +static PyObject* pyopencv_cv_extractChannel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_coi = NULL; + int coi=0; + + const char* keywords[] = { "src", "coi", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:extractChannel", (char**)keywords, &pyobj_src, &pyobj_coi, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_coi, coi, ArgInfo("coi", 0)) ) + { + ERRWRAP2(cv::extractChannel(src, dst, coi)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_coi = NULL; + int coi=0; + + const char* keywords[] = { "src", "coi", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:extractChannel", (char**)keywords, &pyobj_src, &pyobj_coi, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_coi, coi, ArgInfo("coi", 0)) ) + { + ERRWRAP2(cv::extractChannel(src, dst, coi)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("extractChannel"); + + return NULL; +} + +static PyObject* pyopencv_cv_fastAtan2(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_y = NULL; + float y=0.f; + PyObject* pyobj_x = NULL; + float x=0.f; + float retval; + + const char* keywords[] = { "y", "x", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:fastAtan2", (char**)keywords, &pyobj_y, &pyobj_x) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) ) + { + ERRWRAP2(retval = cv::fastAtan2(y, x)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_fillConvexPoly(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_points = NULL; + Mat points; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "points", "color", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:fillConvexPoly", (char**)keywords, &pyobj_img, &pyobj_points, &pyobj_color, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_color, color, 
ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::fillConvexPoly(img, points, color, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_points = NULL; + UMat points; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "points", "color", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:fillConvexPoly", (char**)keywords, &pyobj_img, &pyobj_points, &pyobj_color, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::fillConvexPoly(img, points, color, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fillConvexPoly"); + + return NULL; +} + +static PyObject* pyopencv_cv_fillPoly(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_pts = NULL; + vector_Mat pts; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "img", "pts", "color", "lineType", "shift", "offset", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:fillPoly", (char**)keywords, &pyobj_img, &pyobj_pts, &pyobj_color, &pyobj_lineType, &pyobj_shift, &pyobj_offset) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pts, pts, ArgInfo("pts", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { + ERRWRAP2(cv::fillPoly(img, pts, color, lineType, shift, offset)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_pts = NULL; + vector_UMat pts; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "img", "pts", "color", "lineType", "shift", "offset", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:fillPoly", (char**)keywords, &pyobj_img, &pyobj_pts, &pyobj_color, &pyobj_lineType, &pyobj_shift, &pyobj_offset) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pts, pts, ArgInfo("pts", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { 
+ ERRWRAP2(cv::fillPoly(img, pts, color, lineType, shift, offset)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fillPoly"); + + return NULL; +} + +static PyObject* pyopencv_cv_filter2D(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_kernel = NULL; + Mat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "kernel", "dst", "anchor", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:filter2D", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::filter2D(src, dst, ddepth, kernel, anchor, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_kernel = NULL; + UMat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "kernel", "dst", "anchor", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:filter2D", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::filter2D(src, dst, ddepth, kernel, anchor, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("filter2D"); + + return NULL; +} + +static PyObject* pyopencv_cv_findContours(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_contours = NULL; + vector_Mat contours; + PyObject* pyobj_hierarchy = NULL; + Mat hierarchy; + PyObject* pyobj_mode = NULL; + int mode=0; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "image", "mode", "method", "contours", "hierarchy", 
"offset", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:findContours", (char**)keywords, &pyobj_image, &pyobj_mode, &pyobj_method, &pyobj_contours, &pyobj_hierarchy, &pyobj_offset) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_contours, contours, ArgInfo("contours", 1)) && + pyopencv_to_safe(pyobj_hierarchy, hierarchy, ArgInfo("hierarchy", 1)) && + pyopencv_to_safe(pyobj_mode, mode, ArgInfo("mode", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { + ERRWRAP2(cv::findContours(image, contours, hierarchy, mode, method, offset)); + return Py_BuildValue("(NN)", pyopencv_from(contours), pyopencv_from(hierarchy)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_contours = NULL; + vector_UMat contours; + PyObject* pyobj_hierarchy = NULL; + UMat hierarchy; + PyObject* pyobj_mode = NULL; + int mode=0; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_offset = NULL; + Point offset; + + const char* keywords[] = { "image", "mode", "method", "contours", "hierarchy", "offset", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:findContours", (char**)keywords, &pyobj_image, &pyobj_mode, &pyobj_method, &pyobj_contours, &pyobj_hierarchy, &pyobj_offset) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_contours, contours, ArgInfo("contours", 1)) && + pyopencv_to_safe(pyobj_hierarchy, hierarchy, ArgInfo("hierarchy", 1)) && + pyopencv_to_safe(pyobj_mode, mode, ArgInfo("mode", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_offset, offset, ArgInfo("offset", 0)) ) + { + ERRWRAP2(cv::findContours(image, contours, hierarchy, mode, method, offset)); + return Py_BuildValue("(NN)", pyopencv_from(contours), pyopencv_from(hierarchy)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("findContours"); + + return NULL; +} + +static PyObject* pyopencv_cv_findNonZero(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_idx = NULL; + Mat idx; + + const char* keywords[] = { "src", "idx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:findNonZero", (char**)keywords, &pyobj_src, &pyobj_idx) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_idx, idx, ArgInfo("idx", 1)) ) + { + ERRWRAP2(cv::findNonZero(src, idx)); + return pyopencv_from(idx); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_idx = NULL; + UMat idx; + + const char* keywords[] = { "src", "idx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:findNonZero", (char**)keywords, &pyobj_src, &pyobj_idx) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_idx, idx, ArgInfo("idx", 1)) ) + { + ERRWRAP2(cv::findNonZero(src, idx)); + return pyopencv_from(idx); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("findNonZero"); + + return NULL; +} + +static PyObject* pyopencv_cv_fitEllipse(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + RotatedRect retval; + + 
const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipse", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipse(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipse", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipse(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fitEllipse"); + + return NULL; +} + +static PyObject* pyopencv_cv_fitEllipseAMS(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipseAMS", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipseAMS(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipseAMS", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipseAMS(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fitEllipseAMS"); + + return NULL; +} + +static PyObject* pyopencv_cv_fitEllipseDirect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipseDirect", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipseDirect(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:fitEllipseDirect", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::fitEllipseDirect(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fitEllipseDirect"); + + return NULL; +} + +static PyObject* pyopencv_cv_fitLine(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + PyObject* pyobj_line = NULL; + Mat line; + PyObject* pyobj_distType = NULL; + int distType=0; + PyObject* pyobj_param = NULL; + double param=0; + PyObject* pyobj_reps = NULL; + double reps=0; + PyObject* pyobj_aeps = NULL; + double aeps=0; + + const char* keywords[] = { "points", "distType", "param", "reps", 
"aeps", "line", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:fitLine", (char**)keywords, &pyobj_points, &pyobj_distType, &pyobj_param, &pyobj_reps, &pyobj_aeps, &pyobj_line) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_line, line, ArgInfo("line", 1)) && + pyopencv_to_safe(pyobj_distType, distType, ArgInfo("distType", 0)) && + pyopencv_to_safe(pyobj_param, param, ArgInfo("param", 0)) && + pyopencv_to_safe(pyobj_reps, reps, ArgInfo("reps", 0)) && + pyopencv_to_safe(pyobj_aeps, aeps, ArgInfo("aeps", 0)) ) + { + ERRWRAP2(cv::fitLine(points, line, distType, param, reps, aeps)); + return pyopencv_from(line); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + PyObject* pyobj_line = NULL; + UMat line; + PyObject* pyobj_distType = NULL; + int distType=0; + PyObject* pyobj_param = NULL; + double param=0; + PyObject* pyobj_reps = NULL; + double reps=0; + PyObject* pyobj_aeps = NULL; + double aeps=0; + + const char* keywords[] = { "points", "distType", "param", "reps", "aeps", "line", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:fitLine", (char**)keywords, &pyobj_points, &pyobj_distType, &pyobj_param, &pyobj_reps, &pyobj_aeps, &pyobj_line) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_line, line, ArgInfo("line", 1)) && + pyopencv_to_safe(pyobj_distType, distType, ArgInfo("distType", 0)) && + pyopencv_to_safe(pyobj_param, param, ArgInfo("param", 0)) && + pyopencv_to_safe(pyobj_reps, reps, ArgInfo("reps", 0)) && + pyopencv_to_safe(pyobj_aeps, aeps, ArgInfo("aeps", 0)) ) + { + ERRWRAP2(cv::fitLine(points, line, distType, param, reps, aeps)); + return pyopencv_from(line); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("fitLine"); + + return NULL; +} + +static PyObject* pyopencv_cv_flip(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flipCode = NULL; + int flipCode=0; + + const char* keywords[] = { "src", "flipCode", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:flip", (char**)keywords, &pyobj_src, &pyobj_flipCode, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flipCode, flipCode, ArgInfo("flipCode", 0)) ) + { + ERRWRAP2(cv::flip(src, dst, flipCode)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flipCode = NULL; + int flipCode=0; + + const char* keywords[] = { "src", "flipCode", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:flip", (char**)keywords, &pyobj_src, &pyobj_flipCode, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flipCode, flipCode, ArgInfo("flipCode", 0)) ) + { + ERRWRAP2(cv::flip(src, dst, flipCode)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("flip"); + + return NULL; +} + +static PyObject* pyopencv_cv_floodFill(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { 
+ PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_seedPoint = NULL; + Point seedPoint; + PyObject* pyobj_newVal = NULL; + Scalar newVal; + Rect rect; + PyObject* pyobj_loDiff = NULL; + Scalar loDiff; + PyObject* pyobj_upDiff = NULL; + Scalar upDiff; + PyObject* pyobj_flags = NULL; + int flags=4; + int retval; + + const char* keywords[] = { "image", "mask", "seedPoint", "newVal", "loDiff", "upDiff", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:floodFill", (char**)keywords, &pyobj_image, &pyobj_mask, &pyobj_seedPoint, &pyobj_newVal, &pyobj_loDiff, &pyobj_upDiff, &pyobj_flags) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 1)) && + pyopencv_to_safe(pyobj_seedPoint, seedPoint, ArgInfo("seedPoint", 0)) && + pyopencv_to_safe(pyobj_newVal, newVal, ArgInfo("newVal", 0)) && + pyopencv_to_safe(pyobj_loDiff, loDiff, ArgInfo("loDiff", 0)) && + pyopencv_to_safe(pyobj_upDiff, upDiff, ArgInfo("upDiff", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::floodFill(image, mask, seedPoint, newVal, &rect, loDiff, upDiff, flags)); + return Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(image), pyopencv_from(mask), pyopencv_from(rect)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_seedPoint = NULL; + Point seedPoint; + PyObject* pyobj_newVal = NULL; + Scalar newVal; + Rect rect; + PyObject* pyobj_loDiff = NULL; + Scalar loDiff; + PyObject* pyobj_upDiff = NULL; + Scalar upDiff; + PyObject* pyobj_flags = NULL; + int flags=4; + int retval; + + const char* keywords[] = { "image", "mask", "seedPoint", "newVal", "loDiff", "upDiff", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:floodFill", (char**)keywords, &pyobj_image, &pyobj_mask, &pyobj_seedPoint, &pyobj_newVal, &pyobj_loDiff, &pyobj_upDiff, &pyobj_flags) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 1)) && + pyopencv_to_safe(pyobj_seedPoint, seedPoint, ArgInfo("seedPoint", 0)) && + pyopencv_to_safe(pyobj_newVal, newVal, ArgInfo("newVal", 0)) && + pyopencv_to_safe(pyobj_loDiff, loDiff, ArgInfo("loDiff", 0)) && + pyopencv_to_safe(pyobj_upDiff, upDiff, ArgInfo("upDiff", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::floodFill(image, mask, seedPoint, newVal, &rect, loDiff, upDiff, flags)); + return Py_BuildValue("(NNNN)", pyopencv_from(retval), pyopencv_from(image), pyopencv_from(mask), pyopencv_from(rect)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("floodFill"); + + return NULL; +} + +static PyObject* pyopencv_cv_gemm(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src3 = NULL; + Mat src3; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src1", "src2", "alpha", "src3", "beta", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:gemm", (char**)keywords, &pyobj_src1, 
&pyobj_src2, &pyobj_alpha, &pyobj_src3, &pyobj_beta, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src3, src3, ArgInfo("src3", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::gemm(src1, src2, alpha, src3, beta, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src3 = NULL; + UMat src3; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src1", "src2", "alpha", "src3", "beta", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:gemm", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_alpha, &pyobj_src3, &pyobj_beta, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src3, src3, ArgInfo("src3", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::gemm(src1, src2, alpha, src3, beta, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("gemm"); + + return NULL; +} + +static PyObject* pyopencv_cv_getAffineTransform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + Mat retval; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:getAffineTransform", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 0)) ) + { + ERRWRAP2(retval = cv::getAffineTransform(src, dst)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + Mat retval; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:getAffineTransform", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 0)) ) + { + ERRWRAP2(retval = cv::getAffineTransform(src, dst)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getAffineTransform"); + + return NULL; +} + +static PyObject* pyopencv_cv_getBuildInformation(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getBuildInformation()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* 
pyopencv_cv_getCPUFeaturesLine(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + std::string retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getCPUFeaturesLine()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getCPUTickCount(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int64 retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getCPUTickCount()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getDerivKernels(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_kx = NULL; + Mat kx; + PyObject* pyobj_ky = NULL; + Mat ky; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_normalize = NULL; + bool normalize=false; + PyObject* pyobj_ktype = NULL; + int ktype=CV_32F; + + const char* keywords[] = { "dx", "dy", "ksize", "kx", "ky", "normalize", "ktype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:getDerivKernels", (char**)keywords, &pyobj_dx, &pyobj_dy, &pyobj_ksize, &pyobj_kx, &pyobj_ky, &pyobj_normalize, &pyobj_ktype) && + pyopencv_to_safe(pyobj_kx, kx, ArgInfo("kx", 1)) && + pyopencv_to_safe(pyobj_ky, ky, ArgInfo("ky", 1)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_ktype, ktype, ArgInfo("ktype", 0)) ) + { + ERRWRAP2(cv::getDerivKernels(kx, ky, dx, dy, ksize, normalize, ktype)); + return Py_BuildValue("(NN)", pyopencv_from(kx), pyopencv_from(ky)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_kx = NULL; + UMat kx; + PyObject* pyobj_ky = NULL; + UMat ky; + PyObject* pyobj_dx = NULL; + int dx=0; + PyObject* pyobj_dy = NULL; + int dy=0; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_normalize = NULL; + bool normalize=false; + PyObject* pyobj_ktype = NULL; + int ktype=CV_32F; + + const char* keywords[] = { "dx", "dy", "ksize", "kx", "ky", "normalize", "ktype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:getDerivKernels", (char**)keywords, &pyobj_dx, &pyobj_dy, &pyobj_ksize, &pyobj_kx, &pyobj_ky, &pyobj_normalize, &pyobj_ktype) && + pyopencv_to_safe(pyobj_kx, kx, ArgInfo("kx", 1)) && + pyopencv_to_safe(pyobj_ky, ky, ArgInfo("ky", 1)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_ktype, ktype, ArgInfo("ktype", 0)) ) + { + ERRWRAP2(cv::getDerivKernels(kx, ky, dx, dy, ksize, normalize, ktype)); + return Py_BuildValue("(NN)", pyopencv_from(kx), pyopencv_from(ky)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getDerivKernels"); + + return NULL; +} + +static PyObject* pyopencv_cv_getFontScaleFromHeight(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_fontFace = NULL; + int fontFace=0; + PyObject* pyobj_pixelHeight = NULL; + int pixelHeight=0; + PyObject* 
pyobj_thickness = NULL; + int thickness=1; + double retval; + + const char* keywords[] = { "fontFace", "pixelHeight", "thickness", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:getFontScaleFromHeight", (char**)keywords, &pyobj_fontFace, &pyobj_pixelHeight, &pyobj_thickness) && + pyopencv_to_safe(pyobj_fontFace, fontFace, ArgInfo("fontFace", 0)) && + pyopencv_to_safe(pyobj_pixelHeight, pixelHeight, ArgInfo("pixelHeight", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) ) + { + ERRWRAP2(retval = cv::getFontScaleFromHeight(fontFace, pixelHeight, thickness)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getGaborKernel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_sigma = NULL; + double sigma=0; + PyObject* pyobj_theta = NULL; + double theta=0; + PyObject* pyobj_lambd = NULL; + double lambd=0; + PyObject* pyobj_gamma = NULL; + double gamma=0; + PyObject* pyobj_psi = NULL; + double psi=CV_PI*0.5; + PyObject* pyobj_ktype = NULL; + int ktype=CV_64F; + Mat retval; + + const char* keywords[] = { "ksize", "sigma", "theta", "lambd", "gamma", "psi", "ktype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OO:getGaborKernel", (char**)keywords, &pyobj_ksize, &pyobj_sigma, &pyobj_theta, &pyobj_lambd, &pyobj_gamma, &pyobj_psi, &pyobj_ktype) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_sigma, sigma, ArgInfo("sigma", 0)) && + pyopencv_to_safe(pyobj_theta, theta, ArgInfo("theta", 0)) && + pyopencv_to_safe(pyobj_lambd, lambd, ArgInfo("lambd", 0)) && + pyopencv_to_safe(pyobj_gamma, gamma, ArgInfo("gamma", 0)) && + pyopencv_to_safe(pyobj_psi, psi, ArgInfo("psi", 0)) && + pyopencv_to_safe(pyobj_ktype, ktype, ArgInfo("ktype", 0)) ) + { + ERRWRAP2(retval = cv::getGaborKernel(ksize, sigma, theta, lambd, gamma, psi, ktype)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getGaussianKernel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_sigma = NULL; + double sigma=0; + PyObject* pyobj_ktype = NULL; + int ktype=CV_64F; + Mat retval; + + const char* keywords[] = { "ksize", "sigma", "ktype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:getGaussianKernel", (char**)keywords, &pyobj_ksize, &pyobj_sigma, &pyobj_ktype) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_sigma, sigma, ArgInfo("sigma", 0)) && + pyopencv_to_safe(pyobj_ktype, ktype, ArgInfo("ktype", 0)) ) + { + ERRWRAP2(retval = cv::getGaussianKernel(ksize, sigma, ktype)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getHardwareFeatureName(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_feature = NULL; + int feature=0; + String retval; + + const char* keywords[] = { "feature", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getHardwareFeatureName", (char**)keywords, &pyobj_feature) && + pyopencv_to_safe(pyobj_feature, feature, ArgInfo("feature", 0)) ) + { + ERRWRAP2(retval = cv::getHardwareFeatureName(feature)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getLogLevel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || 
PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getLogLevel()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getNumThreads(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getNumThreads()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getNumberOfCPUs(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getNumberOfCPUs()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getOptimalDFTSize(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_vecsize = NULL; + int vecsize=0; + int retval; + + const char* keywords[] = { "vecsize", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getOptimalDFTSize", (char**)keywords, &pyobj_vecsize) && + pyopencv_to_safe(pyobj_vecsize, vecsize, ArgInfo("vecsize", 0)) ) + { + ERRWRAP2(retval = cv::getOptimalDFTSize(vecsize)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getPerspectiveTransform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_solveMethod = NULL; + int solveMethod=DECOMP_LU; + Mat retval; + + const char* keywords[] = { "src", "dst", "solveMethod", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:getPerspectiveTransform", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_solveMethod) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 0)) && + pyopencv_to_safe(pyobj_solveMethod, solveMethod, ArgInfo("solveMethod", 0)) ) + { + ERRWRAP2(retval = cv::getPerspectiveTransform(src, dst, solveMethod)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_solveMethod = NULL; + int solveMethod=DECOMP_LU; + Mat retval; + + const char* keywords[] = { "src", "dst", "solveMethod", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:getPerspectiveTransform", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_solveMethod) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 0)) && + pyopencv_to_safe(pyobj_solveMethod, solveMethod, ArgInfo("solveMethod", 0)) ) + { + ERRWRAP2(retval = cv::getPerspectiveTransform(src, dst, solveMethod)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getPerspectiveTransform"); + + return NULL; +} + +static PyObject* pyopencv_cv_getRectSubPix(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_patchSize = NULL; + Size patchSize; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_patch = NULL; + Mat patch; + PyObject* pyobj_patchType = NULL; + int patchType=-1; + + const char* keywords[] = { "image", "patchSize", "center", "patch", "patchType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OOO|OO:getRectSubPix", (char**)keywords, &pyobj_image, &pyobj_patchSize, &pyobj_center, &pyobj_patch, &pyobj_patchType) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_patchSize, patchSize, ArgInfo("patchSize", 0)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_patch, patch, ArgInfo("patch", 1)) && + pyopencv_to_safe(pyobj_patchType, patchType, ArgInfo("patchType", 0)) ) + { + ERRWRAP2(cv::getRectSubPix(image, patchSize, center, patch, patchType)); + return pyopencv_from(patch); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_patchSize = NULL; + Size patchSize; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_patch = NULL; + UMat patch; + PyObject* pyobj_patchType = NULL; + int patchType=-1; + + const char* keywords[] = { "image", "patchSize", "center", "patch", "patchType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:getRectSubPix", (char**)keywords, &pyobj_image, &pyobj_patchSize, &pyobj_center, &pyobj_patch, &pyobj_patchType) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_patchSize, patchSize, ArgInfo("patchSize", 0)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_patch, patch, ArgInfo("patch", 1)) && + pyopencv_to_safe(pyobj_patchType, patchType, ArgInfo("patchType", 0)) ) + { + ERRWRAP2(cv::getRectSubPix(image, patchSize, center, patch, patchType)); + return pyopencv_from(patch); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getRectSubPix"); + + return NULL; +} + +static PyObject* pyopencv_cv_getRotationMatrix2D(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_angle = NULL; + double angle=0; + PyObject* pyobj_scale = NULL; + double scale=0; + Mat retval; + + const char* keywords[] = { "center", "angle", "scale", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:getRotationMatrix2D", (char**)keywords, &pyobj_center, &pyobj_angle, &pyobj_scale) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) ) + { + ERRWRAP2(retval = cv::getRotationMatrix2D(center, angle, scale)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getStructuringElement(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_shape = NULL; + int shape=0; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + Mat retval; + + const char* keywords[] = { "shape", "ksize", "anchor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:getStructuringElement", (char**)keywords, &pyobj_shape, &pyobj_ksize, &pyobj_anchor) && + pyopencv_to_safe(pyobj_shape, shape, ArgInfo("shape", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) ) + { + ERRWRAP2(retval = cv::getStructuringElement(shape, ksize, anchor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getTextSize(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_text = NULL; + String text; + PyObject* pyobj_fontFace = 
NULL; + int fontFace=0; + PyObject* pyobj_fontScale = NULL; + double fontScale=0; + PyObject* pyobj_thickness = NULL; + int thickness=0; + int baseLine; + Size retval; + + const char* keywords[] = { "text", "fontFace", "fontScale", "thickness", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:getTextSize", (char**)keywords, &pyobj_text, &pyobj_fontFace, &pyobj_fontScale, &pyobj_thickness) && + pyopencv_to_safe(pyobj_text, text, ArgInfo("text", 0)) && + pyopencv_to_safe(pyobj_fontFace, fontFace, ArgInfo("fontFace", 0)) && + pyopencv_to_safe(pyobj_fontScale, fontScale, ArgInfo("fontScale", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) ) + { + ERRWRAP2(retval = cv::getTextSize(text, fontFace, fontScale, thickness, &baseLine)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(baseLine)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getThreadNum(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getThreadNum()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getTickCount(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int64 retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getTickCount()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getTickFrequency(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getTickFrequency()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getVersionMajor(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getVersionMajor()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getVersionMinor(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getVersionMinor()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getVersionRevision(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getVersionRevision()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_getVersionString(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::getVersionString()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_goodFeaturesToTrack(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_corners = NULL; + Mat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + PyObject* pyobj_mask = NULL; + Mat mask; + 
PyObject* pyobj_blockSize = NULL; + int blockSize=3; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "corners", "mask", "blockSize", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:goodFeaturesToTrack", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_corners, &pyobj_mask, &pyobj_blockSize, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask, blockSize, useHarrisDetector, k)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_corners = NULL; + UMat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_blockSize = NULL; + int blockSize=3; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "corners", "mask", "blockSize", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOOO:goodFeaturesToTrack", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_corners, &pyobj_mask, &pyobj_blockSize, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask, blockSize, useHarrisDetector, k)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_corners = NULL; + Mat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + 
PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_gradientSize = NULL; + int gradientSize=0; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "mask", "blockSize", "gradientSize", "corners", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOO|OOO:goodFeaturesToTrack", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_mask, &pyobj_blockSize, &pyobj_gradientSize, &pyobj_corners, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_gradientSize, gradientSize, ArgInfo("gradientSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask, blockSize, gradientSize, useHarrisDetector, k)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_corners = NULL; + UMat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_blockSize = NULL; + int blockSize=0; + PyObject* pyobj_gradientSize = NULL; + int gradientSize=0; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "mask", "blockSize", "gradientSize", "corners", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOOO|OOO:goodFeaturesToTrack", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_mask, &pyobj_blockSize, &pyobj_gradientSize, &pyobj_corners, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_gradientSize, gradientSize, ArgInfo("gradientSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, 
minDistance, mask, blockSize, gradientSize, useHarrisDetector, k)); + return pyopencv_from(corners); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("goodFeaturesToTrack"); + + return NULL; +} + +static PyObject* pyopencv_cv_goodFeaturesToTrackWithQuality(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_corners = NULL; + Mat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_cornersQuality = NULL; + Mat cornersQuality; + PyObject* pyobj_blockSize = NULL; + int blockSize=3; + PyObject* pyobj_gradientSize = NULL; + int gradientSize=3; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "mask", "corners", "cornersQuality", "blockSize", "gradientSize", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|OOOOOO:goodFeaturesToTrackWithQuality", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_mask, &pyobj_corners, &pyobj_cornersQuality, &pyobj_blockSize, &pyobj_gradientSize, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_cornersQuality, cornersQuality, ArgInfo("cornersQuality", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_gradientSize, gradientSize, ArgInfo("gradientSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask, cornersQuality, blockSize, gradientSize, useHarrisDetector, k)); + return Py_BuildValue("(NN)", pyopencv_from(corners), pyopencv_from(cornersQuality)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_corners = NULL; + UMat corners; + PyObject* pyobj_maxCorners = NULL; + int maxCorners=0; + PyObject* pyobj_qualityLevel = NULL; + double qualityLevel=0; + PyObject* pyobj_minDistance = NULL; + double minDistance=0; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_cornersQuality = NULL; + UMat cornersQuality; + PyObject* pyobj_blockSize = NULL; + int blockSize=3; + PyObject* pyobj_gradientSize = NULL; + int gradientSize=3; + PyObject* pyobj_useHarrisDetector = NULL; + bool useHarrisDetector=false; + PyObject* pyobj_k = NULL; + double k=0.04; + + const char* keywords[] = { "image", "maxCorners", "qualityLevel", "minDistance", "mask", "corners", "cornersQuality", "blockSize", "gradientSize", "useHarrisDetector", "k", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OOOOO|OOOOOO:goodFeaturesToTrackWithQuality", (char**)keywords, &pyobj_image, &pyobj_maxCorners, &pyobj_qualityLevel, &pyobj_minDistance, &pyobj_mask, &pyobj_corners, &pyobj_cornersQuality, &pyobj_blockSize, &pyobj_gradientSize, &pyobj_useHarrisDetector, &pyobj_k) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_corners, corners, ArgInfo("corners", 1)) && + pyopencv_to_safe(pyobj_maxCorners, maxCorners, ArgInfo("maxCorners", 0)) && + pyopencv_to_safe(pyobj_qualityLevel, qualityLevel, ArgInfo("qualityLevel", 0)) && + pyopencv_to_safe(pyobj_minDistance, minDistance, ArgInfo("minDistance", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_cornersQuality, cornersQuality, ArgInfo("cornersQuality", 1)) && + pyopencv_to_safe(pyobj_blockSize, blockSize, ArgInfo("blockSize", 0)) && + pyopencv_to_safe(pyobj_gradientSize, gradientSize, ArgInfo("gradientSize", 0)) && + pyopencv_to_safe(pyobj_useHarrisDetector, useHarrisDetector, ArgInfo("useHarrisDetector", 0)) && + pyopencv_to_safe(pyobj_k, k, ArgInfo("k", 0)) ) + { + ERRWRAP2(cv::goodFeaturesToTrack(image, corners, maxCorners, qualityLevel, minDistance, mask, cornersQuality, blockSize, gradientSize, useHarrisDetector, k)); + return Py_BuildValue("(NN)", pyopencv_from(corners), pyopencv_from(cornersQuality)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("goodFeaturesToTrackWithQuality"); + + return NULL; +} + +static PyObject* pyopencv_cv_grabCut(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_rect = NULL; + Rect rect; + PyObject* pyobj_bgdModel = NULL; + Mat bgdModel; + PyObject* pyobj_fgdModel = NULL; + Mat fgdModel; + PyObject* pyobj_iterCount = NULL; + int iterCount=0; + PyObject* pyobj_mode = NULL; + int mode=GC_EVAL; + + const char* keywords[] = { "img", "mask", "rect", "bgdModel", "fgdModel", "iterCount", "mode", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:grabCut", (char**)keywords, &pyobj_img, &pyobj_mask, &pyobj_rect, &pyobj_bgdModel, &pyobj_fgdModel, &pyobj_iterCount, &pyobj_mode) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 1)) && + pyopencv_to_safe(pyobj_rect, rect, ArgInfo("rect", 0)) && + pyopencv_to_safe(pyobj_bgdModel, bgdModel, ArgInfo("bgdModel", 1)) && + pyopencv_to_safe(pyobj_fgdModel, fgdModel, ArgInfo("fgdModel", 1)) && + pyopencv_to_safe(pyobj_iterCount, iterCount, ArgInfo("iterCount", 0)) && + pyopencv_to_safe(pyobj_mode, mode, ArgInfo("mode", 0)) ) + { + ERRWRAP2(cv::grabCut(img, mask, rect, bgdModel, fgdModel, iterCount, mode)); + return Py_BuildValue("(NNN)", pyopencv_from(mask), pyopencv_from(bgdModel), pyopencv_from(fgdModel)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_rect = NULL; + Rect rect; + PyObject* pyobj_bgdModel = NULL; + UMat bgdModel; + PyObject* pyobj_fgdModel = NULL; + UMat fgdModel; + PyObject* pyobj_iterCount = NULL; + int iterCount=0; + PyObject* pyobj_mode = NULL; + int mode=GC_EVAL; + + const char* keywords[] = { "img", "mask", "rect", "bgdModel", "fgdModel", "iterCount", "mode", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:grabCut", (char**)keywords, &pyobj_img, &pyobj_mask, 
&pyobj_rect, &pyobj_bgdModel, &pyobj_fgdModel, &pyobj_iterCount, &pyobj_mode) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 1)) && + pyopencv_to_safe(pyobj_rect, rect, ArgInfo("rect", 0)) && + pyopencv_to_safe(pyobj_bgdModel, bgdModel, ArgInfo("bgdModel", 1)) && + pyopencv_to_safe(pyobj_fgdModel, fgdModel, ArgInfo("fgdModel", 1)) && + pyopencv_to_safe(pyobj_iterCount, iterCount, ArgInfo("iterCount", 0)) && + pyopencv_to_safe(pyobj_mode, mode, ArgInfo("mode", 0)) ) + { + ERRWRAP2(cv::grabCut(img, mask, rect, bgdModel, fgdModel, iterCount, mode)); + return Py_BuildValue("(NNN)", pyopencv_from(mask), pyopencv_from(bgdModel), pyopencv_from(fgdModel)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("grabCut"); + + return NULL; +} + +static PyObject* pyopencv_cv_haveImageReader(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_filename = NULL; + String filename; + bool retval; + + const char* keywords[] = { "filename", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:haveImageReader", (char**)keywords, &pyobj_filename) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) ) + { + ERRWRAP2(retval = cv::haveImageReader(filename)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_haveImageWriter(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_filename = NULL; + String filename; + bool retval; + + const char* keywords[] = { "filename", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:haveImageWriter", (char**)keywords, &pyobj_filename) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) ) + { + ERRWRAP2(retval = cv::haveImageWriter(filename)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_haveOpenVX(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::haveOpenVX()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_hconcat(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + vector_Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:hconcat", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::hconcat(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + vector_UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:hconcat", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::hconcat(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("hconcat"); + + return NULL; +} + +static PyObject* pyopencv_cv_idct(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + 
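+ /* idct, like the other generated wrappers in this file, first attempts argument conversion to cv::Mat and then retries with cv::UMat; failed attempts are recorded by pyPopulateArgumentConversionErrors() and surfaced together by pyRaiseCVOverloadException() when no overload matches. */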
{ + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:idct", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::idct(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:idct", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::idct(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("idct"); + + return NULL; +} + +static PyObject* pyopencv_cv_idft(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_nonzeroRows = NULL; + int nonzeroRows=0; + + const char* keywords[] = { "src", "dst", "flags", "nonzeroRows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:idft", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags, &pyobj_nonzeroRows) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_nonzeroRows, nonzeroRows, ArgInfo("nonzeroRows", 0)) ) + { + ERRWRAP2(cv::idft(src, dst, flags, nonzeroRows)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_nonzeroRows = NULL; + int nonzeroRows=0; + + const char* keywords[] = { "src", "dst", "flags", "nonzeroRows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:idft", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags, &pyobj_nonzeroRows) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_nonzeroRows, nonzeroRows, ArgInfo("nonzeroRows", 0)) ) + { + ERRWRAP2(cv::idft(src, dst, flags, nonzeroRows)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("idft"); + + return NULL; +} + +static PyObject* pyopencv_cv_imcount(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_flags = NULL; + int flags=IMREAD_ANYCOLOR; + size_t retval; + + const char* keywords[] = { "filename", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:imcount", (char**)keywords, &pyobj_filename, &pyobj_flags) && + 
pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imcount(filename, flags)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_imdecode(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_buf = NULL; + Mat buf; + PyObject* pyobj_flags = NULL; + int flags=0; + Mat retval; + + const char* keywords[] = { "buf", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:imdecode", (char**)keywords, &pyobj_buf, &pyobj_flags) && + pyopencv_to_safe(pyobj_buf, buf, ArgInfo("buf", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imdecode(buf, flags)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_buf = NULL; + UMat buf; + PyObject* pyobj_flags = NULL; + int flags=0; + Mat retval; + + const char* keywords[] = { "buf", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:imdecode", (char**)keywords, &pyobj_buf, &pyobj_flags) && + pyopencv_to_safe(pyobj_buf, buf, ArgInfo("buf", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imdecode(buf, flags)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("imdecode"); + + return NULL; +} + +static PyObject* pyopencv_cv_imencode(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_ext = NULL; + String ext; + PyObject* pyobj_img = NULL; + Mat img; + vector_uchar buf; + PyObject* pyobj_params = NULL; + vector_int params=std::vector<int>(); + bool retval; + + const char* keywords[] = { "ext", "img", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imencode", (char**)keywords, &pyobj_ext, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_ext, ext, ArgInfo("ext", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imencode(ext, img, buf, params)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(buf)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_ext = NULL; + String ext; + PyObject* pyobj_img = NULL; + UMat img; + vector_uchar buf; + PyObject* pyobj_params = NULL; + vector_int params=std::vector<int>(); + bool retval; + + const char* keywords[] = { "ext", "img", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imencode", (char**)keywords, &pyobj_ext, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_ext, ext, ArgInfo("ext", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imencode(ext, img, buf, params)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(buf)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("imencode"); + + return NULL; +} + +static PyObject* pyopencv_cv_imread(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_flags = NULL; + int flags=IMREAD_COLOR; + Mat retval; + + const char* keywords[] = { "filename", "flags", NULL }; + if(
PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:imread", (char**)keywords, &pyobj_filename, &pyobj_flags) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imread(filename, flags)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_imreadmulti(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_mats = NULL; + vector_Mat mats; + PyObject* pyobj_flags = NULL; + int flags=IMREAD_ANYCOLOR; + bool retval; + + const char* keywords[] = { "filename", "mats", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:imreadmulti", (char**)keywords, &pyobj_filename, &pyobj_mats, &pyobj_flags) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_mats, mats, ArgInfo("mats", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imreadmulti(filename, mats, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(mats)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_mats = NULL; + vector_Mat mats; + PyObject* pyobj_start = NULL; + int start=0; + PyObject* pyobj_count = NULL; + int count=0; + PyObject* pyobj_flags = NULL; + int flags=IMREAD_ANYCOLOR; + bool retval; + + const char* keywords[] = { "filename", "start", "count", "mats", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:imreadmulti", (char**)keywords, &pyobj_filename, &pyobj_start, &pyobj_count, &pyobj_mats, &pyobj_flags) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_mats, mats, ArgInfo("mats", 1)) && + pyopencv_to_safe(pyobj_start, start, ArgInfo("start", 0)) && + pyopencv_to_safe(pyobj_count, count, ArgInfo("count", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::imreadmulti(filename, mats, start, count, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(mats)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("imreadmulti"); + + return NULL; +} + +static PyObject* pyopencv_cv_imwrite(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_params = NULL; + vector_int params=std::vector(); + bool retval; + + const char* keywords[] = { "filename", "img", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imwrite", (char**)keywords, &pyobj_filename, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imwrite(filename, img, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_params = NULL; + vector_int params=std::vector(); + bool retval; + + const char* keywords[] = { "filename", "img", "params", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imwrite", (char**)keywords, &pyobj_filename, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imwrite(filename, img, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("imwrite"); + + return NULL; +} + +static PyObject* pyopencv_cv_imwritemulti(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_img = NULL; + vector_Mat img; + PyObject* pyobj_params = NULL; + vector_int params=std::vector(); + bool retval; + + const char* keywords[] = { "filename", "img", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imwritemulti", (char**)keywords, &pyobj_filename, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imwritemulti(filename, img, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_img = NULL; + vector_UMat img; + PyObject* pyobj_params = NULL; + vector_int params=std::vector(); + bool retval; + + const char* keywords[] = { "filename", "img", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:imwritemulti", (char**)keywords, &pyobj_filename, &pyobj_img, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = cv::imwritemulti(filename, img, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("imwritemulti"); + + return NULL; +} + +static PyObject* pyopencv_cv_inRange(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_lowerb = NULL; + Mat lowerb; + PyObject* pyobj_upperb = NULL; + Mat upperb; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "lowerb", "upperb", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:inRange", (char**)keywords, &pyobj_src, &pyobj_lowerb, &pyobj_upperb, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_lowerb, lowerb, ArgInfo("lowerb", 0)) && + pyopencv_to_safe(pyobj_upperb, upperb, ArgInfo("upperb", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::inRange(src, lowerb, upperb, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_lowerb = NULL; + UMat lowerb; + PyObject* pyobj_upperb = NULL; + UMat upperb; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "lowerb", "upperb", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:inRange", (char**)keywords, &pyobj_src, &pyobj_lowerb, &pyobj_upperb, 
&pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_lowerb, lowerb, ArgInfo("lowerb", 0)) && + pyopencv_to_safe(pyobj_upperb, upperb, ArgInfo("upperb", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::inRange(src, lowerb, upperb, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("inRange"); + + return NULL; +} + +static PyObject* pyopencv_cv_insertChannel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_coi = NULL; + int coi=0; + + const char* keywords[] = { "src", "dst", "coi", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:insertChannel", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_coi) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_coi, coi, ArgInfo("coi", 0)) ) + { + ERRWRAP2(cv::insertChannel(src, dst, coi)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_coi = NULL; + int coi=0; + + const char* keywords[] = { "src", "dst", "coi", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:insertChannel", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_coi) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_coi, coi, ArgInfo("coi", 0)) ) + { + ERRWRAP2(cv::insertChannel(src, dst, coi)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("insertChannel"); + + return NULL; +} + +static PyObject* pyopencv_cv_integral(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_sum = NULL; + Mat sum; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + + const char* keywords[] = { "src", "sum", "sdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:integral", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sdepth)); + return pyopencv_from(sum); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_sum = NULL; + UMat sum; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + + const char* keywords[] = { "src", "sum", "sdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:integral", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sdepth)); + return pyopencv_from(sum); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("integral"); + + return NULL; +} + +static PyObject* pyopencv_cv_integral2(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + 
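+ /* integral2 binds the cv::integral overload that also produces the squared-sum image; both outputs are handed back to Python as a tuple via Py_BuildValue("(NN)", ...). */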
pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_sum = NULL; + Mat sum; + PyObject* pyobj_sqsum = NULL; + Mat sqsum; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + PyObject* pyobj_sqdepth = NULL; + int sqdepth=-1; + + const char* keywords[] = { "src", "sum", "sqsum", "sdepth", "sqdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:integral2", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sqsum, &pyobj_sdepth, &pyobj_sqdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sqsum, sqsum, ArgInfo("sqsum", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) && + pyopencv_to_safe(pyobj_sqdepth, sqdepth, ArgInfo("sqdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sqsum, sdepth, sqdepth)); + return Py_BuildValue("(NN)", pyopencv_from(sum), pyopencv_from(sqsum)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_sum = NULL; + UMat sum; + PyObject* pyobj_sqsum = NULL; + UMat sqsum; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + PyObject* pyobj_sqdepth = NULL; + int sqdepth=-1; + + const char* keywords[] = { "src", "sum", "sqsum", "sdepth", "sqdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:integral2", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sqsum, &pyobj_sdepth, &pyobj_sqdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sqsum, sqsum, ArgInfo("sqsum", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) && + pyopencv_to_safe(pyobj_sqdepth, sqdepth, ArgInfo("sqdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sqsum, sdepth, sqdepth)); + return Py_BuildValue("(NN)", pyopencv_from(sum), pyopencv_from(sqsum)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("integral2"); + + return NULL; +} + +static PyObject* pyopencv_cv_integral3(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_sum = NULL; + Mat sum; + PyObject* pyobj_sqsum = NULL; + Mat sqsum; + PyObject* pyobj_tilted = NULL; + Mat tilted; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + PyObject* pyobj_sqdepth = NULL; + int sqdepth=-1; + + const char* keywords[] = { "src", "sum", "sqsum", "tilted", "sdepth", "sqdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOOO:integral3", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sqsum, &pyobj_tilted, &pyobj_sdepth, &pyobj_sqdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sqsum, sqsum, ArgInfo("sqsum", 1)) && + pyopencv_to_safe(pyobj_tilted, tilted, ArgInfo("tilted", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) && + pyopencv_to_safe(pyobj_sqdepth, sqdepth, ArgInfo("sqdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sqsum, tilted, sdepth, sqdepth)); + return Py_BuildValue("(NNN)", pyopencv_from(sum), pyopencv_from(sqsum), pyopencv_from(tilted)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_sum = NULL; + UMat sum; + PyObject* pyobj_sqsum = NULL; + UMat sqsum; + PyObject* pyobj_tilted = NULL; + UMat 
tilted; + PyObject* pyobj_sdepth = NULL; + int sdepth=-1; + PyObject* pyobj_sqdepth = NULL; + int sqdepth=-1; + + const char* keywords[] = { "src", "sum", "sqsum", "tilted", "sdepth", "sqdepth", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOOO:integral3", (char**)keywords, &pyobj_src, &pyobj_sum, &pyobj_sqsum, &pyobj_tilted, &pyobj_sdepth, &pyobj_sqdepth) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_sum, sum, ArgInfo("sum", 1)) && + pyopencv_to_safe(pyobj_sqsum, sqsum, ArgInfo("sqsum", 1)) && + pyopencv_to_safe(pyobj_tilted, tilted, ArgInfo("tilted", 1)) && + pyopencv_to_safe(pyobj_sdepth, sdepth, ArgInfo("sdepth", 0)) && + pyopencv_to_safe(pyobj_sqdepth, sqdepth, ArgInfo("sqdepth", 0)) ) + { + ERRWRAP2(cv::integral(src, sum, sqsum, tilted, sdepth, sqdepth)); + return Py_BuildValue("(NNN)", pyopencv_from(sum), pyopencv_from(sqsum), pyopencv_from(tilted)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("integral3"); + + return NULL; +} + +static PyObject* pyopencv_cv_intersectConvexConvex(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_p1 = NULL; + Mat p1; + PyObject* pyobj_p2 = NULL; + Mat p2; + PyObject* pyobj_p12 = NULL; + Mat p12; + PyObject* pyobj_handleNested = NULL; + bool handleNested=true; + float retval; + + const char* keywords[] = { "p1", "p2", "p12", "handleNested", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:intersectConvexConvex", (char**)keywords, &pyobj_p1, &pyobj_p2, &pyobj_p12, &pyobj_handleNested) && + pyopencv_to_safe(pyobj_p1, p1, ArgInfo("p1", 0)) && + pyopencv_to_safe(pyobj_p2, p2, ArgInfo("p2", 0)) && + pyopencv_to_safe(pyobj_p12, p12, ArgInfo("p12", 1)) && + pyopencv_to_safe(pyobj_handleNested, handleNested, ArgInfo("handleNested", 0)) ) + { + ERRWRAP2(retval = cv::intersectConvexConvex(p1, p2, p12, handleNested)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(p12)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_p1 = NULL; + UMat p1; + PyObject* pyobj_p2 = NULL; + UMat p2; + PyObject* pyobj_p12 = NULL; + UMat p12; + PyObject* pyobj_handleNested = NULL; + bool handleNested=true; + float retval; + + const char* keywords[] = { "p1", "p2", "p12", "handleNested", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:intersectConvexConvex", (char**)keywords, &pyobj_p1, &pyobj_p2, &pyobj_p12, &pyobj_handleNested) && + pyopencv_to_safe(pyobj_p1, p1, ArgInfo("p1", 0)) && + pyopencv_to_safe(pyobj_p2, p2, ArgInfo("p2", 0)) && + pyopencv_to_safe(pyobj_p12, p12, ArgInfo("p12", 1)) && + pyopencv_to_safe(pyobj_handleNested, handleNested, ArgInfo("handleNested", 0)) ) + { + ERRWRAP2(retval = cv::intersectConvexConvex(p1, p2, p12, handleNested)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(p12)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("intersectConvexConvex"); + + return NULL; +} + +static PyObject* pyopencv_cv_invert(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=DECOMP_LU; + double retval; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:invert", (char**)keywords, &pyobj_src, &pyobj_dst, 
&pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::invert(src, dst, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=DECOMP_LU; + double retval; + + const char* keywords[] = { "src", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:invert", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::invert(src, dst, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("invert"); + + return NULL; +} + +static PyObject* pyopencv_cv_invertAffineTransform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_M = NULL; + Mat M; + PyObject* pyobj_iM = NULL; + Mat iM; + + const char* keywords[] = { "M", "iM", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:invertAffineTransform", (char**)keywords, &pyobj_M, &pyobj_iM) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_iM, iM, ArgInfo("iM", 1)) ) + { + ERRWRAP2(cv::invertAffineTransform(M, iM)); + return pyopencv_from(iM); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_M = NULL; + UMat M; + PyObject* pyobj_iM = NULL; + UMat iM; + + const char* keywords[] = { "M", "iM", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:invertAffineTransform", (char**)keywords, &pyobj_M, &pyobj_iM) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_iM, iM, ArgInfo("iM", 1)) ) + { + ERRWRAP2(cv::invertAffineTransform(M, iM)); + return pyopencv_from(iM); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("invertAffineTransform"); + + return NULL; +} + +static PyObject* pyopencv_cv_isContourConvex(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_contour = NULL; + Mat contour; + bool retval; + + const char* keywords[] = { "contour", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:isContourConvex", (char**)keywords, &pyobj_contour) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) ) + { + ERRWRAP2(retval = cv::isContourConvex(contour)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_contour = NULL; + UMat contour; + bool retval; + + const char* keywords[] = { "contour", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:isContourConvex", (char**)keywords, &pyobj_contour) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) ) + { + ERRWRAP2(retval = cv::isContourConvex(contour)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("isContourConvex"); + + return NULL; +} + +static PyObject* pyopencv_cv_kmeans(PyObject* , PyObject* py_args, PyObject* kw) +{ + 
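+ /* kmeans returns the compactness value together with the updated bestLabels and centers as a 3-tuple; illustrative Python-side call: compactness, labels, centers = cv2.kmeans(data, K, None, criteria, attempts, flags) */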
using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_data = NULL; + Mat data; + PyObject* pyobj_K = NULL; + int K=0; + PyObject* pyobj_bestLabels = NULL; + Mat bestLabels; + PyObject* pyobj_criteria = NULL; + TermCriteria criteria; + PyObject* pyobj_attempts = NULL; + int attempts=0; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_centers = NULL; + Mat centers; + double retval; + + const char* keywords[] = { "data", "K", "bestLabels", "criteria", "attempts", "flags", "centers", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:kmeans", (char**)keywords, &pyobj_data, &pyobj_K, &pyobj_bestLabels, &pyobj_criteria, &pyobj_attempts, &pyobj_flags, &pyobj_centers) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_K, K, ArgInfo("K", 0)) && + pyopencv_to_safe(pyobj_bestLabels, bestLabels, ArgInfo("bestLabels", 1)) && + pyopencv_to_safe(pyobj_criteria, criteria, ArgInfo("criteria", 0)) && + pyopencv_to_safe(pyobj_attempts, attempts, ArgInfo("attempts", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_centers, centers, ArgInfo("centers", 1)) ) + { + ERRWRAP2(retval = cv::kmeans(data, K, bestLabels, criteria, attempts, flags, centers)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(bestLabels), pyopencv_from(centers)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_data = NULL; + UMat data; + PyObject* pyobj_K = NULL; + int K=0; + PyObject* pyobj_bestLabels = NULL; + UMat bestLabels; + PyObject* pyobj_criteria = NULL; + TermCriteria criteria; + PyObject* pyobj_attempts = NULL; + int attempts=0; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_centers = NULL; + UMat centers; + double retval; + + const char* keywords[] = { "data", "K", "bestLabels", "criteria", "attempts", "flags", "centers", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|O:kmeans", (char**)keywords, &pyobj_data, &pyobj_K, &pyobj_bestLabels, &pyobj_criteria, &pyobj_attempts, &pyobj_flags, &pyobj_centers) && + pyopencv_to_safe(pyobj_data, data, ArgInfo("data", 0)) && + pyopencv_to_safe(pyobj_K, K, ArgInfo("K", 0)) && + pyopencv_to_safe(pyobj_bestLabels, bestLabels, ArgInfo("bestLabels", 1)) && + pyopencv_to_safe(pyobj_criteria, criteria, ArgInfo("criteria", 0)) && + pyopencv_to_safe(pyobj_attempts, attempts, ArgInfo("attempts", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_centers, centers, ArgInfo("centers", 1)) ) + { + ERRWRAP2(retval = cv::kmeans(data, K, bestLabels, criteria, attempts, flags, centers)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(bestLabels), pyopencv_from(centers)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("kmeans"); + + return NULL; +} + +static PyObject* pyopencv_cv_line(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pt1", "pt2", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, 
kw, "OOOO|OOO:line", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::line(img, pt1, pt2, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pt1", "pt2", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:line", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::line(img, pt1, pt2, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("line"); + + return NULL; +} + +static PyObject* pyopencv_cv_linearPolar(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_maxRadius = NULL; + double maxRadius=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "center", "maxRadius", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:linearPolar", (char**)keywords, &pyobj_src, &pyobj_center, &pyobj_maxRadius, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::linearPolar(src, dst, center, maxRadius, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_maxRadius = NULL; + double maxRadius=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "center", "maxRadius", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:linearPolar", 
(char**)keywords, &pyobj_src, &pyobj_center, &pyobj_maxRadius, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::linearPolar(src, dst, center, maxRadius, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("linearPolar"); + + return NULL; +} + +static PyObject* pyopencv_cv_log(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:log", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::log(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:log", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::log(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("log"); + + return NULL; +} + +static PyObject* pyopencv_cv_logPolar(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_M = NULL; + double M=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "center", "M", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:logPolar", (char**)keywords, &pyobj_src, &pyobj_center, &pyobj_M, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::logPolar(src, dst, center, M, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_M = NULL; + double M=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "center", "M", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:logPolar", (char**)keywords, &pyobj_src, &pyobj_center, &pyobj_M, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_flags, 
flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::logPolar(src, dst, center, M, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("logPolar"); + + return NULL; +} + +static PyObject* pyopencv_cv_magnitude(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_x = NULL; + Mat x; + PyObject* pyobj_y = NULL; + Mat y; + PyObject* pyobj_magnitude = NULL; + Mat magnitude; + + const char* keywords[] = { "x", "y", "magnitude", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:magnitude", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_magnitude) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 1)) ) + { + ERRWRAP2(cv::magnitude(x, y, magnitude)); + return pyopencv_from(magnitude); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_x = NULL; + UMat x; + PyObject* pyobj_y = NULL; + UMat y; + PyObject* pyobj_magnitude = NULL; + UMat magnitude; + + const char* keywords[] = { "x", "y", "magnitude", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:magnitude", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_magnitude) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 1)) ) + { + ERRWRAP2(cv::magnitude(x, y, magnitude)); + return pyopencv_from(magnitude); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("magnitude"); + + return NULL; +} + +static PyObject* pyopencv_cv_matchShapes(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_contour1 = NULL; + Mat contour1; + PyObject* pyobj_contour2 = NULL; + Mat contour2; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_parameter = NULL; + double parameter=0; + double retval; + + const char* keywords[] = { "contour1", "contour2", "method", "parameter", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:matchShapes", (char**)keywords, &pyobj_contour1, &pyobj_contour2, &pyobj_method, &pyobj_parameter) && + pyopencv_to_safe(pyobj_contour1, contour1, ArgInfo("contour1", 0)) && + pyopencv_to_safe(pyobj_contour2, contour2, ArgInfo("contour2", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_parameter, parameter, ArgInfo("parameter", 0)) ) + { + ERRWRAP2(retval = cv::matchShapes(contour1, contour2, method, parameter)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_contour1 = NULL; + UMat contour1; + PyObject* pyobj_contour2 = NULL; + UMat contour2; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_parameter = NULL; + double parameter=0; + double retval; + + const char* keywords[] = { "contour1", "contour2", "method", "parameter", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:matchShapes", (char**)keywords, &pyobj_contour1, &pyobj_contour2, &pyobj_method, &pyobj_parameter) && + pyopencv_to_safe(pyobj_contour1, contour1, ArgInfo("contour1", 0)) && + pyopencv_to_safe(pyobj_contour2, contour2, ArgInfo("contour2", 0)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_parameter, parameter, 
ArgInfo("parameter", 0)) ) + { + ERRWRAP2(retval = cv::matchShapes(contour1, contour2, method, parameter)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("matchShapes"); + + return NULL; +} + +static PyObject* pyopencv_cv_matchTemplate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_templ = NULL; + Mat templ; + PyObject* pyobj_result = NULL; + Mat result; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "image", "templ", "method", "result", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:matchTemplate", (char**)keywords, &pyobj_image, &pyobj_templ, &pyobj_method, &pyobj_result, &pyobj_mask) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_templ, templ, ArgInfo("templ", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::matchTemplate(image, templ, result, method, mask)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_templ = NULL; + UMat templ; + PyObject* pyobj_result = NULL; + UMat result; + PyObject* pyobj_method = NULL; + int method=0; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "image", "templ", "method", "result", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:matchTemplate", (char**)keywords, &pyobj_image, &pyobj_templ, &pyobj_method, &pyobj_result, &pyobj_mask) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_templ, templ, ArgInfo("templ", 0)) && + pyopencv_to_safe(pyobj_result, result, ArgInfo("result", 1)) && + pyopencv_to_safe(pyobj_method, method, ArgInfo("method", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::matchTemplate(image, templ, result, method, mask)); + return pyopencv_from(result); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("matchTemplate"); + + return NULL; +} + +static PyObject* pyopencv_cv_max(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:max", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::max(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:max", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + 
pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::max(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("max"); + + return NULL; +} + +static PyObject* pyopencv_cv_mean(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_mask = NULL; + Mat mask; + Scalar retval; + + const char* keywords[] = { "src", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:mean", (char**)keywords, &pyobj_src, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::mean(src, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_mask = NULL; + UMat mask; + Scalar retval; + + const char* keywords[] = { "src", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:mean", (char**)keywords, &pyobj_src, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::mean(src, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("mean"); + + return NULL; +} + +static PyObject* pyopencv_cv_meanStdDev(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_stddev = NULL; + Mat stddev; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "mean", "stddev", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:meanStdDev", (char**)keywords, &pyobj_src, &pyobj_mean, &pyobj_stddev, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_stddev, stddev, ArgInfo("stddev", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::meanStdDev(src, mean, stddev, mask)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(stddev)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_stddev = NULL; + UMat stddev; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "mean", "stddev", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:meanStdDev", (char**)keywords, &pyobj_src, &pyobj_mean, &pyobj_stddev, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 1)) && + pyopencv_to_safe(pyobj_stddev, stddev, ArgInfo("stddev", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::meanStdDev(src, mean, stddev, mask)); + return Py_BuildValue("(NN)", pyopencv_from(mean), pyopencv_from(stddev)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("meanStdDev"); + + return NULL; +} + +static PyObject* pyopencv_cv_medianBlur(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + 
pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ksize = NULL; + int ksize=0; + + const char* keywords[] = { "src", "ksize", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:medianBlur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) ) + { + ERRWRAP2(cv::medianBlur(src, dst, ksize)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ksize = NULL; + int ksize=0; + + const char* keywords[] = { "src", "ksize", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:medianBlur", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) ) + { + ERRWRAP2(cv::medianBlur(src, dst, ksize)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("medianBlur"); + + return NULL; +} + +static PyObject* pyopencv_cv_merge(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_mv = NULL; + vector_Mat mv; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "mv", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:merge", (char**)keywords, &pyobj_mv, &pyobj_dst) && + pyopencv_to_safe(pyobj_mv, mv, ArgInfo("mv", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::merge(mv, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_mv = NULL; + vector_UMat mv; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "mv", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:merge", (char**)keywords, &pyobj_mv, &pyobj_dst) && + pyopencv_to_safe(pyobj_mv, mv, ArgInfo("mv", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::merge(mv, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("merge"); + + return NULL; +} + +static PyObject* pyopencv_cv_min(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:min", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::min(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src1", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OO|O:min", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::min(src1, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("min"); + + return NULL; +} + +static PyObject* pyopencv_cv_minAreaRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:minAreaRect", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::minAreaRect(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + RotatedRect retval; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:minAreaRect", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(retval = cv::minAreaRect(points)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("minAreaRect"); + + return NULL; +} + +static PyObject* pyopencv_cv_minEnclosingCircle(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + Point2f center; + float radius; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:minEnclosingCircle", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(cv::minEnclosingCircle(points, center, radius)); + return Py_BuildValue("(NN)", pyopencv_from(center), pyopencv_from(radius)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + Point2f center; + float radius; + + const char* keywords[] = { "points", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:minEnclosingCircle", (char**)keywords, &pyobj_points) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) ) + { + ERRWRAP2(cv::minEnclosingCircle(points, center, radius)); + return Py_BuildValue("(NN)", pyopencv_from(center), pyopencv_from(radius)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("minEnclosingCircle"); + + return NULL; +} + +static PyObject* pyopencv_cv_minEnclosingTriangle(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_points = NULL; + Mat points; + PyObject* pyobj_triangle = NULL; + Mat triangle; + double retval; + + const char* keywords[] = { "points", "triangle", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:minEnclosingTriangle", (char**)keywords, &pyobj_points, &pyobj_triangle) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_triangle, triangle, ArgInfo("triangle", 1)) ) + { + ERRWRAP2(retval = cv::minEnclosingTriangle(points, triangle)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(triangle)); + } + + + 
pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points = NULL; + UMat points; + PyObject* pyobj_triangle = NULL; + UMat triangle; + double retval; + + const char* keywords[] = { "points", "triangle", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:minEnclosingTriangle", (char**)keywords, &pyobj_points, &pyobj_triangle) && + pyopencv_to_safe(pyobj_points, points, ArgInfo("points", 0)) && + pyopencv_to_safe(pyobj_triangle, triangle, ArgInfo("triangle", 1)) ) + { + ERRWRAP2(retval = cv::minEnclosingTriangle(points, triangle)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(triangle)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("minEnclosingTriangle"); + + return NULL; +} + +static PyObject* pyopencv_cv_minMaxLoc(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + double minVal; + double maxVal; + Point minLoc; + Point maxLoc; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:minMaxLoc", (char**)keywords, &pyobj_src, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask)); + return Py_BuildValue("(NNNN)", pyopencv_from(minVal), pyopencv_from(maxVal), pyopencv_from(minLoc), pyopencv_from(maxLoc)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + double minVal; + double maxVal; + Point minLoc; + Point maxLoc; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:minMaxLoc", (char**)keywords, &pyobj_src, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc, mask)); + return Py_BuildValue("(NNNN)", pyopencv_from(minVal), pyopencv_from(maxVal), pyopencv_from(minLoc), pyopencv_from(maxLoc)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("minMaxLoc"); + + return NULL; +} + +static PyObject* pyopencv_cv_mixChannels(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + vector_Mat src; + PyObject* pyobj_dst = NULL; + vector_Mat dst; + PyObject* pyobj_fromTo = NULL; + vector_int fromTo; + + const char* keywords[] = { "src", "dst", "fromTo", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:mixChannels", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_fromTo) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_fromTo, fromTo, ArgInfo("fromTo", 0)) ) + { + ERRWRAP2(cv::mixChannels(src, dst, fromTo)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + vector_UMat src; + PyObject* pyobj_dst = NULL; + vector_UMat dst; + PyObject* pyobj_fromTo = NULL; + vector_int fromTo; + + const char* keywords[] = { "src", "dst", "fromTo", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:mixChannels", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_fromTo) && + 
pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_fromTo, fromTo, ArgInfo("fromTo", 0)) ) + { + ERRWRAP2(cv::mixChannels(src, dst, fromTo)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("mixChannels"); + + return NULL; +} + +static PyObject* pyopencv_cv_moments(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_array = NULL; + Mat array; + PyObject* pyobj_binaryImage = NULL; + bool binaryImage=false; + Moments retval; + + const char* keywords[] = { "array", "binaryImage", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:moments", (char**)keywords, &pyobj_array, &pyobj_binaryImage) && + pyopencv_to_safe(pyobj_array, array, ArgInfo("array", 0)) && + pyopencv_to_safe(pyobj_binaryImage, binaryImage, ArgInfo("binaryImage", 0)) ) + { + ERRWRAP2(retval = cv::moments(array, binaryImage)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_array = NULL; + UMat array; + PyObject* pyobj_binaryImage = NULL; + bool binaryImage=false; + Moments retval; + + const char* keywords[] = { "array", "binaryImage", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:moments", (char**)keywords, &pyobj_array, &pyobj_binaryImage) && + pyopencv_to_safe(pyobj_array, array, ArgInfo("array", 0)) && + pyopencv_to_safe(pyobj_binaryImage, binaryImage, ArgInfo("binaryImage", 0)) ) + { + ERRWRAP2(retval = cv::moments(array, binaryImage)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("moments"); + + return NULL; +} + +static PyObject* pyopencv_cv_morphologyEx(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_op = NULL; + int op=0; + PyObject* pyobj_kernel = NULL; + Mat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "op", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOOO:morphologyEx", (char**)keywords, &pyobj_src, &pyobj_op, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_op, op, ArgInfo("op", 0)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + 
UMat dst; + PyObject* pyobj_op = NULL; + int op=0; + PyObject* pyobj_kernel = NULL; + UMat kernel; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_iterations = NULL; + int iterations=1; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue=morphologyDefaultBorderValue(); + + const char* keywords[] = { "src", "op", "kernel", "dst", "anchor", "iterations", "borderType", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOOO:morphologyEx", (char**)keywords, &pyobj_src, &pyobj_op, &pyobj_kernel, &pyobj_dst, &pyobj_anchor, &pyobj_iterations, &pyobj_borderType, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_op, op, ArgInfo("op", 0)) && + pyopencv_to_safe(pyobj_kernel, kernel, ArgInfo("kernel", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_iterations, iterations, ArgInfo("iterations", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::morphologyEx(src, dst, op, kernel, anchor, iterations, borderType, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("morphologyEx"); + + return NULL; +} + +static PyObject* pyopencv_cv_mulSpectrums(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_a = NULL; + Mat a; + PyObject* pyobj_b = NULL; + Mat b; + PyObject* pyobj_c = NULL; + Mat c; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_conjB = NULL; + bool conjB=false; + + const char* keywords[] = { "a", "b", "flags", "c", "conjB", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:mulSpectrums", (char**)keywords, &pyobj_a, &pyobj_b, &pyobj_flags, &pyobj_c, &pyobj_conjB) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_b, b, ArgInfo("b", 0)) && + pyopencv_to_safe(pyobj_c, c, ArgInfo("c", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_conjB, conjB, ArgInfo("conjB", 0)) ) + { + ERRWRAP2(cv::mulSpectrums(a, b, c, flags, conjB)); + return pyopencv_from(c); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_a = NULL; + UMat a; + PyObject* pyobj_b = NULL; + UMat b; + PyObject* pyobj_c = NULL; + UMat c; + PyObject* pyobj_flags = NULL; + int flags=0; + PyObject* pyobj_conjB = NULL; + bool conjB=false; + + const char* keywords[] = { "a", "b", "flags", "c", "conjB", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:mulSpectrums", (char**)keywords, &pyobj_a, &pyobj_b, &pyobj_flags, &pyobj_c, &pyobj_conjB) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 0)) && + pyopencv_to_safe(pyobj_b, b, ArgInfo("b", 0)) && + pyopencv_to_safe(pyobj_c, c, ArgInfo("c", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_conjB, conjB, ArgInfo("conjB", 0)) ) + { + ERRWRAP2(cv::mulSpectrums(a, b, c, flags, conjB)); + return pyopencv_from(c); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("mulSpectrums"); + + return NULL; +} + +static PyObject* pyopencv_cv_mulTransposed(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace 
cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_aTa = NULL; + bool aTa=0; + PyObject* pyobj_delta = NULL; + Mat delta; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src", "aTa", "dst", "delta", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOO:mulTransposed", (char**)keywords, &pyobj_src, &pyobj_aTa, &pyobj_dst, &pyobj_delta, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_aTa, aTa, ArgInfo("aTa", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::mulTransposed(src, dst, aTa, delta, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_aTa = NULL; + bool aTa=0; + PyObject* pyobj_delta = NULL; + UMat delta; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src", "aTa", "dst", "delta", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOO:mulTransposed", (char**)keywords, &pyobj_src, &pyobj_aTa, &pyobj_dst, &pyobj_delta, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_aTa, aTa, ArgInfo("aTa", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::mulTransposed(src, dst, aTa, delta, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("mulTransposed"); + + return NULL; +} + +static PyObject* pyopencv_cv_multiply(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:multiply", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::multiply(src1, src2, dst, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_scale = NULL; + double scale=1; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { 
"src1", "src2", "dst", "scale", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:multiply", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_scale, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_scale, scale, ArgInfo("scale", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::multiply(src1, src2, dst, scale, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("multiply"); + + return NULL; +} + +static PyObject* pyopencv_cv_norm(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_mask = NULL; + Mat mask; + double retval; + + const char* keywords[] = { "src1", "normType", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:norm", (char**)keywords, &pyobj_src1, &pyobj_normType, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::norm(src1, normType, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_mask = NULL; + UMat mask; + double retval; + + const char* keywords[] = { "src1", "normType", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:norm", (char**)keywords, &pyobj_src1, &pyobj_normType, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::norm(src1, normType, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_mask = NULL; + Mat mask; + double retval; + + const char* keywords[] = { "src1", "src2", "normType", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:norm", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_normType, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::norm(src1, src2, normType, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_normType = NULL; + int normType=NORM_L2; + PyObject* pyobj_mask = NULL; + UMat mask; + double retval; + + const char* keywords[] = { "src1", "src2", "normType", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:norm", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_normType, &pyobj_mask) && + pyopencv_to_safe(pyobj_src1, src1, 
ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_normType, normType, ArgInfo("normType", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = cv::norm(src1, src2, normType, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("norm"); + + return NULL; +} + +static PyObject* pyopencv_cv_normalize(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_alpha = NULL; + double alpha=1; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_norm_type = NULL; + int norm_type=NORM_L2; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "src", "dst", "alpha", "beta", "norm_type", "dtype", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:normalize", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_beta, &pyobj_norm_type, &pyobj_dtype, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_norm_type, norm_type, ArgInfo("norm_type", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::normalize(src, dst, alpha, beta, norm_type, dtype, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_alpha = NULL; + double alpha=1; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_norm_type = NULL; + int norm_type=NORM_L2; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "src", "dst", "alpha", "beta", "norm_type", "dtype", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOOO:normalize", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_alpha, &pyobj_beta, &pyobj_norm_type, &pyobj_dtype, &pyobj_mask) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_norm_type, norm_type, ArgInfo("norm_type", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(cv::normalize(src, dst, alpha, beta, norm_type, dtype, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("normalize"); + + return NULL; +} + +static PyObject* pyopencv_cv_patchNaNs(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_a = NULL; + Mat a; + PyObject* pyobj_val = NULL; + double val=0; + + const char* keywords[] = { "a", "val", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:patchNaNs", (char**)keywords, &pyobj_a, &pyobj_val) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 1)) && + 
pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) ) + { + ERRWRAP2(cv::patchNaNs(a, val)); + return pyopencv_from(a); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_a = NULL; + UMat a; + PyObject* pyobj_val = NULL; + double val=0; + + const char* keywords[] = { "a", "val", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:patchNaNs", (char**)keywords, &pyobj_a, &pyobj_val) && + pyopencv_to_safe(pyobj_a, a, ArgInfo("a", 1)) && + pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) ) + { + ERRWRAP2(cv::patchNaNs(a, val)); + return pyopencv_from(a); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("patchNaNs"); + + return NULL; +} + +static PyObject* pyopencv_cv_perspectiveTransform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_m = NULL; + Mat m; + + const char* keywords[] = { "src", "m", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:perspectiveTransform", (char**)keywords, &pyobj_src, &pyobj_m, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::perspectiveTransform(src, dst, m)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_m = NULL; + UMat m; + + const char* keywords[] = { "src", "m", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:perspectiveTransform", (char**)keywords, &pyobj_src, &pyobj_m, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::perspectiveTransform(src, dst, m)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("perspectiveTransform"); + + return NULL; +} + +static PyObject* pyopencv_cv_phase(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_x = NULL; + Mat x; + PyObject* pyobj_y = NULL; + Mat y; + PyObject* pyobj_angle = NULL; + Mat angle; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "x", "y", "angle", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:phase", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_angle, &pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::phase(x, y, angle, angleInDegrees)); + return pyopencv_from(angle); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_x = NULL; + UMat x; + PyObject* pyobj_y = NULL; + UMat y; + PyObject* pyobj_angle = NULL; + UMat angle; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "x", "y", "angle", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:phase", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_angle, 
&pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::phase(x, y, angle, angleInDegrees)); + return pyopencv_from(angle); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("phase"); + + return NULL; +} + +static PyObject* pyopencv_cv_phaseCorrelate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_window = NULL; + Mat window; + double response; + Point2d retval; + + const char* keywords[] = { "src1", "src2", "window", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:phaseCorrelate", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_window) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_window, window, ArgInfo("window", 0)) ) + { + ERRWRAP2(retval = cv::phaseCorrelate(src1, src2, window, &response)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(response)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_window = NULL; + UMat window; + double response; + Point2d retval; + + const char* keywords[] = { "src1", "src2", "window", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:phaseCorrelate", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_window) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_window, window, ArgInfo("window", 0)) ) + { + ERRWRAP2(retval = cv::phaseCorrelate(src1, src2, window, &response)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(response)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("phaseCorrelate"); + + return NULL; +} + +static PyObject* pyopencv_cv_pointPolygonTest(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_contour = NULL; + Mat contour; + PyObject* pyobj_pt = NULL; + Point2f pt; + PyObject* pyobj_measureDist = NULL; + bool measureDist=0; + double retval; + + const char* keywords[] = { "contour", "pt", "measureDist", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:pointPolygonTest", (char**)keywords, &pyobj_contour, &pyobj_pt, &pyobj_measureDist) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_pt, pt, ArgInfo("pt", 0)) && + pyopencv_to_safe(pyobj_measureDist, measureDist, ArgInfo("measureDist", 0)) ) + { + ERRWRAP2(retval = cv::pointPolygonTest(contour, pt, measureDist)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_contour = NULL; + UMat contour; + PyObject* pyobj_pt = NULL; + Point2f pt; + PyObject* pyobj_measureDist = NULL; + bool measureDist=0; + double retval; + + const char* keywords[] = { "contour", "pt", "measureDist", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:pointPolygonTest", (char**)keywords, &pyobj_contour, &pyobj_pt, &pyobj_measureDist) && + 
pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 0)) && + pyopencv_to_safe(pyobj_pt, pt, ArgInfo("pt", 0)) && + pyopencv_to_safe(pyobj_measureDist, measureDist, ArgInfo("measureDist", 0)) ) + { + ERRWRAP2(retval = cv::pointPolygonTest(contour, pt, measureDist)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("pointPolygonTest"); + + return NULL; +} + +static PyObject* pyopencv_cv_polarToCart(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_magnitude = NULL; + Mat magnitude; + PyObject* pyobj_angle = NULL; + Mat angle; + PyObject* pyobj_x = NULL; + Mat x; + PyObject* pyobj_y = NULL; + Mat y; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "magnitude", "angle", "x", "y", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:polarToCart", (char**)keywords, &pyobj_magnitude, &pyobj_angle, &pyobj_x, &pyobj_y, &pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 1)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::polarToCart(magnitude, angle, x, y, angleInDegrees)); + return Py_BuildValue("(NN)", pyopencv_from(x), pyopencv_from(y)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_magnitude = NULL; + UMat magnitude; + PyObject* pyobj_angle = NULL; + UMat angle; + PyObject* pyobj_x = NULL; + UMat x; + PyObject* pyobj_y = NULL; + UMat y; + PyObject* pyobj_angleInDegrees = NULL; + bool angleInDegrees=false; + + const char* keywords[] = { "magnitude", "angle", "x", "y", "angleInDegrees", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:polarToCart", (char**)keywords, &pyobj_magnitude, &pyobj_angle, &pyobj_x, &pyobj_y, &pyobj_angleInDegrees) && + pyopencv_to_safe(pyobj_magnitude, magnitude, ArgInfo("magnitude", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 1)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 1)) && + pyopencv_to_safe(pyobj_angleInDegrees, angleInDegrees, ArgInfo("angleInDegrees", 0)) ) + { + ERRWRAP2(cv::polarToCart(magnitude, angle, x, y, angleInDegrees)); + return Py_BuildValue("(NN)", pyopencv_from(x), pyopencv_from(y)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("polarToCart"); + + return NULL; +} + +static PyObject* pyopencv_cv_polylines(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_pts = NULL; + vector_Mat pts; + PyObject* pyobj_isClosed = NULL; + bool isClosed=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pts", "isClosed", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:polylines", (char**)keywords, &pyobj_img, &pyobj_pts, &pyobj_isClosed, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + 
pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pts, pts, ArgInfo("pts", 0)) && + pyopencv_to_safe(pyobj_isClosed, isClosed, ArgInfo("isClosed", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::polylines(img, pts, isClosed, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_pts = NULL; + vector_UMat pts; + PyObject* pyobj_isClosed = NULL; + bool isClosed=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pts", "isClosed", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:polylines", (char**)keywords, &pyobj_img, &pyobj_pts, &pyobj_isClosed, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pts, pts, ArgInfo("pts", 0)) && + pyopencv_to_safe(pyobj_isClosed, isClosed, ArgInfo("isClosed", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::polylines(img, pts, isClosed, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("polylines"); + + return NULL; +} + +static PyObject* pyopencv_cv_pow(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_power = NULL; + double power=0; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "power", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:pow", (char**)keywords, &pyobj_src, &pyobj_power, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_power, power, ArgInfo("power", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::pow(src, power, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_power = NULL; + double power=0; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "power", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:pow", (char**)keywords, &pyobj_src, &pyobj_power, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_power, power, ArgInfo("power", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::pow(src, power, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("pow"); + + return NULL; +} + +static PyObject* pyopencv_cv_preCornerDetect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + 
pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:preCornerDetect", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::preCornerDetect(src, dst, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ksize = NULL; + int ksize=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ksize", "dst", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:preCornerDetect", (char**)keywords, &pyobj_src, &pyobj_ksize, &pyobj_dst, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::preCornerDetect(src, dst, ksize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("preCornerDetect"); + + return NULL; +} + +static PyObject* pyopencv_cv_putText(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_text = NULL; + String text; + PyObject* pyobj_org = NULL; + Point org; + PyObject* pyobj_fontFace = NULL; + int fontFace=0; + PyObject* pyobj_fontScale = NULL; + double fontScale=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_bottomLeftOrigin = NULL; + bool bottomLeftOrigin=false; + + const char* keywords[] = { "img", "text", "org", "fontFace", "fontScale", "color", "thickness", "lineType", "bottomLeftOrigin", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|OOO:putText", (char**)keywords, &pyobj_img, &pyobj_text, &pyobj_org, &pyobj_fontFace, &pyobj_fontScale, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_bottomLeftOrigin) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_text, text, ArgInfo("text", 0)) && + pyopencv_to_safe(pyobj_org, org, ArgInfo("org", 0)) && + pyopencv_to_safe(pyobj_fontFace, fontFace, ArgInfo("fontFace", 0)) && + pyopencv_to_safe(pyobj_fontScale, fontScale, ArgInfo("fontScale", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_bottomLeftOrigin, bottomLeftOrigin, ArgInfo("bottomLeftOrigin", 0)) ) + { + ERRWRAP2(cv::putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin)); + return 
pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_text = NULL; + String text; + PyObject* pyobj_org = NULL; + Point org; + PyObject* pyobj_fontFace = NULL; + int fontFace=0; + PyObject* pyobj_fontScale = NULL; + double fontScale=0; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_bottomLeftOrigin = NULL; + bool bottomLeftOrigin=false; + + const char* keywords[] = { "img", "text", "org", "fontFace", "fontScale", "color", "thickness", "lineType", "bottomLeftOrigin", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO|OOO:putText", (char**)keywords, &pyobj_img, &pyobj_text, &pyobj_org, &pyobj_fontFace, &pyobj_fontScale, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_bottomLeftOrigin) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_text, text, ArgInfo("text", 0)) && + pyopencv_to_safe(pyobj_org, org, ArgInfo("org", 0)) && + pyopencv_to_safe(pyobj_fontFace, fontFace, ArgInfo("fontFace", 0)) && + pyopencv_to_safe(pyobj_fontScale, fontScale, ArgInfo("fontScale", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_bottomLeftOrigin, bottomLeftOrigin, ArgInfo("bottomLeftOrigin", 0)) ) + { + ERRWRAP2(cv::putText(img, text, org, fontFace, fontScale, color, thickness, lineType, bottomLeftOrigin)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("putText"); + + return NULL; +} + +static PyObject* pyopencv_cv_pyrDown(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dstsize = NULL; + Size dstsize; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dst", "dstsize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:pyrDown", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_dstsize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dstsize, dstsize, ArgInfo("dstsize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::pyrDown(src, dst, dstsize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dstsize = NULL; + Size dstsize; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dst", "dstsize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:pyrDown", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_dstsize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dstsize, dstsize, ArgInfo("dstsize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::pyrDown(src, dst, dstsize, 
borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("pyrDown"); + + return NULL; +} + +static PyObject* pyopencv_cv_pyrMeanShiftFiltering(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_sp = NULL; + double sp=0; + PyObject* pyobj_sr = NULL; + double sr=0; + PyObject* pyobj_maxLevel = NULL; + int maxLevel=1; + PyObject* pyobj_termcrit = NULL; + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1); + + const char* keywords[] = { "src", "sp", "sr", "dst", "maxLevel", "termcrit", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:pyrMeanShiftFiltering", (char**)keywords, &pyobj_src, &pyobj_sp, &pyobj_sr, &pyobj_dst, &pyobj_maxLevel, &pyobj_termcrit) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_sp, sp, ArgInfo("sp", 0)) && + pyopencv_to_safe(pyobj_sr, sr, ArgInfo("sr", 0)) && + pyopencv_to_safe(pyobj_maxLevel, maxLevel, ArgInfo("maxLevel", 0)) && + pyopencv_to_safe(pyobj_termcrit, termcrit, ArgInfo("termcrit", 0)) ) + { + ERRWRAP2(cv::pyrMeanShiftFiltering(src, dst, sp, sr, maxLevel, termcrit)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_sp = NULL; + double sp=0; + PyObject* pyobj_sr = NULL; + double sr=0; + PyObject* pyobj_maxLevel = NULL; + int maxLevel=1; + PyObject* pyobj_termcrit = NULL; + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1); + + const char* keywords[] = { "src", "sp", "sr", "dst", "maxLevel", "termcrit", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:pyrMeanShiftFiltering", (char**)keywords, &pyobj_src, &pyobj_sp, &pyobj_sr, &pyobj_dst, &pyobj_maxLevel, &pyobj_termcrit) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_sp, sp, ArgInfo("sp", 0)) && + pyopencv_to_safe(pyobj_sr, sr, ArgInfo("sr", 0)) && + pyopencv_to_safe(pyobj_maxLevel, maxLevel, ArgInfo("maxLevel", 0)) && + pyopencv_to_safe(pyobj_termcrit, termcrit, ArgInfo("termcrit", 0)) ) + { + ERRWRAP2(cv::pyrMeanShiftFiltering(src, dst, sp, sr, maxLevel, termcrit)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("pyrMeanShiftFiltering"); + + return NULL; +} + +static PyObject* pyopencv_cv_pyrUp(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dstsize = NULL; + Size dstsize; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dst", "dstsize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:pyrUp", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_dstsize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dstsize, dstsize, ArgInfo("dstsize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + 
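/* All conversions succeeded: ERRWRAP2 runs the native call; in the upstream cv2 generator this macro releases the GIL around the call and turns a thrown cv::Exception into a Python error. */ + 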
ERRWRAP2(cv::pyrUp(src, dst, dstsize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dstsize = NULL; + Size dstsize; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dst", "dstsize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOO:pyrUp", (char**)keywords, &pyobj_src, &pyobj_dst, &pyobj_dstsize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dstsize, dstsize, ArgInfo("dstsize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::pyrUp(src, dst, dstsize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("pyrUp"); + + return NULL; +} + +static PyObject* pyopencv_cv_randShuffle(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_iterFactor = NULL; + double iterFactor=1.; + + const char* keywords[] = { "dst", "iterFactor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:randShuffle", (char**)keywords, &pyobj_dst, &pyobj_iterFactor) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_iterFactor, iterFactor, ArgInfo("iterFactor", 0)) ) + { + ERRWRAP2(cv::randShuffle(dst, iterFactor, 0)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_iterFactor = NULL; + double iterFactor=1.; + + const char* keywords[] = { "dst", "iterFactor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:randShuffle", (char**)keywords, &pyobj_dst, &pyobj_iterFactor) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_iterFactor, iterFactor, ArgInfo("iterFactor", 0)) ) + { + ERRWRAP2(cv::randShuffle(dst, iterFactor, 0)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("randShuffle"); + + return NULL; +} + +static PyObject* pyopencv_cv_randn(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mean = NULL; + Mat mean; + PyObject* pyobj_stddev = NULL; + Mat stddev; + + const char* keywords[] = { "dst", "mean", "stddev", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:randn", (char**)keywords, &pyobj_dst, &pyobj_mean, &pyobj_stddev) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_stddev, stddev, ArgInfo("stddev", 0)) ) + { + ERRWRAP2(cv::randn(dst, mean, stddev)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mean = NULL; + UMat mean; + PyObject* pyobj_stddev = NULL; + UMat stddev; + + const char* keywords[] = { "dst", "mean", "stddev", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:randn", (char**)keywords, &pyobj_dst, &pyobj_mean, &pyobj_stddev) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + 
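/* ArgInfo's second field marks output arguments (1) versus plain inputs (0), so pyopencv_to_safe() can treat writable outputs differently from read-only inputs. */ + 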
pyopencv_to_safe(pyobj_mean, mean, ArgInfo("mean", 0)) && + pyopencv_to_safe(pyobj_stddev, stddev, ArgInfo("stddev", 0)) ) + { + ERRWRAP2(cv::randn(dst, mean, stddev)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("randn"); + + return NULL; +} + +static PyObject* pyopencv_cv_randu(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_low = NULL; + Mat low; + PyObject* pyobj_high = NULL; + Mat high; + + const char* keywords[] = { "dst", "low", "high", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:randu", (char**)keywords, &pyobj_dst, &pyobj_low, &pyobj_high) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_low, low, ArgInfo("low", 0)) && + pyopencv_to_safe(pyobj_high, high, ArgInfo("high", 0)) ) + { + ERRWRAP2(cv::randu(dst, low, high)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_low = NULL; + UMat low; + PyObject* pyobj_high = NULL; + UMat high; + + const char* keywords[] = { "dst", "low", "high", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:randu", (char**)keywords, &pyobj_dst, &pyobj_low, &pyobj_high) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_low, low, ArgInfo("low", 0)) && + pyopencv_to_safe(pyobj_high, high, ArgInfo("high", 0)) ) + { + ERRWRAP2(cv::randu(dst, low, high)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("randu"); + + return NULL; +} + +static PyObject* pyopencv_cv_rectangle(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pt1", "pt2", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:rectangle", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::rectangle(img, pt1, pt2, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_pt1 = NULL; + Point pt1; + PyObject* pyobj_pt2 = NULL; + Point pt2; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "pt1", "pt2", "color", 
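/* the remaining keywords are optional: they correspond to the arguments after the '|' in the "OOOO|OOO" format string below */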
"thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:rectangle", (char**)keywords, &pyobj_img, &pyobj_pt1, &pyobj_pt2, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_pt1, pt1, ArgInfo("pt1", 0)) && + pyopencv_to_safe(pyobj_pt2, pt2, ArgInfo("pt2", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::rectangle(img, pt1, pt2, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + Mat img; + PyObject* pyobj_rec = NULL; + Rect rec; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "rec", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:rectangle", (char**)keywords, &pyobj_img, &pyobj_rec, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_rec, rec, ArgInfo("rec", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::rectangle(img, rec, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_img = NULL; + UMat img; + PyObject* pyobj_rec = NULL; + Rect rec; + PyObject* pyobj_color = NULL; + Scalar color; + PyObject* pyobj_thickness = NULL; + int thickness=1; + PyObject* pyobj_lineType = NULL; + int lineType=LINE_8; + PyObject* pyobj_shift = NULL; + int shift=0; + + const char* keywords[] = { "img", "rec", "color", "thickness", "lineType", "shift", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOO:rectangle", (char**)keywords, &pyobj_img, &pyobj_rec, &pyobj_color, &pyobj_thickness, &pyobj_lineType, &pyobj_shift) && + pyopencv_to_safe(pyobj_img, img, ArgInfo("img", 1)) && + pyopencv_to_safe(pyobj_rec, rec, ArgInfo("rec", 0)) && + pyopencv_to_safe(pyobj_color, color, ArgInfo("color", 0)) && + pyopencv_to_safe(pyobj_thickness, thickness, ArgInfo("thickness", 0)) && + pyopencv_to_safe(pyobj_lineType, lineType, ArgInfo("lineType", 0)) && + pyopencv_to_safe(pyobj_shift, shift, ArgInfo("shift", 0)) ) + { + ERRWRAP2(cv::rectangle(img, rec, color, thickness, lineType, shift)); + return pyopencv_from(img); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("rectangle"); + + return NULL; +} + +static PyObject* pyopencv_cv_reduce(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dim = NULL; + int dim=0; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src", 
"dim", "rtype", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:reduce", (char**)keywords, &pyobj_src, &pyobj_dim, &pyobj_rtype, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dim, dim, ArgInfo("dim", 0)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::reduce(src, dst, dim, rtype, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dim = NULL; + int dim=0; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src", "dim", "rtype", "dst", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:reduce", (char**)keywords, &pyobj_src, &pyobj_dim, &pyobj_rtype, &pyobj_dst, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dim, dim, ArgInfo("dim", 0)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::reduce(src, dst, dim, rtype, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("reduce"); + + return NULL; +} + +static PyObject* pyopencv_cv_remap(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_map1 = NULL; + Mat map1; + PyObject* pyobj_map2 = NULL; + Mat map2; + PyObject* pyobj_interpolation = NULL; + int interpolation=0; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "map1", "map2", "interpolation", "dst", "borderMode", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:remap", (char**)keywords, &pyobj_src, &pyobj_map1, &pyobj_map2, &pyobj_interpolation, &pyobj_dst, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_map1, map1, ArgInfo("map1", 0)) && + pyopencv_to_safe(pyobj_map2, map2, ArgInfo("map2", 0)) && + pyopencv_to_safe(pyobj_interpolation, interpolation, ArgInfo("interpolation", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::remap(src, dst, map1, map2, interpolation, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_map1 = NULL; + UMat map1; + PyObject* pyobj_map2 = NULL; + UMat map2; + PyObject* pyobj_interpolation = NULL; + int interpolation=0; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "map1", "map2", "interpolation", "dst", "borderMode", "borderValue", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOO:remap", (char**)keywords, &pyobj_src, &pyobj_map1, &pyobj_map2, &pyobj_interpolation, &pyobj_dst, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_map1, map1, ArgInfo("map1", 0)) && + pyopencv_to_safe(pyobj_map2, map2, ArgInfo("map2", 0)) && + pyopencv_to_safe(pyobj_interpolation, interpolation, ArgInfo("interpolation", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::remap(src, dst, map1, map2, interpolation, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("remap"); + + return NULL; +} + +static PyObject* pyopencv_cv_repeat(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_ny = NULL; + int ny=0; + PyObject* pyobj_nx = NULL; + int nx=0; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "ny", "nx", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:repeat", (char**)keywords, &pyobj_src, &pyobj_ny, &pyobj_nx, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_ny, ny, ArgInfo("ny", 0)) && + pyopencv_to_safe(pyobj_nx, nx, ArgInfo("nx", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::repeat(src, ny, nx, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_ny = NULL; + int ny=0; + PyObject* pyobj_nx = NULL; + int nx=0; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "ny", "nx", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:repeat", (char**)keywords, &pyobj_src, &pyobj_ny, &pyobj_nx, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_ny, ny, ArgInfo("ny", 0)) && + pyopencv_to_safe(pyobj_nx, nx, ArgInfo("nx", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::repeat(src, ny, nx, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("repeat"); + + return NULL; +} + +static PyObject* pyopencv_cv_resize(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_fx = NULL; + double fx=0; + PyObject* pyobj_fy = NULL; + double fy=0; + PyObject* pyobj_interpolation = NULL; + int interpolation=INTER_LINEAR; + + const char* keywords[] = { "src", "dsize", "dst", "fx", "fy", "interpolation", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOO:resize", (char**)keywords, &pyobj_src, &pyobj_dsize, &pyobj_dst, &pyobj_fx, &pyobj_fy, &pyobj_interpolation) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_fx, fx, ArgInfo("fx", 0)) && + pyopencv_to_safe(pyobj_fy, fy, ArgInfo("fy", 0)) && + 
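/* this block is the UMat variant of the overload: cv2.UMat arguments are converted here and, where OpenCL is available, execute through the transparent API */ + 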
pyopencv_to_safe(pyobj_interpolation, interpolation, ArgInfo("interpolation", 0)) ) + { + ERRWRAP2(cv::resize(src, dst, dsize, fx, fy, interpolation)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_fx = NULL; + double fx=0; + PyObject* pyobj_fy = NULL; + double fy=0; + PyObject* pyobj_interpolation = NULL; + int interpolation=INTER_LINEAR; + + const char* keywords[] = { "src", "dsize", "dst", "fx", "fy", "interpolation", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOOO:resize", (char**)keywords, &pyobj_src, &pyobj_dsize, &pyobj_dst, &pyobj_fx, &pyobj_fy, &pyobj_interpolation) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_fx, fx, ArgInfo("fx", 0)) && + pyopencv_to_safe(pyobj_fy, fy, ArgInfo("fy", 0)) && + pyopencv_to_safe(pyobj_interpolation, interpolation, ArgInfo("interpolation", 0)) ) + { + ERRWRAP2(cv::resize(src, dst, dsize, fx, fy, interpolation)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("resize"); + + return NULL; +} + +static PyObject* pyopencv_cv_rotate(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rotateCode = NULL; + int rotateCode=0; + + const char* keywords[] = { "src", "rotateCode", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:rotate", (char**)keywords, &pyobj_src, &pyobj_rotateCode, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rotateCode, rotateCode, ArgInfo("rotateCode", 0)) ) + { + ERRWRAP2(cv::rotate(src, dst, rotateCode)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rotateCode = NULL; + int rotateCode=0; + + const char* keywords[] = { "src", "rotateCode", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:rotate", (char**)keywords, &pyobj_src, &pyobj_rotateCode, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rotateCode, rotateCode, ArgInfo("rotateCode", 0)) ) + { + ERRWRAP2(cv::rotate(src, dst, rotateCode)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("rotate"); + + return NULL; +} + +static PyObject* pyopencv_cv_rotatedRectangleIntersection(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_rect1 = NULL; + RotatedRect rect1; + PyObject* pyobj_rect2 = NULL; + RotatedRect rect2; + PyObject* pyobj_intersectingRegion = NULL; + Mat intersectingRegion; + int retval; + + const char* keywords[] = { "rect1", "rect2", "intersectingRegion", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:rotatedRectangleIntersection", (char**)keywords, &pyobj_rect1, &pyobj_rect2, &pyobj_intersectingRegion) && + 
pyopencv_to_safe(pyobj_rect1, rect1, ArgInfo("rect1", 0)) && + pyopencv_to_safe(pyobj_rect2, rect2, ArgInfo("rect2", 0)) && + pyopencv_to_safe(pyobj_intersectingRegion, intersectingRegion, ArgInfo("intersectingRegion", 1)) ) + { + ERRWRAP2(retval = cv::rotatedRectangleIntersection(rect1, rect2, intersectingRegion)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(intersectingRegion)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rect1 = NULL; + RotatedRect rect1; + PyObject* pyobj_rect2 = NULL; + RotatedRect rect2; + PyObject* pyobj_intersectingRegion = NULL; + UMat intersectingRegion; + int retval; + + const char* keywords[] = { "rect1", "rect2", "intersectingRegion", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:rotatedRectangleIntersection", (char**)keywords, &pyobj_rect1, &pyobj_rect2, &pyobj_intersectingRegion) && + pyopencv_to_safe(pyobj_rect1, rect1, ArgInfo("rect1", 0)) && + pyopencv_to_safe(pyobj_rect2, rect2, ArgInfo("rect2", 0)) && + pyopencv_to_safe(pyobj_intersectingRegion, intersectingRegion, ArgInfo("intersectingRegion", 1)) ) + { + ERRWRAP2(retval = cv::rotatedRectangleIntersection(rect1, rect2, intersectingRegion)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(intersectingRegion)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("rotatedRectangleIntersection"); + + return NULL; +} + +static PyObject* pyopencv_cv_scaleAdd(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src1", "alpha", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:scaleAdd", (char**)keywords, &pyobj_src1, &pyobj_alpha, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::scaleAdd(src1, alpha, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src1", "alpha", "src2", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:scaleAdd", (char**)keywords, &pyobj_src1, &pyobj_alpha, &pyobj_src2, &pyobj_dst) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::scaleAdd(src1, alpha, src2, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("scaleAdd"); + + return NULL; +} + +static PyObject* pyopencv_cv_sepFilter2D(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_kernelX = NULL; + Mat kernelX; + 
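/* every parameter is held twice: a raw PyObject* slot filled by the parser and a native C++ value produced by pyopencv_to_safe(); the initializers mirror the defaults of the C++ signature */ + 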
PyObject* pyobj_kernelY = NULL; + Mat kernelY; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "kernelX", "kernelY", "dst", "anchor", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:sepFilter2D", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_kernelX, &pyobj_kernelY, &pyobj_dst, &pyobj_anchor, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_kernelX, kernelX, ArgInfo("kernelX", 0)) && + pyopencv_to_safe(pyobj_kernelY, kernelY, ArgInfo("kernelY", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::sepFilter2D(src, dst, ddepth, kernelX, kernelY, anchor, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_kernelX = NULL; + UMat kernelX; + PyObject* pyobj_kernelY = NULL; + UMat kernelY; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1,-1); + PyObject* pyobj_delta = NULL; + double delta=0; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "kernelX", "kernelY", "dst", "anchor", "delta", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|OOOO:sepFilter2D", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_kernelX, &pyobj_kernelY, &pyobj_dst, &pyobj_anchor, &pyobj_delta, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_kernelX, kernelX, ArgInfo("kernelX", 0)) && + pyopencv_to_safe(pyobj_kernelY, kernelY, ArgInfo("kernelY", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_delta, delta, ArgInfo("delta", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::sepFilter2D(src, dst, ddepth, kernelX, kernelY, anchor, delta, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sepFilter2D"); + + return NULL; +} + +static PyObject* pyopencv_cv_setIdentity(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_mtx = NULL; + Mat mtx; + PyObject* pyobj_s = NULL; + Scalar s=Scalar(1); + + const char* keywords[] = { "mtx", "s", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:setIdentity", (char**)keywords, &pyobj_mtx, &pyobj_s) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 1)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) ) + { + ERRWRAP2(cv::setIdentity(mtx, s)); + return pyopencv_from(mtx); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_mtx = NULL; + UMat mtx; + PyObject* pyobj_s = NULL; + Scalar s=Scalar(1); + + const char* keywords[] 
= { "mtx", "s", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:setIdentity", (char**)keywords, &pyobj_mtx, &pyobj_s) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 1)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) ) + { + ERRWRAP2(cv::setIdentity(mtx, s)); + return pyopencv_from(mtx); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("setIdentity"); + + return NULL; +} + +static PyObject* pyopencv_cv_setLogLevel(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_level = NULL; + int level=0; + int retval; + + const char* keywords[] = { "level", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setLogLevel", (char**)keywords, &pyobj_level) && + pyopencv_to_safe(pyobj_level, level, ArgInfo("level", 0)) ) + { + ERRWRAP2(retval = cv::setLogLevel(level)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_setNumThreads(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_nthreads = NULL; + int nthreads=0; + + const char* keywords[] = { "nthreads", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setNumThreads", (char**)keywords, &pyobj_nthreads) && + pyopencv_to_safe(pyobj_nthreads, nthreads, ArgInfo("nthreads", 0)) ) + { + ERRWRAP2(cv::setNumThreads(nthreads)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_setRNGSeed(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_seed = NULL; + int seed=0; + + const char* keywords[] = { "seed", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setRNGSeed", (char**)keywords, &pyobj_seed) && + pyopencv_to_safe(pyobj_seed, seed, ArgInfo("seed", 0)) ) + { + ERRWRAP2(cv::setRNGSeed(seed)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_setUseOpenVX(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_flag = NULL; + bool flag=0; + + const char* keywords[] = { "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setUseOpenVX", (char**)keywords, &pyobj_flag) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(cv::setUseOpenVX(flag)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_setUseOptimized(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_onoff = NULL; + bool onoff=0; + + const char* keywords[] = { "onoff", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setUseOptimized", (char**)keywords, &pyobj_onoff) && + pyopencv_to_safe(pyobj_onoff, onoff, ArgInfo("onoff", 0)) ) + { + ERRWRAP2(cv::setUseOptimized(onoff)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_solve(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=DECOMP_LU; + bool retval; + + const char* keywords[] = { "src1", "src2", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:solve", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, 
ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::solve(src1, src2, dst, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=DECOMP_LU; + bool retval; + + const char* keywords[] = { "src1", "src2", "dst", "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:solve", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_flags) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(retval = cv::solve(src1, src2, dst, flags)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("solve"); + + return NULL; +} + +static PyObject* pyopencv_cv_solveCubic(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_coeffs = NULL; + Mat coeffs; + PyObject* pyobj_roots = NULL; + Mat roots; + int retval; + + const char* keywords[] = { "coeffs", "roots", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:solveCubic", (char**)keywords, &pyobj_coeffs, &pyobj_roots) && + pyopencv_to_safe(pyobj_coeffs, coeffs, ArgInfo("coeffs", 0)) && + pyopencv_to_safe(pyobj_roots, roots, ArgInfo("roots", 1)) ) + { + ERRWRAP2(retval = cv::solveCubic(coeffs, roots)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(roots)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_coeffs = NULL; + UMat coeffs; + PyObject* pyobj_roots = NULL; + UMat roots; + int retval; + + const char* keywords[] = { "coeffs", "roots", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:solveCubic", (char**)keywords, &pyobj_coeffs, &pyobj_roots) && + pyopencv_to_safe(pyobj_coeffs, coeffs, ArgInfo("coeffs", 0)) && + pyopencv_to_safe(pyobj_roots, roots, ArgInfo("roots", 1)) ) + { + ERRWRAP2(retval = cv::solveCubic(coeffs, roots)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(roots)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("solveCubic"); + + return NULL; +} + +static PyObject* pyopencv_cv_solveLP(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_Func = NULL; + Mat Func; + PyObject* pyobj_Constr = NULL; + Mat Constr; + PyObject* pyobj_z = NULL; + Mat z; + int retval; + + const char* keywords[] = { "Func", "Constr", "z", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:solveLP", (char**)keywords, &pyobj_Func, &pyobj_Constr, &pyobj_z) && + pyopencv_to_safe(pyobj_Func, Func, ArgInfo("Func", 0)) && + pyopencv_to_safe(pyobj_Constr, Constr, ArgInfo("Constr", 0)) && + pyopencv_to_safe(pyobj_z, z, ArgInfo("z", 1)) ) + { + ERRWRAP2(retval = cv::solveLP(Func, Constr, z)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(z)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_Func = NULL; + UMat Func; + PyObject* pyobj_Constr = NULL; + UMat Constr; + PyObject* pyobj_z = NULL; + UMat z; + int retval; + + const char* 
keywords[] = { "Func", "Constr", "z", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:solveLP", (char**)keywords, &pyobj_Func, &pyobj_Constr, &pyobj_z) && + pyopencv_to_safe(pyobj_Func, Func, ArgInfo("Func", 0)) && + pyopencv_to_safe(pyobj_Constr, Constr, ArgInfo("Constr", 0)) && + pyopencv_to_safe(pyobj_z, z, ArgInfo("z", 1)) ) + { + ERRWRAP2(retval = cv::solveLP(Func, Constr, z)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(z)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("solveLP"); + + return NULL; +} + +static PyObject* pyopencv_cv_solvePoly(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_coeffs = NULL; + Mat coeffs; + PyObject* pyobj_roots = NULL; + Mat roots; + PyObject* pyobj_maxIters = NULL; + int maxIters=300; + double retval; + + const char* keywords[] = { "coeffs", "roots", "maxIters", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:solvePoly", (char**)keywords, &pyobj_coeffs, &pyobj_roots, &pyobj_maxIters) && + pyopencv_to_safe(pyobj_coeffs, coeffs, ArgInfo("coeffs", 0)) && + pyopencv_to_safe(pyobj_roots, roots, ArgInfo("roots", 1)) && + pyopencv_to_safe(pyobj_maxIters, maxIters, ArgInfo("maxIters", 0)) ) + { + ERRWRAP2(retval = cv::solvePoly(coeffs, roots, maxIters)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(roots)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_coeffs = NULL; + UMat coeffs; + PyObject* pyobj_roots = NULL; + UMat roots; + PyObject* pyobj_maxIters = NULL; + int maxIters=300; + double retval; + + const char* keywords[] = { "coeffs", "roots", "maxIters", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:solvePoly", (char**)keywords, &pyobj_coeffs, &pyobj_roots, &pyobj_maxIters) && + pyopencv_to_safe(pyobj_coeffs, coeffs, ArgInfo("coeffs", 0)) && + pyopencv_to_safe(pyobj_roots, roots, ArgInfo("roots", 1)) && + pyopencv_to_safe(pyobj_maxIters, maxIters, ArgInfo("maxIters", 0)) ) + { + ERRWRAP2(retval = cv::solvePoly(coeffs, roots, maxIters)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(roots)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("solvePoly"); + + return NULL; +} + +static PyObject* pyopencv_cv_sort(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:sort", (char**)keywords, &pyobj_src, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::sort(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:sort", (char**)keywords, &pyobj_src, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + 
pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::sort(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sort"); + + return NULL; +} + +static PyObject* pyopencv_cv_sortIdx(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:sortIdx", (char**)keywords, &pyobj_src, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::sortIdx(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:sortIdx", (char**)keywords, &pyobj_src, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::sortIdx(src, dst, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sortIdx"); + + return NULL; +} + +static PyObject* pyopencv_cv_spatialGradient(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dx = NULL; + Mat dx; + PyObject* pyobj_dy = NULL; + Mat dy; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dx", "dy", "ksize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:spatialGradient", (char**)keywords, &pyobj_src, &pyobj_dx, &pyobj_dy, &pyobj_ksize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 1)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::spatialGradient(src, dx, dy, ksize, borderType)); + return Py_BuildValue("(NN)", pyopencv_from(dx), pyopencv_from(dy)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dx = NULL; + UMat dx; + PyObject* pyobj_dy = NULL; + UMat dy; + PyObject* pyobj_ksize = NULL; + int ksize=3; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "dx", "dy", "ksize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:spatialGradient", (char**)keywords, &pyobj_src, &pyobj_dx, &pyobj_dy, &pyobj_ksize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 1)) && + pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 1)) && + pyopencv_to_safe(pyobj_ksize, ksize, 
ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::spatialGradient(src, dx, dy, ksize, borderType)); + return Py_BuildValue("(NN)", pyopencv_from(dx), pyopencv_from(dy)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("spatialGradient"); + + return NULL; +} + +static PyObject* pyopencv_cv_split(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_m = NULL; + Mat m; + PyObject* pyobj_mv = NULL; + vector_Mat mv; + + const char* keywords[] = { "m", "mv", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:split", (char**)keywords, &pyobj_m, &pyobj_mv) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_mv, mv, ArgInfo("mv", 1)) ) + { + ERRWRAP2(cv::split(m, mv)); + return pyopencv_from(mv); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + PyObject* pyobj_mv = NULL; + vector_UMat mv; + + const char* keywords[] = { "m", "mv", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:split", (char**)keywords, &pyobj_m, &pyobj_mv) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_mv, mv, ArgInfo("mv", 1)) ) + { + ERRWRAP2(cv::split(m, mv)); + return pyopencv_from(mv); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("split"); + + return NULL; +} + +static PyObject* pyopencv_cv_sqrBoxFilter(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1, -1); + PyObject* pyobj_normalize = NULL; + bool normalize=true; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "ksize", "dst", "anchor", "normalize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:sqrBoxFilter", (char**)keywords, &pyobj_src, &pyobj_ddepth, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_normalize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::sqrBoxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_ddepth = NULL; + int ddepth=0; + PyObject* pyobj_ksize = NULL; + Size ksize; + PyObject* pyobj_anchor = NULL; + Point anchor=Point(-1, -1); + PyObject* pyobj_normalize = NULL; + bool normalize=true; + PyObject* pyobj_borderType = NULL; + int borderType=BORDER_DEFAULT; + + const char* keywords[] = { "src", "ddepth", "ksize", "dst", "anchor", "normalize", "borderType", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:sqrBoxFilter", (char**)keywords, 
&pyobj_src, &pyobj_ddepth, &pyobj_ksize, &pyobj_dst, &pyobj_anchor, &pyobj_normalize, &pyobj_borderType) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_ddepth, ddepth, ArgInfo("ddepth", 0)) && + pyopencv_to_safe(pyobj_ksize, ksize, ArgInfo("ksize", 0)) && + pyopencv_to_safe(pyobj_anchor, anchor, ArgInfo("anchor", 0)) && + pyopencv_to_safe(pyobj_normalize, normalize, ArgInfo("normalize", 0)) && + pyopencv_to_safe(pyobj_borderType, borderType, ArgInfo("borderType", 0)) ) + { + ERRWRAP2(cv::sqrBoxFilter(src, dst, ddepth, ksize, anchor, normalize, borderType)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sqrBoxFilter"); + + return NULL; +} + +static PyObject* pyopencv_cv_sqrt(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:sqrt", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::sqrt(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:sqrt", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::sqrt(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sqrt"); + + return NULL; +} + +static PyObject* pyopencv_cv_subtract(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src1 = NULL; + Mat src1; + PyObject* pyobj_src2 = NULL; + Mat src2; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "mask", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:subtract", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::subtract(src1, src2, dst, mask, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src1 = NULL; + UMat src1; + PyObject* pyobj_src2 = NULL; + UMat src2; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_dtype = NULL; + int dtype=-1; + + const char* keywords[] = { "src1", "src2", "dst", "mask", "dtype", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OOO:subtract", (char**)keywords, &pyobj_src1, &pyobj_src2, &pyobj_dst, &pyobj_mask, &pyobj_dtype) && + pyopencv_to_safe(pyobj_src1, src1, ArgInfo("src1", 0)) && + 
pyopencv_to_safe(pyobj_src2, src2, ArgInfo("src2", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) ) + { + ERRWRAP2(cv::subtract(src1, src2, dst, mask, dtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("subtract"); + + return NULL; +} + +static PyObject* pyopencv_cv_sumElems(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + Scalar retval; + + const char* keywords[] = { "src", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:sumElems", (char**)keywords, &pyobj_src) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) ) + { + ERRWRAP2(retval = cv::sum(src)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + Scalar retval; + + const char* keywords[] = { "src", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:sumElems", (char**)keywords, &pyobj_src) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) ) + { + ERRWRAP2(retval = cv::sum(src)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("sumElems"); + + return NULL; +} + +static PyObject* pyopencv_cv_threshold(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_thresh = NULL; + double thresh=0; + PyObject* pyobj_maxval = NULL; + double maxval=0; + PyObject* pyobj_type = NULL; + int type=0; + double retval; + + const char* keywords[] = { "src", "thresh", "maxval", "type", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:threshold", (char**)keywords, &pyobj_src, &pyobj_thresh, &pyobj_maxval, &pyobj_type, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_thresh, thresh, ArgInfo("thresh", 0)) && + pyopencv_to_safe(pyobj_maxval, maxval, ArgInfo("maxval", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(retval = cv::threshold(src, dst, thresh, maxval, type)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_thresh = NULL; + double thresh=0; + PyObject* pyobj_maxval = NULL; + double maxval=0; + PyObject* pyobj_type = NULL; + int type=0; + double retval; + + const char* keywords[] = { "src", "thresh", "maxval", "type", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:threshold", (char**)keywords, &pyobj_src, &pyobj_thresh, &pyobj_maxval, &pyobj_type, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_thresh, thresh, ArgInfo("thresh", 0)) && + pyopencv_to_safe(pyobj_maxval, maxval, ArgInfo("maxval", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(retval = cv::threshold(src, dst, thresh, maxval, type)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + 
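/* this overload did not match; the errors recorded below let pyRaiseCVOverloadException() report every signature that was tried */ + 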
pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("threshold"); + + return NULL; +} + +static PyObject* pyopencv_cv_trace(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_mtx = NULL; + Mat mtx; + Scalar retval; + + const char* keywords[] = { "mtx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:trace", (char**)keywords, &pyobj_mtx) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 0)) ) + { + ERRWRAP2(retval = cv::trace(mtx)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_mtx = NULL; + UMat mtx; + Scalar retval; + + const char* keywords[] = { "mtx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:trace", (char**)keywords, &pyobj_mtx) && + pyopencv_to_safe(pyobj_mtx, mtx, ArgInfo("mtx", 0)) ) + { + ERRWRAP2(retval = cv::trace(mtx)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("trace"); + + return NULL; +} + +static PyObject* pyopencv_cv_transform(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_m = NULL; + Mat m; + + const char* keywords[] = { "src", "m", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:transform", (char**)keywords, &pyobj_src, &pyobj_m, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::transform(src, dst, m)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_m = NULL; + UMat m; + + const char* keywords[] = { "src", "m", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:transform", (char**)keywords, &pyobj_src, &pyobj_m, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::transform(src, dst, m)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("transform"); + + return NULL; +} + +static PyObject* pyopencv_cv_transpose(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:transpose", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::transpose(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:transpose", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::transpose(src, dst)); 
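+ /* dst was written in place; pyopencv_from() converts it back to a Python object (a numpy array for Mat results) */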
+ return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("transpose"); + + return NULL; +} + +static PyObject* pyopencv_cv_useOpenVX(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::useOpenVX()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_useOptimized(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::useOptimized()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_vconcat(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + vector_Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:vconcat", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::vconcat(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + vector_UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:vconcat", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(cv::vconcat(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("vconcat"); + + return NULL; +} + +static PyObject* pyopencv_cv_warpAffine(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_M = NULL; + Mat M; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_flags = NULL; + int flags=INTER_LINEAR; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "M", "dsize", "dst", "flags", "borderMode", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:warpAffine", (char**)keywords, &pyobj_src, &pyobj_M, &pyobj_dsize, &pyobj_dst, &pyobj_flags, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::warpAffine(src, dst, M, dsize, flags, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_M = NULL; + UMat M; + PyObject* pyobj_dsize = NULL; + Size dsize; 
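+            // The defaults below mirror the C++ cv::warpAffine signature (INTER_LINEAR, BORDER_CONSTANT); the '|' in the parse format string further down makes dst, flags, borderMode and borderValue optional from Python.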
+ PyObject* pyobj_flags = NULL; + int flags=INTER_LINEAR; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "M", "dsize", "dst", "flags", "borderMode", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:warpAffine", (char**)keywords, &pyobj_src, &pyobj_M, &pyobj_dsize, &pyobj_dst, &pyobj_flags, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::warpAffine(src, dst, M, dsize, flags, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("warpAffine"); + + return NULL; +} + +static PyObject* pyopencv_cv_warpPerspective(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_M = NULL; + Mat M; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_flags = NULL; + int flags=INTER_LINEAR; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "M", "dsize", "dst", "flags", "borderMode", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:warpPerspective", (char**)keywords, &pyobj_src, &pyobj_M, &pyobj_dsize, &pyobj_dst, &pyobj_flags, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::warpPerspective(src, dst, M, dsize, flags, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_M = NULL; + UMat M; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_flags = NULL; + int flags=INTER_LINEAR; + PyObject* pyobj_borderMode = NULL; + int borderMode=BORDER_CONSTANT; + PyObject* pyobj_borderValue = NULL; + Scalar borderValue; + + const char* keywords[] = { "src", "M", "dsize", "dst", "flags", "borderMode", "borderValue", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:warpPerspective", (char**)keywords, &pyobj_src, &pyobj_M, &pyobj_dsize, &pyobj_dst, &pyobj_flags, &pyobj_borderMode, &pyobj_borderValue) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_M, M, ArgInfo("M", 0)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_flags, flags, 
ArgInfo("flags", 0)) && + pyopencv_to_safe(pyobj_borderMode, borderMode, ArgInfo("borderMode", 0)) && + pyopencv_to_safe(pyobj_borderValue, borderValue, ArgInfo("borderValue", 0)) ) + { + ERRWRAP2(cv::warpPerspective(src, dst, M, dsize, flags, borderMode, borderValue)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("warpPerspective"); + + return NULL; +} + +static PyObject* pyopencv_cv_warpPolar(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_maxRadius = NULL; + double maxRadius=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dsize", "center", "maxRadius", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:warpPolar", (char**)keywords, &pyobj_src, &pyobj_dsize, &pyobj_center, &pyobj_maxRadius, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::warpPolar(src, dst, dsize, center, maxRadius, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_dsize = NULL; + Size dsize; + PyObject* pyobj_center = NULL; + Point2f center; + PyObject* pyobj_maxRadius = NULL; + double maxRadius=0; + PyObject* pyobj_flags = NULL; + int flags=0; + + const char* keywords[] = { "src", "dsize", "center", "maxRadius", "flags", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:warpPolar", (char**)keywords, &pyobj_src, &pyobj_dsize, &pyobj_center, &pyobj_maxRadius, &pyobj_flags, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_dsize, dsize, ArgInfo("dsize", 0)) && + pyopencv_to_safe(pyobj_center, center, ArgInfo("center", 0)) && + pyopencv_to_safe(pyobj_maxRadius, maxRadius, ArgInfo("maxRadius", 0)) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + ERRWRAP2(cv::warpPolar(src, dst, dsize, center, maxRadius, flags)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("warpPolar"); + + return NULL; +} + +static PyObject* pyopencv_cv_watershed(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_markers = NULL; + Mat markers; + + const char* keywords[] = { "image", "markers", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:watershed", (char**)keywords, &pyobj_image, &pyobj_markers) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_markers, markers, ArgInfo("markers", 1)) ) + { + ERRWRAP2(cv::watershed(image, markers)); + return pyopencv_from(markers); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* 
pyobj_image = NULL; + UMat image; + PyObject* pyobj_markers = NULL; + UMat markers; + + const char* keywords[] = { "image", "markers", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:watershed", (char**)keywords, &pyobj_image, &pyobj_markers) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_markers, markers, ArgInfo("markers", 1)) ) + { + ERRWRAP2(cv::watershed(image, markers)); + return pyopencv_from(markers); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("watershed"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_Event_elapsedTime(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_start = NULL; + Event start; + PyObject* pyobj_end = NULL; + Event end; + float retval; + + const char* keywords[] = { "start", "end", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:Event_elapsedTime", (char**)keywords, &pyobj_start, &pyobj_end) && + pyopencv_to_safe(pyobj_start, start, ArgInfo("start", 0)) && + pyopencv_to_safe(pyobj_end, end, ArgInfo("end", 0)) ) + { + ERRWRAP2(retval = cv::cuda::Event::elapsedTime(start, end)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_GpuMat_defaultAllocator(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + GpuMat::Allocator* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::GpuMat::defaultAllocator()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_GpuMat_setDefaultAllocator(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator; + + const char* keywords[] = { "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GpuMat_setDefaultAllocator", (char**)keywords, &pyobj_allocator) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + ERRWRAP2(cv::cuda::GpuMat::setDefaultAllocator(allocator)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_Stream_Null(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + Stream retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::Stream::Null()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_has(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_has", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::has(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasBin(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasBin", (char**)keywords, &pyobj_major, 
&pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasBin(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasEqualOrGreater(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasEqualOrGreater", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreater(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasEqualOrGreaterBin(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasEqualOrGreaterBin", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreaterBin(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasEqualOrGreaterPtx(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasEqualOrGreaterPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreaterPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasEqualOrLessPtx(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasEqualOrLessPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrLessPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_TargetArchs_hasPtx(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:TargetArchs_hasPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + 
pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_createContinuous(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(3); + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + Mat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:createContinuous", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::createContinuous(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:createContinuous", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::createContinuous(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + UMat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:createContinuous", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::createContinuous(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("createContinuous"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_ensureSizeIsEnough(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(3); + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + Mat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:ensureSizeIsEnough", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && 
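+            // ArgInfo's second field is the output-argument flag: "arr" (flag 1) may be omitted by the Python caller and is handed back via pyopencv_from below.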
+ pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::ensureSizeIsEnough(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:ensureSizeIsEnough", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::ensureSizeIsEnough(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_arr = NULL; + UMat arr; + + const char* keywords[] = { "rows", "cols", "type", "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:ensureSizeIsEnough", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_arr) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 1)) ) + { + ERRWRAP2(cv::cuda::ensureSizeIsEnough(rows, cols, type, arr)); + return pyopencv_from(arr); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("ensureSizeIsEnough"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_getCudaEnabledDeviceCount(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::getCudaEnabledDeviceCount()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_getDevice(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::getDevice()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_printCudaDeviceInfo(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_device = NULL; + int device=0; + + const char* keywords[] = { "device", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:printCudaDeviceInfo", (char**)keywords, &pyobj_device) && + pyopencv_to_safe(pyobj_device, device, ArgInfo("device", 0)) ) + { + ERRWRAP2(cv::cuda::printCudaDeviceInfo(device)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_printShortCudaDeviceInfo(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_device = NULL; + int device=0; + + const char* keywords[] = { "device", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:printShortCudaDeviceInfo", (char**)keywords, &pyobj_device) && + pyopencv_to_safe(pyobj_device, device, ArgInfo("device", 0)) ) + { + 
ERRWRAP2(cv::cuda::printShortCudaDeviceInfo(device)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_registerPageLocked(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_m = NULL; + Mat m; + + const char* keywords[] = { "m", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:registerPageLocked", (char**)keywords, &pyobj_m) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::cuda::registerPageLocked(m)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_resetDevice(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(cv::cuda::resetDevice()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_setBufferPoolConfig(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_deviceId = NULL; + int deviceId=0; + PyObject* pyobj_stackSize = NULL; + size_t stackSize=0; + PyObject* pyobj_stackCount = NULL; + int stackCount=0; + + const char* keywords[] = { "deviceId", "stackSize", "stackCount", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:setBufferPoolConfig", (char**)keywords, &pyobj_deviceId, &pyobj_stackSize, &pyobj_stackCount) && + pyopencv_to_safe(pyobj_deviceId, deviceId, ArgInfo("deviceId", 0)) && + pyopencv_to_safe(pyobj_stackSize, stackSize, ArgInfo("stackSize", 0)) && + pyopencv_to_safe(pyobj_stackCount, stackCount, ArgInfo("stackCount", 0)) ) + { + ERRWRAP2(cv::cuda::setBufferPoolConfig(deviceId, stackSize, stackCount)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_setBufferPoolUsage(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_on = NULL; + bool on=0; + + const char* keywords[] = { "on", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setBufferPoolUsage", (char**)keywords, &pyobj_on) && + pyopencv_to_safe(pyobj_on, on, ArgInfo("on", 0)) ) + { + ERRWRAP2(cv::cuda::setBufferPoolUsage(on)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_setDevice(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_device = NULL; + int device=0; + + const char* keywords[] = { "device", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setDevice", (char**)keywords, &pyobj_device) && + pyopencv_to_safe(pyobj_device, device, ArgInfo("device", 0)) ) + { + ERRWRAP2(cv::cuda::setDevice(device)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_unregisterPageLocked(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_m = NULL; + Mat m; + + const char* keywords[] = { "m", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:unregisterPageLocked", (char**)keywords, &pyobj_m) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + ERRWRAP2(cv::cuda::unregisterPageLocked(m)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_ipp_getIppVersion(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ipp; + + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ipp::getIppVersion()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ipp_setUseIPP(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace 
cv::ipp; + + PyObject* pyobj_flag = NULL; + bool flag=0; + + const char* keywords[] = { "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setUseIPP", (char**)keywords, &pyobj_flag) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(cv::ipp::setUseIPP(flag)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_ipp_setUseIPP_NotExact(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ipp; + + PyObject* pyobj_flag = NULL; + bool flag=0; + + const char* keywords[] = { "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setUseIPP_NotExact", (char**)keywords, &pyobj_flag) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(cv::ipp::setUseIPP_NotExact(flag)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_ipp_useIPP(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ipp; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ipp::useIPP()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ipp_useIPP_NotExact(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ipp; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ipp::useIPP_NotExact()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_Device_getDefault(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + Device retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::Device::getDefault()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_finish(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(cv::ocl::finish()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_haveAmdBlas(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::haveAmdBlas()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_haveAmdFft(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::haveAmdFft()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_haveOpenCL(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::haveOpenCL()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_setUseOpenCL(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + PyObject* pyobj_flag = NULL; + bool flag=0; + + const char* keywords[] = { "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:setUseOpenCL", (char**)keywords, &pyobj_flag) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(cv::ocl::setUseOpenCL(flag)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_useOpenCL(PyObject* , PyObject* py_args, PyObject* kw) 
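+// Nullary binding pattern: the size checks below accept the call only when no positional or keyword arguments were passed.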
+{ + using namespace cv::ocl; + + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::useOpenCL()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_parallel_setParallelForBackend(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::parallel; + + PyObject* pyobj_backendName = NULL; + std::string backendName; + PyObject* pyobj_propagateNumThreads = NULL; + bool propagateNumThreads=true; + bool retval; + + const char* keywords[] = { "backendName", "propagateNumThreads", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:setParallelForBackend", (char**)keywords, &pyobj_backendName, &pyobj_propagateNumThreads) && + pyopencv_to_safe(pyobj_backendName, backendName, ArgInfo("backendName", 0)) && + pyopencv_to_safe(pyobj_propagateNumThreads, propagateNumThreads, ArgInfo("propagateNumThreads", 0)) ) + { + ERRWRAP2(retval = cv::parallel::setParallelForBackend(backendName, propagateNumThreads)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_samples_addSamplesDataSearchPath(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::samples; + + PyObject* pyobj_path = NULL; + String path; + + const char* keywords[] = { "path", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:addSamplesDataSearchPath", (char**)keywords, &pyobj_path) && + pyopencv_to_safe(pyobj_path, path, ArgInfo("path", 0)) ) + { + ERRWRAP2(cv::samples::addSamplesDataSearchPath(path)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_samples_addSamplesDataSearchSubDirectory(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::samples; + + PyObject* pyobj_subdir = NULL; + String subdir; + + const char* keywords[] = { "subdir", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:addSamplesDataSearchSubDirectory", (char**)keywords, &pyobj_subdir) && + pyopencv_to_safe(pyobj_subdir, subdir, ArgInfo("subdir", 0)) ) + { + ERRWRAP2(cv::samples::addSamplesDataSearchSubDirectory(subdir)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_samples_findFile(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::samples; + + PyObject* pyobj_relative_path = NULL; + String relative_path; + PyObject* pyobj_required = NULL; + bool required=true; + PyObject* pyobj_silentMode = NULL; + bool silentMode=false; + cv::String retval; + + const char* keywords[] = { "relative_path", "required", "silentMode", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:findFile", (char**)keywords, &pyobj_relative_path, &pyobj_required, &pyobj_silentMode) && + pyopencv_to_safe(pyobj_relative_path, relative_path, ArgInfo("relative_path", 0)) && + pyopencv_to_safe(pyobj_required, required, ArgInfo("required", 0)) && + pyopencv_to_safe(pyobj_silentMode, silentMode, ArgInfo("silentMode", 0)) ) + { + ERRWRAP2(retval = cv::samples::findFile(relative_path, required, silentMode)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_samples_findFileOrKeep(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::samples; + + PyObject* pyobj_relative_path = NULL; + String relative_path; + PyObject* pyobj_silentMode = NULL; + bool silentMode=false; + cv::String retval; + + const char* keywords[] = { "relative_path", "silentMode", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:findFileOrKeep", (char**)keywords, &pyobj_relative_path, 
&pyobj_silentMode) && + pyopencv_to_safe(pyobj_relative_path, relative_path, ArgInfo("relative_path", 0)) && + pyopencv_to_safe(pyobj_silentMode, silentMode, ArgInfo("silentMode", 0)) ) + { + ERRWRAP2(retval = cv::samples::findFileOrKeep(relative_path, silentMode)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpBool(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + bool argument=0; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpBool", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpBool(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpCString(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + char* argument=(char*)""; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "s:dumpCString", (char**)keywords, &argument) ) + { + ERRWRAP2(retval = cv::utils::dumpCString(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpDouble(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + double argument=0; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpDouble", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpDouble(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpFloat(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + float argument=0.f; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpFloat", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpFloat(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpInputArray(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_argument = NULL; + Mat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputArray(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_argument = NULL; + UMat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputArray(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dumpInputArray"); + + return NULL; +} + +static PyObject* 
pyopencv_cv_utils_dumpInputArrayOfArrays(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_argument = NULL; + vector_Mat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputArrayOfArrays", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputArrayOfArrays(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_argument = NULL; + vector_UMat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputArrayOfArrays", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputArrayOfArrays(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dumpInputArrayOfArrays"); + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpInputOutputArray(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_argument = NULL; + Mat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputOutputArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 1)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputOutputArray(argument)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(argument)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_argument = NULL; + UMat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputOutputArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 1)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputOutputArray(argument)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(argument)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dumpInputOutputArray"); + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpInputOutputArrayOfArrays(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_argument = NULL; + vector_Mat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputOutputArrayOfArrays", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 1)) ) + { + ERRWRAP2(retval = cv::utils::dumpInputOutputArrayOfArrays(argument)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(argument)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_argument = NULL; + vector_UMat argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInputOutputArrayOfArrays", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 1)) ) + { + ERRWRAP2(retval = 
cv::utils::dumpInputOutputArrayOfArrays(argument)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(argument)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("dumpInputOutputArrayOfArrays"); + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpInt(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + int argument=0; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpInt", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpInt(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpRange(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + Range argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpRange", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpRange(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + Rect argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpRect", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpRect(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpRotatedRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + RotatedRect argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpRotatedRect", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpRotatedRect(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpSizeT(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + size_t argument=0; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpSizeT", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpSizeT(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpString(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + String argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpString", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpString(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpTermCriteria(PyObject* , 
PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + TermCriteria argument; + String retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpTermCriteria", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpTermCriteria(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpVectorOfDouble(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_vec = NULL; + vector_double vec; + String retval; + + const char* keywords[] = { "vec", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpVectorOfDouble", (char**)keywords, &pyobj_vec) && + pyopencv_to_safe(pyobj_vec, vec, ArgInfo("vec", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpVectorOfDouble(vec)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpVectorOfInt(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_vec = NULL; + vector_int vec; + String retval; + + const char* keywords[] = { "vec", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpVectorOfInt", (char**)keywords, &pyobj_vec) && + pyopencv_to_safe(pyobj_vec, vec, ArgInfo("vec", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpVectorOfInt(vec)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_dumpVectorOfRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_vec = NULL; + vector_Rect vec; + String retval; + + const char* keywords[] = { "vec", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:dumpVectorOfRect", (char**)keywords, &pyobj_vec) && + pyopencv_to_safe(pyobj_vec, vec, ArgInfo("vec", 0)) ) + { + ERRWRAP2(retval = cv::utils::dumpVectorOfRect(vec)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_generateVectorOfInt(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_len = NULL; + size_t len=0; + vector_int vec; + + const char* keywords[] = { "len", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:generateVectorOfInt", (char**)keywords, &pyobj_len) && + pyopencv_to_safe(pyobj_len, len, ArgInfo("len", 0)) ) + { + ERRWRAP2(cv::utils::generateVectorOfInt(len, vec)); + return pyopencv_from(vec); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_generateVectorOfMat(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_len = NULL; + size_t len=0; + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_dtype = NULL; + int dtype=0; + PyObject* pyobj_vec = NULL; + vector_Mat vec; + + const char* keywords[] = { "len", "rows", "cols", "dtype", "vec", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:generateVectorOfMat", (char**)keywords, &pyobj_len, &pyobj_rows, &pyobj_cols, &pyobj_dtype, &pyobj_vec) && + pyopencv_to_safe(pyobj_len, len, ArgInfo("len", 0)) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_dtype, dtype, ArgInfo("dtype", 0)) && + pyopencv_to_safe(pyobj_vec, vec, ArgInfo("vec", 1)) ) + { + ERRWRAP2(cv::utils::generateVectorOfMat(len, rows, cols, dtype, vec)); 
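+            // pyopencv_from converts the populated vector_Mat into a Python list of numpy arrays.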
+ return pyopencv_from(vec); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_generateVectorOfRect(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_len = NULL; + size_t len=0; + vector_Rect vec; + + const char* keywords[] = { "len", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:generateVectorOfRect", (char**)keywords, &pyobj_len) && + pyopencv_to_safe(pyobj_len, len, ArgInfo("len", 0)) ) + { + ERRWRAP2(cv::utils::generateVectorOfRect(len, vec)); + return pyopencv_from(vec); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testAsyncArray(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_argument = NULL; + Mat argument; + AsyncArray retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:testAsyncArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::testAsyncArray(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_argument = NULL; + UMat argument; + AsyncArray retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:testAsyncArray", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::testAsyncArray(argument)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("testAsyncArray"); + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testAsyncException(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + AsyncArray retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::utils::testAsyncException()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testOverloadResolution(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_value = NULL; + int value=0; + PyObject* pyobj_point = NULL; + Point point=Point(42, 24); + String retval; + + const char* keywords[] = { "value", "point", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:testOverloadResolution", (char**)keywords, &pyobj_value, &pyobj_point) && + pyopencv_to_safe(pyobj_value, value, ArgInfo("value", 0)) && + pyopencv_to_safe(pyobj_point, point, ArgInfo("point", 0)) ) + { + ERRWRAP2(retval = cv::utils::testOverloadResolution(value, point)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rect = NULL; + Rect rect; + String retval; + + const char* keywords[] = { "rect", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:testOverloadResolution", (char**)keywords, &pyobj_rect) && + pyopencv_to_safe(pyobj_rect, rect, ArgInfo("rect", 0)) ) + { + ERRWRAP2(retval = cv::utils::testOverloadResolution(rect)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("testOverloadResolution"); + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testOverwriteNativeMethod(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_argument = NULL; + int 
argument=0; + int retval; + + const char* keywords[] = { "argument", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:testOverwriteNativeMethod", (char**)keywords, &pyobj_argument) && + pyopencv_to_safe(pyobj_argument, argument, ArgInfo("argument", 0)) ) + { + ERRWRAP2(retval = cv::utils::testOverwriteNativeMethod(argument)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testRaiseGeneralException(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(cv::utils::testRaiseGeneralException()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_testReservedKeywordConversion(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils; + + PyObject* pyobj_positional_argument = NULL; + int positional_argument=0; + PyObject* pyobj_lambda_ = NULL; + int lambda_=2; + PyObject* pyobj_from_ = NULL; + int from_=3; + String retval; + + const char* keywords[] = { "positional_argument", "lambda_", "from_", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:testReservedKeywordConversion", (char**)keywords, &pyobj_positional_argument, &pyobj_lambda_, &pyobj_from_) && + pyopencv_to_safe(pyobj_positional_argument, positional_argument, ArgInfo("positional_argument", 0)) && + pyopencv_to_safe(pyobj_lambda_, lambda_, ArgInfo("lambda_", 0)) && + pyopencv_to_safe(pyobj_from_, from_, ArgInfo("from_", 0)) ) + { + ERRWRAP2(retval = cv::utils::testReservedKeywordConversion(positional_argument, lambda_, from_)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_utils_fs_getCacheDirectoryForDownloads(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::utils::fs; + + cv::String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::utils::fs::getCacheDirectoryForDownloads()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getBackendName(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + cv::String retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getBackendName", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval = cv::videoio_registry::getBackendName(api)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getBackends(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + std::vector<VideoCaptureAPIs> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::videoio_registry::getBackends()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getCameraBackendPluginVersion(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + int version_ABI; + int version_API; + std::string retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getCameraBackendPluginVersion", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval =
cv::videoio_registry::getCameraBackendPluginVersion(api, version_ABI, version_API)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(version_ABI), pyopencv_from(version_API)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getCameraBackends(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + std::vector<VideoCaptureAPIs> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::videoio_registry::getCameraBackends()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getStreamBackendPluginVersion(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + int version_ABI; + int version_API; + std::string retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getStreamBackendPluginVersion", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval = cv::videoio_registry::getStreamBackendPluginVersion(api, version_ABI, version_API)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(version_ABI), pyopencv_from(version_API)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getStreamBackends(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + std::vector<VideoCaptureAPIs> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::videoio_registry::getStreamBackends()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getWriterBackendPluginVersion(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + int version_ABI; + int version_API; + std::string retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:getWriterBackendPluginVersion", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval = cv::videoio_registry::getWriterBackendPluginVersion(api, version_ABI, version_API)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(version_ABI), pyopencv_from(version_API)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_getWriterBackends(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + std::vector<VideoCaptureAPIs> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::videoio_registry::getWriterBackends()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_hasBackend(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + bool retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:hasBackend", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval = cv::videoio_registry::hasBackend(api)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_videoio_registry_isBackendBuiltIn(PyObject* , PyObject* py_args, PyObject* kw) +{ + using namespace
cv::videoio_registry; + + PyObject* pyobj_api = NULL; + VideoCaptureAPIs api=static_cast<VideoCaptureAPIs>(0); + bool retval; + + const char* keywords[] = { "api", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:isBackendBuiltIn", (char**)keywords, &pyobj_api) && + pyopencv_to_safe(pyobj_api, api, ArgInfo("api", 0)) ) + { + ERRWRAP2(retval = cv::videoio_registry::isBackendBuiltIn(api)); + return pyopencv_from(retval); + } + + return NULL; +} + diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_include.h b/generated/modules/python_bindings_generator/pyopencv_generated_include.h new file mode 100644 index 0000000..66dc73b --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_include.h @@ -0,0 +1,22 @@ +#include "opencv2/core.hpp" +#include "opencv2/core/async.hpp" +#include "opencv2/core/base.hpp" +#include "opencv2/core/bindings_utils.hpp" +#include "opencv2/core/check.hpp" +#include "opencv2/core/cuda.hpp" +#include "opencv2/core/mat.hpp" +#include "opencv2/core/ocl.hpp" +#include "opencv2/core/opengl.hpp" +#include "opencv2/core/optim.hpp" +#include "opencv2/core/ovx.hpp" +#include "opencv2/core/parallel/parallel_backend.hpp" +#include "opencv2/core/persistence.hpp" +#include "opencv2/core/quaternion.hpp" +#include "opencv2/core/types.hpp" +#include "opencv2/core/utility.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/imgproc/bindings.hpp" +#include "opencv2/imgproc/segmentation.hpp" +#include "opencv2/imgcodecs.hpp" +#include "opencv2/videoio.hpp" +#include "opencv2/videoio/registry.hpp" diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_modules.h b/generated/modules/python_bindings_generator/pyopencv_generated_modules.h new file mode 100644 index 0000000..8396521 --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_modules.h @@ -0,0 +1,13 @@ +CVPY_MODULE("", cv); +CVPY_MODULE(".Error", Error); +CVPY_MODULE(".cuda", cuda); +CVPY_MODULE(".detail", detail); +CVPY_MODULE(".ipp", ipp); +CVPY_MODULE(".ocl", ocl); +CVPY_MODULE(".ogl", ogl); +CVPY_MODULE(".parallel", parallel); +CVPY_MODULE(".samples", samples); +CVPY_MODULE(".segmentation", segmentation); +CVPY_MODULE(".utils", utils); +CVPY_MODULE(".utils.fs", utils_fs); +CVPY_MODULE(".videoio_registry", videoio_registry); diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_modules_content.h b/generated/modules/python_bindings_generator/pyopencv_generated_modules_content.h new file mode 100644 index 0000000..aa06152 --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_modules_content.h @@ -0,0 +1,1981 @@ +static PyMethodDef methods_cv[] = { + {"Canny", CV_PY_FN_WITH_KW_(pyopencv_cv_Canny, 0), "Canny(image, threshold1, threshold2[, edges[, apertureSize[, L2gradient]]]) -> edges\n. @brief Finds edges in an image using the Canny algorithm @cite Canny86 .\n. \n. The function finds edges in the input image and marks them in the output map edges using the\n. Canny algorithm. The smallest value between threshold1 and threshold2 is used for edge linking. The\n. largest value is used to find initial segments of strong edges. See\n. <http://en.wikipedia.org/wiki/Canny_edge_detector>\n. \n. @param image 8-bit input image.\n. @param edges output edge map; single channels 8-bit image, which has the same size as image .\n. @param threshold1 first threshold for the hysteresis procedure.\n. @param threshold2 second threshold for the hysteresis procedure.\n. @param apertureSize aperture size for the Sobel operator.\n.
@param L2gradient a flag, indicating whether a more accurate \\f$L_2\\f$ norm\n. \\f$=\\sqrt{(dI/dx)^2 + (dI/dy)^2}\\f$ should be used to calculate the image gradient magnitude (\n. L2gradient=true ), or whether the default \\f$L_1\\f$ norm \\f$=|dI/dx|+|dI/dy|\\f$ is enough (\n. L2gradient=false ).\n\n\n\nCanny(dx, dy, threshold1, threshold2[, edges[, L2gradient]]) -> edges\n. \\overload\n. \n. Finds edges in an image using the Canny algorithm with custom image gradient.\n. \n. @param dx 16-bit x derivative of input image (CV_16SC1 or CV_16SC3).\n. @param dy 16-bit y derivative of input image (same type as dx).\n. @param edges output edge map; single channels 8-bit image, which has the same size as image .\n. @param threshold1 first threshold for the hysteresis procedure.\n. @param threshold2 second threshold for the hysteresis procedure.\n. @param L2gradient a flag, indicating whether a more accurate \\f$L_2\\f$ norm\n. \\f$=\\sqrt{(dI/dx)^2 + (dI/dy)^2}\\f$ should be used to calculate the image gradient magnitude (\n. L2gradient=true ), or whether the default \\f$L_1\\f$ norm \\f$=|dI/dx|+|dI/dy|\\f$ is enough (\n. L2gradient=false )."}, + {"EMD", CV_PY_FN_WITH_KW_(pyopencv_cv_EMD, 0), "EMD(signature1, signature2, distType[, cost[, lowerBound[, flow]]]) -> retval, lowerBound, flow\n. @brief Computes the \"minimal work\" distance between two weighted point configurations.\n. \n. The function computes the earth mover distance and/or a lower boundary of the distance between the\n. two weighted point configurations. One of the applications described in @cite RubnerSept98,\n. @cite Rubner2000 is multi-dimensional histogram comparison for image retrieval. EMD is a transportation\n. problem that is solved using some modification of a simplex algorithm, thus the complexity is\n. exponential in the worst case, though, on average it is much faster. In the case of a real metric\n. the lower boundary can be calculated even faster (using linear-time algorithm) and it can be used\n. to determine roughly whether the two signatures are far enough so that they cannot relate to the\n. same object.\n. \n. @param signature1 First signature, a \\f$\\texttt{size1}\\times \\texttt{dims}+1\\f$ floating-point matrix.\n. Each row stores the point weight followed by the point coordinates. The matrix is allowed to have\n. a single column (weights only) if the user-defined cost matrix is used. The weights must be\n. non-negative and have at least one non-zero value.\n. @param signature2 Second signature of the same format as signature1 , though the number of rows\n. may be different. The total weights may be different. In this case an extra \"dummy\" point is added\n. to either signature1 or signature2. The weights must be non-negative and have at least one non-zero\n. value.\n. @param distType Used metric. See #DistanceTypes.\n. @param cost User-defined \\f$\\texttt{size1}\\times \\texttt{size2}\\f$ cost matrix. Also, if a cost matrix\n. is used, lower boundary lowerBound cannot be calculated because it needs a metric function.\n. @param lowerBound Optional input/output parameter: lower boundary of a distance between the two\n. signatures that is a distance between mass centers. The lower boundary may not be calculated if\n. the user-defined cost matrix is used, the total weights of point configurations are not equal, or\n. if the signatures consist of weights only (the signature matrices have a single column). You\n. **must** initialize \\*lowerBound . 
If the calculated distance between mass centers is greater or\n. equal to \\*lowerBound (it means that the signatures are far enough), the function does not\n. calculate EMD. In any case \\*lowerBound is set to the calculated distance between mass centers on\n. return. Thus, if you want to calculate both distance between mass centers and EMD, \\*lowerBound\n. should be set to 0.\n. @param flow Resultant \\f$\\texttt{size1} \\times \\texttt{size2}\\f$ flow matrix: \\f$\\texttt{flow}_{i,j}\\f$ is\n. a flow from \\f$i\\f$ -th point of signature1 to \\f$j\\f$ -th point of signature2 ."}, + {"GaussianBlur", CV_PY_FN_WITH_KW_(pyopencv_cv_GaussianBlur, 0), "GaussianBlur(src, ksize, sigmaX[, dst[, sigmaY[, borderType]]]) -> dst\n. @brief Blurs an image using a Gaussian filter.\n. \n. The function convolves the source image with the specified Gaussian kernel. In-place filtering is\n. supported.\n. \n. @param src input image; the image can have any number of channels, which are processed\n. independently, but the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n. @param dst output image of the same size and type as src.\n. @param ksize Gaussian kernel size. ksize.width and ksize.height can differ but they both must be\n. positive and odd. Or, they can be zero's and then they are computed from sigma.\n. @param sigmaX Gaussian kernel standard deviation in X direction.\n. @param sigmaY Gaussian kernel standard deviation in Y direction; if sigmaY is zero, it is set to be\n. equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width and ksize.height,\n. respectively (see #getGaussianKernel for details); to fully control the result regardless of\n. possible future modifications of all this semantics, it is recommended to specify all of ksize,\n. sigmaX, and sigmaY.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. \n. @sa sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur"}, + {"HoughCircles", CV_PY_FN_WITH_KW_(pyopencv_cv_HoughCircles, 0), "HoughCircles(image, method, dp, minDist[, circles[, param1[, param2[, minRadius[, maxRadius]]]]]) -> circles\n. @brief Finds circles in a grayscale image using the Hough transform.\n. \n. The function finds circles in a grayscale image using a modification of the Hough transform.\n. \n. Example: :\n. @include snippets/imgproc_HoughLinesCircles.cpp\n. \n. @note Usually the function detects the centers of circles well. However, it may fail to find correct\n. radii. You can assist to the function by specifying the radius range ( minRadius and maxRadius ) if\n. you know it. Or, in the case of #HOUGH_GRADIENT method you may set maxRadius to a negative number\n. to return centers only without radius search, and find the correct radius using an additional procedure.\n. \n. It also helps to smooth image a bit unless it's already soft. For example,\n. GaussianBlur() with 7x7 kernel and 1.5x1.5 sigma or similar blurring may help.\n. \n. @param image 8-bit, single-channel, grayscale input image.\n. @param circles Output vector of found circles. Each vector is encoded as 3 or 4 element\n. floating-point vector \\f$(x, y, radius)\\f$ or \\f$(x, y, radius, votes)\\f$ .\n. @param method Detection method, see #HoughModes. The available methods are #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT.\n. @param dp Inverse ratio of the accumulator resolution to the image resolution. For example, if\n. dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has\n. 
half as big width and height. For #HOUGH_GRADIENT_ALT the recommended value is dp=1.5,\n. unless some small very circles need to be detected.\n. @param minDist Minimum distance between the centers of the detected circles. If the parameter is\n. too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is\n. too large, some circles may be missed.\n. @param param1 First method-specific parameter. In case of #HOUGH_GRADIENT and #HOUGH_GRADIENT_ALT,\n. it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller).\n. Note that #HOUGH_GRADIENT_ALT uses #Scharr algorithm to compute image derivatives, so the threshold value\n. shough normally be higher, such as 300 or normally exposed and contrasty images.\n. @param param2 Second method-specific parameter. In case of #HOUGH_GRADIENT, it is the\n. accumulator threshold for the circle centers at the detection stage. The smaller it is, the more\n. false circles may be detected. Circles, corresponding to the larger accumulator values, will be\n. returned first. In the case of #HOUGH_GRADIENT_ALT algorithm, this is the circle \"perfectness\" measure.\n. The closer it to 1, the better shaped circles algorithm selects. In most cases 0.9 should be fine.\n. If you want get better detection of small circles, you may decrease it to 0.85, 0.8 or even less.\n. But then also try to limit the search range [minRadius, maxRadius] to avoid many false circles.\n. @param minRadius Minimum circle radius.\n. @param maxRadius Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, #HOUGH_GRADIENT returns\n. centers without finding the radius. #HOUGH_GRADIENT_ALT always computes circle radiuses.\n. \n. @sa fitEllipse, minEnclosingCircle"}, + {"HoughLines", CV_PY_FN_WITH_KW_(pyopencv_cv_HoughLines, 0), "HoughLines(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta]]]]]) -> lines\n. @brief Finds lines in a binary image using the standard Hough transform.\n. \n. The function implements the standard or standard multi-scale Hough transform algorithm for line\n. detection. See <http://homepages.inf.ed.ac.uk/rbf/HIPR2/hough.htm> for a good explanation of Hough\n. transform.\n. \n. @param image 8-bit, single-channel binary source image. The image may be modified by the function.\n. @param lines Output vector of lines. Each line is represented by a 2 or 3 element vector\n. \\f$(\\rho, \\theta)\\f$ or \\f$(\\rho, \\theta, \\textrm{votes})\\f$ . \\f$\\rho\\f$ is the distance from the coordinate origin \\f$(0,0)\\f$ (top-left corner of\n. the image). \\f$\\theta\\f$ is the line rotation angle in radians (\n. \\f$0 \\sim \\textrm{vertical line}, \\pi/2 \\sim \\textrm{horizontal line}\\f$ ).\n. \\f$\\textrm{votes}\\f$ is the value of accumulator.\n. @param rho Distance resolution of the accumulator in pixels.\n. @param theta Angle resolution of the accumulator in radians.\n. @param threshold Accumulator threshold parameter. Only those lines are returned that get enough\n. votes ( \\f$>\\texttt{threshold}\\f$ ).\n. @param srn For the multi-scale Hough transform, it is a divisor for the distance resolution rho .\n. The coarse accumulator distance resolution is rho and the accurate accumulator resolution is\n. rho/srn . If both srn=0 and stn=0 , the classical Hough transform is used. Otherwise, both these\n. parameters should be positive.\n. @param stn For the multi-scale Hough transform, it is a divisor for the distance resolution theta.\n.
@param min_theta For standard and multi-scale Hough transform, minimum angle to check for lines.\n. Must fall between 0 and max_theta.\n. @param max_theta For standard and multi-scale Hough transform, maximum angle to check for lines.\n. Must fall between min_theta and CV_PI."}, + {"HoughLinesP", CV_PY_FN_WITH_KW_(pyopencv_cv_HoughLinesP, 0), "HoughLinesP(image, rho, theta, threshold[, lines[, minLineLength[, maxLineGap]]]) -> lines\n. @brief Finds line segments in a binary image using the probabilistic Hough transform.\n. \n. The function implements the probabilistic Hough transform algorithm for line detection, described\n. in @cite Matas00\n. \n. See the line detection example below:\n. @include snippets/imgproc_HoughLinesP.cpp\n. This is a sample picture the function parameters have been tuned for:\n. \n. ![image](pics/building.jpg)\n. \n. And this is the output of the above program in case of the probabilistic Hough transform:\n. \n. ![image](pics/houghp.png)\n. \n. @param image 8-bit, single-channel binary source image. The image may be modified by the function.\n. @param lines Output vector of lines. Each line is represented by a 4-element vector\n. \\f$(x_1, y_1, x_2, y_2)\\f$ , where \\f$(x_1,y_1)\\f$ and \\f$(x_2, y_2)\\f$ are the ending points of each detected\n. line segment.\n. @param rho Distance resolution of the accumulator in pixels.\n. @param theta Angle resolution of the accumulator in radians.\n. @param threshold Accumulator threshold parameter. Only those lines are returned that get enough\n. votes ( \\f$>\\texttt{threshold}\\f$ ).\n. @param minLineLength Minimum line length. Line segments shorter than that are rejected.\n. @param maxLineGap Maximum allowed gap between points on the same line to link them.\n. \n. @sa LineSegmentDetector"}, + {"HoughLinesPointSet", CV_PY_FN_WITH_KW_(pyopencv_cv_HoughLinesPointSet, 0), "HoughLinesPointSet(point, lines_max, threshold, min_rho, max_rho, rho_step, min_theta, max_theta, theta_step[, lines]) -> lines\n. @brief Finds lines in a set of points using the standard Hough transform.\n. \n. The function finds lines in a set of points using a modification of the Hough transform.\n. @include snippets/imgproc_HoughLinesPointSet.cpp\n. @param point Input vector of points. Each vector must be encoded as a Point vector \\f$(x,y)\\f$. Type must be CV_32FC2 or CV_32SC2.\n. @param lines Output vector of found lines. Each vector is encoded as a vector \\f$(votes, rho, theta)\\f$.\n. The larger the value of 'votes', the higher the reliability of the Hough line.\n. @param lines_max Max count of hough lines.\n. @param threshold Accumulator threshold parameter. Only those lines are returned that get enough\n. votes ( \\f$>\\texttt{threshold}\\f$ )\n. @param min_rho Minimum Distance value of the accumulator in pixels.\n. @param max_rho Maximum Distance value of the accumulator in pixels.\n. @param rho_step Distance resolution of the accumulator in pixels.\n. @param min_theta Minimum angle value of the accumulator in radians.\n. @param max_theta Maximum angle value of the accumulator in radians.\n. @param theta_step Angle resolution of the accumulator in radians."}, + {"HoughLinesWithAccumulator", CV_PY_FN_WITH_KW_(pyopencv_cv_HoughLinesWithAccumulator, 0), "HoughLinesWithAccumulator(image, rho, theta, threshold[, lines[, srn[, stn[, min_theta[, max_theta]]]]]) -> lines\n. @brief Finds lines in a binary image using the standard Hough transform and get accumulator.\n. *\n. * @note This function is for bindings use only. 
Use original function in C++ code\n. *\n. * @sa HoughLines"}, + {"HuMoments", CV_PY_FN_WITH_KW_(pyopencv_cv_HuMoments, 0), "HuMoments(m[, hu]) -> hu\n. @overload"}, + {"KeyPoint_convert", CV_PY_FN_WITH_KW_(pyopencv_cv_KeyPoint_convert, 0), "KeyPoint_convert(keypoints[, keypointIndexes]) -> points2f\n. This method converts vector of keypoints to vector of points or the reverse, where each keypoint is\n. assigned the same size and the same orientation.\n. \n. @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n. @param points2f Array of (x,y) coordinates of each keypoint\n. @param keypointIndexes Array of indexes of keypoints to be converted to points. (Acts like a mask to\n. convert only specified keypoints)\n\n\n\nKeyPoint_convert(points2f[, size[, response[, octave[, class_id]]]]) -> keypoints\n. @overload\n. @param points2f Array of (x,y) coordinates of each keypoint\n. @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n. @param size keypoint diameter\n. @param response keypoint detector response on the keypoint (that is, strength of the keypoint)\n. @param octave pyramid octave in which the keypoint has been detected\n. @param class_id object id"}, + {"KeyPoint_overlap", CV_PY_FN_WITH_KW_(pyopencv_cv_KeyPoint_overlap, 0), "KeyPoint_overlap(kp1, kp2) -> retval\n. This method computes overlap for pair of keypoints. Overlap is the ratio between area of keypoint\n. regions' intersection and area of keypoint regions' union (considering keypoint region as circle).\n. If they don't overlap, we get zero. If they coincide at same location with same size, we get 1.\n. @param kp1 First keypoint\n. @param kp2 Second keypoint"}, + {"LUT", CV_PY_FN_WITH_KW_(pyopencv_cv_LUT, 0), "LUT(src, lut[, dst]) -> dst\n. @brief Performs a look-up table transform of an array.\n. \n. The function LUT fills the output array with values from the look-up table. Indices of the entries\n. are taken from the input array. That is, the function processes each element of src as follows:\n. \\f[\\texttt{dst} (I) \\leftarrow \\texttt{lut(src(I) + d)}\\f]\n. where\n. \\f[d = \\fork{0}{if \\(\\texttt{src}\\) has depth \\(\\texttt{CV_8U}\\)}{128}{if \\(\\texttt{src}\\) has depth \\(\\texttt{CV_8S}\\)}\\f]\n. @param src input array of 8-bit elements.\n. @param lut look-up table of 256 elements; in case of multi-channel input array, the table should\n. either have a single channel (in this case the same table is used for all channels) or the same\n. number of channels as in the input array.\n. @param dst output array of the same size and number of channels as src, and the same depth as lut.\n. @sa convertScaleAbs, Mat::convertTo"}, + {"Laplacian", CV_PY_FN_WITH_KW_(pyopencv_cv_Laplacian, 0), "Laplacian(src, ddepth[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst\n. @brief Calculates the Laplacian of an image.\n. \n. The function calculates the Laplacian of the source image by adding up the second x and y\n. derivatives calculated using the Sobel operator:\n. \n. \\f[\\texttt{dst} = \\Delta \\texttt{src} = \\frac{\\partial^2 \\texttt{src}}{\\partial x^2} + \\frac{\\partial^2 \\texttt{src}}{\\partial y^2}\\f]\n. \n. This is done when `ksize > 1`. When `ksize == 1`, the Laplacian is computed by filtering the image\n. with the following \\f$3 \\times 3\\f$ aperture:\n. \n. \\f[\\vecthreethree {0}{1}{0}{1}{-4}{1}{0}{1}{0}\\f]\n. \n. @param src Source image.\n. 
@param dst Destination image of the same size and the same number of channels as src .\n. @param ddepth Desired depth of the destination image.\n. @param ksize Aperture size used to compute the second-derivative filters. See #getDerivKernels for\n. details. The size must be positive and odd.\n. @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is\n. applied. See #getDerivKernels for details.\n. @param delta Optional delta value that is added to the results prior to storing them in dst .\n. @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa Sobel, Scharr"}, + {"Mahalanobis", CV_PY_FN_WITH_KW_(pyopencv_cv_Mahalanobis, 0), "Mahalanobis(v1, v2, icovar) -> retval\n. @brief Calculates the Mahalanobis distance between two vectors.\n. \n. The function cv::Mahalanobis calculates and returns the weighted distance between two vectors:\n. \\f[d( \\texttt{vec1} , \\texttt{vec2} )= \\sqrt{\\sum_{i,j}{\\texttt{icovar(i,j)}\\cdot(\\texttt{vec1}(I)-\\texttt{vec2}(I))\\cdot(\\texttt{vec1(j)}-\\texttt{vec2(j)})} }\\f]\n. The covariance matrix may be calculated using the #calcCovarMatrix function and then inverted using\n. the invert function (preferably using the #DECOMP_SVD method, as the most accurate).\n. @param v1 first 1D input vector.\n. @param v2 second 1D input vector.\n. @param icovar inverse covariance matrix."}, + {"PCABackProject", CV_PY_FN_WITH_KW_(pyopencv_cv_PCABackProject, 0), "PCABackProject(data, mean, eigenvectors[, result]) -> result\n. wrap PCA::backProject"}, + {"PCACompute", CV_PY_FN_WITH_KW_(pyopencv_cv_PCACompute, 0), "PCACompute(data, mean[, eigenvectors[, maxComponents]]) -> mean, eigenvectors\n. wrap PCA::operator()\n\n\n\nPCACompute(data, mean, retainedVariance[, eigenvectors]) -> mean, eigenvectors\n. wrap PCA::operator()"}, + {"PCACompute2", CV_PY_FN_WITH_KW_(pyopencv_cv_PCACompute2, 0), "PCACompute2(data, mean[, eigenvectors[, eigenvalues[, maxComponents]]]) -> mean, eigenvectors, eigenvalues\n. wrap PCA::operator() and add eigenvalues output parameter\n\n\n\nPCACompute2(data, mean, retainedVariance[, eigenvectors[, eigenvalues]]) -> mean, eigenvectors, eigenvalues\n. wrap PCA::operator() and add eigenvalues output parameter"}, + {"PCAProject", CV_PY_FN_WITH_KW_(pyopencv_cv_PCAProject, 0), "PCAProject(data, mean, eigenvectors[, result]) -> result\n. wrap PCA::project"}, + {"PSNR", CV_PY_FN_WITH_KW_(pyopencv_cv_PSNR, 0), "PSNR(src1, src2[, R]) -> retval\n. @brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.\n. \n. This function calculates the Peak Signal-to-Noise Ratio (PSNR) image quality metric in decibels (dB),\n. between two input arrays src1 and src2. The arrays must have the same type.\n. \n. The PSNR is calculated as follows:\n. \n. \\f[\n. \\texttt{PSNR} = 10 \\cdot \\log_{10}{\\left( \\frac{R^2}{MSE} \\right) }\n. \\f]\n. \n. where R is the maximum integer value of depth (e.g. 255 in the case of CV_8U data)\n. and MSE is the mean squared error between the two arrays.\n. \n. @param src1 first input array.\n. @param src2 second input array of the same size as src1.\n. @param R the maximum pixel value (255 by default)"}, + {"SVBackSubst", CV_PY_FN_WITH_KW_(pyopencv_cv_SVBackSubst, 0), "SVBackSubst(w, u, vt, rhs[, dst]) -> dst\n. wrap SVD::backSubst"}, + {"SVDecomp", CV_PY_FN_WITH_KW_(pyopencv_cv_SVDecomp, 0), "SVDecomp(src[, w[, u[, vt[, flags]]]]) -> w, u, vt\n. 
wrap SVD::compute"}, + {"Scharr", CV_PY_FN_WITH_KW_(pyopencv_cv_Scharr, 0), "Scharr(src, ddepth, dx, dy[, dst[, scale[, delta[, borderType]]]]) -> dst\n. @brief Calculates the first x- or y- image derivative using Scharr operator.\n. \n. The function computes the first x- or y- spatial image derivative using the Scharr operator. The\n. call\n. \n. \\f[\\texttt{Scharr(src, dst, ddepth, dx, dy, scale, delta, borderType)}\\f]\n. \n. is equivalent to\n. \n. \\f[\\texttt{Sobel(src, dst, ddepth, dx, dy, FILTER_SCHARR, scale, delta, borderType)} .\\f]\n. \n. @param src input image.\n. @param dst output image of the same size and the same number of channels as src.\n. @param ddepth output image depth, see @ref filter_depths \"combinations\"\n. @param dx order of the derivative x.\n. @param dy order of the derivative y.\n. @param scale optional scale factor for the computed derivative values; by default, no scaling is\n. applied (see #getDerivKernels for details).\n. @param delta optional delta value that is added to the results prior to storing them in dst.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa cartToPolar"}, + {"Sobel", CV_PY_FN_WITH_KW_(pyopencv_cv_Sobel, 0), "Sobel(src, ddepth, dx, dy[, dst[, ksize[, scale[, delta[, borderType]]]]]) -> dst\n. @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.\n. \n. In all cases except one, the \\f$\\texttt{ksize} \\times \\texttt{ksize}\\f$ separable kernel is used to\n. calculate the derivative. When \\f$\\texttt{ksize = 1}\\f$, the \\f$3 \\times 1\\f$ or \\f$1 \\times 3\\f$\n. kernel is used (that is, no Gaussian smoothing is done). `ksize = 1` can only be used for the first\n. or the second x- or y- derivatives.\n. \n. There is also the special value `ksize = #FILTER_SCHARR (-1)` that corresponds to the \\f$3\\times3\\f$ Scharr\n. filter that may give more accurate results than the \\f$3\\times3\\f$ Sobel. The Scharr aperture is\n. \n. \\f[\\vecthreethree{-3}{0}{3}{-10}{0}{10}{-3}{0}{3}\\f]\n. \n. for the x-derivative, or transposed for the y-derivative.\n. \n. The function calculates an image derivative by convolving the image with the appropriate kernel:\n. \n. \\f[\\texttt{dst} = \\frac{\\partial^{xorder+yorder} \\texttt{src}}{\\partial x^{xorder} \\partial y^{yorder}}\\f]\n. \n. The Sobel operators combine Gaussian smoothing and differentiation, so the result is more or less\n. resistant to the noise. Most often, the function is called with ( xorder = 1, yorder = 0, ksize = 3)\n. or ( xorder = 0, yorder = 1, ksize = 3) to calculate the first x- or y- image derivative. The first\n. case corresponds to a kernel of:\n. \n. \\f[\\vecthreethree{-1}{0}{1}{-2}{0}{2}{-1}{0}{1}\\f]\n. \n. The second case corresponds to a kernel of:\n. \n. \\f[\\vecthreethree{-1}{-2}{-1}{0}{0}{0}{1}{2}{1}\\f]\n. \n. @param src input image.\n. @param dst output image of the same size and the same number of channels as src .\n. @param ddepth output image depth, see @ref filter_depths \"combinations\"; in the case of\n. 8-bit input images it will result in truncated derivatives.\n. @param dx order of the derivative x.\n. @param dy order of the derivative y.\n. @param ksize size of the extended Sobel kernel; it must be 1, 3, 5, or 7.\n. @param scale optional scale factor for the computed derivative values; by default, no scaling is\n. applied (see #getDerivKernels for details).\n. 
@param delta optional delta value that is added to the results prior to storing them in dst.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar"}, + {"UMat_context", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_context, 0), "UMat_context() -> retval\n."}, + {"UMat_queue", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_queue, 0), "UMat_queue() -> retval\n."}, + {"VideoWriter_fourcc", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_fourcc, 0), "VideoWriter_fourcc(c1, c2, c3, c4) -> retval\n. @brief Concatenates 4 chars to a fourcc code\n. \n. @return a fourcc code\n. \n. This static method constructs the fourcc code of the codec to be used in the constructor\n. VideoWriter::VideoWriter or VideoWriter::open."}, + {"absdiff", CV_PY_FN_WITH_KW_(pyopencv_cv_absdiff, 0), "absdiff(src1, src2[, dst]) -> dst\n. @brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.\n. \n. The function cv::absdiff calculates:\n. * Absolute difference between two arrays when they have the same\n. size and type:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} (| \\texttt{src1}(I) - \\texttt{src2}(I)|)\\f]\n. * Absolute difference between an array and a scalar when the second\n. array is constructed from Scalar or has as many elements as the\n. number of channels in `src1`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} (| \\texttt{src1}(I) - \\texttt{src2} |)\\f]\n. * Absolute difference between a scalar and an array when the first\n. array is constructed from Scalar or has as many elements as the\n. number of channels in `src2`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} (| \\texttt{src1} - \\texttt{src2}(I) |)\\f]\n. where I is a multi-dimensional index of array elements. In case of\n. multi-channel arrays, each channel is processed independently.\n. @note Saturation is not applied when the arrays have the depth CV_32S.\n. You may even get a negative value in the case of overflow.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and type as input arrays.\n. @sa cv::abs(const Mat&)"}, + {"accumulate", CV_PY_FN_WITH_KW_(pyopencv_cv_accumulate, 0), "accumulate(src, dst[, mask]) -> dst\n. @brief Adds an image to the accumulator image.\n. \n. The function adds src or some of its elements to dst :\n. \n. \\f[\\texttt{dst} (x,y) \\leftarrow \\texttt{dst} (x,y) + \\texttt{src} (x,y) \\quad \\text{if} \\quad \\texttt{mask} (x,y) \\ne 0\\f]\n. \n. The function supports multi-channel images. Each channel is processed independently.\n. \n. The function cv::accumulate can be used, for example, to collect statistics of a scene background\n. viewed by a still camera and for the further foreground-background segmentation.\n. \n. @param src Input image of type CV_8UC(n), CV_16UC(n), CV_32FC(n) or CV_64FC(n), where n is a positive integer.\n. @param dst %Accumulator image with the same number of channels as input image, and a depth of CV_32F or CV_64F.\n. @param mask Optional operation mask.\n. \n. @sa accumulateSquare, accumulateProduct, accumulateWeighted"}, + {"accumulateProduct", CV_PY_FN_WITH_KW_(pyopencv_cv_accumulateProduct, 0), "accumulateProduct(src1, src2, dst[, mask]) -> dst\n. @brief Adds the per-element product of two input images to the accumulator image.\n. \n. The function adds the product of two images or their selected regions to the accumulator dst :\n. \n. 
\\f[\\texttt{dst} (x,y) \\leftarrow \\texttt{dst} (x,y) + \\texttt{src1} (x,y) \\cdot \\texttt{src2} (x,y) \\quad \\text{if} \\quad \\texttt{mask} (x,y) \\ne 0\\f]\n. \n. The function supports multi-channel images. Each channel is processed independently.\n. \n. @param src1 First input image, 1- or 3-channel, 8-bit or 32-bit floating point.\n. @param src2 Second input image of the same type and the same size as src1 .\n. @param dst %Accumulator image with the same number of channels as input images, 32-bit or 64-bit\n. floating-point.\n. @param mask Optional operation mask.\n. \n. @sa accumulate, accumulateSquare, accumulateWeighted"}, + {"accumulateSquare", CV_PY_FN_WITH_KW_(pyopencv_cv_accumulateSquare, 0), "accumulateSquare(src, dst[, mask]) -> dst\n. @brief Adds the square of a source image to the accumulator image.\n. \n. The function adds the input image src or its selected region, raised to a power of 2, to the\n. accumulator dst :\n. \n. \\f[\\texttt{dst} (x,y) \\leftarrow \\texttt{dst} (x,y) + \\texttt{src} (x,y)^2 \\quad \\text{if} \\quad \\texttt{mask} (x,y) \\ne 0\\f]\n. \n. The function supports multi-channel images. Each channel is processed independently.\n. \n. @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.\n. @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit\n. floating-point.\n. @param mask Optional operation mask.\n. \n. @sa accumulateSquare, accumulateProduct, accumulateWeighted"}, + {"accumulateWeighted", CV_PY_FN_WITH_KW_(pyopencv_cv_accumulateWeighted, 0), "accumulateWeighted(src, dst, alpha[, mask]) -> dst\n. @brief Updates a running average.\n. \n. The function calculates the weighted sum of the input image src and the accumulator dst so that dst\n. becomes a running average of a frame sequence:\n. \n. \\f[\\texttt{dst} (x,y) \\leftarrow (1- \\texttt{alpha} ) \\cdot \\texttt{dst} (x,y) + \\texttt{alpha} \\cdot \\texttt{src} (x,y) \\quad \\text{if} \\quad \\texttt{mask} (x,y) \\ne 0\\f]\n. \n. That is, alpha regulates the update speed (how fast the accumulator \"forgets\" about earlier images).\n. The function supports multi-channel images. Each channel is processed independently.\n. \n. @param src Input image as 1- or 3-channel, 8-bit or 32-bit floating point.\n. @param dst %Accumulator image with the same number of channels as input image, 32-bit or 64-bit\n. floating-point.\n. @param alpha Weight of the input image.\n. @param mask Optional operation mask.\n. \n. @sa accumulate, accumulateSquare, accumulateProduct"}, + {"adaptiveThreshold", CV_PY_FN_WITH_KW_(pyopencv_cv_adaptiveThreshold, 0), "adaptiveThreshold(src, maxValue, adaptiveMethod, thresholdType, blockSize, C[, dst]) -> dst\n. @brief Applies an adaptive threshold to an array.\n. \n. The function transforms a grayscale image to a binary image according to the formulae:\n. - **THRESH_BINARY**\n. \\f[dst(x,y) = \\fork{\\texttt{maxValue}}{if \\(src(x,y) > T(x,y)\\)}{0}{otherwise}\\f]\n. - **THRESH_BINARY_INV**\n. \\f[dst(x,y) = \\fork{0}{if \\(src(x,y) > T(x,y)\\)}{\\texttt{maxValue}}{otherwise}\\f]\n. where \\f$T(x,y)\\f$ is a threshold calculated individually for each pixel (see adaptiveMethod parameter).\n. \n. The function can process the image in-place.\n. \n. @param src Source 8-bit single-channel image.\n. @param dst Destination image of the same size and the same type as src.\n. @param maxValue Non-zero value assigned to the pixels for which the condition is satisfied\n. 
@param adaptiveMethod Adaptive thresholding algorithm to use, see #AdaptiveThresholdTypes.\n. The #BORDER_REPLICATE | #BORDER_ISOLATED is used to process boundaries.\n. @param thresholdType Thresholding type that must be either #THRESH_BINARY or #THRESH_BINARY_INV,\n. see #ThresholdTypes.\n. @param blockSize Size of a pixel neighborhood that is used to calculate a threshold value for the\n. pixel: 3, 5, 7, and so on.\n. @param C Constant subtracted from the mean or weighted mean (see the details below). Normally, it\n. is positive but may be zero or negative as well.\n. \n. @sa threshold, blur, GaussianBlur"}, + {"add", CV_PY_FN_WITH_KW_(pyopencv_cv_add, 0), "add(src1, src2[, dst[, mask[, dtype]]]) -> dst\n. @brief Calculates the per-element sum of two arrays or an array and a scalar.\n. \n. The function add calculates:\n. - Sum of two arrays when both input arrays have the same size and the same number of channels:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) + \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Sum of an array and a scalar when src2 is constructed from Scalar or has the same number of\n. elements as `src1.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) + \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Sum of a scalar and an array when src1 is constructed from Scalar or has the same number of\n. elements as `src2.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1} + \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. where `I` is a multi-dimensional index of array elements. In case of multi-channel arrays, each\n. channel is processed independently.\n. \n. The first function in the list above can be replaced with matrix expressions:\n. @code{.cpp}\n. dst = src1 + src2;\n. dst += src1; // equivalent to add(dst, src1, dst);\n. @endcode\n. The input arrays and the output array can all have the same or different depths. For example, you\n. can add a 16-bit unsigned array to a 8-bit signed array and store the sum as a 32-bit\n. floating-point array. Depth of the output array is determined by the dtype parameter. In the second\n. and third cases above, as well as in the first case, when src1.depth() == src2.depth(), dtype can\n. be set to the default -1. In this case, the output array will have the same depth as the input\n. array, be it src1, src2 or both.\n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and number of channels as the input array(s); the\n. depth is defined by dtype or src1/src2.\n. @param mask optional operation mask - 8-bit single channel array, that specifies elements of the\n. output array to be changed.\n. @param dtype optional depth of the output array (see the discussion below).\n. @sa subtract, addWeighted, scaleAdd, Mat::convertTo"}, + {"addWeighted", CV_PY_FN_WITH_KW_(pyopencv_cv_addWeighted, 0), "addWeighted(src1, alpha, src2, beta, gamma[, dst[, dtype]]) -> dst\n. @brief Calculates the weighted sum of two arrays.\n. \n. The function addWeighted calculates the weighted sum of two arrays as follows:\n. \\f[\\texttt{dst} (I)= \\texttt{saturate} ( \\texttt{src1} (I)* \\texttt{alpha} + \\texttt{src2} (I)* \\texttt{beta} + \\texttt{gamma} )\\f]\n. where I is a multi-dimensional index of array elements. 
In case of multi-channel arrays, each\n. channel is processed independently.\n. The function can be replaced with a matrix expression:\n. @code{.cpp}\n. dst = src1*alpha + src2*beta + gamma;\n. @endcode\n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array.\n. @param alpha weight of the first array elements.\n. @param src2 second input array of the same size and channel number as src1.\n. @param beta weight of the second array elements.\n. @param gamma scalar added to each sum.\n. @param dst output array that has the same size and number of channels as the input arrays.\n. @param dtype optional depth of the output array; when both input arrays have the same depth, dtype\n. can be set to -1, which will be equivalent to src1.depth().\n. @sa add, subtract, scaleAdd, Mat::convertTo"}, + {"applyColorMap", CV_PY_FN_WITH_KW_(pyopencv_cv_applyColorMap, 0), "applyColorMap(src, colormap[, dst]) -> dst\n. @brief Applies a GNU Octave/MATLAB equivalent colormap on a given image.\n. \n. @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.\n. @param dst The result is the colormapped source image. Note: Mat::create is called on dst.\n. @param colormap The colormap to apply, see #ColormapTypes\n\n\n\napplyColorMap(src, userColor[, dst]) -> dst\n. @brief Applies a user colormap on a given image.\n. \n. @param src The source image, grayscale or colored of type CV_8UC1 or CV_8UC3.\n. @param dst The result is the colormapped source image. Note: Mat::create is called on dst.\n. @param userColor The colormap to apply of type CV_8UC1 or CV_8UC3 and size 256"}, + {"approxPolyDP", CV_PY_FN_WITH_KW_(pyopencv_cv_approxPolyDP, 0), "approxPolyDP(curve, epsilon, closed[, approxCurve]) -> approxCurve\n. @brief Approximates a polygonal curve(s) with the specified precision.\n. \n. The function cv::approxPolyDP approximates a curve or a polygon with another curve/polygon with less\n. vertices so that the distance between them is less or equal to the specified precision. It uses the\n. Douglas-Peucker algorithm <http://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm>\n. \n. @param curve Input vector of a 2D point stored in std::vector or Mat\n. @param approxCurve Result of the approximation. The type should match the type of the input curve.\n. @param epsilon Parameter specifying the approximation accuracy. This is the maximum distance\n. between the original curve and its approximation.\n. @param closed If true, the approximated curve is closed (its first and last vertices are\n. connected). Otherwise, it is not closed."}, + {"arcLength", CV_PY_FN_WITH_KW_(pyopencv_cv_arcLength, 0), "arcLength(curve, closed) -> retval\n. @brief Calculates a contour perimeter or a curve length.\n. \n. The function computes a curve length or a closed contour perimeter.\n. \n. @param curve Input vector of 2D points, stored in std::vector or Mat.\n. @param closed Flag indicating whether the curve is closed or not."}, + {"arrowedLine", CV_PY_FN_WITH_KW_(pyopencv_cv_arrowedLine, 0), "arrowedLine(img, pt1, pt2, color[, thickness[, line_type[, shift[, tipLength]]]]) -> img\n. @brief Draws an arrow segment pointing from the first point to the second one.\n. \n. The function cv::arrowedLine draws an arrow between pt1 and pt2 points in the image. See also #line.\n. \n. @param img Image.\n. @param pt1 The point the arrow starts from.\n. @param pt2 The point the arrow points to.\n. @param color Line color.\n. @param thickness Line thickness.\n.
@param line_type Type of the line. See #LineTypes\n. @param shift Number of fractional bits in the point coordinates.\n. @param tipLength The length of the arrow tip in relation to the arrow length"}, + {"batchDistance", CV_PY_FN_WITH_KW_(pyopencv_cv_batchDistance, 0), "batchDistance(src1, src2, dtype[, dist[, nidx[, normType[, K[, mask[, update[, crosscheck]]]]]]]) -> dist, nidx\n. @brief naive nearest neighbor finder\n. \n. see http://en.wikipedia.org/wiki/Nearest_neighbor_search\n. @todo document"}, + {"bilateralFilter", CV_PY_FN_WITH_KW_(pyopencv_cv_bilateralFilter, 0), "bilateralFilter(src, d, sigmaColor, sigmaSpace[, dst[, borderType]]) -> dst\n. @brief Applies the bilateral filter to an image.\n. \n. The function applies bilateral filtering to the input image, as described in\n. http://www.dai.ed.ac.uk/CVonline/LOCAL_COPIES/MANDUCHI1/Bilateral_Filtering.html\n. bilateralFilter can reduce unwanted noise very well while keeping edges fairly sharp. However, it is\n. very slow compared to most filters.\n. \n. _Sigma values_: For simplicity, you can set the 2 sigma values to be the same. If they are small (\\<\n. 10), the filter will not have much effect, whereas if they are large (\\> 150), they will have a very\n. strong effect, making the image look \"cartoonish\".\n. \n. _Filter size_: Large filters (d \\> 5) are very slow, so it is recommended to use d=5 for real-time\n. applications, and perhaps d=9 for offline applications that need heavy noise filtering.\n. \n. This filter does not work inplace.\n. @param src Source 8-bit or floating-point, 1-channel or 3-channel image.\n. @param dst Destination image of the same size and type as src .\n. @param d Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,\n. it is computed from sigmaSpace.\n. @param sigmaColor Filter sigma in the color space. A larger value of the parameter means that\n. farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting\n. in larger areas of semi-equal color.\n. @param sigmaSpace Filter sigma in the coordinate space. A larger value of the parameter means that\n. farther pixels will influence each other as long as their colors are close enough (see sigmaColor\n. ). When d\\>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is\n. proportional to sigmaSpace.\n. @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes"}, + {"bitwise_and", CV_PY_FN_WITH_KW_(pyopencv_cv_bitwise_and, 0), "bitwise_and(src1, src2[, dst[, mask]]) -> dst\n. @brief computes bitwise conjunction of the two arrays (dst = src1 & src2)\n. Calculates the per-element bit-wise conjunction of two arrays or an\n. array and a scalar.\n. \n. The function cv::bitwise_and calculates the per-element bit-wise logical conjunction for:\n. * Two arrays when src1 and src2 have the same size:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\wedge \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * An array and a scalar when src2 is constructed from Scalar or has\n. the same number of elements as `src1.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\wedge \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * A scalar and an array when src1 is constructed from Scalar or has\n. the same number of elements as `src2.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} \\wedge \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. 
In case of floating-point arrays, their machine-specific bit\n. representations (usually IEEE754-compliant) are used for the operation.\n. In case of multi-channel arrays, each channel is processed\n. independently. In the second and third cases above, the scalar is first\n. converted to the array type.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and type as the input\n. arrays.\n. @param mask optional operation mask, 8-bit single channel array, that\n. specifies elements of the output array to be changed."}, + {"bitwise_not", CV_PY_FN_WITH_KW_(pyopencv_cv_bitwise_not, 0), "bitwise_not(src[, dst[, mask]]) -> dst\n. @brief Inverts every bit of an array.\n. \n. The function cv::bitwise_not calculates per-element bit-wise inversion of the input\n. array:\n. \\f[\\texttt{dst} (I) = \\neg \\texttt{src} (I)\\f]\n. In case of a floating-point input array, its machine-specific bit\n. representation (usually IEEE754-compliant) is used for the operation. In\n. case of multi-channel arrays, each channel is processed independently.\n. @param src input array.\n. @param dst output array that has the same size and type as the input\n. array.\n. @param mask optional operation mask, 8-bit single channel array, that\n. specifies elements of the output array to be changed."}, + {"bitwise_or", CV_PY_FN_WITH_KW_(pyopencv_cv_bitwise_or, 0), "bitwise_or(src1, src2[, dst[, mask]]) -> dst\n. @brief Calculates the per-element bit-wise disjunction of two arrays or an\n. array and a scalar.\n. \n. The function cv::bitwise_or calculates the per-element bit-wise logical disjunction for:\n. * Two arrays when src1 and src2 have the same size:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\vee \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * An array and a scalar when src2 is constructed from Scalar or has\n. the same number of elements as `src1.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\vee \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * A scalar and an array when src1 is constructed from Scalar or has\n. the same number of elements as `src2.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} \\vee \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. In case of floating-point arrays, their machine-specific bit\n. representations (usually IEEE754-compliant) are used for the operation.\n. In case of multi-channel arrays, each channel is processed\n. independently. In the second and third cases above, the scalar is first\n. converted to the array type.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and type as the input\n. arrays.\n. @param mask optional operation mask, 8-bit single channel array, that\n. specifies elements of the output array to be changed."}, + {"bitwise_xor", CV_PY_FN_WITH_KW_(pyopencv_cv_bitwise_xor, 0), "bitwise_xor(src1, src2[, dst[, mask]]) -> dst\n. @brief Calculates the per-element bit-wise \"exclusive or\" operation on two\n. arrays or an array and a scalar.\n. \n. The function cv::bitwise_xor calculates the per-element bit-wise logical \"exclusive-or\"\n. operation for:\n. * Two arrays when src1 and src2 have the same size:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\oplus \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * An array and a scalar when src2 is constructed from Scalar or has\n. 
the same number of elements as `src1.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\oplus \\texttt{src2} \\quad \\texttt{if mask} (I) \\ne0\\f]\n. * A scalar and an array when src1 is constructed from Scalar or has\n. the same number of elements as `src2.channels()`:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} \\oplus \\texttt{src2} (I) \\quad \\texttt{if mask} (I) \\ne0\\f]\n. In case of floating-point arrays, their machine-specific bit\n. representations (usually IEEE754-compliant) are used for the operation.\n. In case of multi-channel arrays, each channel is processed\n. independently. In the 2nd and 3rd cases above, the scalar is first\n. converted to the array type.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array that has the same size and type as the input\n. arrays.\n. @param mask optional operation mask, 8-bit single channel array, that\n. specifies elements of the output array to be changed."}, + {"blendLinear", CV_PY_FN_WITH_KW_(pyopencv_cv_blendLinear, 0), "blendLinear(src1, src2, weights1, weights2[, dst]) -> dst\n. @overload\n. \n. variant without `mask` parameter"}, + {"blur", CV_PY_FN_WITH_KW_(pyopencv_cv_blur, 0), "blur(src, ksize[, dst[, anchor[, borderType]]]) -> dst\n. @brief Blurs an image using the normalized box filter.\n. \n. The function smooths an image using the kernel:\n. \n. \\f[\\texttt{K} = \\frac{1}{\\texttt{ksize.width*ksize.height}} \\begin{bmatrix} 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\end{bmatrix}\\f]\n. \n. The call `blur(src, dst, ksize, anchor, borderType)` is equivalent to `boxFilter(src, dst, src.type(), ksize,\n. anchor, true, borderType)`.\n. \n. @param src input image; it can have any number of channels, which are processed independently, but\n. the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n. @param dst output image of the same size and type as src.\n. @param ksize blurring kernel size.\n. @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel\n. center.\n. @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa boxFilter, bilateralFilter, GaussianBlur, medianBlur"}, + {"borderInterpolate", CV_PY_FN_WITH_KW_(pyopencv_cv_borderInterpolate, 0), "borderInterpolate(p, len, borderType) -> retval\n. @brief Computes the source location of an extrapolated pixel.\n. \n. The function computes and returns the coordinate of a donor pixel corresponding to the specified\n. extrapolated pixel when using the specified extrapolation border mode. For example, if you use\n. cv::BORDER_WRAP mode in the horizontal direction, cv::BORDER_REFLECT_101 in the vertical direction and\n. want to compute value of the \"virtual\" pixel Point(-5, 100) in a floating-point image img , it\n. looks like:\n. @code{.cpp}\n. float val = img.at<float>(borderInterpolate(100, img.rows, cv::BORDER_REFLECT_101),\n. borderInterpolate(-5, img.cols, cv::BORDER_WRAP));\n. @endcode\n. Normally, the function is not called directly. It is used inside filtering functions and also in\n. copyMakeBorder.\n. @param p 0-based coordinate of the extrapolated pixel along one of the axes, likely \\<0 or \\>= len\n. @param len Length of the array along the corresponding axis.\n. @param borderType Border type, one of the #BorderTypes, except for #BORDER_TRANSPARENT and\n. #BORDER_ISOLATED .
When borderType==#BORDER_CONSTANT , the function always returns -1, regardless\n. of p and len.\n. \n. @sa copyMakeBorder"}, + {"boundingRect", CV_PY_FN_WITH_KW_(pyopencv_cv_boundingRect, 0), "boundingRect(array) -> retval\n. @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels of gray-scale image.\n. \n. The function calculates and returns the minimal up-right bounding rectangle for the specified point set or\n. non-zero pixels of gray-scale image.\n. \n. @param array Input gray-scale image or 2D point set, stored in std::vector or Mat."}, + {"boxFilter", CV_PY_FN_WITH_KW_(pyopencv_cv_boxFilter, 0), "boxFilter(src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]]) -> dst\n. @brief Blurs an image using the box filter.\n. \n. The function smooths an image using the kernel:\n. \n. \\f[\\texttt{K} = \\alpha \\begin{bmatrix} 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\\\ \\hdotsfor{6} \\\\ 1 & 1 & 1 & \\cdots & 1 & 1 \\end{bmatrix}\\f]\n. \n. where\n. \n. \\f[\\alpha = \\begin{cases} \\frac{1}{\\texttt{ksize.width*ksize.height}} & \\texttt{when } \\texttt{normalize=true} \\\\1 & \\texttt{otherwise}\\end{cases}\\f]\n. \n. Unnormalized box filter is useful for computing various integral characteristics over each pixel\n. neighborhood, such as covariance matrices of image derivatives (used in dense optical flow\n. algorithms, and so on). If you need to compute pixel sums over variable-size windows, use #integral.\n. \n. @param src input image.\n. @param dst output image of the same size and type as src.\n. @param ddepth the output image depth (-1 to use src.depth()).\n. @param ksize blurring kernel size.\n. @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel\n. center.\n. @param normalize flag, specifying whether the kernel is normalized by its area or not.\n. @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa blur, bilateralFilter, GaussianBlur, medianBlur, integral"}, + {"boxPoints", CV_PY_FN_WITH_KW_(pyopencv_cv_boxPoints, 0), "boxPoints(box[, points]) -> points\n. @brief Finds the four vertices of a rotated rect. Useful to draw the rotated rectangle.\n. \n. The function finds the four vertices of a rotated rectangle. This function is useful to draw the\n. rectangle. In C++, instead of using this function, you can directly use RotatedRect::points method. Please\n. visit the @ref tutorial_bounding_rotated_ellipses \"tutorial on Creating Bounding rotated boxes and ellipses for contours\" for more information.\n. \n. @param box The input rotated rectangle. It may be the output of\n. @param points The output array of four vertices of rectangles."}, + {"calcBackProject", CV_PY_FN_WITH_KW_(pyopencv_cv_calcBackProject, 0), "calcBackProject(images, channels, hist, ranges, scale[, dst]) -> dst\n. @overload"}, + {"calcCovarMatrix", CV_PY_FN_WITH_KW_(pyopencv_cv_calcCovarMatrix, 0), "calcCovarMatrix(samples, mean, flags[, covar[, ctype]]) -> covar, mean\n. @overload\n. @note use #COVAR_ROWS or #COVAR_COLS flag\n. @param samples samples stored as rows/columns of a single matrix.\n. @param covar output covariance matrix of the type ctype and square size.\n. @param mean input or output (depending on the flags) array as the average value of the input vectors.\n. @param flags operation flags as a combination of #CovarFlags\n. 
@param ctype type of the matrixl; it equals 'CV_64F' by default."}, + {"calcHist", CV_PY_FN_WITH_KW_(pyopencv_cv_calcHist, 0), "calcHist(images, channels, mask, histSize, ranges[, hist[, accumulate]]) -> hist\n. @overload"}, + {"cartToPolar", CV_PY_FN_WITH_KW_(pyopencv_cv_cartToPolar, 0), "cartToPolar(x, y[, magnitude[, angle[, angleInDegrees]]]) -> magnitude, angle\n. @brief Calculates the magnitude and angle of 2D vectors.\n. \n. The function cv::cartToPolar calculates either the magnitude, angle, or both\n. for every 2D vector (x(I),y(I)):\n. \\f[\\begin{array}{l} \\texttt{magnitude} (I)= \\sqrt{\\texttt{x}(I)^2+\\texttt{y}(I)^2} , \\\\ \\texttt{angle} (I)= \\texttt{atan2} ( \\texttt{y} (I), \\texttt{x} (I))[ \\cdot180 / \\pi ] \\end{array}\\f]\n. \n. The angles are calculated with accuracy about 0.3 degrees. For the point\n. (0,0), the angle is set to 0.\n. @param x array of x-coordinates; this must be a single-precision or\n. double-precision floating-point array.\n. @param y array of y-coordinates, that must have the same size and same type as x.\n. @param magnitude output array of magnitudes of the same size and type as x.\n. @param angle output array of angles that has the same size and type as\n. x; the angles are measured in radians (from 0 to 2\\*Pi) or in degrees (0 to 360 degrees).\n. @param angleInDegrees a flag, indicating whether the angles are measured\n. in radians (which is by default), or in degrees.\n. @sa Sobel, Scharr"}, + {"checkHardwareSupport", CV_PY_FN_WITH_KW_(pyopencv_cv_checkHardwareSupport, 0), "checkHardwareSupport(feature) -> retval\n. @brief Returns true if the specified feature is supported by the host hardware.\n. \n. The function returns true if the host hardware supports the specified feature. When user calls\n. setUseOptimized(false), the subsequent calls to checkHardwareSupport() will return false until\n. setUseOptimized(true) is called. This way user can dynamically switch on and off the optimized code\n. in OpenCV.\n. @param feature The feature of interest, one of cv::CpuFeatures"}, + {"checkRange", CV_PY_FN_WITH_KW_(pyopencv_cv_checkRange, 0), "checkRange(a[, quiet[, minVal[, maxVal]]]) -> retval, pos\n. @brief Checks every element of an input array for invalid values.\n. \n. The function cv::checkRange checks that every array element is neither NaN nor infinite. When minVal \\>\n. -DBL_MAX and maxVal \\< DBL_MAX, the function also checks that each value is between minVal and\n. maxVal. In case of multi-channel arrays, each channel is processed independently. If some values\n. are out of range, position of the first outlier is stored in pos (when pos != NULL). Then, the\n. function either returns false (when quiet=true) or throws an exception.\n. @param a input array.\n. @param quiet a flag, indicating whether the functions quietly return false when the array elements\n. are out of range or they throw an exception.\n. @param pos optional output parameter, when not NULL, must be a pointer to array of src.dims\n. elements.\n. @param minVal inclusive lower boundary of valid values range.\n. @param maxVal exclusive upper boundary of valid values range."}, + {"circle", CV_PY_FN_WITH_KW_(pyopencv_cv_circle, 0), "circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a circle.\n. \n. The function cv::circle draws a simple or filled circle with a given center and radius.\n. @param img Image where the circle is drawn.\n. @param center Center of the circle.\n. @param radius Radius of the circle.\n. 
@param color Circle color.\n. @param thickness Thickness of the circle outline, if positive. Negative values, like #FILLED,\n. mean that a filled circle is to be drawn.\n. @param lineType Type of the circle boundary. See #LineTypes\n. @param shift Number of fractional bits in the coordinates of the center and in the radius value."}, + {"clipLine", CV_PY_FN_WITH_KW_(pyopencv_cv_clipLine, 0), "clipLine(imgRect, pt1, pt2) -> retval, pt1, pt2\n. @overload\n. @param imgRect Image rectangle.\n. @param pt1 First line point.\n. @param pt2 Second line point."}, + {"compare", CV_PY_FN_WITH_KW_(pyopencv_cv_compare, 0), "compare(src1, src2, cmpop[, dst]) -> dst\n. @brief Performs the per-element comparison of two arrays or an array and scalar value.\n. \n. The function compares:\n. * Elements of two arrays when src1 and src2 have the same size:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} (I) \\,\\texttt{cmpop}\\, \\texttt{src2} (I)\\f]\n. * Elements of src1 with a scalar src2 when src2 is constructed from\n. Scalar or has a single element:\n. \\f[\\texttt{dst} (I) = \\texttt{src1}(I) \\,\\texttt{cmpop}\\, \\texttt{src2}\\f]\n. * src1 with elements of src2 when src1 is constructed from Scalar or\n. has a single element:\n. \\f[\\texttt{dst} (I) = \\texttt{src1} \\,\\texttt{cmpop}\\, \\texttt{src2} (I)\\f]\n. When the comparison result is true, the corresponding element of output\n. array is set to 255. The comparison operations can be replaced with the\n. equivalent matrix expressions:\n. @code{.cpp}\n. Mat dst1 = src1 >= src2;\n. Mat dst2 = src1 < 8;\n. ...\n. @endcode\n. @param src1 first input array or a scalar; when it is an array, it must have a single channel.\n. @param src2 second input array or a scalar; when it is an array, it must have a single channel.\n. @param dst output array of type ref CV_8U that has the same size and the same number of channels as\n. the input arrays.\n. @param cmpop a flag, that specifies correspondence between the arrays (cv::CmpTypes)\n. @sa checkRange, min, max, threshold"}, + {"compareHist", CV_PY_FN_WITH_KW_(pyopencv_cv_compareHist, 0), "compareHist(H1, H2, method) -> retval\n. @brief Compares two histograms.\n. \n. The function cv::compareHist compares two dense or two sparse histograms using the specified method.\n. \n. The function returns \\f$d(H_1, H_2)\\f$ .\n. \n. While the function works well with 1-, 2-, 3-dimensional dense histograms, it may not be suitable\n. for high-dimensional sparse histograms. In such histograms, because of aliasing and sampling\n. problems, the coordinates of non-zero histogram bins can slightly shift. To compare such histograms\n. or more general sparse configurations of weighted points, consider using the #EMD function.\n. \n. @param H1 First compared histogram.\n. @param H2 Second compared histogram of the same size as H1 .\n. @param method Comparison method, see #HistCompMethods"}, + {"completeSymm", CV_PY_FN_WITH_KW_(pyopencv_cv_completeSymm, 0), "completeSymm(m[, lowerToUpper]) -> m\n. @brief Copies the lower or the upper half of a square matrix to its another half.\n. \n. The function cv::completeSymm copies the lower or the upper half of a square matrix to\n. its another half. The matrix diagonal remains unchanged:\n. - \\f$\\texttt{m}_{ij}=\\texttt{m}_{ji}\\f$ for \\f$i > j\\f$ if\n. lowerToUpper=false\n. - \\f$\\texttt{m}_{ij}=\\texttt{m}_{ji}\\f$ for \\f$i < j\\f$ if\n. lowerToUpper=true\n. \n. @param m input-output floating-point square matrix.\n. 
@param lowerToUpper operation flag; if true, the lower half is copied to\n. the upper half. Otherwise, the upper half is copied to the lower half.\n. @sa flip, transpose"}, + {"connectedComponents", CV_PY_FN_WITH_KW_(pyopencv_cv_connectedComponents, 0), "connectedComponents(image[, labels[, connectivity[, ltype]]]) -> retval, labels\n. @overload\n. \n. @param image the 8-bit single-channel image to be labeled\n. @param labels destination labeled image\n. @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively\n. @param ltype output image label type. Currently CV_32S and CV_16U are supported."}, + {"connectedComponentsWithAlgorithm", CV_PY_FN_WITH_KW_(pyopencv_cv_connectedComponentsWithAlgorithm, 0), "connectedComponentsWithAlgorithm(image, connectivity, ltype, ccltype[, labels]) -> retval, labels\n. @brief computes the connected components labeled image of boolean image\n. \n. image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0\n. represents the background label. ltype specifies the output label image type, an important\n. consideration based on the total number of labels or alternatively the total number of pixels in\n. the source image. ccltype specifies the connected components labeling algorithm to use, currently\n. Grana (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes\n. for details. Note that SAUF algorithm forces a row major ordering of labels while BBDT does not.\n. This function uses parallel version of both Grana and Wu's algorithms if at least one allowed\n. parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.\n. \n. @param image the 8-bit single-channel image to be labeled\n. @param labels destination labeled image\n. @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively\n. @param ltype output image label type. Currently CV_32S and CV_16U are supported.\n. @param ccltype connected components algorithm type (see the #ConnectedComponentsAlgorithmsTypes)."}, + {"connectedComponentsWithStats", CV_PY_FN_WITH_KW_(pyopencv_cv_connectedComponentsWithStats, 0), "connectedComponentsWithStats(image[, labels[, stats[, centroids[, connectivity[, ltype]]]]]) -> retval, labels, stats, centroids\n. @overload\n. @param image the 8-bit single-channel image to be labeled\n. @param labels destination labeled image\n. @param stats statistics output for each label, including the background label.\n. Statistics are accessed via stats(label, COLUMN) where COLUMN is one of\n. #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.\n. @param centroids centroid output for each label, including the background label. Centroids are\n. accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type CV_64F.\n. @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively\n. @param ltype output image label type. Currently CV_32S and CV_16U are supported."}, + {"connectedComponentsWithStatsWithAlgorithm", CV_PY_FN_WITH_KW_(pyopencv_cv_connectedComponentsWithStatsWithAlgorithm, 0), "connectedComponentsWithStatsWithAlgorithm(image, connectivity, ltype, ccltype[, labels[, stats[, centroids]]]) -> retval, labels, stats, centroids\n. @brief computes the connected components labeled image of boolean image and also produces a statistics output for each label\n. \n. image with 4 or 8 way connectivity - returns N, the total number of labels [0, N-1] where 0\n. 
represents the background label. ltype specifies the output label image type, an important\n. consideration based on the total number of labels or alternatively the total number of pixels in\n. the source image. ccltype specifies the connected components labeling algorithm to use, currently\n. Grana's (BBDT) and Wu's (SAUF) @cite Wu2009 algorithms are supported, see the #ConnectedComponentsAlgorithmsTypes\n. for details. Note that the SAUF algorithm forces a row major ordering of labels while BBDT does not.\n. This function uses parallel versions of both Grana's and Wu's algorithms (statistics included) if at least one allowed\n. parallel framework is enabled and if the rows of the image are at least twice the number returned by #getNumberOfCPUs.\n. \n. @param image the 8-bit single-channel image to be labeled\n. @param labels destination labeled image\n. @param stats statistics output for each label, including the background label.\n. Statistics are accessed via stats(label, COLUMN) where COLUMN is one of\n. #ConnectedComponentsTypes, selecting the statistic. The data type is CV_32S.\n. @param centroids centroid output for each label, including the background label. Centroids are\n. accessed via centroids(label, 0) for x and centroids(label, 1) for y. The data type is CV_64F.\n. @param connectivity 8 or 4 for 8-way or 4-way connectivity respectively\n. @param ltype output image label type. Currently CV_32S and CV_16U are supported.\n. @param ccltype connected components algorithm type (see #ConnectedComponentsAlgorithmsTypes)."}, + {"contourArea", CV_PY_FN_WITH_KW_(pyopencv_cv_contourArea, 0), "contourArea(contour[, oriented]) -> retval\n. @brief Calculates a contour area.\n. \n. The function computes a contour area. Similarly to moments , the area is computed using the Green\n. formula. Thus, the returned area and the number of non-zero pixels, if you draw the contour using\n. #drawContours or #fillPoly , can be different. Also, the function will most certainly give wrong\n. results for contours with self-intersections.\n. \n. Example:\n. @code\n. vector<Point2f> contour;\n. contour.push_back(Point2f(0, 0));\n. contour.push_back(Point2f(10, 0));\n. contour.push_back(Point2f(10, 10));\n. contour.push_back(Point2f(5, 4));\n. \n. double area0 = contourArea(contour);\n. vector<Point2f> approx;\n. approxPolyDP(contour, approx, 5, true);\n. double area1 = contourArea(approx);\n. \n. cout << \"area0 =\" << area0 << endl <<\n. \"area1 =\" << area1 << endl <<\n. \"approx poly vertices\" << approx.size() << endl;\n. @endcode\n. @param contour Input vector of 2D points (contour vertices), stored in std::vector or Mat.\n. @param oriented Oriented area flag. If it is true, the function returns a signed area value,\n. depending on the contour orientation (clockwise or counter-clockwise). Using this feature you can\n. determine the orientation of a contour by taking the sign of an area. By default, the parameter is\n. false, which means that the absolute value is returned."}, + {"convertFp16", CV_PY_FN_WITH_KW_(pyopencv_cv_convertFp16, 0), "convertFp16(src[, dst]) -> dst\n. @brief Converts an array to half-precision floating-point numbers.\n. \n. This function converts FP32 (single precision floating point) from/to FP16 (half precision floating point). CV_16S format is used to represent FP16 data.\n. There are two use modes (src -> dst): CV_32F -> CV_16S and CV_16S -> CV_32F. The input array has to be of type CV_32F or\n. CV_16S to represent the bit depth. If the input array is of neither type, the function will raise an error.\n. 
The format of half-precision floating point is defined in IEEE 754-2008.\n. \n. @param src input array.\n. @param dst output array."}, + {"convertMaps", CV_PY_FN_WITH_KW_(pyopencv_cv_convertMaps, 0), "convertMaps(map1, map2, dstmap1type[, dstmap1[, dstmap2[, nninterpolation]]]) -> dstmap1, dstmap2\n. @brief Converts image transformation maps from one representation to another.\n. \n. The function converts a pair of maps for remap from one representation to another. The following\n. options ( (map1.type(), map2.type()) \\f$\\rightarrow\\f$ (dstmap1.type(), dstmap2.type()) ) are\n. supported:\n. \n. - \\f$\\texttt{(CV_32FC1, CV_32FC1)} \\rightarrow \\texttt{(CV_16SC2, CV_16UC1)}\\f$. This is the\n. most frequently used conversion operation, in which the original floating-point maps (see remap )\n. are converted to a more compact and much faster fixed-point representation. The first output array\n. contains the rounded coordinates and the second array (created only when nninterpolation=false )\n. contains indices in the interpolation tables.\n. \n. - \\f$\\texttt{(CV_32FC2)} \\rightarrow \\texttt{(CV_16SC2, CV_16UC1)}\\f$. The same as above but\n. the original maps are stored in one 2-channel matrix.\n. \n. - Reverse conversion. Obviously, the reconstructed floating-point maps will not be exactly the same\n. as the originals.\n. \n. @param map1 The first input map of type CV_16SC2, CV_32FC1, or CV_32FC2 .\n. @param map2 The second input map of type CV_16UC1, CV_32FC1, or none (empty matrix),\n. respectively.\n. @param dstmap1 The first output map that has the type dstmap1type and the same size as src .\n. @param dstmap2 The second output map.\n. @param dstmap1type Type of the first output map that should be CV_16SC2, CV_32FC1, or\n. CV_32FC2 .\n. @param nninterpolation Flag indicating whether the fixed-point maps are used for the\n. nearest-neighbor or for a more complex interpolation.\n. \n. @sa remap, undistort, initUndistortRectifyMap"}, + {"convertScaleAbs", CV_PY_FN_WITH_KW_(pyopencv_cv_convertScaleAbs, 0), "convertScaleAbs(src[, dst[, alpha[, beta]]]) -> dst\n. @brief Scales, calculates absolute values, and converts the result to 8-bit.\n. \n. On each element of the input array, the function convertScaleAbs\n. performs three operations sequentially: scaling, taking an absolute\n. value, and conversion to an unsigned 8-bit type:\n. \\f[\\texttt{dst} (I)= \\texttt{saturate\\_cast} (| \\texttt{src} (I)* \\texttt{alpha} + \\texttt{beta} |)\\f]\n. In case of multi-channel arrays, the function processes each channel\n. independently. When the output is not 8-bit, the operation can be\n. emulated by calling the Mat::convertTo method (or by using matrix\n. expressions) and then by calculating an absolute value of the result.\n. For example:\n. @code{.cpp}\n. Mat_<float> A(30,30);\n. randu(A, Scalar(-100), Scalar(100));\n. Mat_<float> B = A*5 + 3;\n. B = abs(B);\n. // Mat_<float> B = abs(A*5+3) will also do the job,\n. // but it will allocate a temporary matrix\n. @endcode\n. @param src input array.\n. @param dst output array.\n. @param alpha optional scale factor.\n. @param beta optional delta added to the scaled values.\n. @sa Mat::convertTo, cv::abs(const Mat&)"}, + {"convexHull", CV_PY_FN_WITH_KW_(pyopencv_cv_convexHull, 0), "convexHull(points[, hull[, clockwise[, returnPoints]]]) -> hull\n. @brief Finds the convex hull of a point set.\n. \n. The function cv::convexHull finds the convex hull of a 2D point set using Sklansky's algorithm @cite Sklansky82\n. 
that has *O(N log N)* complexity in the current implementation.\n. \n. @param points Input 2D point set, stored in std::vector or Mat.\n. @param hull Output convex hull. It is either an integer vector of indices or vector of points. In\n. the first case, the hull elements are 0-based indices of the convex hull points in the original\n. array (since the set of convex hull points is a subset of the original point set). In the second\n. case, hull elements are the convex hull points themselves.\n. @param clockwise Orientation flag. If it is true, the output convex hull is oriented clockwise.\n. Otherwise, it is oriented counter-clockwise. The assumed coordinate system has its X axis pointing\n. to the right, and its Y axis pointing upwards.\n. @param returnPoints Operation flag. In case of a matrix, when the flag is true, the function\n. returns convex hull points. Otherwise, it returns indices of the convex hull points. When the\n. output array is std::vector, the flag is ignored, and the output depends on the type of the\n. vector: std::vector\\<int\\> implies returnPoints=false, std::vector\\<Point\\> implies\n. returnPoints=true.\n. \n. @note `points` and `hull` should be different arrays, in-place processing isn't supported.\n. \n. Check @ref tutorial_hull \"the corresponding tutorial\" for more details.\n. \n. useful links:\n. \n. https://www.learnopencv.com/convex-hull-using-opencv-in-python-and-c/"}, + {"convexityDefects", CV_PY_FN_WITH_KW_(pyopencv_cv_convexityDefects, 0), "convexityDefects(contour, convexhull[, convexityDefects]) -> convexityDefects\n. @brief Finds the convexity defects of a contour.\n. \n. The figure below displays convexity defects of a hand contour:\n. \n. ![image](pics/defects.png)\n. \n. @param contour Input contour.\n. @param convexhull Convex hull obtained using convexHull that should contain indices of the contour\n. points that make the hull.\n. @param convexityDefects The output vector of convexity defects. In C++ and the new Python/Java\n. interface each convexity defect is represented as 4-element integer vector (a.k.a. #Vec4i):\n. (start_index, end_index, farthest_pt_index, fixpt_depth), where indices are 0-based indices\n. in the original contour of the convexity defect beginning, end and the farthest point, and\n. fixpt_depth is fixed-point approximation (with 8 fractional bits) of the distance between the\n. farthest contour point and the hull. That is, the floating-point value of the depth will be\n. fixpt_depth/256.0."}, + {"copyMakeBorder", CV_PY_FN_WITH_KW_(pyopencv_cv_copyMakeBorder, 0), "copyMakeBorder(src, top, bottom, left, right, borderType[, dst[, value]]) -> dst\n. @brief Forms a border around an image.\n. \n. The function copies the source image into the middle of the destination image. The areas to the\n. left, to the right, above and below the copied source image will be filled with extrapolated\n. pixels. This is not what filtering functions based on it do (they extrapolate pixels on the fly), but\n. what other more complex functions, including your own, may do to simplify image boundary handling.\n. \n. The function supports the mode when src is already in the middle of dst . In this case, the\n. function does not copy src itself but simply constructs the border, for example:\n. \n. @code{.cpp}\n. // let border be the same in all directions\n. int border=2;\n. // constructs a larger image to fit both the image and the border\n. Mat gray_buf(rgb.rows + border*2, rgb.cols + border*2, rgb.depth());\n. // select the middle part of it w/o copying data\n. 
Mat gray(gray_buf, Rect(border, border, rgb.cols, rgb.rows));\n. // convert image from RGB to grayscale\n. cvtColor(rgb, gray, COLOR_RGB2GRAY);\n. // form a border in-place\n. copyMakeBorder(gray, gray_buf, border, border,\n. border, border, BORDER_REPLICATE);\n. // now do some custom filtering ...\n. ...\n. @endcode\n. @note When the source image is a part (ROI) of a bigger image, the function will try to use the\n. pixels outside of the ROI to form a border. To disable this feature and always do extrapolation, as\n. if src was not a ROI, use borderType | #BORDER_ISOLATED.\n. \n. @param src Source image.\n. @param dst Destination image of the same type as src and the size Size(src.cols+left+right,\n. src.rows+top+bottom) .\n. @param top the top pixels\n. @param bottom the bottom pixels\n. @param left the left pixels\n. @param right Parameter specifying how many pixels in each direction from the source image rectangle\n. to extrapolate. For example, top=1, bottom=1, left=1, right=1 mean that 1 pixel-wide border needs\n. to be built.\n. @param borderType Border type. See borderInterpolate for details.\n. @param value Border value if borderType==BORDER_CONSTANT .\n. \n. @sa borderInterpolate"}, + {"copyTo", CV_PY_FN_WITH_KW_(pyopencv_cv_copyTo, 0), "copyTo(src, mask[, dst]) -> dst\n. @brief This is an overloaded member function, provided for convenience (python)\n. Copies the matrix to another one.\n. When the operation mask is specified, if the Mat::create call shown above reallocates the matrix, the newly allocated matrix is initialized with all zeros before copying the data.\n. @param src source matrix.\n. @param dst Destination matrix. If it does not have a proper size or type before the operation, it is\n. reallocated.\n. @param mask Operation mask of the same size as \\*this. Its non-zero elements indicate which matrix\n. elements need to be copied. The mask has to be of type CV_8U and can have 1 or multiple channels."}, + {"cornerEigenValsAndVecs", CV_PY_FN_WITH_KW_(pyopencv_cv_cornerEigenValsAndVecs, 0), "cornerEigenValsAndVecs(src, blockSize, ksize[, dst[, borderType]]) -> dst\n. @brief Calculates eigenvalues and eigenvectors of image blocks for corner detection.\n. \n. For every pixel \\f$p\\f$ , the function cornerEigenValsAndVecs considers a blockSize \\f$\\times\\f$ blockSize\n. neighborhood \\f$S(p)\\f$ . It calculates the covariance matrix of derivatives over the neighborhood as:\n. \n. \\f[M = \\begin{bmatrix} \\sum _{S(p)}(dI/dx)^2 & \\sum _{S(p)}dI/dx dI/dy \\\\ \\sum _{S(p)}dI/dx dI/dy & \\sum _{S(p)}(dI/dy)^2 \\end{bmatrix}\\f]\n. \n. where the derivatives are computed using the Sobel operator.\n. \n. After that, it finds eigenvectors and eigenvalues of \\f$M\\f$ and stores them in the destination image as\n. \\f$(\\lambda_1, \\lambda_2, x_1, y_1, x_2, y_2)\\f$ where\n. \n. - \\f$\\lambda_1, \\lambda_2\\f$ are the non-sorted eigenvalues of \\f$M\\f$\n. - \\f$x_1, y_1\\f$ are the eigenvectors corresponding to \\f$\\lambda_1\\f$\n. - \\f$x_2, y_2\\f$ are the eigenvectors corresponding to \\f$\\lambda_2\\f$\n. \n. The output of the function can be used for robust edge or corner detection.\n. \n. @param src Input single-channel 8-bit or floating-point image.\n. @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .\n. @param blockSize Neighborhood size (see details below).\n. @param ksize Aperture parameter for the Sobel operator.\n. @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.\n. \n. 
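Example (an illustrative Python sketch added for this port's bindings; 'input.png' is a placeholder input):\n. @code{.py}\n. import cv2 as cv\n. import numpy as np\n. # placeholder input; any single-channel 8-bit or float image works\n. img = cv.imread('input.png', cv.IMREAD_GRAYSCALE)\n. # dst packs 6 channels per pixel: (lambda_1, lambda_2, x_1, y_1, x_2, y_2)\n. eig = cv.cornerEigenValsAndVecs(img, 5, 3)\n. # the smaller eigenvalue is the corner response that cornerMinEigenVal computes\n. response = np.minimum(eig[:, :, 0], eig[:, :, 1])\n. @endcode\n. \n. 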
@sa cornerMinEigenVal, cornerHarris, preCornerDetect"}, + {"cornerHarris", CV_PY_FN_WITH_KW_(pyopencv_cv_cornerHarris, 0), "cornerHarris(src, blockSize, ksize, k[, dst[, borderType]]) -> dst\n. @brief Harris corner detector.\n. \n. The function runs the Harris corner detector on the image. Similarly to cornerMinEigenVal and\n. cornerEigenValsAndVecs , for each pixel \\f$(x, y)\\f$ it calculates a \\f$2\\times2\\f$ gradient covariance\n. matrix \\f$M^{(x,y)}\\f$ over a \\f$\\texttt{blockSize} \\times \\texttt{blockSize}\\f$ neighborhood. Then, it\n. computes the following characteristic:\n. \n. \\f[\\texttt{dst} (x,y) = \\mathrm{det} M^{(x,y)} - k \\cdot \\left ( \\mathrm{tr} M^{(x,y)} \\right )^2\\f]\n. \n. Corners in the image can be found as the local maxima of this response map.\n. \n. @param src Input single-channel 8-bit or floating-point image.\n. @param dst Image to store the Harris detector responses. It has the type CV_32FC1 and the same\n. size as src .\n. @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).\n. @param ksize Aperture parameter for the Sobel operator.\n. @param k Harris detector free parameter. See the formula above.\n. @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported."}, + {"cornerMinEigenVal", CV_PY_FN_WITH_KW_(pyopencv_cv_cornerMinEigenVal, 0), "cornerMinEigenVal(src, blockSize[, dst[, ksize[, borderType]]]) -> dst\n. @brief Calculates the minimal eigenvalue of gradient matrices for corner detection.\n. \n. The function is similar to cornerEigenValsAndVecs but it calculates and stores only the minimal\n. eigenvalue of the covariance matrix of derivatives, that is, \\f$\\min(\\lambda_1, \\lambda_2)\\f$ in terms\n. of the formulae in the cornerEigenValsAndVecs description.\n. \n. @param src Input single-channel 8-bit or floating-point image.\n. @param dst Image to store the minimal eigenvalues. It has the type CV_32FC1 and the same size as\n. src .\n. @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).\n. @param ksize Aperture parameter for the Sobel operator.\n. @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported."}, + {"cornerSubPix", CV_PY_FN_WITH_KW_(pyopencv_cv_cornerSubPix, 0), "cornerSubPix(image, corners, winSize, zeroZone, criteria) -> corners\n. @brief Refines the corner locations.\n. \n. The function iterates to find the sub-pixel accurate location of corners or radial saddle\n. points as described in @cite forstner1987fast, and as shown on the figure below.\n. \n. ![image](pics/cornersubpix.png)\n. \n. Sub-pixel accurate corner locator is based on the observation that every vector from the center \\f$q\\f$\n. to a point \\f$p\\f$ located within a neighborhood of \\f$q\\f$ is orthogonal to the image gradient at \\f$p\\f$\n. subject to image and measurement noise. Consider the expression:\n. \n. \\f[\\epsilon _i = {DI_{p_i}}^T \\cdot (q - p_i)\\f]\n. \n. where \\f${DI_{p_i}}\\f$ is an image gradient at one of the points \\f$p_i\\f$ in a neighborhood of \\f$q\\f$ . The\n. value of \\f$q\\f$ is to be found so that \\f$\\epsilon_i\\f$ is minimized. A system of equations may be set up\n. with \\f$\\epsilon_i\\f$ set to zero:\n. \n. \\f[\\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T) \\cdot q - \\sum _i(DI_{p_i} \\cdot {DI_{p_i}}^T \\cdot p_i)\\f]\n. \n. where the gradients are summed within a neighborhood (\"search window\") of \\f$q\\f$ . Calling the first\n. 
gradient term \\f$G\\f$ and the second gradient term \\f$b\\f$ gives:\n. \n. \\f[q = G^{-1} \\cdot b\\f]\n. \n. The algorithm sets the center of the neighborhood window at this new center \\f$q\\f$ and then iterates\n. until the center stays within a set threshold.\n. \n. @param image Input single-channel, 8-bit or float image.\n. @param corners Initial coordinates of the input corners and refined coordinates provided for\n. output.\n. @param winSize Half of the side length of the search window. For example, if winSize=Size(5,5) ,\n. then a \\f$(5*2+1) \\times (5*2+1) = 11 \\times 11\\f$ search window is used.\n. @param zeroZone Half of the size of the dead region in the middle of the search zone over which\n. the summation in the formula below is not done. It is used sometimes to avoid possible\n. singularities of the autocorrelation matrix. The value of (-1,-1) indicates that there is no such\n. a size.\n. @param criteria Criteria for termination of the iterative process of corner refinement. That is,\n. the process of corner position refinement stops either after criteria.maxCount iterations or when\n. the corner position moves by less than criteria.epsilon on some iteration."}, + {"countNonZero", CV_PY_FN_WITH_KW_(pyopencv_cv_countNonZero, 0), "countNonZero(src) -> retval\n. @brief Counts non-zero array elements.\n. \n. The function returns the number of non-zero elements in src :\n. \\f[\\sum _{I: \\; \\texttt{src} (I) \\ne0 } 1\\f]\n. @param src single-channel array.\n. @sa mean, meanStdDev, norm, minMaxLoc, calcCovarMatrix"}, + {"createCLAHE", CV_PY_FN_WITH_KW_(pyopencv_cv_createCLAHE, 0), "createCLAHE([, clipLimit[, tileGridSize]]) -> retval\n. @brief Creates a smart pointer to a cv::CLAHE class and initializes it.\n. \n. @param clipLimit Threshold for contrast limiting.\n. @param tileGridSize Size of grid for histogram equalization. Input image will be divided into\n. equally sized rectangular tiles. tileGridSize defines the number of tiles in row and column."}, + {"createGeneralizedHoughBallard", CV_PY_FN_WITH_KW_(pyopencv_cv_createGeneralizedHoughBallard, 0), "createGeneralizedHoughBallard() -> retval\n. @brief Creates a smart pointer to a cv::GeneralizedHoughBallard class and initializes it."}, + {"createGeneralizedHoughGuil", CV_PY_FN_WITH_KW_(pyopencv_cv_createGeneralizedHoughGuil, 0), "createGeneralizedHoughGuil() -> retval\n. @brief Creates a smart pointer to a cv::GeneralizedHoughGuil class and initializes it."}, + {"createHanningWindow", CV_PY_FN_WITH_KW_(pyopencv_cv_createHanningWindow, 0), "createHanningWindow(winSize, type[, dst]) -> dst\n. @brief This function computes a Hanning window coefficients in two dimensions.\n. \n. See (http://en.wikipedia.org/wiki/Hann_function) and (http://en.wikipedia.org/wiki/Window_function)\n. for more information.\n. \n. An example is shown below:\n. @code\n. // create hanning window of size 100x100 and type CV_32F\n. Mat hann;\n. createHanningWindow(hann, Size(100, 100), CV_32F);\n. @endcode\n. @param dst Destination array to place Hann coefficients in\n. @param winSize The window size specifications (both width and height must be > 1)\n. @param type Created array type"}, + {"createLineSegmentDetector", CV_PY_FN_WITH_KW_(pyopencv_cv_createLineSegmentDetector, 0), "createLineSegmentDetector([, refine[, scale[, sigma_scale[, quant[, ang_th[, log_eps[, density_th[, n_bins]]]]]]]]) -> retval\n. @brief Creates a smart pointer to a LineSegmentDetector object and initializes it.\n. \n. 
The LineSegmentDetector algorithm is defined using the standard values. Only advanced users may want\n. to edit those, so as to tailor it for their own application.\n. \n. @param refine The way found lines will be refined, see #LineSegmentDetectorModes\n. @param scale The scale of the image that will be used to find the lines. Range (0..1].\n. @param sigma_scale Sigma for Gaussian filter. It is computed as sigma = sigma_scale/scale.\n. @param quant Bound to the quantization error on the gradient norm.\n. @param ang_th Gradient angle tolerance in degrees.\n. @param log_eps Detection threshold: -log10(NFA) \> log_eps. Used only when advanced refinement is chosen.\n. @param density_th Minimal density of aligned region points in the enclosing rectangle.\n. @param n_bins Number of bins in pseudo-ordering of gradient modulus."}, + {"cubeRoot", CV_PY_FN_WITH_KW_(pyopencv_cv_cubeRoot, 0), "cubeRoot(val) -> retval\n. @brief Computes the cube root of an argument.\n. \n. The function cubeRoot computes \f$\sqrt[3]{\texttt{val}}\f$. Negative arguments are handled correctly.\n. NaN and Inf are not handled. The accuracy approaches the maximum possible accuracy for\n. single-precision data.\n. @param val A function argument."}, + {"cvtColor", CV_PY_FN_WITH_KW_(pyopencv_cv_cvtColor, 0), "cvtColor(src, code[, dst[, dstCn]]) -> dst\n. @brief Converts an image from one color space to another.\n. \n. The function converts an input image from one color space to another. In case of a transformation\n. to/from RGB color space, the order of the channels should be specified explicitly (RGB or BGR). Note\n. that the default color format in OpenCV is often referred to as RGB but it is actually BGR (the\n. bytes are reversed). So the first byte in a standard (24-bit) color image will be an 8-bit Blue\n. component, the second byte will be Green, and the third byte will be Red. The fourth, fifth, and\n. sixth bytes would then be the second pixel (Blue, then Green, then Red), and so on.\n. \n. The conventional ranges for R, G, and B channel values are:\n. - 0 to 255 for CV_8U images\n. - 0 to 65535 for CV_16U images\n. - 0 to 1 for CV_32F images\n. \n. In case of linear transformations, the range does not matter. But in case of a non-linear\n. transformation, an input RGB image should be normalized to the proper value range to get the correct\n. results, for example, for RGB \f$\rightarrow\f$ L\*u\*v\* transformation. For example, if you have a\n. 32-bit floating-point image directly converted from an 8-bit image without any scaling, then it will\n. have the 0..255 value range instead of 0..1 assumed by the function. So, before calling #cvtColor ,\n. you first need to scale the image down:\n. @code\n. img *= 1./255;\n. cvtColor(img, img, COLOR_BGR2Luv);\n. @endcode\n. If you use #cvtColor with 8-bit images, the conversion will lose some information. For many\n. applications, this will not be noticeable but it is recommended to use 32-bit images in applications\n. that need the full range of colors or that convert an image before an operation and then convert\n. back.\n. \n. If conversion adds the alpha channel, its value will be set to the maximum of the corresponding channel\n. range: 255 for CV_8U, 65535 for CV_16U, 1 for CV_32F.\n. \n. @param src input image: 8-bit unsigned, 16-bit unsigned ( CV_16UC... ), or single-precision\n. floating-point.\n. @param dst output image of the same size and depth as src.\n. @param code color space conversion code (see #ColorConversionCodes).\n. 
@param dstCn number of channels in the destination image; if the parameter is 0, the number of the\n. channels is derived automatically from src and code.\n. \n. @see @ref imgproc_color_conversions"}, + {"cvtColorTwoPlane", CV_PY_FN_WITH_KW_(pyopencv_cv_cvtColorTwoPlane, 0), "cvtColorTwoPlane(src1, src2, code[, dst]) -> dst\n. @brief Converts an image from one color space to another where the source image is\n. stored in two planes.\n. \n. This function only supports YUV420 to RGB conversion as of now.\n. \n. @param src1: 8-bit image (#CV_8U) of the Y plane.\n. @param src2: image containing interleaved U/V plane.\n. @param dst: output image.\n. @param code: Specifies the type of conversion. It can take any of the following values:\n. - #COLOR_YUV2BGR_NV12\n. - #COLOR_YUV2RGB_NV12\n. - #COLOR_YUV2BGRA_NV12\n. - #COLOR_YUV2RGBA_NV12\n. - #COLOR_YUV2BGR_NV21\n. - #COLOR_YUV2RGB_NV21\n. - #COLOR_YUV2BGRA_NV21\n. - #COLOR_YUV2RGBA_NV21"}, + {"dct", CV_PY_FN_WITH_KW_(pyopencv_cv_dct, 0), "dct(src[, dst[, flags]]) -> dst\n. @brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array.\n. \n. The function cv::dct performs a forward or inverse discrete Cosine transform (DCT) of a 1D or 2D\n. floating-point array:\n. - Forward Cosine transform of a 1D vector of N elements:\n. \\f[Y = C^{(N)} \\cdot X\\f]\n. where\n. \\f[C^{(N)}_{jk}= \\sqrt{\\alpha_j/N} \\cos \\left ( \\frac{\\pi(2k+1)j}{2N} \\right )\\f]\n. and\n. \\f$\\alpha_0=1\\f$, \\f$\\alpha_j=2\\f$ for *j \\> 0*.\n. - Inverse Cosine transform of a 1D vector of N elements:\n. \\f[X = \\left (C^{(N)} \\right )^{-1} \\cdot Y = \\left (C^{(N)} \\right )^T \\cdot Y\\f]\n. (since \\f$C^{(N)}\\f$ is an orthogonal matrix, \\f$C^{(N)} \\cdot \\left(C^{(N)}\\right)^T = I\\f$ )\n. - Forward 2D Cosine transform of M x N matrix:\n. \\f[Y = C^{(N)} \\cdot X \\cdot \\left (C^{(N)} \\right )^T\\f]\n. - Inverse 2D Cosine transform of M x N matrix:\n. \\f[X = \\left (C^{(N)} \\right )^T \\cdot X \\cdot C^{(N)}\\f]\n. \n. The function chooses the mode of operation by looking at the flags and size of the input array:\n. - If (flags & #DCT_INVERSE) == 0 , the function does a forward 1D or 2D transform. Otherwise, it\n. is an inverse 1D or 2D transform.\n. - If (flags & #DCT_ROWS) != 0 , the function performs a 1D transform of each row.\n. - If the array is a single column or a single row, the function performs a 1D transform.\n. - If none of the above is true, the function performs a 2D transform.\n. \n. @note Currently dct supports even-size arrays (2, 4, 6 ...). For data analysis and approximation, you\n. can pad the array when necessary.\n. Also, the function performance depends very much, and not monotonically, on the array size (see\n. getOptimalDFTSize ). In the current implementation DCT of a vector of size N is calculated via DFT\n. of a vector of size N/2 . Thus, the optimal DCT size N1 \\>= N can be calculated as:\n. @code\n. size_t getOptimalDCTSize(size_t N) { return 2*getOptimalDFTSize((N+1)/2); }\n. N1 = getOptimalDCTSize(N);\n. @endcode\n. @param src input floating-point array.\n. @param dst output array of the same size and type as src .\n. @param flags transformation flags as a combination of cv::DftFlags (DCT_*)\n. @sa dft , getOptimalDFTSize , idct"}, + {"demosaicing", CV_PY_FN_WITH_KW_(pyopencv_cv_demosaicing, 0), "demosaicing(src, code[, dst[, dstCn]]) -> dst\n. @brief main function for all demosaicing processes\n. \n. @param src input image: 8-bit unsigned or 16-bit unsigned.\n. 
@param dst output image of the same size and depth as src.\n. @param code Color space conversion code (see the description below).\n. @param dstCn number of channels in the destination image; if the parameter is 0, the number of the\n. channels is derived automatically from src and code.\n. \n. The function can do the following transformations:\n. \n. - Demosaicing using bilinear interpolation\n. \n. #COLOR_BayerBG2BGR , #COLOR_BayerGB2BGR , #COLOR_BayerRG2BGR , #COLOR_BayerGR2BGR\n. \n. #COLOR_BayerBG2GRAY , #COLOR_BayerGB2GRAY , #COLOR_BayerRG2GRAY , #COLOR_BayerGR2GRAY\n. \n. - Demosaicing using Variable Number of Gradients.\n. \n. #COLOR_BayerBG2BGR_VNG , #COLOR_BayerGB2BGR_VNG , #COLOR_BayerRG2BGR_VNG , #COLOR_BayerGR2BGR_VNG\n. \n. - Edge-Aware Demosaicing.\n. \n. #COLOR_BayerBG2BGR_EA , #COLOR_BayerGB2BGR_EA , #COLOR_BayerRG2BGR_EA , #COLOR_BayerGR2BGR_EA\n. \n. - Demosaicing with alpha channel\n. \n. #COLOR_BayerBG2BGRA , #COLOR_BayerGB2BGRA , #COLOR_BayerRG2BGRA , #COLOR_BayerGR2BGRA\n. \n. @sa cvtColor"}, + {"determinant", CV_PY_FN_WITH_KW_(pyopencv_cv_determinant, 0), "determinant(mtx) -> retval\n. @brief Returns the determinant of a square floating-point matrix.\n. \n. The function cv::determinant calculates and returns the determinant of the\n. specified matrix. For small matrices ( mtx.cols=mtx.rows\\<=3 ), the\n. direct method is used. For larger matrices, the function uses LU\n. factorization with partial pivoting.\n. \n. For symmetric positively-determined matrices, it is also possible to use\n. eigen decomposition to calculate the determinant.\n. @param mtx input matrix that must have CV_32FC1 or CV_64FC1 type and\n. square size.\n. @sa trace, invert, solve, eigen, @ref MatrixExpressions"}, + {"dft", CV_PY_FN_WITH_KW_(pyopencv_cv_dft, 0), "dft(src[, dst[, flags[, nonzeroRows]]]) -> dst\n. @brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.\n. \n. The function cv::dft performs one of the following:\n. - Forward the Fourier transform of a 1D vector of N elements:\n. \\f[Y = F^{(N)} \\cdot X,\\f]\n. where \\f$F^{(N)}_{jk}=\\exp(-2\\pi i j k/N)\\f$ and \\f$i=\\sqrt{-1}\\f$\n. - Inverse the Fourier transform of a 1D vector of N elements:\n. \\f[\\begin{array}{l} X'= \\left (F^{(N)} \\right )^{-1} \\cdot Y = \\left (F^{(N)} \\right )^* \\cdot y \\\\ X = (1/N) \\cdot X, \\end{array}\\f]\n. where \\f$F^*=\\left(\\textrm{Re}(F^{(N)})-\\textrm{Im}(F^{(N)})\\right)^T\\f$\n. - Forward the 2D Fourier transform of a M x N matrix:\n. \\f[Y = F^{(M)} \\cdot X \\cdot F^{(N)}\\f]\n. - Inverse the 2D Fourier transform of a M x N matrix:\n. \\f[\\begin{array}{l} X'= \\left (F^{(M)} \\right )^* \\cdot Y \\cdot \\left (F^{(N)} \\right )^* \\\\ X = \\frac{1}{M \\cdot N} \\cdot X' \\end{array}\\f]\n. \n. In case of real (single-channel) data, the output spectrum of the forward Fourier transform or input\n. spectrum of the inverse Fourier transform can be represented in a packed format called *CCS*\n. (complex-conjugate-symmetrical). It was borrowed from IPL (Intel\\* Image Processing Library). Here\n. is how 2D *CCS* spectrum looks:\n. 
\\f[\\begin{bmatrix} Re Y_{0,0} & Re Y_{0,1} & Im Y_{0,1} & Re Y_{0,2} & Im Y_{0,2} & \\cdots & Re Y_{0,N/2-1} & Im Y_{0,N/2-1} & Re Y_{0,N/2} \\\\ Re Y_{1,0} & Re Y_{1,1} & Im Y_{1,1} & Re Y_{1,2} & Im Y_{1,2} & \\cdots & Re Y_{1,N/2-1} & Im Y_{1,N/2-1} & Re Y_{1,N/2} \\\\ Im Y_{1,0} & Re Y_{2,1} & Im Y_{2,1} & Re Y_{2,2} & Im Y_{2,2} & \\cdots & Re Y_{2,N/2-1} & Im Y_{2,N/2-1} & Im Y_{1,N/2} \\\\ \\hdotsfor{9} \\\\ Re Y_{M/2-1,0} & Re Y_{M-3,1} & Im Y_{M-3,1} & \\hdotsfor{3} & Re Y_{M-3,N/2-1} & Im Y_{M-3,N/2-1}& Re Y_{M/2-1,N/2} \\\\ Im Y_{M/2-1,0} & Re Y_{M-2,1} & Im Y_{M-2,1} & \\hdotsfor{3} & Re Y_{M-2,N/2-1} & Im Y_{M-2,N/2-1}& Im Y_{M/2-1,N/2} \\\\ Re Y_{M/2,0} & Re Y_{M-1,1} & Im Y_{M-1,1} & \\hdotsfor{3} & Re Y_{M-1,N/2-1} & Im Y_{M-1,N/2-1}& Re Y_{M/2,N/2} \\end{bmatrix}\\f]\n. \n. In case of 1D transform of a real vector, the output looks like the first row of the matrix above.\n. \n. So, the function chooses an operation mode depending on the flags and size of the input array:\n. - If #DFT_ROWS is set or the input array has a single row or single column, the function\n. performs a 1D forward or inverse transform of each row of a matrix when #DFT_ROWS is set.\n. Otherwise, it performs a 2D transform.\n. - If the input array is real and #DFT_INVERSE is not set, the function performs a forward 1D or\n. 2D transform:\n. - When #DFT_COMPLEX_OUTPUT is set, the output is a complex matrix of the same size as\n. input.\n. - When #DFT_COMPLEX_OUTPUT is not set, the output is a real matrix of the same size as\n. input. In case of 2D transform, it uses the packed format as shown above. In case of a\n. single 1D transform, it looks like the first row of the matrix above. In case of\n. multiple 1D transforms (when using the #DFT_ROWS flag), each row of the output matrix\n. looks like the first row of the matrix above.\n. - If the input array is complex and either #DFT_INVERSE or #DFT_REAL_OUTPUT are not set, the\n. output is a complex array of the same size as input. The function performs a forward or\n. inverse 1D or 2D transform of the whole input array or each row of the input array\n. independently, depending on the flags DFT_INVERSE and DFT_ROWS.\n. - When #DFT_INVERSE is set and the input array is real, or it is complex but #DFT_REAL_OUTPUT\n. is set, the output is a real array of the same size as input. The function performs a 1D or 2D\n. inverse transformation of the whole input array or each individual row, depending on the flags\n. #DFT_INVERSE and #DFT_ROWS.\n. \n. If #DFT_SCALE is set, the scaling is done after the transformation.\n. \n. Unlike dct , the function supports arrays of arbitrary size. But only those arrays are processed\n. efficiently, whose sizes can be factorized in a product of small prime numbers (2, 3, and 5 in the\n. current implementation). Such an efficient DFT size can be calculated using the getOptimalDFTSize\n. method.\n. \n. The sample below illustrates how to calculate a DFT-based convolution of two 2D real arrays:\n. @code\n. void convolveDFT(InputArray A, InputArray B, OutputArray C)\n. {\n. // reallocate the output array if needed\n. C.create(abs(A.rows - B.rows)+1, abs(A.cols - B.cols)+1, A.type());\n. Size dftSize;\n. // calculate the size of DFT transform\n. dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);\n. dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);\n. \n. // allocate temporary buffers and initialize them with 0's\n. Mat tempA(dftSize, A.type(), Scalar::all(0));\n. Mat tempB(dftSize, B.type(), Scalar::all(0));\n. \n. 
// copy A and B to the top-left corners of tempA and tempB, respectively\n. Mat roiA(tempA, Rect(0,0,A.cols,A.rows));\n. A.copyTo(roiA);\n. Mat roiB(tempB, Rect(0,0,B.cols,B.rows));\n. B.copyTo(roiB);\n. \n. // now transform the padded A & B in-place;\n. // use \"nonzeroRows\" hint for faster processing\n. dft(tempA, tempA, 0, A.rows);\n. dft(tempB, tempB, 0, B.rows);\n. \n. // multiply the spectrums;\n. // the function handles packed spectrum representations well\n. mulSpectrums(tempA, tempB, tempA);\n. \n. // transform the product back from the frequency domain.\n. // Even though all the result rows will be non-zero,\n. // you need only the first C.rows of them, and thus you\n. // pass nonzeroRows == C.rows\n. dft(tempA, tempA, DFT_INVERSE + DFT_SCALE, C.rows);\n. \n. // now copy the result back to C.\n. tempA(Rect(0, 0, C.cols, C.rows)).copyTo(C);\n. \n. // all the temporary buffers will be deallocated automatically\n. }\n. @endcode\n. To optimize this sample, consider the following approaches:\n. - Since nonzeroRows != 0 is passed to the forward transform calls and since A and B are copied to\n. the top-left corners of tempA and tempB, respectively, it is not necessary to clear the whole\n. tempA and tempB. It is only necessary to clear the tempA.cols - A.cols ( tempB.cols - B.cols)\n. rightmost columns of the matrices.\n. - This DFT-based convolution does not have to be applied to the whole big arrays, especially if B\n. is significantly smaller than A or vice versa. Instead, you can calculate convolution by parts.\n. To do this, you need to split the output array C into multiple tiles. For each tile, estimate\n. which parts of A and B are required to calculate convolution in this tile. If the tiles in C are\n. too small, the speed will decrease a lot because of repeated work. In the ultimate case, when\n. each tile in C is a single pixel, the algorithm becomes equivalent to the naive convolution\n. algorithm. If the tiles are too big, the temporary arrays tempA and tempB become too big and\n. there is also a slowdown because of bad cache locality. So, there is an optimal tile size\n. somewhere in the middle.\n. - If different tiles in C can be calculated in parallel and, thus, the convolution is done by\n. parts, the loop can be threaded.\n. \n. All of the above improvements have been implemented in #matchTemplate and #filter2D . Therefore, by\n. using them, you can get the performance even better than with the above theoretically optimal\n. implementation. Though, those two functions actually calculate cross-correlation, not convolution,\n. so you need to \"flip\" the second convolution operand B vertically and horizontally using flip .\n. @note\n. - An example using the discrete fourier transform can be found at\n. opencv_source_code/samples/cpp/dft.cpp\n. - (Python) An example using the dft functionality to perform Wiener deconvolution can be found\n. at opencv_source/samples/python/deconvolution.py\n. - (Python) An example rearranging the quadrants of a Fourier image can be found at\n. opencv_source/samples/python/dft.py\n. @param src input array that could be real or complex.\n. @param dst output array whose size and type depends on the flags .\n. @param flags transformation flags, representing a combination of the #DftFlags\n. @param nonzeroRows when the parameter is not zero, the function assumes that only the first\n. nonzeroRows rows of the input array (#DFT_INVERSE is not set) or only the first nonzeroRows of the\n. 
output array (#DFT_INVERSE is set) contain non-zeros, thus, the function can handle the rest of the\n. rows more efficiently and save some time; this technique is very useful for calculating array\n. cross-correlation or convolution using DFT.\n. @sa dct , getOptimalDFTSize , mulSpectrums, filter2D , matchTemplate , flip , cartToPolar ,\n. magnitude , phase"}, + {"dilate", CV_PY_FN_WITH_KW_(pyopencv_cv_dilate, 0), "dilate(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst\n. @brief Dilates an image by using a specific structuring element.\n. \n. The function dilates the source image using the specified structuring element that determines the\n. shape of a pixel neighborhood over which the maximum is taken:\n. \f[\texttt{dst} (x,y) = \max _{(x',y'): \, \texttt{element} (x',y') \ne0 } \texttt{src} (x+x',y+y')\f]\n. \n. The function supports the in-place mode. Dilation can be applied several ( iterations ) times. In\n. case of multi-channel images, each channel is processed independently.\n. \n. @param src input image; the number of channels can be arbitrary, but the depth should be one of\n. CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n. @param dst output image of the same size and type as src.\n. @param kernel structuring element used for dilation; if element=Mat(), a 3 x 3 rectangular\n. structuring element is used. Kernel can be created using #getStructuringElement\n. @param anchor position of the anchor within the element; default value (-1, -1) means that the\n. anchor is at the element center.\n. @param iterations number of times dilation is applied.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @param borderValue border value in case of a constant border\n. @sa erode, morphologyEx, getStructuringElement"}, + {"distanceTransform", CV_PY_FN_WITH_KW_(pyopencv_cv_distanceTransform, 0), "distanceTransform(src, distanceType, maskSize[, dst[, dstType]]) -> dst\n. @overload\n. @param src 8-bit, single-channel (binary) source image.\n. @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,\n. single-channel image of the same size as src .\n. @param distanceType Type of distance, see #DistanceTypes\n. @param maskSize Size of the distance transform mask, see #DistanceTransformMasks. In case of the\n. #DIST_L1 or #DIST_C distance type, the parameter is forced to 3 because a \f$3\times 3\f$ mask gives\n. the same result as \f$5\times 5\f$ or any larger aperture.\n. @param dstType Type of output image. It can be CV_8U or CV_32F. Type CV_8U can be used only for\n. the first variant of the function and distanceType == #DIST_L1."}, + {"distanceTransformWithLabels", CV_PY_FN_WITH_KW_(pyopencv_cv_distanceTransformWithLabels, 0), "distanceTransformWithLabels(src, distanceType, maskSize[, dst[, labels[, labelType]]]) -> dst, labels\n. @brief Calculates the distance to the closest zero pixel for each pixel of the source image.\n. \n. The function cv::distanceTransform calculates the approximate or precise distance from every binary\n. image pixel to the nearest zero pixel. For zero image pixels, the distance will obviously be zero.\n. \n. When maskSize == #DIST_MASK_PRECISE and distanceType == #DIST_L2 , the function runs the\n. algorithm described in @cite Felzenszwalb04 . This algorithm is parallelized with the TBB library.\n. \n. In other cases, the algorithm @cite Borgefors86 is used. This means that for a pixel the function\n. 
finds the shortest path to the nearest zero pixel consisting of basic shifts: horizontal, vertical,\n. diagonal, or knight's move (the last is available for a \f$5\times 5\f$ mask). The overall\n. distance is calculated as a sum of these basic distances. Since the distance function should be\n. symmetric, all of the horizontal and vertical shifts must have the same cost (denoted as `a`), all\n. the diagonal shifts must have the same cost (denoted as `b`), and all knight's moves must have the\n. same cost (denoted as `c`). For the #DIST_C and #DIST_L1 types, the distance is calculated\n. precisely, whereas for #DIST_L2 (Euclidean distance) the distance can be calculated only with a\n. relative error (a \f$5\times 5\f$ mask gives more accurate results). For `a`,`b`, and `c`, OpenCV\n. uses the values suggested in the original paper:\n. - DIST_L1: `a = 1, b = 2`\n. - DIST_L2:\n. - `3 x 3`: `a=0.955, b=1.3693`\n. - `5 x 5`: `a=1, b=1.4, c=2.1969`\n. - DIST_C: `a = 1, b = 1`\n. \n. Typically, for a fast, coarse distance estimation #DIST_L2, a \f$3\times 3\f$ mask is used. For a\n. more accurate distance estimation #DIST_L2, a \f$5\times 5\f$ mask or the precise algorithm is used.\n. Note that both the precise and the approximate algorithms are linear in the number of pixels.\n. \n. This variant of the function not only computes the minimum distance for each pixel \f$(x, y)\f$\n. but also identifies the nearest connected component consisting of zero pixels\n. (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). The index of the\n. component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function\n. automatically finds connected components of zero pixels in the input image and marks them with\n. distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and\n. marks all the zero pixels with distinct labels.\n. \n. In this mode, the complexity is still linear. That is, the function provides a very fast way to\n. compute the Voronoi diagram for a binary image. Currently, the second variant can use only the\n. approximate distance transform algorithm, i.e. maskSize=#DIST_MASK_PRECISE is not supported\n. yet.\n. \n. @param src 8-bit, single-channel (binary) source image.\n. @param dst Output image with calculated distances. It is an 8-bit or 32-bit floating-point,\n. single-channel image of the same size as src.\n. @param labels Output 2D array of labels (the discrete Voronoi diagram). It has the type\n. CV_32SC1 and the same size as src.\n. @param distanceType Type of distance, see #DistanceTypes\n. @param maskSize Size of the distance transform mask, see #DistanceTransformMasks.\n. #DIST_MASK_PRECISE is not supported by this variant. In case of the #DIST_L1 or #DIST_C distance type,\n. the parameter is forced to 3 because a \f$3\times 3\f$ mask gives the same result as \f$5\times\n. 5\f$ or any larger aperture.\n. @param labelType Type of the label array to build, see #DistanceTransformLabelTypes."}, + {"divSpectrums", CV_PY_FN_WITH_KW_(pyopencv_cv_divSpectrums, 0), "divSpectrums(a, b, flags[, c[, conjB]]) -> c\n. @brief Performs the per-element division of the first Fourier spectrum by the second Fourier spectrum.\n. \n. The function cv::divSpectrums performs the per-element division of the first array by the second array.\n. The arrays are CCS-packed or complex matrices that are results of a real or complex Fourier transform.\n. \n. @param a first input array.\n. 
@param b second input array of the same size and type as src1 .\n. @param c output array of the same size and type as src1 .\n. @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that\n. each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value.\n. @param conjB optional flag that conjugates the second input array before the multiplication (true)\n. or not (false)."}, + {"divide", CV_PY_FN_WITH_KW_(pyopencv_cv_divide, 0), "divide(src1, src2[, dst[, scale[, dtype]]]) -> dst\n. @brief Performs per-element division of two arrays or a scalar by an array.\n. \n. The function cv::divide divides one array by another:\n. \\f[\\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\\f]\n. or a scalar by an array when there is no src1 :\n. \\f[\\texttt{dst(I) = saturate(scale/src2(I))}\\f]\n. \n. Different channels of multi-channel arrays are processed independently.\n. \n. For integer types when src2(I) is zero, dst(I) will also be zero.\n. \n. @note In case of floating point data there is no special defined behavior for zero src2(I) values.\n. Regular floating-point division is used.\n. Expect correct IEEE-754 behaviour for floating-point data (with NaN, Inf result values).\n. \n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array.\n. @param src2 second input array of the same size and type as src1.\n. @param scale scalar factor.\n. @param dst output array of the same size and type as src2.\n. @param dtype optional depth of the output array; if -1, dst will have depth src2.depth(), but in\n. case of an array-by-array division, you can only pass -1 when src1.depth()==src2.depth().\n. @sa multiply, add, subtract\n\n\n\ndivide(scale, src2[, dst[, dtype]]) -> dst\n. @overload"}, + {"drawContours", CV_PY_FN_WITH_KW_(pyopencv_cv_drawContours, 0), "drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> image\n. @brief Draws contours outlines or filled contours.\n. \n. The function draws contour outlines in the image if \\f$\\texttt{thickness} \\ge 0\\f$ or fills the area\n. bounded by the contours if \\f$\\texttt{thickness}<0\\f$ . The example below shows how to retrieve\n. connected components from the binary image and label them: :\n. @include snippets/imgproc_drawContours.cpp\n. \n. @param image Destination image.\n. @param contours All the input contours. Each contour is stored as a point vector.\n. @param contourIdx Parameter indicating a contour to draw. If it is negative, all the contours are drawn.\n. @param color Color of the contours.\n. @param thickness Thickness of lines the contours are drawn with. If it is negative (for example,\n. thickness=#FILLED ), the contour interiors are drawn.\n. @param lineType Line connectivity. See #LineTypes\n. @param hierarchy Optional information about hierarchy. It is only needed if you want to draw only\n. some of the contours (see maxLevel ).\n. @param maxLevel Maximal level for drawn contours. If it is 0, only the specified contour is drawn.\n. If it is 1, the function draws the contour(s) and all the nested contours. If it is 2, the function\n. draws the contours, all the nested contours, all the nested-to-nested contours, and so on. This\n. parameter is only taken into account when there is hierarchy available.\n. 
@param offset Optional contour shift parameter. Shift all the drawn contours by the specified\n. \\f$\\texttt{offset}=(dx,dy)\\f$ .\n. @note When thickness=#FILLED, the function is designed to handle connected components with holes correctly\n. even when no hierarchy data is provided. This is done by analyzing all the outlines together\n. using even-odd rule. This may give incorrect results if you have a joint collection of separately retrieved\n. contours. In order to solve this problem, you need to call #drawContours separately for each sub-group\n. of contours, or iterate over the collection using contourIdx parameter."}, + {"drawMarker", CV_PY_FN_WITH_KW_(pyopencv_cv_drawMarker, 0), "drawMarker(img, position, color[, markerType[, markerSize[, thickness[, line_type]]]]) -> img\n. @brief Draws a marker on a predefined position in an image.\n. \n. The function cv::drawMarker draws a marker on a given position in the image. For the moment several\n. marker types are supported, see #MarkerTypes for more information.\n. \n. @param img Image.\n. @param position The point where the crosshair is positioned.\n. @param color Line color.\n. @param markerType The specific type of marker you want to use, see #MarkerTypes\n. @param thickness Line thickness.\n. @param line_type Type of the line, See #LineTypes\n. @param markerSize The length of the marker axis [default = 20 pixels]"}, + {"eigen", CV_PY_FN_WITH_KW_(pyopencv_cv_eigen, 0), "eigen(src[, eigenvalues[, eigenvectors]]) -> retval, eigenvalues, eigenvectors\n. @brief Calculates eigenvalues and eigenvectors of a symmetric matrix.\n. \n. The function cv::eigen calculates just eigenvalues, or eigenvalues and eigenvectors of the symmetric\n. matrix src:\n. @code\n. src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()\n. @endcode\n. \n. @note Use cv::eigenNonSymmetric for calculation of real eigenvalues and eigenvectors of non-symmetric matrix.\n. \n. @param src input matrix that must have CV_32FC1 or CV_64FC1 type, square size and be symmetrical\n. (src ^T^ == src).\n. @param eigenvalues output vector of eigenvalues of the same type as src; the eigenvalues are stored\n. in the descending order.\n. @param eigenvectors output matrix of eigenvectors; it has the same size and type as src; the\n. eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding\n. eigenvalues.\n. @sa eigenNonSymmetric, completeSymm , PCA"}, + {"eigenNonSymmetric", CV_PY_FN_WITH_KW_(pyopencv_cv_eigenNonSymmetric, 0), "eigenNonSymmetric(src[, eigenvalues[, eigenvectors]]) -> eigenvalues, eigenvectors\n. @brief Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).\n. \n. @note Assumes real eigenvalues.\n. \n. The function calculates eigenvalues and eigenvectors (optional) of the square matrix src:\n. @code\n. src*eigenvectors.row(i).t() = eigenvalues.at(i)*eigenvectors.row(i).t()\n. @endcode\n. \n. @param src input matrix (CV_32FC1 or CV_64FC1 type).\n. @param eigenvalues output vector of eigenvalues (type is the same type as src).\n. @param eigenvectors output matrix of eigenvectors (type is the same type as src). The eigenvectors are stored as subsequent matrix rows, in the same order as the corresponding eigenvalues.\n. @sa eigen"}, + {"ellipse", CV_PY_FN_WITH_KW_(pyopencv_cv_ellipse, 0), "ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a simple or thick elliptic arc or fills an ellipse sector.\n. \n. 
The function cv::ellipse with more parameters draws an ellipse outline, a filled ellipse, an elliptic\n. arc, or a filled ellipse sector. The drawing code uses general parametric form.\n. A piecewise-linear curve is used to approximate the elliptic arc\n. boundary. If you need more control of the ellipse rendering, you can retrieve the curve using\n. #ellipse2Poly and then render it with #polylines or fill it with #fillPoly. If you use the first\n. variant of the function and want to draw the whole ellipse, not an arc, pass `startAngle=0` and\n. `endAngle=360`. If `startAngle` is greater than `endAngle`, they are swapped. The figure below explains\n. the meaning of the parameters to draw the blue arc.\n. \n. ![Parameters of Elliptic Arc](pics/ellipse.svg)\n. \n. @param img Image.\n. @param center Center of the ellipse.\n. @param axes Half of the size of the ellipse main axes.\n. @param angle Ellipse rotation angle in degrees.\n. @param startAngle Starting angle of the elliptic arc in degrees.\n. @param endAngle Ending angle of the elliptic arc in degrees.\n. @param color Ellipse color.\n. @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that\n. a filled ellipse sector is to be drawn.\n. @param lineType Type of the ellipse boundary. See #LineTypes\n. @param shift Number of fractional bits in the coordinates of the center and values of axes.\n\n\n\nellipse(img, box, color[, thickness[, lineType]]) -> img\n. @overload\n. @param img Image.\n. @param box Alternative ellipse representation via RotatedRect. This means that the function draws\n. an ellipse inscribed in the rotated rectangle.\n. @param color Ellipse color.\n. @param thickness Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that\n. a filled ellipse sector is to be drawn.\n. @param lineType Type of the ellipse boundary. See #LineTypes"}, + {"ellipse2Poly", CV_PY_FN_WITH_KW_(pyopencv_cv_ellipse2Poly, 0), "ellipse2Poly(center, axes, angle, arcStart, arcEnd, delta) -> pts\n. @brief Approximates an elliptic arc with a polyline.\n. \n. The function ellipse2Poly computes the vertices of a polyline that approximates the specified\n. elliptic arc. It is used by #ellipse. If `arcStart` is greater than `arcEnd`, they are swapped.\n. \n. @param center Center of the arc.\n. @param axes Half of the size of the ellipse main axes. See #ellipse for details.\n. @param angle Rotation angle of the ellipse in degrees. See #ellipse for details.\n. @param arcStart Starting angle of the elliptic arc in degrees.\n. @param arcEnd Ending angle of the elliptic arc in degrees.\n. @param delta Angle between the subsequent polyline vertices. It defines the approximation\n. accuracy.\n. @param pts Output vector of polyline vertices."}, + {"equalizeHist", CV_PY_FN_WITH_KW_(pyopencv_cv_equalizeHist, 0), "equalizeHist(src[, dst]) -> dst\n. @brief Equalizes the histogram of a grayscale image.\n. \n. The function equalizes the histogram of the input image using the following algorithm:\n. \n. - Calculate the histogram \\f$H\\f$ for src .\n. - Normalize the histogram so that the sum of histogram bins is 255.\n. - Compute the integral of the histogram:\n. \\f[H'_i = \\sum _{0 \\le j < i} H(j)\\f]\n. - Transform the image using \\f$H'\\f$ as a look-up table: \\f$\\texttt{dst}(x,y) = H'(\\texttt{src}(x,y))\\f$\n. \n. The algorithm normalizes the brightness and increases the contrast of the image.\n. \n. @param src Source 8-bit single channel image.\n. 
@param dst Destination image of the same size and type as src ."}, + {"erode", CV_PY_FN_WITH_KW_(pyopencv_cv_erode, 0), "erode(src, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst\n. @brief Erodes an image by using a specific structuring element.\n. \n. The function erodes the source image using the specified structuring element that determines the\n. shape of a pixel neighborhood over which the minimum is taken:\n. \n. \\f[\\texttt{dst} (x,y) = \\min _{(x',y'): \\, \\texttt{element} (x',y') \\ne 0 } \\texttt{src} (x+x',y+y')\\f]\n. \n. The function supports the in-place mode. Erosion can be applied several ( iterations ) times. In\n. case of multi-channel images, each channel is processed independently.\n. \n. @param src input image; the number of channels can be arbitrary, but the depth should be one of\n. CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n. @param dst output image of the same size and type as src.\n. @param kernel structuring element used for erosion; if `element=Mat()`, a `3 x 3` rectangular\n. structuring element is used. Kernel can be created using #getStructuringElement.\n. @param anchor position of the anchor within the element; default value (-1, -1) means that the\n. anchor is at the element center.\n. @param iterations number of times erosion is applied.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @param borderValue border value in case of a constant border\n. @sa dilate, morphologyEx, getStructuringElement"}, + {"exp", CV_PY_FN_WITH_KW_(pyopencv_cv_exp, 0), "exp(src[, dst]) -> dst\n. @brief Calculates the exponent of every array element.\n. \n. The function cv::exp calculates the exponent of every element of the input\n. array:\n. \\f[\\texttt{dst} [I] = e^{ src(I) }\\f]\n. \n. The maximum relative error is about 7e-6 for single-precision input and\n. less than 1e-10 for double-precision input. Currently, the function\n. converts denormalized values to zeros on output. Special values (NaN,\n. Inf) are not handled.\n. @param src input array.\n. @param dst output array of the same size and type as src.\n. @sa log , cartToPolar , polarToCart , phase , pow , sqrt , magnitude"}, + {"extractChannel", CV_PY_FN_WITH_KW_(pyopencv_cv_extractChannel, 0), "extractChannel(src, coi[, dst]) -> dst\n. @brief Extracts a single channel from src (coi is 0-based index)\n. @param src input array\n. @param dst output array\n. @param coi index of channel to extract\n. @sa mixChannels, split"}, + {"fastAtan2", CV_PY_FN_WITH_KW_(pyopencv_cv_fastAtan2, 0), "fastAtan2(y, x) -> retval\n. @brief Calculates the angle of a 2D vector in degrees.\n. \n. The function fastAtan2 calculates the full-range angle of an input 2D vector. The angle is measured\n. in degrees and varies from 0 to 360 degrees. The accuracy is about 0.3 degrees.\n. @param x x-coordinate of the vector.\n. @param y y-coordinate of the vector."}, + {"fillConvexPoly", CV_PY_FN_WITH_KW_(pyopencv_cv_fillConvexPoly, 0), "fillConvexPoly(img, points, color[, lineType[, shift]]) -> img\n. @brief Fills a convex polygon.\n. \n. The function cv::fillConvexPoly draws a filled convex polygon. This function is much faster than the\n. function #fillPoly . It can fill not only convex polygons but any monotonic polygon without\n. self-intersections, that is, a polygon whose contour intersects every horizontal line (scan line)\n. at most twice (though its top-most and/or bottom-most edge could be horizontal).\n. \n. @param img Image.\n. 
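The erode() minimum-filter formula above is easy to sanity-check from Python; the 9x9 test image below is made up for illustration:

```python
import numpy as np
import cv2

img = np.zeros((9, 9), dtype=np.uint8)
img[2:7, 2:7] = 255                        # 5x5 white square

kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
eroded = cv2.erode(img, kernel)
# A 3x3 rectangular element peels one pixel off each side: 5x5 -> 3x3.
print(np.count_nonzero(img), np.count_nonzero(eroded))   # 25 9
```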
@param points Polygon vertices.\n. @param color Polygon color.\n. @param lineType Type of the polygon boundaries. See #LineTypes\n. @param shift Number of fractional bits in the vertex coordinates."}, + {"fillPoly", CV_PY_FN_WITH_KW_(pyopencv_cv_fillPoly, 0), "fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img\n. @brief Fills the area bounded by one or more polygons.\n. \n. The function cv::fillPoly fills an area bounded by several polygonal contours. The function can fill\n. complex areas, for example, areas with holes, contours with self-intersections (some of their\n. parts), and so forth.\n. \n. @param img Image.\n. @param pts Array of polygons where each polygon is represented as an array of points.\n. @param color Polygon color.\n. @param lineType Type of the polygon boundaries. See #LineTypes\n. @param shift Number of fractional bits in the vertex coordinates.\n. @param offset Optional offset of all points of the contours."}, + {"filter2D", CV_PY_FN_WITH_KW_(pyopencv_cv_filter2D, 0), "filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst\n. @brief Convolves an image with the kernel.\n. \n. The function applies an arbitrary linear filter to an image. In-place operation is supported. When\n. the aperture is partially outside the image, the function interpolates outlier pixel values\n. according to the specified border mode.\n. \n. The function actually computes correlation, not convolution:\n. \n. \\f[\\texttt{dst} (x,y) = \\sum _{ \\substack{0\\leq x' < \\texttt{kernel.cols}\\\\{0\\leq y' < \\texttt{kernel.rows}}}} \\texttt{kernel} (x',y')* \\texttt{src} (x+x'- \\texttt{anchor.x} ,y+y'- \\texttt{anchor.y} )\\f]\n. \n. That is, the kernel is not mirrored around the anchor point. If you need a real convolution, flip\n. the kernel using #flip and set the new anchor to `(kernel.cols - anchor.x - 1, kernel.rows -\n. anchor.y - 1)`.\n. \n. The function uses the DFT-based algorithm in case of sufficiently large kernels (~`11 x 11` or\n. larger) and the direct algorithm for small kernels.\n. \n. @param src input image.\n. @param dst output image of the same size and the same number of channels as src.\n. @param ddepth desired depth of the destination image, see @ref filter_depths \"combinations\"\n. @param kernel convolution kernel (or rather a correlation kernel), a single-channel floating point\n. matrix; if you want to apply different kernels to different channels, split the image into\n. separate color planes using split and process them individually.\n. @param anchor anchor of the kernel that indicates the relative position of a filtered point within\n. the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor\n. is at the kernel center.\n. @param delta optional value added to the filtered pixels before storing them in dst.\n. @param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa sepFilter2D, dft, matchTemplate"}, + {"findContours", CV_PY_FN_WITH_KW_(pyopencv_cv_findContours, 0), "findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> contours, hierarchy\n. @brief Finds contours in a binary image.\n. \n. The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . The contours\n. are a useful tool for shape analysis and object detection and recognition. See squares.cpp in the\n. OpenCV sample directory.\n. @note Since OpenCV 3.2, the source image is not modified by this function.\n. \n. 
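Since findContours() and drawContours() are documented to work together, a short Python round trip may help; the synthetic binary image is an assumption for the example:

```python
import numpy as np
import cv2

img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (20, 20), (60, 60), 255, -1)         # one filled square

contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL,
                                       cv2.CHAIN_APPROX_SIMPLE)
canvas = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.drawContours(canvas, contours, -1, (0, 0, 255), 2)  # contourIdx=-1: draw all
# hierarchy is nested inside a top-level array, as the note above says.
print(len(contours), hierarchy.shape)                   # 1 (1, 1, 4)
```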
@param image Source, an 8-bit single-channel image. Non-zero pixels are treated as 1's. Zero\n. pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,\n. #adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.\n. If mode equals #RETR_CCOMP or #RETR_FLOODFILL, the input can also be a 32-bit integer image of labels (CV_32SC1).\n. @param contours Detected contours. Each contour is stored as a vector of points (e.g.\n. std::vector<std::vector<cv::Point> >).\n. @param hierarchy Optional output vector (e.g. std::vector<cv::Vec4i>), containing information about the image topology. It has\n. as many elements as the number of contours. For each i-th contour contours[i], the elements\n. hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based indices\n. in contours of the next and previous contours at the same hierarchical level, the first child\n. contour and the parent contour, respectively. If for the contour i there are no next, previous,\n. parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.\n. @note In Python, hierarchy is nested inside a top level array. Use hierarchy[0][i] to access hierarchical elements of i-th contour.\n. @param mode Contour retrieval mode, see #RetrievalModes\n. @param method Contour approximation method, see #ContourApproximationModes\n. @param offset Optional offset by which every contour point is shifted. This is useful if the\n. contours are extracted from the image ROI and then they should be analyzed in the whole image\n. context."}, + {"findNonZero", CV_PY_FN_WITH_KW_(pyopencv_cv_findNonZero, 0), "findNonZero(src[, idx]) -> idx\n. @brief Returns the list of locations of non-zero pixels\n. \n. Given a binary matrix (likely returned from an operation such\n. as threshold(), compare(), >, ==, etc), return all of\n. the non-zero indices as a cv::Mat or std::vector<cv::Point> (x,y).\n. For example:\n. @code{.cpp}\n. cv::Mat binaryImage; // input, binary image\n. cv::Mat locations; // output, locations of non-zero pixels\n. cv::findNonZero(binaryImage, locations);\n. \n. // access pixel coordinates\n. Point pnt = locations.at<Point>(i);\n. @endcode\n. or\n. @code{.cpp}\n. cv::Mat binaryImage; // input, binary image\n. vector<Point> locations; // output, locations of non-zero pixels\n. cv::findNonZero(binaryImage, locations);\n. \n. // access pixel coordinates\n. Point pnt = locations[i];\n. @endcode\n. @param src single-channel array\n. @param idx the output array, type of cv::Mat or std::vector<Point>, corresponding to non-zero indices in the input"}, + {"fitEllipse", CV_PY_FN_WITH_KW_(pyopencv_cv_fitEllipse, 0), "fitEllipse(points) -> retval\n. @brief Fits an ellipse around a set of 2D points.\n. \n. The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of\n. all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by @cite Fitzgibbon95\n. is used. The developer should keep in mind that it is possible that the returned\n. ellipse/rotatedRect data contains negative indices, due to the data points being close to the\n. border of the containing Mat element.\n. \n. @param points Input 2D point set, stored in std::vector\\<\\> or Mat"}, + {"fitEllipseAMS", CV_PY_FN_WITH_KW_(pyopencv_cv_fitEllipseAMS, 0), "fitEllipseAMS(points) -> retval\n. @brief Fits an ellipse around a set of 2D points.\n. \n. The function calculates the ellipse that fits a set of 2D points.\n. 
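As a quick illustration of fitEllipse() returning a RotatedRect, a Python sketch on sampled ellipse points (the sampled ellipse is invented for the example; the axis ordering in the result may vary):

```python
import numpy as np
import cv2

t = np.linspace(0, 2 * np.pi, 60)
pts = np.stack([50 + 20 * np.cos(t),
                50 + 10 * np.sin(t)], axis=1).astype(np.float32)

(cx, cy), (w, h), angle = cv2.fitEllipse(pts)              # RotatedRect
# w and h are full axis lengths (2a = 40, 2b = 20), in unspecified order.
print(round(cx), round(cy), sorted((round(w), round(h))))  # 50 50 [20, 40]
```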
It returns the rotated rectangle in which the ellipse is inscribed.\n. The Approximate Mean Square (AMS) proposed by @cite Taubin1991 is used.\n. \n. For an ellipse, this basis set is \\f$ \\chi= \\left(x^2, x y, y^2, x, y, 1\\right) \\f$,\n. which is a set of six free coefficients \\f$ A^T=\\left\\{A_{\\text{xx}},A_{\\text{xy}},A_{\\text{yy}},A_x,A_y,A_0\\right\\} \\f$.\n. However, to specify an ellipse, all that is needed is five numbers: the major and minor axes lengths \\f$ (a,b) \\f$,\n. the position \\f$ (x_0,y_0) \\f$, and the orientation \\f$ \\theta \\f$. This is because the basis set includes lines,\n. quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.\n. If the fit is found to be a parabolic or hyperbolic function then the standard #fitEllipse method is used.\n. The AMS method restricts the fit to parabolic, hyperbolic and elliptical curves\n. by imposing the condition that \\f$ A^T ( D_x^T D_x + D_y^T D_y) A = 1 \\f$ where\n. the matrices \\f$ D_x \\f$ and \\f$ D_y \\f$ are the partial derivatives of the design matrix \\f$ D \\f$ with\n. respect to x and y. The matrices are formed row by row applying the following to\n. each of the points in the set:\n. \\f{align*}{\n. D(i,:)&=\\left\\{x_i^2, x_i y_i, y_i^2, x_i, y_i, 1\\right\\} &\n. D_x(i,:)&=\\left\\{2 x_i,y_i,0,1,0,0\\right\\} &\n. D_y(i,:)&=\\left\\{0,x_i,2 y_i,0,1,0\\right\\}\n. \\f}\n. The AMS method minimizes the cost function\n. \\f{equation*}{\n. \\epsilon ^2=\\frac{ A^T D^T D A }{ A^T (D_x^T D_x + D_y^T D_y) A }\n. \\f}\n. \n. The minimum cost is found by solving the generalized eigenvalue problem.\n. \n. \\f{equation*}{\n. D^T D A = \\lambda \\left( D_x^T D_x + D_y^T D_y\\right) A\n. \\f}\n. \n. @param points Input 2D point set, stored in std::vector\\<\\> or Mat"}, + {"fitEllipseDirect", CV_PY_FN_WITH_KW_(pyopencv_cv_fitEllipseDirect, 0), "fitEllipseDirect(points) -> retval\n. @brief Fits an ellipse around a set of 2D points.\n. \n. The function calculates the ellipse that fits a set of 2D points.\n. It returns the rotated rectangle in which the ellipse is inscribed.\n. The Direct least square (Direct) method by @cite Fitzgibbon1999 is used.\n. \n. For an ellipse, this basis set is \\f$ \\chi= \\left(x^2, x y, y^2, x, y, 1\\right) \\f$,\n. which is a set of six free coefficients \\f$ A^T=\\left\\{A_{\\text{xx}},A_{\\text{xy}},A_{\\text{yy}},A_x,A_y,A_0\\right\\} \\f$.\n. However, to specify an ellipse, all that is needed is five numbers: the major and minor axes lengths \\f$ (a,b) \\f$,\n. the position \\f$ (x_0,y_0) \\f$, and the orientation \\f$ \\theta \\f$. This is because the basis set includes lines,\n. quadratics, parabolic and hyperbolic functions as well as elliptical functions as possible fits.\n. The Direct method confines the fit to ellipses by ensuring that \\f$ 4 A_{xx} A_{yy}- A_{xy}^2 > 0 \\f$.\n. The condition imposed is that \\f$ 4 A_{xx} A_{yy}- A_{xy}^2=1 \\f$ which satisfies the inequality\n. and, as the coefficients can be arbitrarily scaled, is not overly restrictive.\n. \n. \\f{equation*}{\n. \\epsilon ^2= A^T D^T D A \\quad \\text{with} \\quad A^T C A =1 \\quad \\text{and} \\quad C=\\left(\\begin{matrix}\n. 0 & 0 & 2 & 0 & 0 & 0 \\\\\n. 0 & -1 & 0 & 0 & 0 & 0 \\\\\n. 2 & 0 & 0 & 0 & 0 & 0 \\\\\n. 0 & 0 & 0 & 0 & 0 & 0 \\\\\n. 0 & 0 & 0 & 0 & 0 & 0 \\\\\n. 0 & 0 & 0 & 0 & 0 & 0\n. \\end{matrix} \\right)\n. \\f}\n. \n. The minimum cost is found by solving the generalized eigenvalue problem.\n. \n. \\f{equation*}{\n. 
D^T D A = \\lambda \\left( C\\right) A\n. \\f}\n. \n. The system produces only one positive eigenvalue \\f$ \\lambda\\f$ which is chosen as the solution\n. with its eigenvector \\f$\\mathbf{u}\\f$. These are used to find the coefficients\n. \n. \\f{equation*}{\n. A = \\sqrt{\\frac{1}{\\mathbf{u}^T C \\mathbf{u}}} \\mathbf{u}\n. \\f}\n. The scaling factor guarantees that \\f$A^T C A =1\\f$.\n. \n. @param points Input 2D point set, stored in std::vector\\<\\> or Mat"}, + {"fitLine", CV_PY_FN_WITH_KW_(pyopencv_cv_fitLine, 0), "fitLine(points, distType, param, reps, aeps[, line]) -> line\n. @brief Fits a line to a 2D or 3D point set.\n. \n. The function fitLine fits a line to a 2D or 3D point set by minimizing \\f$\\sum_i \\rho(r_i)\\f$ where\n. \\f$r_i\\f$ is the distance between the \\f$i^{th}\\f$ point and the line, and \\f$\\rho(r)\\f$ is a distance function, one\n. of the following:\n. - DIST_L2\n. \\f[\\rho (r) = r^2/2 \\quad \\text{(the simplest and the fastest least-squares method)}\\f]\n. - DIST_L1\n. \\f[\\rho (r) = r\\f]\n. - DIST_L12\n. \\f[\\rho (r) = 2 \\cdot ( \\sqrt{1 + \\frac{r^2}{2}} - 1)\\f]\n. - DIST_FAIR\n. \\f[\\rho \\left (r \\right ) = C^2 \\cdot \\left ( \\frac{r}{C} - \\log{\\left(1 + \\frac{r}{C}\\right)} \\right ) \\quad \\text{where} \\quad C=1.3998\\f]\n. - DIST_WELSCH\n. \\f[\\rho \\left (r \\right ) = \\frac{C^2}{2} \\cdot \\left ( 1 - \\exp{\\left(-\\left(\\frac{r}{C}\\right)^2\\right)} \\right ) \\quad \\text{where} \\quad C=2.9846\\f]\n. - DIST_HUBER\n. \\f[\\rho (r) = \\fork{r^2/2}{if \\(r < C\\)}{C \\cdot (r-C/2)}{otherwise} \\quad \\text{where} \\quad C=1.345\\f]\n. \n. The algorithm is based on the M-estimator technique\n. that iteratively fits the line using the weighted least-squares algorithm. After each iteration the\n. weights \\f$w_i\\f$ are adjusted to be inversely proportional to \\f$\\rho(r_i)\\f$ .\n. \n. @param points Input vector of 2D or 3D points, stored in std::vector\\<\\> or Mat.\n. @param line Output line parameters. In case of 2D fitting, it should be a vector of 4 elements\n. (like Vec4f) - (vx, vy, x0, y0), where (vx, vy) is a normalized vector collinear to the line and\n. (x0, y0) is a point on the line. In case of 3D fitting, it should be a vector of 6 elements (like\n. Vec6f) - (vx, vy, vz, x0, y0, z0), where (vx, vy, vz) is a normalized vector collinear to the line\n. and (x0, y0, z0) is a point on the line.\n. @param distType Distance used by the M-estimator, see #DistanceTypes\n. @param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value\n. is chosen.\n. @param reps Sufficient accuracy for the radius (distance between the coordinate origin and the line).\n. @param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for reps and aeps."}, + {"flip", CV_PY_FN_WITH_KW_(pyopencv_cv_flip, 0), "flip(src, flipCode[, dst]) -> dst\n. @brief Flips a 2D array around vertical, horizontal, or both axes.\n. \n. The function cv::flip flips the array in one of three different ways (row\n. and column indices are 0-based):\n. \\f[\\texttt{dst} _{ij} =\n. \\left\\{\n. \\begin{array}{l l}\n. \\texttt{src} _{\\texttt{src.rows}-i-1,j} & if\\; \\texttt{flipCode} = 0 \\\\\n. \\texttt{src} _{i, \\texttt{src.cols} -j-1} & if\\; \\texttt{flipCode} > 0 \\\\\n. \\texttt{src} _{ \\texttt{src.rows} -i-1, \\texttt{src.cols} -j-1} & if\\; \\texttt{flipCode} < 0 \\\\\n. \\end{array}\n. \\right.\\f]\n. The example scenarios of using the function are the following:\n. 
* Vertical flipping of the image (flipCode == 0) to switch between\n. top-left and bottom-left image origin. This is a typical operation\n. in video processing on Microsoft Windows\\* OS.\n. * Horizontal flipping of the image with the subsequent horizontal\n. shift and absolute difference calculation to check for a\n. vertical-axis symmetry (flipCode \\> 0).\n. * Simultaneous horizontal and vertical flipping of the image with\n. the subsequent shift and absolute difference calculation to check\n. for a central symmetry (flipCode \\< 0).\n. * Reversing the order of point arrays (flipCode \\> 0 or\n. flipCode == 0).\n. @param src input array.\n. @param dst output array of the same size and type as src.\n. @param flipCode a flag to specify how to flip the array; 0 means\n. flipping around the x-axis and positive value (for example, 1) means\n. flipping around y-axis. Negative value (for example, -1) means flipping\n. around both axes.\n. @sa transpose , repeat , completeSymm"}, + {"floodFill", CV_PY_FN_WITH_KW_(pyopencv_cv_floodFill, 0), "floodFill(image, mask, seedPoint, newVal[, loDiff[, upDiff[, flags]]]) -> retval, image, mask, rect\n. @brief Fills a connected component with the given color.\n. \n. The function cv::floodFill fills a connected component starting from the seed point with the specified\n. color. The connectivity is determined by the color/brightness closeness of the neighbor pixels. The\n. pixel at \\f$(x,y)\\f$ is considered to belong to the repainted domain if:\n. \n. - in case of a grayscale image and floating range\n. \\f[\\texttt{src} (x',y')- \\texttt{loDiff} \\leq \\texttt{src} (x,y) \\leq \\texttt{src} (x',y')+ \\texttt{upDiff}\\f]\n. \n. \n. - in case of a grayscale image and fixed range\n. \\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)- \\texttt{loDiff} \\leq \\texttt{src} (x,y) \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)+ \\texttt{upDiff}\\f]\n. \n. \n. - in case of a color image and floating range\n. \\f[\\texttt{src} (x',y')_r- \\texttt{loDiff} _r \\leq \\texttt{src} (x,y)_r \\leq \\texttt{src} (x',y')_r+ \\texttt{upDiff} _r,\\f]\n. \\f[\\texttt{src} (x',y')_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g \\leq \\texttt{src} (x',y')_g+ \\texttt{upDiff} _g\\f]\n. and\n. \\f[\\texttt{src} (x',y')_b- \\texttt{loDiff} _b \\leq \\texttt{src} (x,y)_b \\leq \\texttt{src} (x',y')_b+ \\texttt{upDiff} _b\\f]\n. \n. \n. - in case of a color image and fixed range\n. \\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_r- \\texttt{loDiff} _r \\leq \\texttt{src} (x,y)_r \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_r+ \\texttt{upDiff} _r,\\f]\n. \\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g- \\texttt{loDiff} _g \\leq \\texttt{src} (x,y)_g \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_g+ \\texttt{upDiff} _g\\f]\n. and\n. \\f[\\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b- \\texttt{loDiff} _b \\leq \\texttt{src} (x,y)_b \\leq \\texttt{src} ( \\texttt{seedPoint} .x, \\texttt{seedPoint} .y)_b+ \\texttt{upDiff} _b\\f]\n. \n. \n. where \\f$src(x',y')\\f$ is the value of one of pixel neighbors that is already known to belong to the\n. component. That is, to be added to the connected component, a color/brightness of the pixel should\n. be close enough to:\n. - Color/brightness of one of its neighbors that already belong to the connected component in case\n. of a floating range.\n. 
- Color/brightness of the seed point in case of a fixed range.\n. \n. Use these functions to either mark a connected component with the specified color in-place, or build\n. a mask and then extract the contour, or copy the region to another image, and so on.\n. \n. @param image Input/output 1- or 3-channel, 8-bit, or floating-point image. It is modified by the\n. function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See\n. the details below.\n. @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels\n. taller than image. Since this is both an input and output parameter, you must take responsibility\n. for initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,\n. an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the\n. mask corresponding to filled pixels in the image are set to 1 or to the value specified in flags\n. as described below. Additionally, the function fills the border of the mask with ones to simplify\n. internal processing. It is therefore possible to use the same mask in multiple calls to the function\n. to make sure the filled areas do not overlap.\n. @param seedPoint Starting point.\n. @param newVal New value of the repainted domain pixels.\n. @param loDiff Maximal lower brightness/color difference between the currently observed pixel and\n. one of its neighbors belonging to the component, or a seed pixel being added to the component.\n. @param upDiff Maximal upper brightness/color difference between the currently observed pixel and\n. one of its neighbors belonging to the component, or a seed pixel being added to the component.\n. @param rect Optional output parameter set by the function to the minimum bounding rectangle of the\n. repainted domain.\n. @param flags Operation flags. The first 8 bits contain a connectivity value. The default value of\n. 4 means that only the four nearest neighbor pixels (those that share an edge) are considered. A\n. connectivity value of 8 means that the eight nearest neighbor pixels (those that share a corner)\n. will be considered. The next 8 bits (8-16) contain a value between 1 and 255 with which to fill\n. the mask (the default value is 1). For example, 4 | ( 255 \\<\\< 8 ) will consider 4 nearest\n. neighbours and fill the mask with a value of 255. The following additional options occupy higher\n. bits and therefore may be further combined with the connectivity and mask fill values using\n. bit-wise or (|), see #FloodFillFlags.\n. \n. @note Since the mask is larger than the filled image, a pixel \\f$(x, y)\\f$ in image corresponds to the\n. pixel \\f$(x+1, y+1)\\f$ in the mask .\n. \n. @sa findContours"}, + {"gemm", CV_PY_FN_WITH_KW_(pyopencv_cv_gemm, 0), "gemm(src1, src2, alpha, src3, beta[, dst[, flags]]) -> dst\n. @brief Performs generalized matrix multiplication.\n. \n. The function cv::gemm performs generalized matrix multiplication similar to the\n. gemm functions in BLAS level 3. For example,\n. `gemm(src1, src2, alpha, src3, beta, dst, GEMM_1_T + GEMM_3_T)`\n. corresponds to\n. \\f[\\texttt{dst} = \\texttt{alpha} \\cdot \\texttt{src1} ^T \\cdot \\texttt{src2} + \\texttt{beta} \\cdot \\texttt{src3} ^T\\f]\n. \n. In case of complex (two-channel) data, a complex matrix\n. multiplication is performed.\n. \n. The function can be replaced with a matrix expression. For example, the\n. above call can be replaced with:\n. @code{.cpp}\n. 
dst = alpha*src1.t()*src2 + beta*src3.t();\n. @endcode\n. @param src1 first multiplied input matrix that could be real (CV_32FC1,\n. CV_64FC1) or complex (CV_32FC2, CV_64FC2).\n. @param src2 second multiplied input matrix of the same type as src1.\n. @param alpha weight of the matrix product.\n. @param src3 third optional delta matrix added to the matrix product; it\n. should have the same type as src1 and src2.\n. @param beta weight of src3.\n. @param dst output matrix; it has the proper size and the same type as\n. input matrices.\n. @param flags operation flags (cv::GemmFlags)\n. @sa mulTransposed , transform"}, + {"getAffineTransform", CV_PY_FN_WITH_KW_(pyopencv_cv_getAffineTransform, 0), "getAffineTransform(src, dst) -> retval\n. @overload"}, + {"getBuildInformation", CV_PY_FN_WITH_KW_(pyopencv_cv_getBuildInformation, 0), "getBuildInformation() -> retval\n. @brief Returns full configuration time cmake output.\n. \n. Returned value is raw cmake output including version control system revision, compiler version,\n. compiler flags, enabled modules and third party libraries, etc. Output format depends on target\n. architecture."}, + {"getCPUFeaturesLine", CV_PY_FN_WITH_KW_(pyopencv_cv_getCPUFeaturesLine, 0), "getCPUFeaturesLine() -> retval\n. @brief Returns list of CPU features enabled during compilation.\n. \n. Returned value is a string containing space separated list of CPU features with following markers:\n. \n. - no markers - baseline features\n. - prefix `*` - features enabled in dispatcher\n. - suffix `?` - features enabled but not available in HW\n. \n. Example: `SSE SSE2 SSE3 *SSE4.1 *SSE4.2 *FP16 *AVX *AVX2 *AVX512-SKX?`"}, + {"getCPUTickCount", CV_PY_FN_WITH_KW_(pyopencv_cv_getCPUTickCount, 0), "getCPUTickCount() -> retval\n. @brief Returns the number of CPU ticks.\n. \n. The function returns the current number of CPU ticks on some architectures (such as x86, x64,\n. PowerPC). On other platforms the function is equivalent to getTickCount. It can also be used for\n. very accurate time measurements, as well as for RNG initialization. Note that on multi-CPU\n. systems a thread from which getCPUTickCount is called can be suspended and resumed on another CPU\n. with its own counter. So, theoretically (and practically) the subsequent calls to the function do\n. not necessarily return monotonically increasing values. Also, since a modern CPU varies the CPU\n. frequency depending on the load, the number of CPU clocks spent in some code cannot be directly\n. converted to time units. Therefore, getTickCount is generally a preferable solution for measuring\n. execution time."}, + {"getDerivKernels", CV_PY_FN_WITH_KW_(pyopencv_cv_getDerivKernels, 0), "getDerivKernels(dx, dy, ksize[, kx[, ky[, normalize[, ktype]]]]) -> kx, ky\n. @brief Returns filter coefficients for computing spatial image derivatives.\n. \n. The function computes and returns the filter coefficients for spatial image derivatives. When\n. `ksize=FILTER_SCHARR`, the Scharr \\f$3 \\times 3\\f$ kernels are generated (see #Scharr). Otherwise, Sobel\n. kernels are generated (see #Sobel). The filters are normally passed to #sepFilter2D.\n. \n. @param kx Output matrix of row filter coefficients. It has the type ktype .\n. @param ky Output matrix of column filter coefficients. It has the type ktype .\n. @param dx Derivative order in respect of x.\n. @param dy Derivative order in respect of y.\n. @param ksize Aperture size. It can be FILTER_SCHARR, 1, 3, 5, or 7.\n. 
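The gemm() formula above maps directly onto the matrix expression it documents; a minimal Python cross-check (random matrices, with GEMM_1_T as an example flag):

```python
import numpy as np
import cv2

src1 = np.random.rand(2, 3)   # float64 maps to CV_64F
src2 = np.random.rand(2, 4)
src3 = np.random.rand(3, 4)

# dst = alpha * src1^T * src2 + beta * src3   (GEMM_1_T transposes src1)
dst = cv2.gemm(src1, src2, 0.5, src3, 2.0, flags=cv2.GEMM_1_T)
print(np.allclose(dst, 0.5 * src1.T @ src2 + 2.0 * src3))   # True
```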
@param normalize Flag indicating whether to normalize (scale down) the filter coefficients or not.\n. Theoretically, the coefficients should have the denominator \\f$=2^{ksize*2-dx-dy-2}\\f$. If you are\n. going to filter floating-point images, you are likely to use the normalized kernels. But if you\n. compute derivatives of an 8-bit image, store the results in a 16-bit image, and wish to preserve\n. all the fractional bits, you may want to set normalize=false .\n. @param ktype Type of filter coefficients. It can be CV_32F or CV_64F ."}, + {"getFontScaleFromHeight", CV_PY_FN_WITH_KW_(pyopencv_cv_getFontScaleFromHeight, 0), "getFontScaleFromHeight(fontFace, pixelHeight[, thickness]) -> retval\n. @brief Calculates the font-specific size to use to achieve a given height in pixels.\n. \n. @param fontFace Font to use, see cv::HersheyFonts.\n. @param pixelHeight Pixel height to compute the fontScale for\n. @param thickness Thickness of lines used to render the text. See putText for details.\n. @return The fontSize to use for cv::putText\n. \n. @see cv::putText"}, + {"getGaborKernel", CV_PY_FN_WITH_KW_(pyopencv_cv_getGaborKernel, 0), "getGaborKernel(ksize, sigma, theta, lambd, gamma[, psi[, ktype]]) -> retval\n. @brief Returns Gabor filter coefficients.\n. \n. For more details about Gabor filter equations and parameters, see: [Gabor\n. Filter](http://en.wikipedia.org/wiki/Gabor_filter).\n. \n. @param ksize Size of the filter returned.\n. @param sigma Standard deviation of the Gaussian envelope.\n. @param theta Orientation of the normal to the parallel stripes of a Gabor function.\n. @param lambd Wavelength of the sinusoidal factor.\n. @param gamma Spatial aspect ratio.\n. @param psi Phase offset.\n. @param ktype Type of filter coefficients. It can be CV_32F or CV_64F ."}, + {"getGaussianKernel", CV_PY_FN_WITH_KW_(pyopencv_cv_getGaussianKernel, 0), "getGaussianKernel(ksize, sigma[, ktype]) -> retval\n. @brief Returns Gaussian filter coefficients.\n. \n. The function computes and returns the \\f$\\texttt{ksize} \\times 1\\f$ matrix of Gaussian filter\n. coefficients:\n. \n. \\f[G_i= \\alpha *e^{-(i-( \\texttt{ksize} -1)/2)^2/(2* \\texttt{sigma}^2)},\\f]\n. \n. where \\f$i=0..\\texttt{ksize}-1\\f$ and \\f$\\alpha\\f$ is the scale factor chosen so that \\f$\\sum_i G_i=1\\f$.\n. \n. Two such generated kernels can be passed to sepFilter2D. Those functions automatically recognize\n. smoothing kernels (a symmetrical kernel with sum of weights equal to 1) and handle them accordingly.\n. You may also use the higher-level GaussianBlur.\n. @param ksize Aperture size. It should be odd ( \\f$\\texttt{ksize} \\mod 2 = 1\\f$ ) and positive.\n. @param sigma Gaussian standard deviation. If it is non-positive, it is computed from ksize as\n. `sigma = 0.3*((ksize-1)*0.5 - 1) + 0.8`.\n. @param ktype Type of filter coefficients. It can be CV_32F or CV_64F .\n. @sa sepFilter2D, getDerivKernels, getStructuringElement, GaussianBlur"}, + {"getHardwareFeatureName", CV_PY_FN_WITH_KW_(pyopencv_cv_getHardwareFeatureName, 0), "getHardwareFeatureName(feature) -> retval\n. @brief Returns feature name by ID\n. \n. Returns empty string if feature is not defined"}, + {"getLogLevel", CV_PY_FN_WITH_KW_(pyopencv_cv_getLogLevel, 0), "getLogLevel() -> retval\n."}, + {"getNumThreads", CV_PY_FN_WITH_KW_(pyopencv_cv_getNumThreads, 0), "getNumThreads() -> retval\n. @brief Returns the number of threads used by OpenCV for parallel regions.\n. \n. Always returns 1 if OpenCV is built without threading support.\n. \n. 
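The getGaussianKernel() coefficients above can be reproduced directly from the documented formula; a Python sketch (ksize and sigma are chosen arbitrarily):

```python
import numpy as np
import cv2

ksize, sigma = 5, 1.2
k = cv2.getGaussianKernel(ksize, sigma)        # ksize x 1, CV_64F by default
print(k.shape, round(float(k.sum()), 6))       # (5, 1) 1.0

i = np.arange(ksize)
g = np.exp(-((i - (ksize - 1) / 2) ** 2) / (2 * sigma ** 2))
g /= g.sum()                                   # alpha is chosen so the sum is 1
print(np.allclose(k.ravel(), g))               # True
```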
The exact meaning of the return value depends on the threading framework used by the OpenCV library:\n. - `TBB` - The number of threads that OpenCV will try to use for parallel regions. If there is\n. any tbb::thread_scheduler_init in user code conflicting with OpenCV, then the function returns the\n. default number of threads used by the TBB library.\n. - `OpenMP` - An upper bound on the number of threads that could be used to form a new team.\n. - `Concurrency` - The number of threads that OpenCV will try to use for parallel regions.\n. - `GCD` - Unsupported; returns the GCD thread pool limit (512) for compatibility.\n. - `C=` - The number of threads that OpenCV will try to use for parallel regions, if\n. setNumThreads with threads \\> 0 was called before; otherwise it returns the number of logical CPUs\n. available for the process.\n. @sa setNumThreads, getThreadNum"}, + {"getNumberOfCPUs", CV_PY_FN_WITH_KW_(pyopencv_cv_getNumberOfCPUs, 0), "getNumberOfCPUs() -> retval\n. @brief Returns the number of logical CPUs available for the process."}, + {"getOptimalDFTSize", CV_PY_FN_WITH_KW_(pyopencv_cv_getOptimalDFTSize, 0), "getOptimalDFTSize(vecsize) -> retval\n. @brief Returns the optimal DFT size for a given vector size.\n. \n. DFT performance is not a monotonic function of a vector size. Therefore, when you calculate\n. convolution of two arrays or perform the spectral analysis of an array, it usually makes sense to\n. pad the input data with zeros to get a bit larger array that can be transformed much faster than the\n. original one. Arrays whose size is a power-of-two (2, 4, 8, 16, 32, ...) are the fastest to process.\n. Though, the arrays whose size is a product of 2's, 3's, and 5's (for example, 300 = 5\\*5\\*3\\*2\\*2)\n. are also processed quite efficiently.\n. \n. The function cv::getOptimalDFTSize returns the minimum number N that is greater than or equal to vecsize\n. so that the DFT of a vector of size N can be processed efficiently. In the current implementation N\n. = 2 ^p^ \\* 3 ^q^ \\* 5 ^r^ for some integer p, q, r.\n. \n. The function returns a negative number if vecsize is too large (very close to INT_MAX ).\n. \n. While the function cannot be used directly to estimate the optimal vector size for DCT transform\n. (since the current DCT implementation supports only even-size vectors), it can be easily computed\n. as getOptimalDFTSize((vecsize+1)/2)\\*2.\n. @param vecsize vector size.\n. @sa dft , dct , idft , idct , mulSpectrums"}, + {"getPerspectiveTransform", CV_PY_FN_WITH_KW_(pyopencv_cv_getPerspectiveTransform, 0), "getPerspectiveTransform(src, dst[, solveMethod]) -> retval\n. @brief Calculates a perspective transform from four pairs of the corresponding points.\n. \n. The function calculates the \\f$3 \\times 3\\f$ matrix of a perspective transform so that:\n. \n. \\f[\\begin{bmatrix} t_i x'_i \\\\ t_i y'_i \\\\ t_i \\end{bmatrix} = \\texttt{map_matrix} \\cdot \\begin{bmatrix} x_i \\\\ y_i \\\\ 1 \\end{bmatrix}\\f]\n. \n. where\n. \n. \\f[dst(i)=(x'_i,y'_i), src(i)=(x_i, y_i), i=0,1,2,3\\f]\n. \n. @param src Coordinates of quadrangle vertices in the source image.\n. @param dst Coordinates of the corresponding quadrangle vertices in the destination image.\n. @param solveMethod method passed to cv::solve (#DecompTypes)\n. \n. @sa findHomography, warpPerspective, perspectiveTransform"}, + {"getRectSubPix", CV_PY_FN_WITH_KW_(pyopencv_cv_getRectSubPix, 0), "getRectSubPix(image, patchSize, center[, patch[, patchType]]) -> patch\n. 
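The getOptimalDFTSize() behaviour described above (the smallest N = 2^p * 3^q * 5^r >= vecsize) is easy to probe from Python; the sizes below are arbitrary examples:

```python
import cv2

print(cv2.getOptimalDFTSize(1000))   # 1000 (already 2^3 * 5^3)
print(cv2.getOptimalDFTSize(1001))   # 1024 (next number of the form 2^p * 3^q * 5^r)

# Optimal even size for a DCT of length n, as suggested above:
n = 1001
print(cv2.getOptimalDFTSize((n + 1) // 2) * 2)   # 1024
```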
@brief Retrieves a pixel rectangle from an image with sub-pixel accuracy.\n. \n. The function getRectSubPix extracts pixels from src:\n. \n. \\f[patch(x, y) = src(x + \\texttt{center.x} - ( \\texttt{dst.cols} -1)*0.5, y + \\texttt{center.y} - ( \\texttt{dst.rows} -1)*0.5)\\f]\n. \n. where the values of the pixels at non-integer coordinates are retrieved using bilinear\n. interpolation. Every channel of multi-channel images is processed independently. Also\n. the image should be a single channel or three channel image. While the center of the\n. rectangle must be inside the image, parts of the rectangle may be outside.\n. \n. @param image Source image.\n. @param patchSize Size of the extracted patch.\n. @param center Floating point coordinates of the center of the extracted rectangle within the\n. source image. The center must be inside the image.\n. @param patch Extracted patch that has the size patchSize and the same number of channels as src .\n. @param patchType Depth of the extracted pixels. By default, they have the same depth as src .\n. \n. @sa warpAffine, warpPerspective"}, + {"getRotationMatrix2D", CV_PY_FN_WITH_KW_(pyopencv_cv_getRotationMatrix2D, 0), "getRotationMatrix2D(center, angle, scale) -> retval\n. @brief Calculates an affine matrix of 2D rotation.\n. \n. The function calculates the following matrix:\n. \n. \\f[\\begin{bmatrix} \\alpha & \\beta & (1- \\alpha ) \\cdot \\texttt{center.x} - \\beta \\cdot \\texttt{center.y} \\\\ - \\beta & \\alpha & \\beta \\cdot \\texttt{center.x} + (1- \\alpha ) \\cdot \\texttt{center.y} \\end{bmatrix}\\f]\n. \n. where\n. \n. \\f[\\begin{array}{l} \\alpha = \\texttt{scale} \\cdot \\cos \\texttt{angle} , \\\\ \\beta = \\texttt{scale} \\cdot \\sin \\texttt{angle} \\end{array}\\f]\n. \n. The transformation maps the rotation center to itself. If this is not the target, adjust the shift.\n. \n. @param center Center of the rotation in the source image.\n. @param angle Rotation angle in degrees. Positive values mean counter-clockwise rotation (the\n. coordinate origin is assumed to be the top-left corner).\n. @param scale Isotropic scale factor.\n. \n. @sa getAffineTransform, warpAffine, transform"}, + {"getStructuringElement", CV_PY_FN_WITH_KW_(pyopencv_cv_getStructuringElement, 0), "getStructuringElement(shape, ksize[, anchor]) -> retval\n. @brief Returns a structuring element of the specified size and shape for morphological operations.\n. \n. The function constructs and returns the structuring element that can be further passed to #erode,\n. #dilate or #morphologyEx. But you can also construct an arbitrary binary mask yourself and use it as\n. the structuring element.\n. \n. @param shape Element shape that could be one of #MorphShapes\n. @param ksize Size of the structuring element.\n. @param anchor Anchor position within the element. The default value \\f$(-1, -1)\\f$ means that the\n. anchor is at the center. Note that only the shape of a cross-shaped element depends on the anchor\n. position. In other cases the anchor just regulates how much the result of the morphological\n. operation is shifted."}, + {"getTextSize", CV_PY_FN_WITH_KW_(pyopencv_cv_getTextSize, 0), "getTextSize(text, fontFace, fontScale, thickness) -> retval, baseLine\n. @brief Calculates the width and height of a text string.\n. \n. The function cv::getTextSize calculates and returns the size of a box that contains the specified text.\n. That is, the following code renders some text, the tight box surrounding it, and the baseline: :\n. @code\n. 
String text = \"Funny text inside the box\";\n. int fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;\n. double fontScale = 2;\n. int thickness = 3;\n. \n. Mat img(600, 800, CV_8UC3, Scalar::all(0));\n. \n. int baseline=0;\n. Size textSize = getTextSize(text, fontFace,\n. fontScale, thickness, &baseline);\n. baseline += thickness;\n. \n. // center the text\n. Point textOrg((img.cols - textSize.width)/2,\n. (img.rows + textSize.height)/2);\n. \n. // draw the box\n. rectangle(img, textOrg + Point(0, baseline),\n. textOrg + Point(textSize.width, -textSize.height),\n. Scalar(0,0,255));\n. // ... and the baseline first\n. line(img, textOrg + Point(0, thickness),\n. textOrg + Point(textSize.width, thickness),\n. Scalar(0, 0, 255));\n. \n. // then put the text itself\n. putText(img, text, textOrg, fontFace, fontScale,\n. Scalar::all(255), thickness, 8);\n. @endcode\n. \n. @param text Input text string.\n. @param fontFace Font to use, see #HersheyFonts.\n. @param fontScale Font scale factor that is multiplied by the font-specific base size.\n. @param thickness Thickness of lines used to render the text. See #putText for details.\n. @param[out] baseLine y-coordinate of the baseline relative to the bottom-most text\n. point.\n. @return The size of a box that contains the specified text.\n. \n. @see putText"}, + {"getThreadNum", CV_PY_FN_WITH_KW_(pyopencv_cv_getThreadNum, 0), "getThreadNum() -> retval\n. @brief Returns the index of the currently executed thread within the current parallel region. Always\n. returns 0 if called outside of parallel region.\n. \n. @deprecated Current implementation doesn't corresponding to this documentation.\n. \n. The exact meaning of the return value depends on the threading framework used by OpenCV library:\n. - `TBB` - Unsupported with current 4.1 TBB release. Maybe will be supported in future.\n. - `OpenMP` - The thread number, within the current team, of the calling thread.\n. - `Concurrency` - An ID for the virtual processor that the current context is executing on (0\n. for master thread and unique number for others, but not necessary 1,2,3,...).\n. - `GCD` - System calling thread's ID. Never returns 0 inside parallel region.\n. - `C=` - The index of the current parallel task.\n. @sa setNumThreads, getNumThreads"}, + {"getTickCount", CV_PY_FN_WITH_KW_(pyopencv_cv_getTickCount, 0), "getTickCount() -> retval\n. @brief Returns the number of ticks.\n. \n. The function returns the number of ticks after the certain event (for example, when the machine was\n. turned on). It can be used to initialize RNG or to measure a function execution time by reading the\n. tick count before and after the function call.\n. @sa getTickFrequency, TickMeter"}, + {"getTickFrequency", CV_PY_FN_WITH_KW_(pyopencv_cv_getTickFrequency, 0), "getTickFrequency() -> retval\n. @brief Returns the number of ticks per second.\n. \n. The function returns the number of ticks per second. That is, the following code computes the\n. execution time in seconds:\n. @code\n. double t = (double)getTickCount();\n. // do something ...\n. t = ((double)getTickCount() - t)/getTickFrequency();\n. @endcode\n. @sa getTickCount, TickMeter"}, + {"getVersionMajor", CV_PY_FN_WITH_KW_(pyopencv_cv_getVersionMajor, 0), "getVersionMajor() -> retval\n. @brief Returns major library version"}, + {"getVersionMinor", CV_PY_FN_WITH_KW_(pyopencv_cv_getVersionMinor, 0), "getVersionMinor() -> retval\n. 
@brief Returns minor library version"}, + {"getVersionRevision", CV_PY_FN_WITH_KW_(pyopencv_cv_getVersionRevision, 0), "getVersionRevision() -> retval\n. @brief Returns revision field of the library version"}, + {"getVersionString", CV_PY_FN_WITH_KW_(pyopencv_cv_getVersionString, 0), "getVersionString() -> retval\n. @brief Returns library version string\n. \n. For example \"3.4.1-dev\".\n. \n. @sa getMajorVersion, getMinorVersion, getRevisionVersion"}, + {"goodFeaturesToTrack", CV_PY_FN_WITH_KW_(pyopencv_cv_goodFeaturesToTrack, 0), "goodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance[, corners[, mask[, blockSize[, useHarrisDetector[, k]]]]]) -> corners\n. @brief Determines strong corners on an image.\n. \n. The function finds the most prominent corners in the image or in the specified image region, as\n. described in @cite Shi94\n. \n. - Function calculates the corner quality measure at every source image pixel using\n. #cornerMinEigenVal or #cornerHarris .\n. - Function performs a non-maximum suppression (the local maxima in a *3 x 3* neighborhood are\n. retained).\n. - The corners with the minimal eigenvalue less than\n. \\f$\\texttt{qualityLevel} \\cdot \\max_{x,y} qualityMeasureMap(x,y)\\f$ are rejected.\n. - The remaining corners are sorted by the quality measure in the descending order.\n. - Function throws away each corner for which there is a stronger corner at a distance less than\n. minDistance.\n. \n. The function can be used to initialize a point-based tracker of an object.\n. \n. @note If the function is called with different values A and B of the parameter qualityLevel , and\n. A \\> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector\n. with qualityLevel=B .\n. \n. @param image Input 8-bit or floating-point 32-bit, single-channel image.\n. @param corners Output vector of detected corners.\n. @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,\n. the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set\n. and all detected corners are returned.\n. @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The\n. parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue\n. (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the\n. quality measure less than the product are rejected. For example, if the best corner has the\n. quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure\n. less than 15 are rejected.\n. @param minDistance Minimum possible Euclidean distance between the returned corners.\n. @param mask Optional region of interest. If the image is not empty (it needs to have the type\n. CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.\n. @param blockSize Size of an average block for computing a derivative covariation matrix over each\n. pixel neighborhood. See cornerEigenValsAndVecs .\n. @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)\n. or #cornerMinEigenVal.\n. @param k Free parameter of the Harris detector.\n. \n. 
@sa cornerMinEigenVal, cornerHarris, calcOpticalFlowPyrLK, estimateRigidTransform\n\n\n\ngoodFeaturesToTrack(image, maxCorners, qualityLevel, minDistance, mask, blockSize, gradientSize[, corners[, useHarrisDetector[, k]]]) -> corners\n."}, + {"goodFeaturesToTrackWithQuality", CV_PY_FN_WITH_KW_(pyopencv_cv_goodFeaturesToTrackWithQuality, 0), "goodFeaturesToTrackWithQuality(image, maxCorners, qualityLevel, minDistance, mask[, corners[, cornersQuality[, blockSize[, gradientSize[, useHarrisDetector[, k]]]]]]) -> corners, cornersQuality\n. @brief Same as above, but returns also quality measure of the detected corners.\n. \n. @param image Input 8-bit or floating-point 32-bit, single-channel image.\n. @param corners Output vector of detected corners.\n. @param maxCorners Maximum number of corners to return. If more corners are found than maxCorners,\n. the strongest of them are returned. `maxCorners <= 0` implies that no limit on the maximum is set\n. and all detected corners are returned.\n. @param qualityLevel Parameter characterizing the minimal accepted quality of image corners. The\n. parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue\n. (see #cornerMinEigenVal ) or the Harris function response (see #cornerHarris ). The corners with the\n. quality measure less than the product are rejected. For example, if the best corner has the\n. quality measure = 1500, and the qualityLevel=0.01 , then all the corners with the quality measure\n. less than 15 are rejected.\n. @param minDistance Minimum possible Euclidean distance between the returned corners.\n. @param mask Region of interest. If the image is not empty (it needs to have the type\n. CV_8UC1 and the same size as image ), it specifies the region in which the corners are detected.\n. @param cornersQuality Output vector of quality measure of the detected corners.\n. @param blockSize Size of an average block for computing a derivative covariation matrix over each\n. pixel neighborhood. See cornerEigenValsAndVecs .\n. @param gradientSize Aperture parameter for the Sobel operator used for derivatives computation.\n. See cornerEigenValsAndVecs .\n. @param useHarrisDetector Parameter indicating whether to use a Harris detector (see #cornerHarris)\n. or #cornerMinEigenVal.\n. @param k Free parameter of the Harris detector."}, + {"grabCut", CV_PY_FN_WITH_KW_(pyopencv_cv_grabCut, 0), "grabCut(img, mask, rect, bgdModel, fgdModel, iterCount[, mode]) -> mask, bgdModel, fgdModel\n. @brief Runs the GrabCut algorithm.\n. \n. The function implements the [GrabCut image segmentation algorithm](http://en.wikipedia.org/wiki/GrabCut).\n. \n. @param img Input 8-bit 3-channel image.\n. @param mask Input/output 8-bit single-channel mask. The mask is initialized by the function when\n. mode is set to #GC_INIT_WITH_RECT. Its elements may have one of the #GrabCutClasses.\n. @param rect ROI containing a segmented object. The pixels outside of the ROI are marked as\n. \"obvious background\". The parameter is only used when mode==#GC_INIT_WITH_RECT .\n. @param bgdModel Temporary array for the background model. Do not modify it while you are\n. processing the same image.\n. @param fgdModel Temporary array for the foreground model. Do not modify it while you are\n. processing the same image.\n. @param iterCount Number of iterations the algorithm should make before returning the result. Note\n. that the result can be refined with further calls with mode==#GC_INIT_WITH_MASK or\n. mode==#GC_EVAL .\n. 
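A small Python run of goodFeaturesToTrack() on a synthetic square (the image and parameter values are illustrative only):

```python
import numpy as np
import cv2

img = np.zeros((100, 100), dtype=np.uint8)
cv2.rectangle(img, (30, 30), (70, 70), 255, -1)   # corners are strong features

corners = cv2.goodFeaturesToTrack(img, maxCorners=10,
                                  qualityLevel=0.01, minDistance=5)
# Typically the four rectangle corners, as an (N, 1, 2) float32 array.
print(None if corners is None else corners.shape)   # (4, 1, 2)
```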
@param mode Operation mode that could be one of the #GrabCutModes"}, + {"haveImageReader", CV_PY_FN_WITH_KW_(pyopencv_cv_haveImageReader, 0), "haveImageReader(filename) -> retval\n. @brief Returns true if the specified image can be decoded by OpenCV\n. \n. @param filename File name of the image"}, + {"haveImageWriter", CV_PY_FN_WITH_KW_(pyopencv_cv_haveImageWriter, 0), "haveImageWriter(filename) -> retval\n. @brief Returns true if an image with the specified filename can be encoded by OpenCV\n. \n. @param filename File name of the image"}, + {"haveOpenVX", CV_PY_FN_WITH_KW_(pyopencv_cv_haveOpenVX, 0), "haveOpenVX() -> retval\n."}, + {"hconcat", CV_PY_FN_WITH_KW_(pyopencv_cv_hconcat, 0), "hconcat(src[, dst]) -> dst\n. @overload\n. @code{.cpp}\n. std::vector<cv::Mat> matrices = { cv::Mat(4, 1, CV_8UC1, cv::Scalar(1)),\n. cv::Mat(4, 1, CV_8UC1, cv::Scalar(2)),\n. cv::Mat(4, 1, CV_8UC1, cv::Scalar(3)),};\n. \n. cv::Mat out;\n. cv::hconcat( matrices, out );\n. //out:\n. //[1, 2, 3;\n. // 1, 2, 3;\n. // 1, 2, 3;\n. // 1, 2, 3]\n. @endcode\n. @param src input array or vector of matrices. all of the matrices must have the same number of rows and the same depth.\n. @param dst output array. It has the same number of rows and depth as the src, and the sum of cols of the src."}, + {"idct", CV_PY_FN_WITH_KW_(pyopencv_cv_idct, 0), "idct(src[, dst[, flags]]) -> dst\n. @brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.\n. \n. idct(src, dst, flags) is equivalent to dct(src, dst, flags | DCT_INVERSE).\n. @param src input floating-point single-channel array.\n. @param dst output array of the same size and type as src.\n. @param flags operation flags.\n. @sa dct, dft, idft, getOptimalDFTSize"}, + {"idft", CV_PY_FN_WITH_KW_(pyopencv_cv_idft, 0), "idft(src[, dst[, flags[, nonzeroRows]]]) -> dst\n. @brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.\n. \n. idft(src, dst, flags) is equivalent to dft(src, dst, flags | #DFT_INVERSE) .\n. @note Neither dft nor idft scales the result by default. So, you should pass #DFT_SCALE to one of\n. dft or idft explicitly to make these transforms mutually inverse.\n. @sa dft, dct, idct, mulSpectrums, getOptimalDFTSize\n. @param src input floating-point real or complex array.\n. @param dst output array whose size and type depend on the flags.\n. @param flags operation flags (see dft and #DftFlags).\n. @param nonzeroRows number of dst rows to process; the rest of the rows have undefined content (see\n. the convolution sample in dft description)."}, + {"imcount", CV_PY_FN_WITH_KW_(pyopencv_cv_imcount, 0), "imcount(filename[, flags]) -> retval\n. @brief Returns the number of images inside the given file\n. \n. The function imcount will return the number of pages in a multi-page image, or 1 for single-page images\n. @param filename Name of file to be loaded.\n. @param flags Flag that can take values of cv::ImreadModes, default with cv::IMREAD_ANYCOLOR."}, + {"imdecode", CV_PY_FN_WITH_KW_(pyopencv_cv_imdecode, 0), "imdecode(buf, flags) -> retval\n. @brief Reads an image from a buffer in memory.\n. \n. The function imdecode reads an image from the specified buffer in the memory. If the buffer is too short or\n. contains invalid data, the function returns an empty matrix ( Mat::data==NULL ).\n. \n. See cv::imread for the list of supported formats and flags description.\n. \n. @note In the case of color images, the decoded images will have the channels stored in **B G R** order.\n. @param buf Input array or vector of bytes.\n. 
@param flags The same flags as in cv::imread, see cv::ImreadModes."}, + {"imencode", CV_PY_FN_WITH_KW_(pyopencv_cv_imencode, 0), "imencode(ext, img[, params]) -> retval, buf\n. @brief Encodes an image into a memory buffer.\n. \n. The function imencode compresses the image and stores it in the memory buffer that is resized to fit the\n. result. See cv::imwrite for the list of supported formats and flags description.\n. \n. @param ext File extension that defines the output format.\n. @param img Image to be written.\n. @param buf Output buffer resized to fit the compressed image.\n. @param params Format-specific parameters. See cv::imwrite and cv::ImwriteFlags."}, + {"imread", CV_PY_FN_WITH_KW_(pyopencv_cv_imread, 0), "imread(filename[, flags]) -> retval\n. @brief Loads an image from a file.\n. \n. @anchor imread\n. \n. The function imread loads an image from the specified file and returns it. If the image cannot be\n. read (because of missing file, improper permissions, unsupported or invalid format), the function\n. returns an empty matrix ( Mat::data==NULL ).\n. \n. Currently, the following file formats are supported:\n. \n. - Windows bitmaps - \\*.bmp, \\*.dib (always supported)\n. - JPEG files - \\*.jpeg, \\*.jpg, \\*.jpe (see the *Note* section)\n. - JPEG 2000 files - \\*.jp2 (see the *Note* section)\n. - Portable Network Graphics - \\*.png (see the *Note* section)\n. - WebP - \\*.webp (see the *Note* section)\n. - Portable image format - \\*.pbm, \\*.pgm, \\*.ppm \\*.pxm, \\*.pnm (always supported)\n. - PFM files - \\*.pfm (see the *Note* section)\n. - Sun rasters - \\*.sr, \\*.ras (always supported)\n. - TIFF files - \\*.tiff, \\*.tif (see the *Note* section)\n. - OpenEXR Image files - \\*.exr (see the *Note* section)\n. - Radiance HDR - \\*.hdr, \\*.pic (always supported)\n. - Raster and Vector geospatial data supported by GDAL (see the *Note* section)\n. \n. @note\n. - The function determines the type of an image by the content, not by the file extension.\n. - In the case of color images, the decoded images will have the channels stored in **B G R** order.\n. - When using IMREAD_GRAYSCALE, the codec's internal grayscale conversion will be used, if available.\n. Results may differ from the output of cvtColor()\n. - On Microsoft Windows\\* OS and MacOSX\\*, the codecs shipped with an OpenCV image (libjpeg,\n. libpng, libtiff, and libjasper) are used by default. So, OpenCV can always read JPEGs, PNGs,\n. and TIFFs. On MacOSX, there is also an option to use native MacOSX image readers. But beware\n. that currently these native image loaders give images with different pixel values because of\n. the color management embedded into MacOSX.\n. - On Linux\\*, BSD flavors and other Unix-like open-source operating systems, OpenCV looks for\n. codecs supplied with an OS image. Install the relevant packages (do not forget the development\n. files, for example, \"libjpeg-dev\", in Debian\\* and Ubuntu\\*) to get the codec support or turn\n. on the OPENCV_BUILD_3RDPARTY_LIBS flag in CMake.\n. - If you set the *WITH_GDAL* flag to true in CMake and use @ref IMREAD_LOAD_GDAL to load the image,\n. then the [GDAL](http://www.gdal.org) driver will be used in order to decode the image, supporting\n. the following formats: [Raster](http://www.gdal.org/formats_list.html),\n. [Vector](http://www.gdal.org/ogr_formats.html).\n. - If EXIF information is embedded in the image file, the EXIF orientation will be taken into account\n. 
and thus the image will be rotated accordingly except if the flags @ref IMREAD_IGNORE_ORIENTATION\n. or @ref IMREAD_UNCHANGED are passed.\n. - Use the IMREAD_UNCHANGED flag to keep the floating point values from PFM image.\n. - By default number of pixels must be less than 2^30. Limit can be set using system\n. variable OPENCV_IO_MAX_IMAGE_PIXELS\n. \n. @param filename Name of file to be loaded.\n. @param flags Flag that can take values of cv::ImreadModes"}, + {"imreadmulti", CV_PY_FN_WITH_KW_(pyopencv_cv_imreadmulti, 0), "imreadmulti(filename[, mats[, flags]]) -> retval, mats\n. @brief Loads a multi-page image from a file.\n. \n. The function imreadmulti loads a multi-page image from the specified file into a vector of Mat objects.\n. @param filename Name of file to be loaded.\n. @param flags Flag that can take values of cv::ImreadModes, default with cv::IMREAD_ANYCOLOR.\n. @param mats A vector of Mat objects holding each page, if more than one.\n. @sa cv::imread\n\n\n\nimreadmulti(filename, start, count[, mats[, flags]]) -> retval, mats\n. @brief Loads a range of images of a multi-page image from a file.\n. \n. The function imreadmulti loads a specified range from a multi-page image from the specified file into a vector of Mat objects.\n. @param filename Name of file to be loaded.\n. @param start Start index of the image to load\n. @param count Count number of images to load\n. @param flags Flag that can take values of cv::ImreadModes, default with cv::IMREAD_ANYCOLOR.\n. @param mats A vector of Mat objects holding each page, if more than one.\n. @sa cv::imread"}, + {"imwrite", CV_PY_FN_WITH_KW_(pyopencv_cv_imwrite, 0), "imwrite(filename, img[, params]) -> retval\n. @brief Saves an image to a specified file.\n. \n. The function imwrite saves the image to the specified file. The image format is chosen based on the\n. filename extension (see cv::imread for the list of extensions). In general, only 8-bit\n. single-channel or 3-channel (with 'BGR' channel order) images\n. can be saved using this function, with these exceptions:\n. \n. - 16-bit unsigned (CV_16U) images can be saved in the case of PNG, JPEG 2000, and TIFF formats\n. - 32-bit float (CV_32F) images can be saved in PFM, TIFF, OpenEXR, and Radiance HDR formats;\n. 3-channel (CV_32FC3) TIFF images will be saved using the LogLuv high dynamic range encoding\n. (4 bytes per pixel)\n. - PNG images with an alpha channel can be saved using this function. To do this, create\n. 8-bit (or 16-bit) 4-channel image BGRA, where the alpha channel goes last. Fully transparent pixels\n. should have alpha set to 0, fully opaque pixels should have alpha set to 255/65535 (see the code sample below).\n. - Multiple images (vector of Mat) can be saved in TIFF format (see the code sample below).\n. \n. If the image format is not supported, the image will be converted to 8-bit unsigned (CV_8U) and saved that way.\n. \n. If the format, depth or channel order is different, use\n. Mat::convertTo and cv::cvtColor to convert it before saving. Or, use the universal FileStorage I/O\n. functions to save the image to XML or YAML format.\n. \n. The sample below shows how to create a BGRA image, how to set custom compression parameters and save it to a PNG file.\n. It also demonstrates how to save multiple images in a TIFF file:\n. @include snippets/imgcodecs_imwrite.cpp\n. @param filename Name of the file.\n. @param img (Mat or vector of Mat) Image or Images to be saved.\n. 
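[Editor's note] The imread/imreadmulti entries above, together with imdecode/imencode and imwrite, cover both file-based and in-memory image I/O in the cv2 bindings. A minimal Python sketch of the two round trips; the file name is hypothetical, and NumPy is assumed available since the bindings use it as the array type:

    import numpy as np
    import cv2

    # In-memory round trip: encode to PNG bytes, decode back (B,G,R order).
    img = np.zeros((64, 64, 3), dtype=np.uint8)
    img[:, :32] = (255, 0, 0)                      # left half blue in BGR

    ok, buf = cv2.imencode(".png", img)            # buf is a 1-D uint8 array
    assert ok
    decoded = cv2.imdecode(buf, cv2.IMREAD_COLOR)
    assert (decoded == img).all()

    # File round trip; imread returns None (an empty Mat) on failure.
    cv2.imwrite("roundtrip.png", img)              # hypothetical path
    reread = cv2.imread("roundtrip.png", cv2.IMREAD_COLOR)
    assert reread is not None and (reread == img).all()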
@param params Format-specific parameters encoded as pairs (paramId_1, paramValue_1, paramId_2, paramValue_2, ... .) see cv::ImwriteFlags"}, + {"imwritemulti", CV_PY_FN_WITH_KW_(pyopencv_cv_imwritemulti, 0), "imwritemulti(filename, img[, params]) -> retval\n."}, + {"inRange", CV_PY_FN_WITH_KW_(pyopencv_cv_inRange, 0), "inRange(src, lowerb, upperb[, dst]) -> dst\n. @brief Checks if array elements lie between the elements of two other arrays.\n. \n. The function checks the range as follows:\n. - For every element of a single-channel input array:\n. \\f[\\texttt{dst} (I)= \\texttt{lowerb} (I)_0 \\leq \\texttt{src} (I)_0 \\leq \\texttt{upperb} (I)_0\\f]\n. - For two-channel arrays:\n. \\f[\\texttt{dst} (I)= \\texttt{lowerb} (I)_0 \\leq \\texttt{src} (I)_0 \\leq \\texttt{upperb} (I)_0 \\land \\texttt{lowerb} (I)_1 \\leq \\texttt{src} (I)_1 \\leq \\texttt{upperb} (I)_1\\f]\n. - and so forth.\n. \n. That is, dst (I) is set to 255 (all 1 -bits) if src (I) is within the\n. specified 1D, 2D, 3D, ... box and 0 otherwise.\n. \n. When the lower and/or upper boundary parameters are scalars, the indexes\n. (I) at lowerb and upperb in the above formulas should be omitted.\n. @param src first input array.\n. @param lowerb inclusive lower boundary array or a scalar.\n. @param upperb inclusive upper boundary array or a scalar.\n. @param dst output array of the same size as src and CV_8U type."}, + {"insertChannel", CV_PY_FN_WITH_KW_(pyopencv_cv_insertChannel, 0), "insertChannel(src, dst, coi) -> dst\n. @brief Inserts a single channel to dst (coi is 0-based index)\n. @param src input array\n. @param dst output array\n. @param coi index of channel for insertion\n. @sa mixChannels, merge"}, + {"integral", CV_PY_FN_WITH_KW_(pyopencv_cv_integral, 0), "integral(src[, sum[, sdepth]]) -> sum\n. @overload"}, + {"integral2", CV_PY_FN_WITH_KW_(pyopencv_cv_integral2, 0), "integral2(src[, sum[, sqsum[, sdepth[, sqdepth]]]]) -> sum, sqsum\n. @overload"}, + {"integral3", CV_PY_FN_WITH_KW_(pyopencv_cv_integral3, 0), "integral3(src[, sum[, sqsum[, tilted[, sdepth[, sqdepth]]]]]) -> sum, sqsum, tilted\n. @brief Calculates the integral of an image.\n. \n. The function calculates one or more integral images for the source image as follows:\n. \n. \\f[\\texttt{sum} (X,Y) = \\sum _{x<X,y<Y} \\texttt{image} (x,y)\\f]\n. \n. \\f[\\texttt{sqsum} (X,Y) = \\sum _{x<X,y<Y} \\texttt{image} (x,y)^2\\f]\n. \n. \\f[\\texttt{tilted} (X,Y) = \\sum _{y<Y,abs(x-X+1) \\leq Y-y-1} \\texttt{image} (x,y)\\f]\n. \n. Using these integral images, you can calculate sum, mean, and standard deviation over a specific up-right or rotated rectangular region of the image in a constant time, for example:\n. \n. \\f[\\sum _{x_1 \\leq x < x_2, \\, y_1 \\leq y < y_2} \\texttt{image} (x,y) = \\texttt{sum} (x_2,y_2)- \\texttt{sum} (x_1,y_2)- \\texttt{sum} (x_2,y_1)+ \\texttt{sum} (x_1,y_1)\\f]\n. \n. It makes possible to do a fast blurring or fast block correlation with a variable window size, for example. In case of multi-channel images, sums for each channel are accumulated independently.\n. \n. @param src input image as \\f$W \\times H\\f$, 8-bit or floating-point (32f or 64f).\n. @param sum integral image as \\f$(W+1)\\times (H+1)\\f$ , 32-bit integer or floating-point (32f or 64f).\n. @param sqsum integral image for squared pixel values; it is \\f$(W+1)\\times (H+1)\\f$, double-precision floating-point (64f) array.\n. @param tilted integral for the image rotated by 45 degrees; it is \\f$(W+1)\\times (H+1)\\f$ array with the same data type as sum.\n. @param sdepth desired depth of the integral and the tilted integral images, CV_32S, CV_32F, or CV_64F.\n. @param sqdepth desired depth of the integral image of squared pixel values, CV_64F."}, + {"intersectConvexConvex", CV_PY_FN_WITH_KW_(pyopencv_cv_intersectConvexConvex, 0), "intersectConvexConvex(p1, p2[, p12[, handleNested]]) -> retval, p12\n. @brief Finds intersection of two convex polygons\n. \n. @param p1 First polygon\n. @param p2 Second polygon\n. @param p12 Output polygon describing the intersecting area\n. @param handleNested When true, an intersection is found if one of the polygons is fully enclosed in the other.\n. When false, no intersection is found. If the polygons share a side or the vertex of one polygon lies on an edge\n. of the other, they are not considered nested and an intersection will be found regardless of the value of handleNested.\n. \n. @returns Absolute value of area of intersecting polygon\n. \n. @note intersectConvexConvex doesn't confirm that both polygons are convex and will return invalid results if they aren't."}, + {"invert", CV_PY_FN_WITH_KW_(pyopencv_cv_invert, 0), "invert(src[, dst[, flags]]) -> retval, dst\n. @brief Finds the inverse or pseudo-inverse of a matrix.\n. \n. The function cv::invert inverts the matrix src and stores the result in dst\n. . When the matrix src is singular or non-square, the function calculates\n. the pseudo-inverse matrix (the dst matrix) so that norm(src\\*dst - I) is\n. minimal, where I is an identity matrix.\n. \n. In case of the #DECOMP_LU method, the function returns non-zero value if\n. 
the inverse has been successfully calculated and 0 if src is singular.\n. \n. In case of the #DECOMP_SVD method, the function returns the inverse\n. condition number of src (the ratio of the smallest singular value to the\n. largest singular value) and 0 if src is singular. The SVD method\n. calculates a pseudo-inverse matrix if src is singular.\n. \n. Similarly to #DECOMP_LU, the method #DECOMP_CHOLESKY works only with\n. non-singular square matrices that should also be symmetrical and\n. positive definite. In this case, the function stores the inverted\n. matrix in dst and returns non-zero. Otherwise, it returns 0.\n. \n. @param src input floating-point M x N matrix.\n. @param dst output matrix of N x M size and the same type as src.\n. @param flags inversion method (cv::DecompTypes)\n. @sa solve, SVD"}, + {"invertAffineTransform", CV_PY_FN_WITH_KW_(pyopencv_cv_invertAffineTransform, 0), "invertAffineTransform(M[, iM]) -> iM\n. @brief Inverts an affine transformation.\n. \n. The function computes an inverse affine transformation represented by \\f$2 \\times 3\\f$ matrix M:\n. \n. \\f[\\begin{bmatrix} a_{11} & a_{12} & b_1 \\\\ a_{21} & a_{22} & b_2 \\end{bmatrix}\\f]\n. \n. The result is also a \\f$2 \\times 3\\f$ matrix of the same type as M.\n. \n. @param M Original affine transformation.\n. @param iM Output reverse affine transformation."}, + {"isContourConvex", CV_PY_FN_WITH_KW_(pyopencv_cv_isContourConvex, 0), "isContourConvex(contour) -> retval\n. @brief Tests a contour convexity.\n. \n. The function tests whether the input contour is convex or not. The contour must be simple, that is,\n. without self-intersections. Otherwise, the function output is undefined.\n. \n. @param contour Input vector of 2D points, stored in std::vector\\<\\> or Mat"}, + {"kmeans", CV_PY_FN_WITH_KW_(pyopencv_cv_kmeans, 0), "kmeans(data, K, bestLabels, criteria, attempts, flags[, centers]) -> retval, bestLabels, centers\n. @brief Finds centers of clusters and groups input samples around the clusters.\n. \n. The function kmeans implements a k-means algorithm that finds the centers of cluster_count clusters\n. and groups the input samples around the clusters. As an output, \\f$\\texttt{bestLabels}_i\\f$ contains a\n. 0-based cluster index for the sample stored in the \\f$i^{th}\\f$ row of the samples matrix.\n. \n. @note\n. - (Python) An example on K-means clustering can be found at\n. opencv_source_code/samples/python/kmeans.py\n. @param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.\n. Examples of this array can be:\n. - Mat points(count, 2, CV_32F);\n. - Mat points(count, 1, CV_32FC2);\n. - Mat points(1, count, CV_32FC2);\n. - std::vector\\<Point2f\\> points(sampleCount);\n. @param K Number of clusters to split the set by.\n. @param bestLabels Input/output integer array that stores the cluster indices for every sample.\n. @param criteria The algorithm termination criteria, that is, the maximum number of iterations and/or\n. the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of the cluster\n. centers moves by less than criteria.epsilon on some iteration, the algorithm stops.\n. @param attempts Flag to specify the number of times the algorithm is executed using different\n. initial labellings. The algorithm returns the labels that yield the best compactness (see the last\n. function parameter).\n. @param flags Flag that can take values of cv::KmeansFlags\n. 
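[Editor's note] The invert entry above distinguishes DECOMP_LU from DECOMP_SVD; only the latter handles non-square input. A minimal Python sketch of the pseudo-inverse path, assuming NumPy as the cv2 array type:

    import numpy as np
    import cv2

    # Pseudo-inverse of a non-square matrix via DECOMP_SVD; retval is the
    # inverse condition number and would be 0.0 for a singular src.
    src = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
    retval, pinv = cv2.invert(src, flags=cv2.DECOMP_SVD)
    print(pinv.shape)                          # (2, 3): N x M for an M x N input
    print(np.allclose(pinv @ src, np.eye(2)))  # left inverse, since src has full column rank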
@param centers Output matrix of the cluster centers, one row per each cluster center.\n. @return The function returns the compactness measure that is computed as\n. \\f[\\sum _i \\| \\texttt{samples} _i - \\texttt{centers} _{ \\texttt{labels} _i} \\| ^2\\f]\n. after every attempt. The best (minimum) value is chosen and the corresponding labels and the\n. compactness value are returned by the function. Basically, you can use only the core of the\n. function, set the number of attempts to 1, initialize labels each time using a custom algorithm,\n. pass them with the ( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best\n. (most-compact) clustering."}, + {"line", CV_PY_FN_WITH_KW_(pyopencv_cv_line, 0), "line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a line segment connecting two points.\n. \n. The function line draws the line segment between pt1 and pt2 points in the image. The line is\n. clipped by the image boundaries. For non-antialiased lines with integer coordinates, the 8-connected\n. or 4-connected Bresenham algorithm is used. Thick lines are drawn with rounding endings. Antialiased\n. lines are drawn using Gaussian filtering.\n. \n. @param img Image.\n. @param pt1 First point of the line segment.\n. @param pt2 Second point of the line segment.\n. @param color Line color.\n. @param thickness Line thickness.\n. @param lineType Type of the line. See #LineTypes.\n. @param shift Number of fractional bits in the point coordinates."}, + {"linearPolar", CV_PY_FN_WITH_KW_(pyopencv_cv_linearPolar, 0), "linearPolar(src, center, maxRadius, flags[, dst]) -> dst\n. @brief Remaps an image to polar coordinates space.\n. \n. @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags)\n. \n. @internal\n. Transform the source image using the following transformation (See @ref polar_remaps_reference_image \"Polar remaps reference image c)\"):\n. \\f[\\begin{array}{l}\n. dst( \\rho , \\phi ) = src(x,y) \\\\\n. dst.size() \\leftarrow src.size()\n. \\end{array}\\f]\n. \n. where\n. \\f[\\begin{array}{l}\n. I = (dx,dy) = (x - center.x,y - center.y) \\\\\n. \\rho = Kmag \\cdot \\texttt{magnitude} (I) ,\\\\\n. \\phi = angle \\cdot \\texttt{angle} (I)\n. \\end{array}\\f]\n. \n. and\n. \\f[\\begin{array}{l}\n. Kx = src.cols / maxRadius \\\\\n. Ky = src.rows / 2\\Pi\n. \\end{array}\\f]\n. \n. \n. @param src Source image\n. @param dst Destination image. It will have same size and type as src.\n. @param center The transformation center;\n. @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.\n. @param flags A combination of interpolation methods, see #InterpolationFlags\n. \n. @note\n. - The function can not operate in-place.\n. - To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.\n. \n. @sa cv::logPolar\n. @endinternal"}, + {"log", CV_PY_FN_WITH_KW_(pyopencv_cv_log, 0), "log(src[, dst]) -> dst\n. @brief Calculates the natural logarithm of every array element.\n. \n. The function cv::log calculates the natural logarithm of every element of the input array:\n. \\f[\\texttt{dst} (I) = \\log (\\texttt{src}(I)) \\f]\n. \n. Output on zero, negative and special (NaN, Inf) values is undefined.\n. \n. @param src input array.\n. @param dst output array of the same size and type as src .\n. 
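[Editor's note] A short Python sketch of the kmeans entry above, clustering two synthetic 2-D blobs; the shapes of the three return values match the docstring:

    import numpy as np
    import cv2

    # Two well-separated 2-D blobs; kmeans returns the compactness, one
    # 0-based label per input row, and one row per cluster center.
    pts = np.float32(np.vstack([np.random.randn(50, 2),
                                np.random.randn(50, 2) + 5.0]))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 20, 1e-3)
    compactness, labels, centers = cv2.kmeans(pts, 2, None, criteria, 5,
                                              cv2.KMEANS_RANDOM_CENTERS)
    print(centers)                             # roughly (0, 0) and (5, 5)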
@sa exp, cartToPolar, polarToCart, phase, pow, sqrt, magnitude"}, + {"logPolar", CV_PY_FN_WITH_KW_(pyopencv_cv_logPolar, 0), "logPolar(src, center, M, flags[, dst]) -> dst\n. @brief Remaps an image to semilog-polar coordinates space.\n. \n. @deprecated This function produces same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG);\n. \n. @internal\n. Transform the source image using the following transformation (See @ref polar_remaps_reference_image \"Polar remaps reference image d)\"):\n. \\f[\\begin{array}{l}\n. dst( \\rho , \\phi ) = src(x,y) \\\\\n. dst.size() \\leftarrow src.size()\n. \\end{array}\\f]\n. \n. where\n. \\f[\\begin{array}{l}\n. I = (dx,dy) = (x - center.x,y - center.y) \\\\\n. \\rho = M \\cdot log_e(\\texttt{magnitude} (I)) ,\\\\\n. \\phi = Kangle \\cdot \\texttt{angle} (I) \\\\\n. \\end{array}\\f]\n. \n. and\n. \\f[\\begin{array}{l}\n. M = src.cols / log_e(maxRadius) \\\\\n. Kangle = src.rows / 2\\Pi \\\\\n. \\end{array}\\f]\n. \n. The function emulates the human \"foveal\" vision and can be used for fast scale and\n. rotation-invariant template matching, for object tracking and so forth.\n. @param src Source image\n. @param dst Destination image. It will have same size and type as src.\n. @param center The transformation center; where the output precision is maximal\n. @param M Magnitude scale parameter. It determines the radius of the bounding circle to transform too.\n. @param flags A combination of interpolation methods, see #InterpolationFlags\n. \n. @note\n. - The function can not operate in-place.\n. - To calculate magnitude and angle in degrees #cartToPolar is used internally thus angles are measured from 0 to 360 with accuracy about 0.3 degrees.\n. \n. @sa cv::linearPolar\n. @endinternal"}, + {"magnitude", CV_PY_FN_WITH_KW_(pyopencv_cv_magnitude, 0), "magnitude(x, y[, magnitude]) -> magnitude\n. @brief Calculates the magnitude of 2D vectors.\n. \n. The function cv::magnitude calculates the magnitude of 2D vectors formed\n. from the corresponding elements of x and y arrays:\n. \\f[\\texttt{dst} (I) = \\sqrt{\\texttt{x}(I)^2 + \\texttt{y}(I)^2}\\f]\n. @param x floating-point array of x-coordinates of the vectors.\n. @param y floating-point array of y-coordinates of the vectors; it must\n. have the same size as x.\n. @param magnitude output array of the same size and type as x.\n. @sa cartToPolar, polarToCart, phase, sqrt"}, + {"matchShapes", CV_PY_FN_WITH_KW_(pyopencv_cv_matchShapes, 0), "matchShapes(contour1, contour2, method, parameter) -> retval\n. @brief Compares two shapes.\n. \n. The function compares two shapes. All three implemented methods use the Hu invariants (see #HuMoments)\n. \n. @param contour1 First contour or grayscale image.\n. @param contour2 Second contour or grayscale image.\n. @param method Comparison method, see #ShapeMatchModes\n. @param parameter Method-specific parameter (not supported now)."}, + {"matchTemplate", CV_PY_FN_WITH_KW_(pyopencv_cv_matchTemplate, 0), "matchTemplate(image, templ, method[, result[, mask]]) -> result\n. @brief Compares a template against overlapped image regions.\n. \n. The function slides through image , compares the overlapped patches of size \\f$w \\times h\\f$ against\n. templ using the specified method and stores the comparison results in result . #TemplateMatchModes\n. describes the formulae for the available comparison methods ( \\f$I\\f$ denotes image, \\f$T\\f$\n. template, \\f$R\\f$ result, \\f$M\\f$ the optional mask ). The summation is done over template and/or\n. 
the image patch: \\f$x' = 0...w-1, y' = 0...h-1\\f$\n. \n. After the function finishes the comparison, the best matches can be found as global minimums (when\n. #TM_SQDIFF was used) or maximums (when #TM_CCORR or #TM_CCOEFF was used) using the\n. #minMaxLoc function. In case of a color image, template summation in the numerator and each sum in\n. the denominator is done over all of the channels and separate mean values are used for each channel.\n. That is, the function can take a color template and a color image. The result will still be a\n. single-channel image, which is easier to analyze.\n. \n. @param image Image where the search is running. It must be 8-bit or 32-bit floating-point.\n. @param templ Searched template. It must be not greater than the source image and have the same\n. data type.\n. @param result Map of comparison results. It must be single-channel 32-bit floating-point. If image\n. is \\f$W \\times H\\f$ and templ is \\f$w \\times h\\f$ , then result is \\f$(W-w+1) \\times (H-h+1)\\f$ .\n. @param method Parameter specifying the comparison method, see #TemplateMatchModes\n. @param mask Optional mask. It must have the same size as templ. It must either have the same number\n. of channels as template or only one channel, which is then used for all template and\n. image channels. If the data type is #CV_8U, the mask is interpreted as a binary mask,\n. meaning only elements where mask is nonzero are used and are kept unchanged independent\n. of the actual mask value (weight equals 1). For data type #CV_32F, the mask values are\n. used as weights. The exact formulas are documented in #TemplateMatchModes."}, + {"max", CV_PY_FN_WITH_KW_(pyopencv_cv_max, 0), "max(src1, src2[, dst]) -> dst\n. @brief Calculates per-element maximum of two arrays or an array and a scalar.\n. \n. The function cv::max calculates the per-element maximum of two arrays:\n. \\f[\\texttt{dst} (I)= \\max ( \\texttt{src1} (I), \\texttt{src2} (I))\\f]\n. or array and a scalar:\n. \\f[\\texttt{dst} (I)= \\max ( \\texttt{src1} (I), \\texttt{value} )\\f]\n. @param src1 first input array.\n. @param src2 second input array of the same size and type as src1 .\n. @param dst output array of the same size and type as src1.\n. @sa min, compare, inRange, minMaxLoc, @ref MatrixExpressions"}, + {"mean", CV_PY_FN_WITH_KW_(pyopencv_cv_mean, 0), "mean(src[, mask]) -> retval\n. @brief Calculates an average (mean) of array elements.\n. \n. The function cv::mean calculates the mean value M of array elements,\n. independently for each channel, and returns it:\n. \\f[\\begin{array}{l} N = \\sum _{I: \\; \\texttt{mask} (I) \\ne 0} 1 \\\\ M_c = \\left ( \\sum _{I: \\; \\texttt{mask} (I) \\ne 0}{ \\texttt{mtx} (I)_c} \\right )/N \\end{array}\\f]\n. When all the mask elements are 0's, the function returns Scalar::all(0)\n. @param src input array that should have from 1 to 4 channels so that the result can be stored in\n. Scalar_ .\n. @param mask optional operation mask.\n. @sa countNonZero, meanStdDev, norm, minMaxLoc"}, + {"meanStdDev", CV_PY_FN_WITH_KW_(pyopencv_cv_meanStdDev, 0), "meanStdDev(src[, mean[, stddev[, mask]]]) -> mean, stddev\n. Calculates a mean and standard deviation of array elements.\n. \n. The function cv::meanStdDev calculates the mean and the standard deviation M\n. of array elements independently for each channel and returns it via the\n. output parameters:\n. 
\\f[\\begin{array}{l} N = \\sum _{I, \\texttt{mask} (I) \\ne 0} 1 \\\\ \\texttt{mean} _c = \\frac{\\sum_{ I: \\; \\texttt{mask}(I) \\ne 0} \\texttt{src} (I)_c}{N} \\\\ \\texttt{stddev} _c = \\sqrt{\\frac{\\sum_{ I: \\; \\texttt{mask}(I) \\ne 0} \\left ( \\texttt{src} (I)_c - \\texttt{mean} _c \\right )^2}{N}} \\end{array}\\f]\n. When all the mask elements are 0's, the function returns\n. mean=stddev=Scalar::all(0).\n. @note The calculated standard deviation is only the diagonal of the\n. complete normalized covariance matrix. If the full matrix is needed, you\n. can reshape the multi-channel array M x N to the single-channel array\n. M\\*N x mtx.channels() (only possible when the matrix is continuous) and\n. then pass the matrix to calcCovarMatrix .\n. @param src input array that should have from 1 to 4 channels so that the results can be stored in\n. Scalar_ 's.\n. @param mean output parameter: calculated mean value.\n. @param stddev output parameter: calculated standard deviation.\n. @param mask optional operation mask.\n. @sa countNonZero, mean, norm, minMaxLoc, calcCovarMatrix"}, + {"medianBlur", CV_PY_FN_WITH_KW_(pyopencv_cv_medianBlur, 0), "medianBlur(src, ksize[, dst]) -> dst\n. @brief Blurs an image using the median filter.\n. \n. The function smoothes an image using the median filter with the \\f$\\texttt{ksize} \\times\n. \\texttt{ksize}\\f$ aperture. Each channel of a multi-channel image is processed independently.\n. In-place operation is supported.\n. \n. @note The median filter uses #BORDER_REPLICATE internally to cope with border pixels, see #BorderTypes\n. \n. @param src input 1-, 3-, or 4-channel image; when ksize is 3 or 5, the image depth should be\n. CV_8U, CV_16U, or CV_32F, for larger aperture sizes, it can only be CV_8U.\n. @param dst destination array of the same size and type as src.\n. @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ...\n. @sa bilateralFilter, blur, boxFilter, GaussianBlur"}, + {"merge", CV_PY_FN_WITH_KW_(pyopencv_cv_merge, 0), "merge(mv[, dst]) -> dst\n. @overload\n. @param mv input vector of matrices to be merged; all the matrices in mv must have the same\n. size and the same depth.\n. @param dst output array of the same size and the same depth as mv[0]; The number of channels will\n. be the total number of channels in the matrix array."}, + {"min", CV_PY_FN_WITH_KW_(pyopencv_cv_min, 0), "min(src1, src2[, dst]) -> dst\n. @brief Calculates per-element minimum of two arrays or an array and a scalar.\n. \n. The function cv::min calculates the per-element minimum of two arrays:\n. \\f[\\texttt{dst} (I)= \\min ( \\texttt{src1} (I), \\texttt{src2} (I))\\f]\n. or array and a scalar:\n. \\f[\\texttt{dst} (I)= \\min ( \\texttt{src1} (I), \\texttt{value} )\\f]\n. @param src1 first input array.\n. @param src2 second input array of the same size and type as src1.\n. @param dst output array of the same size and type as src1.\n. @sa max, compare, inRange, minMaxLoc"}, + {"minAreaRect", CV_PY_FN_WITH_KW_(pyopencv_cv_minAreaRect, 0), "minAreaRect(points) -> retval\n. @brief Finds a rotated rectangle of the minimum area enclosing the input 2D point set.\n. \n. The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a\n. specified point set. Developer should keep in mind that the returned RotatedRect can contain negative\n. indices when data is close to the containing Mat element boundary.\n. \n. 
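[Editor's note] A minimal Python sketch of the meanStdDev entry above, restricted to a mask region as the formulas describe (NumPy assumed as the cv2 array type):

    import numpy as np
    import cv2

    # Per-channel mean/stddev; only pixels where the mask is non-zero count.
    img = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
    mask = np.zeros((100, 100), dtype=np.uint8)
    mask[25:75, 25:75] = 255                 # central square only
    mean, stddev = cv2.meanStdDev(img, mask=mask)
    print(mean.ravel(), stddev.ravel())      # one value per channel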
@param points Input vector of 2D points, stored in std::vector\\<\\> or Mat"}, + {"minEnclosingCircle", CV_PY_FN_WITH_KW_(pyopencv_cv_minEnclosingCircle, 0), "minEnclosingCircle(points) -> center, radius\n. @brief Finds a circle of the minimum area enclosing a 2D point set.\n. \n. The function finds the minimal enclosing circle of a 2D point set using an iterative algorithm.\n. \n. @param points Input vector of 2D points, stored in std::vector\\<\\> or Mat\n. @param center Output center of the circle.\n. @param radius Output radius of the circle."}, + {"minEnclosingTriangle", CV_PY_FN_WITH_KW_(pyopencv_cv_minEnclosingTriangle, 0), "minEnclosingTriangle(points[, triangle]) -> retval, triangle\n. @brief Finds a triangle of minimum area enclosing a 2D point set and returns its area.\n. \n. The function finds a triangle of minimum area enclosing the given set of 2D points and returns its\n. area. The output for a given 2D point set is shown in the image below. 2D points are depicted in\n. *red* and the enclosing triangle in *yellow*.\n. \n. ![Sample output of the minimum enclosing triangle function](pics/minenclosingtriangle.png)\n. \n. The implementation of the algorithm is based on O'Rourke's @cite ORourke86 and Klee and Laskowski's\n. @cite KleeLaskowski85 papers. O'Rourke provides a \\f$\\theta(n)\\f$ algorithm for finding the minimal\n. enclosing triangle of a 2D convex polygon with n vertices. Since the #minEnclosingTriangle function\n. takes a 2D point set as input an additional preprocessing step of computing the convex hull of the\n. 2D point set is required. The complexity of the #convexHull function is \\f$O(n log(n))\\f$ which is higher\n. than \\f$\\theta(n)\\f$. Thus the overall complexity of the function is \\f$O(n log(n))\\f$.\n. \n. @param points Input vector of 2D points with depth CV_32S or CV_32F, stored in std::vector\\<\\> or Mat\n. @param triangle Output vector of three 2D points defining the vertices of the triangle. The depth\n. of the OutputArray must be CV_32F."}, + {"minMaxLoc", CV_PY_FN_WITH_KW_(pyopencv_cv_minMaxLoc, 0), "minMaxLoc(src[, mask]) -> minVal, maxVal, minLoc, maxLoc\n. @brief Finds the global minimum and maximum in an array.\n. \n. The function cv::minMaxLoc finds the minimum and maximum element values and their positions. The\n. extrema are searched across the whole array or, if mask is not an empty array, in the specified\n. array region.\n. \n. The function does not work with multi-channel arrays. If you need to find minimum or maximum\n. elements across all the channels, use Mat::reshape first to reinterpret the array as\n. single-channel. Or you may extract the particular channel using either extractImageCOI , or\n. mixChannels , or split .\n. @param src input single-channel array.\n. @param minVal pointer to the returned minimum value; NULL is used if not required.\n. @param maxVal pointer to the returned maximum value; NULL is used if not required.\n. @param minLoc pointer to the returned minimum location (in 2D case); NULL is used if not required.\n. @param maxLoc pointer to the returned maximum location (in 2D case); NULL is used if not required.\n. @param mask optional mask used to select a sub-array.\n. @sa max, min, compare, inRange, extractImageCOI, mixChannels, split, Mat::reshape"}, + {"mixChannels", CV_PY_FN_WITH_KW_(pyopencv_cv_mixChannels, 0), "mixChannels(src, dst, fromTo) -> dst\n. @overload\n. @param src input array or vector of matrices; all of the matrices must have the same size and the\n. same depth.\n. 
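[Editor's note] minMaxLoc above is the usual companion of the matchTemplate entry documented earlier; a minimal Python sketch combining the two (file names hypothetical):

    import cv2

    # With TM_SQDIFF_NORMED the best match is the global *minimum* of
    # result; minMaxLoc returns both extrema and their locations.
    scene = cv2.imread("scene.png", cv2.IMREAD_GRAYSCALE)
    patch = cv2.imread("patch.png", cv2.IMREAD_GRAYSCALE)
    result = cv2.matchTemplate(scene, patch, cv2.TM_SQDIFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)
    top_left = min_loc                   # use max_loc for TM_CCORR/TM_CCOEFF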
@param dst output array or vector of matrices; all the matrices **must be allocated**; their size and\n. depth must be the same as in src[0].\n. @param fromTo array of index pairs specifying which channels are copied and where; fromTo[k\\*2] is\n. a 0-based index of the input channel in src, fromTo[k\\*2+1] is an index of the output channel in\n. dst; the continuous channel numbering is used: the first input image channels are indexed from 0 to\n. src[0].channels()-1, the second input image channels are indexed from src[0].channels() to\n. src[0].channels() + src[1].channels()-1, and so on, the same scheme is used for the output image\n. channels; as a special case, when fromTo[k\\*2] is negative, the corresponding output channel is\n. filled with zero ."}, + {"moments", CV_PY_FN_WITH_KW_(pyopencv_cv_moments, 0), "moments(array[, binaryImage]) -> retval\n. @brief Calculates all of the moments up to the third order of a polygon or rasterized shape.\n. \n. The function computes moments, up to the 3rd order, of a vector shape or a rasterized shape. The\n. results are returned in the structure cv::Moments.\n. \n. @param array Raster image (single-channel, 8-bit or floating-point 2D array) or an array (\n. \\f$1 \\times N\\f$ or \\f$N \\times 1\\f$ ) of 2D points (Point or Point2f ).\n. @param binaryImage If it is true, all non-zero image pixels are treated as 1's. The parameter is\n. used for images only.\n. @returns moments.\n. \n. @note Only applicable to contour moments calculations from Python bindings: Note that the numpy\n. type for the input array should be either np.int32 or np.float32.\n. \n. @sa contourArea, arcLength"}, + {"morphologyEx", CV_PY_FN_WITH_KW_(pyopencv_cv_morphologyEx, 0), "morphologyEx(src, op, kernel[, dst[, anchor[, iterations[, borderType[, borderValue]]]]]) -> dst\n. @brief Performs advanced morphological transformations.\n. \n. The function cv::morphologyEx can perform advanced morphological transformations using an erosion and dilation as\n. basic operations.\n. \n. Any of the operations can be done in-place. In case of multi-channel images, each channel is\n. processed independently.\n. \n. @param src Source image. The number of channels can be arbitrary. The depth should be one of\n. CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.\n. @param dst Destination image of the same size and type as source image.\n. @param op Type of a morphological operation, see #MorphTypes\n. @param kernel Structuring element. It can be created using #getStructuringElement.\n. @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the\n. kernel center.\n. @param iterations Number of times erosion and dilation are applied.\n. @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @param borderValue Border value in case of a constant border. The default value has a special\n. meaning.\n. @sa dilate, erode, getStructuringElement\n. @note The number of iterations is the number of times erosion or dilatation operation will be applied.\n. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to apply\n. successively: erode -> erode -> dilate -> dilate (and not erode -> dilate -> erode -> dilate)."}, + {"mulSpectrums", CV_PY_FN_WITH_KW_(pyopencv_cv_mulSpectrums, 0), "mulSpectrums(a, b, flags[, c[, conjB]]) -> c\n. @brief Performs the per-element multiplication of two Fourier spectrums.\n. \n. 
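[Editor's note] The moments entry above returns a dict in the Python bindings; the classic use is computing a shape centroid from the raw moments, sketched below:

    import numpy as np
    import cv2

    # Centroid of a rasterized shape from its moments: (m10/m00, m01/m00).
    img = np.zeros((100, 100), dtype=np.uint8)
    cv2.circle(img, (40, 60), 20, 255, -1)   # filled disc centered at (40, 60)
    m = cv2.moments(img, binaryImage=True)
    cx, cy = m["m10"] / m["m00"], m["m01"] / m["m00"]
    print(round(cx), round(cy))              # ~40 ~60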
The function cv::mulSpectrums performs the per-element multiplication of the two CCS-packed or complex\n. matrices that are results of a real or complex Fourier transform.\n. \n. The function, together with dft and idft , may be used to calculate convolution (pass conjB=false )\n. or correlation (pass conjB=true ) of two arrays rapidly. When the arrays are complex, they are\n. simply multiplied (per element) with an optional conjugation of the second-array elements. When the\n. arrays are real, they are assumed to be CCS-packed (see dft for details).\n. @param a first input array.\n. @param b second input array of the same size and type as src1 .\n. @param c output array of the same size and type as src1 .\n. @param flags operation flags; currently, the only supported flag is cv::DFT_ROWS, which indicates that\n. each row of src1 and src2 is an independent 1D Fourier spectrum. If you do not want to use this flag, then simply add a `0` as value.\n. @param conjB optional flag that conjugates the second input array before the multiplication (true)\n. or not (false)."}, + {"mulTransposed", CV_PY_FN_WITH_KW_(pyopencv_cv_mulTransposed, 0), "mulTransposed(src, aTa[, dst[, delta[, scale[, dtype]]]]) -> dst\n. @brief Calculates the product of a matrix and its transposition.\n. \n. The function cv::mulTransposed calculates the product of src and its\n. transposition:\n. \\f[\\texttt{dst} = \\texttt{scale} ( \\texttt{src} - \\texttt{delta} )^T ( \\texttt{src} - \\texttt{delta} )\\f]\n. if aTa=true , and\n. \\f[\\texttt{dst} = \\texttt{scale} ( \\texttt{src} - \\texttt{delta} ) ( \\texttt{src} - \\texttt{delta} )^T\\f]\n. otherwise. The function is used to calculate the covariance matrix. With\n. zero delta, it can be used as a faster substitute for general matrix\n. product A\\*B when B=A'\n. @param src input single-channel matrix. Note that unlike gemm, the\n. function can multiply not only floating-point matrices.\n. @param dst output square matrix.\n. @param aTa Flag specifying the multiplication ordering. See the\n. description below.\n. @param delta Optional delta matrix subtracted from src before the\n. multiplication. When the matrix is empty ( delta=noArray() ), it is\n. assumed to be zero, that is, nothing is subtracted. If it has the same\n. size as src , it is simply subtracted. Otherwise, it is \"repeated\" (see\n. repeat ) to cover the full src and then subtracted. Type of the delta\n. matrix, when it is not empty, must be the same as the type of created\n. output matrix. See the dtype parameter description below.\n. @param scale Optional scale factor for the matrix product.\n. @param dtype Optional type of the output matrix. When it is negative,\n. the output matrix will have the same type as src . Otherwise, it will be\n. type=CV_MAT_DEPTH(dtype) that should be either CV_32F or CV_64F .\n. @sa calcCovarMatrix, gemm, repeat, reduce"}, + {"multiply", CV_PY_FN_WITH_KW_(pyopencv_cv_multiply, 0), "multiply(src1, src2[, dst[, scale[, dtype]]]) -> dst\n. @brief Calculates the per-element scaled product of two arrays.\n. \n. The function multiply calculates the per-element product of two arrays:\n. \n. \\f[\\texttt{dst} (I)= \\texttt{saturate} ( \\texttt{scale} \\cdot \\texttt{src1} (I) \\cdot \\texttt{src2} (I))\\f]\n. \n. There is also a @ref MatrixExpressions -friendly variant of the first function. See Mat::mul .\n. \n. For a not-per-element matrix product, see gemm .\n. \n. @note Saturation is not applied when the output array has the depth\n. CV_32S. 
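[Editor's note] A short Python sketch of the mulTransposed entry above; with delta set to the per-column means, src^T * src becomes the scatter matrix used for covariance estimation, as the docstring notes:

    import numpy as np
    import cv2

    # dst = (src - delta)^T (src - delta) with aTa=True; delta must have
    # the same size (or be repeatable to the size) of src.
    src = np.random.rand(10, 3)
    delta = np.tile(src.mean(axis=0), (10, 1))
    scatter = cv2.mulTransposed(src, True, delta=delta)
    print(scatter.shape)                     # (3, 3) square output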
You may even get a result of an incorrect sign in the case of\n. overflow.\n. @param src1 first input array.\n. @param src2 second input array of the same size and the same type as src1.\n. @param dst output array of the same size and type as src1.\n. @param scale optional scale factor.\n. @param dtype optional depth of the output array\n. @sa add, subtract, divide, scaleAdd, addWeighted, accumulate, accumulateProduct, accumulateSquare,\n. Mat::convertTo"}, + {"norm", CV_PY_FN_WITH_KW_(pyopencv_cv_norm, 0), "norm(src1[, normType[, mask]]) -> retval\n. @brief Calculates the absolute norm of an array.\n. \n. This version of #norm calculates the absolute norm of src1. The type of norm to calculate is specified using #NormTypes.\n. \n. As example for one array consider the function \\f$r(x)= \\begin{pmatrix} x \\\\ 1-x \\end{pmatrix}, x \\in [-1;1]\\f$.\n. The \\f$ L_{1}, L_{2} \\f$ and \\f$ L_{\\infty} \\f$ norm for the sample value \\f$r(-1) = \\begin{pmatrix} -1 \\\\ 2 \\end{pmatrix}\\f$\n. is calculated as follows\n. \\f{align*}\n. \\| r(-1) \\|_{L_1} &= |-1| + |2| = 3 \\\\\n. \\| r(-1) \\|_{L_2} &= \\sqrt{(-1)^{2} + (2)^{2}} = \\sqrt{5} \\\\\n. \\| r(-1) \\|_{L_\\infty} &= \\max(|-1|,|2|) = 2\n. \\f}\n. and for \\f$r(0.5) = \\begin{pmatrix} 0.5 \\\\ 0.5 \\end{pmatrix}\\f$ the calculation is\n. \\f{align*}\n. \\| r(0.5) \\|_{L_1} &= |0.5| + |0.5| = 1 \\\\\n. \\| r(0.5) \\|_{L_2} &= \\sqrt{(0.5)^{2} + (0.5)^{2}} = \\sqrt{0.5} \\\\\n. \\| r(0.5) \\|_{L_\\infty} &= \\max(|0.5|,|0.5|) = 0.5.\n. \\f}\n. The following graphic shows all values for the three norm functions \\f$\\| r(x) \\|_{L_1}, \\| r(x) \\|_{L_2}\\f$ and \\f$\\| r(x) \\|_{L_\\infty}\\f$.\n. It is notable that the \\f$ L_{1} \\f$ norm forms the upper and the \\f$ L_{\\infty} \\f$ norm forms the lower border for the example function \\f$ r(x) \\f$.\n. ![Graphs for the different norm functions from the above example](pics/NormTypes_OneArray_1-2-INF.png)\n. \n. When the mask parameter is specified and it is not empty, the norm is\n. calculated only over the region specified by the mask.\n. \n. If normType is not specified, #NORM_L2 is used.\n. \n. Multi-channel input arrays are treated as single-channel arrays, that is,\n. the results for all channels are combined.\n. \n. Hamming norms can only be calculated with CV_8U depth arrays.\n. \n. @param src1 first input array.\n. @param normType type of the norm (see #NormTypes).\n. @param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type.\n\n\n\nnorm(src1, src2[, normType[, mask]]) -> retval\n. @brief Calculates an absolute difference norm or a relative difference norm.\n. \n. This version of cv::norm calculates the absolute difference norm\n. or the relative difference norm of arrays src1 and src2.\n. The type of norm to calculate is specified using #NormTypes.\n. \n. @param src1 first input array.\n. @param src2 second input array of the same size and the same type as src1.\n. @param normType type of the norm (see #NormTypes).\n. @param mask optional operation mask; it must have the same size as src1 and CV_8UC1 type."}, + {"normalize", CV_PY_FN_WITH_KW_(pyopencv_cv_normalize, 0), "normalize(src, dst[, alpha[, beta[, norm_type[, dtype[, mask]]]]]) -> dst\n. @brief Normalizes the norm or value range of an array.\n. \n. The function cv::normalize normalizes scale and shift the input array elements so that\n. \\f[\\| \\texttt{dst} \\| _{L_p}= \\texttt{alpha}\\f]\n. (where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that\n. 
\\f[\\min _I \\texttt{dst} (I)= \\texttt{alpha} , \\, \\, \\max _I \\texttt{dst} (I)= \\texttt{beta}\\f]\n. \n. when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be\n. normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this\n. sub-array is modified to be normalized. If you want to only use the mask to calculate the norm or\n. min-max but modify the whole array, you can use norm and Mat::convertTo.\n. \n. In case of sparse matrices, only the non-zero values are analyzed and transformed. Because of this,\n. the range transformation for sparse matrices is not allowed since it can shift the zero level.\n. \n. Possible usage with some positive example data:\n. @code{.cpp}\n. vector<double> positiveData = { 2.0, 8.0, 10.0 };\n. vector<double> normalizedData_l1, normalizedData_l2, normalizedData_inf, normalizedData_minmax;\n. \n. // Norm to probability (total count)\n. // sum(numbers) = 20.0\n. // 2.0 0.1 (2.0/20.0)\n. // 8.0 0.4 (8.0/20.0)\n. // 10.0 0.5 (10.0/20.0)\n. normalize(positiveData, normalizedData_l1, 1.0, 0.0, NORM_L1);\n. \n. // Norm to unit vector: ||positiveData|| = 1.0\n. // 2.0 0.15\n. // 8.0 0.62\n. // 10.0 0.77\n. normalize(positiveData, normalizedData_l2, 1.0, 0.0, NORM_L2);\n. \n. // Norm to max element\n. // 2.0 0.2 (2.0/10.0)\n. // 8.0 0.8 (8.0/10.0)\n. // 10.0 1.0 (10.0/10.0)\n. normalize(positiveData, normalizedData_inf, 1.0, 0.0, NORM_INF);\n. \n. // Norm to range [0.0;1.0]\n. // 2.0 0.0 (shift to left border)\n. // 8.0 0.75 (6.0/8.0)\n. // 10.0 1.0 (shift to right border)\n. normalize(positiveData, normalizedData_minmax, 1.0, 0.0, NORM_MINMAX);\n. @endcode\n. \n. @param src input array.\n. @param dst output array of the same size as src .\n. @param alpha norm value to normalize to or the lower range boundary in case of the range\n. normalization.\n. @param beta upper range boundary in case of the range normalization; it is not used for the norm\n. normalization.\n. @param norm_type normalization type (see cv::NormTypes).\n. @param dtype when negative, the output array has the same type as src; otherwise, it has the same\n. number of channels as src and the depth =CV_MAT_DEPTH(dtype).\n. @param mask optional operation mask.\n. @sa norm, Mat::convertTo, SparseMat::convertTo"}, + {"patchNaNs", CV_PY_FN_WITH_KW_(pyopencv_cv_patchNaNs, 0), "patchNaNs(a[, val]) -> a\n. @brief converts NaNs to the given number\n. @param a input/output matrix (CV_32F type).\n. @param val value to convert the NaNs"}, + {"perspectiveTransform", CV_PY_FN_WITH_KW_(pyopencv_cv_perspectiveTransform, 0), "perspectiveTransform(src, m[, dst]) -> dst\n. @brief Performs the perspective matrix transformation of vectors.\n. \n. The function cv::perspectiveTransform transforms every element of src by\n. treating it as a 2D or 3D vector, in the following way:\n. \\f[(x, y, z) \\rightarrow (x'/w, y'/w, z'/w)\\f]\n. where\n. \\f[(x', y', z', w') = \\texttt{mat} \\cdot \\begin{bmatrix} x & y & z & 1 \\end{bmatrix}\\f]\n. and\n. \\f[w = \\fork{w'}{if \\(w' \\ne 0\\)}{\\infty}{otherwise}\\f]\n. \n. Here a 3D vector transformation is shown. In case of a 2D vector\n. transformation, the z component is omitted.\n. \n. @note The function transforms a sparse set of 2D or 3D vectors. If you\n. want to transform an image using perspective transformation, use\n. warpPerspective . If you have an inverse problem, that is, you want to\n. compute the most probable perspective transformation out of several\n. 
pairs of corresponding points, you can use getPerspectiveTransform or\n. findHomography .\n. @param src input two-channel or three-channel floating-point array; each\n. element is a 2D/3D vector to be transformed.\n. @param dst output array of the same size and type as src.\n. @param m 3x3 or 4x4 floating-point transformation matrix.\n. @sa transform, warpPerspective, getPerspectiveTransform, findHomography"}, + {"phase", CV_PY_FN_WITH_KW_(pyopencv_cv_phase, 0), "phase(x, y[, angle[, angleInDegrees]]) -> angle\n. @brief Calculates the rotation angle of 2D vectors.\n. \n. The function cv::phase calculates the rotation angle of each 2D vector that\n. is formed from the corresponding elements of x and y :\n. \\f[\\texttt{angle} (I) = \\texttt{atan2} ( \\texttt{y} (I), \\texttt{x} (I))\\f]\n. \n. The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,\n. the corresponding angle(I) is set to 0.\n. @param x input floating-point array of x-coordinates of 2D vectors.\n. @param y input array of y-coordinates of 2D vectors; it must have the\n. same size and the same type as x.\n. @param angle output array of vector angles; it has the same size and\n. same type as x .\n. @param angleInDegrees when true, the function calculates the angle in\n. degrees, otherwise, they are measured in radians."}, + {"phaseCorrelate", CV_PY_FN_WITH_KW_(pyopencv_cv_phaseCorrelate, 0), "phaseCorrelate(src1, src2[, window]) -> retval, response\n. @brief The function is used to detect translational shifts that occur between two images.\n. \n. The operation takes advantage of the Fourier shift theorem for detecting the translational shift in\n. the frequency domain. It can be used for fast image registration as well as motion estimation. For\n. more information please see <http://en.wikipedia.org/wiki/Phase_correlation>\n. \n. Calculates the cross-power spectrum of two supplied source arrays. The arrays are padded if needed\n. with getOptimalDFTSize.\n. \n. The function performs the following equations:\n. - First it applies a Hanning window (see <http://en.wikipedia.org/wiki/Hann_function>) to each\n. image to remove possible edge effects. This window is cached until the array size changes to speed\n. up processing time.\n. - Next it computes the forward DFTs of each source array:\n. \\f[\\mathbf{G}_a = \\mathcal{F}\\{src_1\\}, \\; \\mathbf{G}_b = \\mathcal{F}\\{src_2\\}\\f]\n. where \\f$\\mathcal{F}\\f$ is the forward DFT.\n. - It then computes the cross-power spectrum of each frequency domain array:\n. \\f[R = \\frac{ \\mathbf{G}_a \\mathbf{G}_b^*}{|\\mathbf{G}_a \\mathbf{G}_b^*|}\\f]\n. - Next the cross-correlation is converted back into the time domain via the inverse DFT:\n. \\f[r = \\mathcal{F}^{-1}\\{R\\}\\f]\n. - Finally, it computes the peak location and computes a 5x5 weighted centroid around the peak to\n. achieve sub-pixel accuracy.\n. \\f[(\\Delta x, \\Delta y) = \\texttt{weightedCentroid} \\{\\arg \\max_{(x, y)}\\{r\\}\\}\\f]\n. - If non-zero, the response parameter is computed as the sum of the elements of r within the 5x5\n. centroid around the peak location. It is normalized to a maximum of 1 (meaning there is a single\n. peak) and will be smaller when there are multiple peaks.\n. \n. @param src1 Source floating point array (CV_32FC1 or CV_64FC1)\n. @param src2 Source floating point array (CV_32FC1 or CV_64FC1)\n. @param window Floating point array with windowing coefficients to reduce edge effects (optional).\n. @param response Signal power within the 5x5 centroid around the peak, between 0 and 1 (optional).\n. @returns detected phase shift (sub-pixel) between the two arrays.\n. 
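[Editor's note] A minimal Python sketch of the phaseCorrelate entry above, recovering a synthetic translation; the sign of the returned shift depends on the src1/src2 convention, so only the magnitudes are asserted here:

    import numpy as np
    import cv2

    # Recover a pure translation between two frames; response is near 1.0
    # for a single clean correlation peak.
    a = np.random.rand(128, 128).astype(np.float32)
    b = np.roll(np.roll(a, 3, axis=0), 7, axis=1)    # shift by (dx=7, dy=3)
    (dx, dy), response = cv2.phaseCorrelate(a, b)
    print(round(abs(dx)), round(abs(dy)), round(response, 2))  # ~7 ~3 ~1.0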
\n. @sa dft, getOptimalDFTSize, idft, mulSpectrums createHanningWindow"}, + {"pointPolygonTest", CV_PY_FN_WITH_KW_(pyopencv_cv_pointPolygonTest, 0), "pointPolygonTest(contour, pt, measureDist) -> retval\n. @brief Performs a point-in-contour test.\n. \n. The function determines whether the point is inside a contour, outside, or lies on an edge (or\n. coincides with a vertex). It returns positive (inside), negative (outside), or zero (on an edge)\n. value, correspondingly. When measureDist=false , the return value is +1, -1, and 0, respectively.\n. Otherwise, the return value is a signed distance between the point and the nearest contour edge.\n. \n. See below a sample output of the function where each image pixel is tested against the contour:\n. \n. ![sample output](pics/pointpolygon.png)\n. \n. @param contour Input contour.\n. @param pt Point tested against the contour.\n. @param measureDist If true, the function estimates the signed distance from the point to the\n. nearest contour edge. Otherwise, the function only checks if the point is inside a contour or not."}, + {"polarToCart", CV_PY_FN_WITH_KW_(pyopencv_cv_polarToCart, 0), "polarToCart(magnitude, angle[, x[, y[, angleInDegrees]]]) -> x, y\n. @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.\n. \n. The function cv::polarToCart calculates the Cartesian coordinates of each 2D\n. vector represented by the corresponding elements of magnitude and angle:\n. \\f[\\begin{array}{l} \\texttt{x} (I) = \\texttt{magnitude} (I) \\cos ( \\texttt{angle} (I)) \\\\ \\texttt{y} (I) = \\texttt{magnitude} (I) \\sin ( \\texttt{angle} (I)) \\\\ \\end{array}\\f]\n. \n. The relative accuracy of the estimated coordinates is about 1e-6.\n. @param magnitude input floating-point array of magnitudes of 2D vectors;\n. it can be an empty matrix (=Mat()), in this case, the function assumes\n. that all the magnitudes are =1; if it is not empty, it must have the\n. same size and type as angle.\n. @param angle input floating-point array of angles of 2D vectors.\n. @param x output array of x-coordinates of 2D vectors; it has the same\n. size and type as angle.\n. @param y output array of y-coordinates of 2D vectors; it has the same\n. size and type as angle.\n. @param angleInDegrees when true, the input angles are measured in\n. degrees, otherwise, they are measured in radians.\n. @sa cartToPolar, magnitude, phase, exp, log, pow, sqrt"}, + {"polylines", CV_PY_FN_WITH_KW_(pyopencv_cv_polylines, 0), "polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws several polygonal curves.\n. \n. @param img Image.\n. @param pts Array of polygonal curves.\n. @param isClosed Flag indicating whether the drawn polylines are closed or not. If they are closed,\n. the function draws a line from the last vertex of each curve to its first vertex.\n. @param color Polyline color.\n. @param thickness Thickness of the polyline edges.\n. @param lineType Type of the line segments. See #LineTypes\n. @param shift Number of fractional bits in the vertex coordinates.\n. \n. The function cv::polylines draws one or more polygonal curves."}, + {"pow", CV_PY_FN_WITH_KW_(pyopencv_cv_pow, 0), "pow(src, power[, dst]) -> dst\n. @brief Raises every array element to a power.\n. \n. The function cv::pow raises every element of the input array to power :\n. \\f[\\texttt{dst} (I) = \\fork{\\texttt{src}(I)^{power}}{if \\(\\texttt{power}\\) is integer}{|\\texttt{src}(I)|^{power}}{otherwise}\\f]\n. \n. 
So, for a non-integer power exponent, the absolute values of input array\n. elements are used. However, it is possible to get true values for\n. negative values using some extra operations. In the example below,\n. computing the 5th root of array src shows:\n. @code{.cpp}\n. Mat mask = src < 0;\n. pow(src, 1./5, dst);\n. subtract(Scalar::all(0), dst, dst, mask);\n. @endcode\n. For some values of power, such as integer values, 0.5 and -0.5,\n. specialized faster algorithms are used.\n. \n. Special values (NaN, Inf) are not handled.\n. @param src input array.\n. @param power exponent of power.\n. @param dst output array of the same size and type as src.\n. @sa sqrt, exp, log, cartToPolar, polarToCart"}, + {"preCornerDetect", CV_PY_FN_WITH_KW_(pyopencv_cv_preCornerDetect, 0), "preCornerDetect(src, ksize[, dst[, borderType]]) -> dst\n. @brief Calculates a feature map for corner detection.\n. \n. The function calculates the complex spatial derivative-based function of the source image\n. \n. \\f[\\texttt{dst} = (D_x \\texttt{src} )^2 \\cdot D_{yy} \\texttt{src} + (D_y \\texttt{src} )^2 \\cdot D_{xx} \\texttt{src} - 2 D_x \\texttt{src} \\cdot D_y \\texttt{src} \\cdot D_{xy} \\texttt{src}\\f]\n. \n. where \\f$D_x\\f$,\\f$D_y\\f$ are the first image derivatives, \\f$D_{xx}\\f$,\\f$D_{yy}\\f$ are the second image\n. derivatives, and \\f$D_{xy}\\f$ is the mixed derivative.\n. \n. The corners can be found as local maximums of the functions, as shown below:\n. @code\n. Mat corners, dilated_corners;\n. preCornerDetect(image, corners, 3);\n. // dilation with 3x3 rectangular structuring element\n. dilate(corners, dilated_corners, Mat(), 1);\n. Mat corner_mask = corners == dilated_corners;\n. @endcode\n. \n. @param src Source single-channel 8-bit or floating-point image.\n. @param dst Output image that has the type CV_32F and the same size as src .\n. @param ksize %Aperture size of the Sobel operator.\n. @param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported."}, + {"putText", CV_PY_FN_WITH_KW_(pyopencv_cv_putText, 0), "putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> img\n. @brief Draws a text string.\n. \n. The function cv::putText renders the specified text string in the image. Symbols that cannot be rendered\n. using the specified font are replaced by question marks. See #getTextSize for a text rendering code\n. example.\n. \n. @param img Image.\n. @param text Text string to be drawn.\n. @param org Bottom-left corner of the text string in the image.\n. @param fontFace Font type, see #HersheyFonts.\n. @param fontScale Font scale factor that is multiplied by the font-specific base size.\n. @param color Text color.\n. @param thickness Thickness of the lines used to draw a text.\n. @param lineType Line type. See #LineTypes\n. @param bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise,\n. it is at the top-left corner."}, + {"pyrDown", CV_PY_FN_WITH_KW_(pyopencv_cv_pyrDown, 0), "pyrDown(src[, dst[, dstsize[, borderType]]]) -> dst\n. @brief Blurs an image and downsamples it.\n. \n. By default, size of the output image is computed as `Size((src.cols+1)/2, (src.rows+1)/2)`, but in\n. any case, the following conditions should be satisfied:\n. \n. \\f[\\begin{array}{l} | \\texttt{dstsize.width} *2-src.cols| \\leq 2 \\\\ | \\texttt{dstsize.height} *2-src.rows| \\leq 2 \\end{array}\\f]\n. \n. The function performs the downsampling step of the Gaussian pyramid construction. First, it\n. 
convolves the source image with the kernel:\n. \n. \\f[\\frac{1}{256} \\begin{bmatrix} 1 & 4 & 6 & 4 & 1 \\\\ 4 & 16 & 24 & 16 & 4 \\\\ 6 & 24 & 36 & 24 & 6 \\\\ 4 & 16 & 24 & 16 & 4 \\\\ 1 & 4 & 6 & 4 & 1 \\end{bmatrix}\\f]\n. \n. Then, it downsamples the image by rejecting even rows and columns.\n. \n. @param src input image.\n. @param dst output image; it has the specified size and the same type as src.\n. @param dstsize size of the output image.\n. @param borderType Pixel extrapolation method, see #BorderTypes (#BORDER_CONSTANT isn't supported)"}, + {"pyrMeanShiftFiltering", CV_PY_FN_WITH_KW_(pyopencv_cv_pyrMeanShiftFiltering, 0), "pyrMeanShiftFiltering(src, sp, sr[, dst[, maxLevel[, termcrit]]]) -> dst\n. @brief Performs initial step of meanshift segmentation of an image.\n. \n. The function implements the filtering stage of meanshift segmentation, that is, the output of the\n. function is the filtered \"posterized\" image with color gradients and fine-grain texture flattened.\n. At every pixel (X,Y) of the input image (or down-sized input image, see below) the function executes\n. meanshift iterations, that is, the pixel (X,Y) neighborhood in the joint space-color hyperspace is\n. considered:\n. \n. \\f[(x,y): X- \\texttt{sp} \\le x \\le X+ \\texttt{sp} , Y- \\texttt{sp} \\le y \\le Y+ \\texttt{sp} , ||(R,G,B)-(r,g,b)|| \\le \\texttt{sr}\\f]\n. \n. where (R,G,B) and (r,g,b) are the vectors of color components at (X,Y) and (x,y), respectively\n. (though, the algorithm does not depend on the color space used, so any 3-component color space can\n. be used instead). Over the neighborhood the average spatial value (X',Y') and average color vector\n. (R',G',B') are found and they act as the neighborhood center on the next iteration:\n. \n. \\f[(X,Y)~(X',Y'), (R,G,B)~(R',G',B').\\f]\n. \n. After the iterations are over, the color components of the initial pixel (that is, the pixel from where\n. the iterations started) are set to the final value (average color at the last iteration):\n. \n. \\f[I(X,Y) <- (R*,G*,B*)\\f]\n. \n. When maxLevel \\> 0, the Gaussian pyramid of maxLevel+1 levels is built, and the above procedure is\n. run on the smallest layer first. After that, the results are propagated to the larger layer and the\n. iterations are run again only on those pixels where the layer colors differ by more than sr from the\n. lower-resolution layer of the pyramid. That makes boundaries of color regions sharper. Note that the\n. results will be actually different from the ones obtained by running the meanshift procedure on the\n. whole original image (i.e. when maxLevel==0).\n. \n. @param src The source 8-bit, 3-channel image.\n. @param dst The destination image of the same format and the same size as the source.\n. @param sp The spatial window radius.\n. @param sr The color window radius.\n. @param maxLevel Maximum level of the pyramid for the segmentation.\n. @param termcrit Termination criteria: when to stop meanshift iterations."}, + {"pyrUp", CV_PY_FN_WITH_KW_(pyopencv_cv_pyrUp, 0), "pyrUp(src[, dst[, dstsize[, borderType]]]) -> dst\n. @brief Upsamples an image and then blurs it.\n. \n. By default, size of the output image is computed as `Size(src.cols\\*2, src.rows\\*2)`, but in any\n. case, the following conditions should be satisfied:\n. \n. \\f[\\begin{array}{l} | \\texttt{dstsize.width} -src.cols*2| \\leq ( \\texttt{dstsize.width} \\mod 2) \\\\ | \\texttt{dstsize.height} -src.rows*2| \\leq ( \\texttt{dstsize.height} \\mod 2) \\end{array}\\f]\n. \n. 
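[Editor's note] The pyrDown/pyrUp pair above is the building block for Gaussian and Laplacian pyramids; a minimal Python sketch of one level (input file hypothetical):

    import cv2

    # One pyramid level down and back up; the difference is a Laplacian
    # layer holding the detail that pyrDown discarded.
    img = cv2.imread("photo.png")
    down = cv2.pyrDown(img)              # about half size per dimension
    up = cv2.pyrUp(down, dstsize=(img.shape[1], img.shape[0]))
    laplacian = cv2.subtract(img, up)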
The function performs the upsampling step of the Gaussian pyramid construction, though it can\n. actually be used to construct the Laplacian pyramid. First, it upsamples the source image by\n. injecting even zero rows and columns and then convolves the result with the same kernel as in\n. pyrDown multiplied by 4.\n. \n. @param src input image.\n. @param dst output image. It has the specified size and the same type as src .\n. @param dstsize size of the output image.\n. @param borderType Pixel extrapolation method, see #BorderTypes (only #BORDER_DEFAULT is supported)"}, + {"randShuffle", CV_PY_FN_WITH_KW_(pyopencv_cv_randShuffle, 0), "randShuffle(dst[, iterFactor]) -> dst\n. @brief Shuffles the array elements randomly.\n. \n. The function cv::randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and\n. swapping them. The number of such swap operations will be dst.rows\\*dst.cols\\*iterFactor .\n. @param dst input/output numerical 1D array.\n. @param iterFactor scale factor that determines the number of random swap operations (see the details\n. below).\n. @param rng optional random number generator used for shuffling; if it is zero, theRNG () is used\n. instead.\n. @sa RNG, sort"}, + {"randn", CV_PY_FN_WITH_KW_(pyopencv_cv_randn, 0), "randn(dst, mean, stddev) -> dst\n. @brief Fills the array with normally distributed random numbers.\n. \n. The function cv::randn fills the matrix dst with normally distributed random numbers with the specified\n. mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the\n. value range of the output array data type.\n. @param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.\n. @param mean mean value (expectation) of the generated random numbers.\n. @param stddev standard deviation of the generated random numbers; it can be either a vector (in\n. which case a diagonal standard deviation matrix is assumed) or a square matrix.\n. @sa RNG, randu"}, + {"randu", CV_PY_FN_WITH_KW_(pyopencv_cv_randu, 0), "randu(dst, low, high) -> dst\n. @brief Generates a single uniformly-distributed random number or an array of random numbers.\n. \n. Non-template variant of the function fills the matrix dst with uniformly-distributed\n. random numbers from the specified range:\n. \\f[\\texttt{low} _c \\leq \\texttt{dst} (I)_c < \\texttt{high} _c\\f]\n. @param dst output array of random numbers; the array must be pre-allocated.\n. @param low inclusive lower boundary of the generated random numbers.\n. @param high exclusive upper boundary of the generated random numbers.\n. @sa RNG, randn, theRNG"}, + {"rectangle", CV_PY_FN_WITH_KW_(pyopencv_cv_rectangle, 0), "rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a simple, thick, or filled up-right rectangle.\n. \n. The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners\n. are pt1 and pt2.\n. \n. @param img Image.\n. @param pt1 Vertex of the rectangle.\n. @param pt2 Vertex of the rectangle opposite to pt1 .\n. @param color Rectangle color or brightness (grayscale image).\n. @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,\n. mean that the function has to draw a filled rectangle.\n. @param lineType Type of the line. See #LineTypes\n. @param shift Number of fractional bits in the point coordinates.\n\n\n\nrectangle(img, rec, color[, thickness[, lineType[, shift]]]) -> img\n. @overload\n. \n. 
+ {"randShuffle", CV_PY_FN_WITH_KW_(pyopencv_cv_randShuffle, 0), "randShuffle(dst[, iterFactor]) -> dst\n. @brief Shuffles the array elements randomly.\n. \n. The function cv::randShuffle shuffles the specified 1D array by randomly choosing pairs of elements and\n. swapping them. The number of such swap operations will be dst.rows\\*dst.cols\\*iterFactor.\n. @param dst input/output numerical 1D array.\n. @param iterFactor scale factor that determines the number of random swap operations (see the details\n. below).\n. @param rng optional random number generator used for shuffling; if it is zero, theRNG() is used\n. instead.\n. @sa RNG, sort"},
+ {"randn", CV_PY_FN_WITH_KW_(pyopencv_cv_randn, 0), "randn(dst, mean, stddev) -> dst\n. @brief Fills the array with normally distributed random numbers.\n. \n. The function cv::randn fills the matrix dst with normally distributed random numbers with the specified\n. mean vector and the standard deviation matrix. The generated random numbers are clipped to fit the\n. value range of the output array data type.\n. @param dst output array of random numbers; the array must be pre-allocated and have 1 to 4 channels.\n. @param mean mean value (expectation) of the generated random numbers.\n. @param stddev standard deviation of the generated random numbers; it can be either a vector (in\n. which case a diagonal standard deviation matrix is assumed) or a square matrix.\n. @sa RNG, randu"},
+ {"randu", CV_PY_FN_WITH_KW_(pyopencv_cv_randu, 0), "randu(dst, low, high) -> dst\n. @brief Generates a single uniformly-distributed random number or an array of random numbers.\n. \n. The non-template variant of the function fills the matrix dst with uniformly-distributed\n. random numbers from the specified range:\n. \\f[\\texttt{low} _c \\leq \\texttt{dst} (I)_c < \\texttt{high} _c\\f]\n. @param dst output array of random numbers; the array must be pre-allocated.\n. @param low inclusive lower boundary of the generated random numbers.\n. @param high exclusive upper boundary of the generated random numbers.\n. @sa RNG, randn, theRNG"},
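+// [illustrative sketch, not generated by OpenCV] randu/randn fill a pre-allocated
+// array in place; a minimal NumPy-backed sketch assuming stock cv2 semantics:
+//     import cv2, numpy as np
+//     u = np.empty((64, 64, 3), np.uint8)
+//     cv2.randu(u, 0, 256)                      # uniform in [0, 256) per channel
+//     g = np.empty((64, 64), np.float32)
+//     cv2.randn(g, 0.0, 1.0)                    # mean 0, stddev 1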
+ {"rectangle", CV_PY_FN_WITH_KW_(pyopencv_cv_rectangle, 0), "rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img\n. @brief Draws a simple, thick, or filled up-right rectangle.\n. \n. The function cv::rectangle draws a rectangle outline or a filled rectangle whose two opposite corners\n. are pt1 and pt2.\n. \n. @param img Image.\n. @param pt1 Vertex of the rectangle.\n. @param pt2 Vertex of the rectangle opposite to pt1.\n. @param color Rectangle color or brightness (grayscale image).\n. @param thickness Thickness of lines that make up the rectangle. Negative values, like #FILLED,\n. mean that the function has to draw a filled rectangle.\n. @param lineType Type of the line. See #LineTypes\n. @param shift Number of fractional bits in the point coordinates.\n\n\n\nrectangle(img, rec, color[, thickness[, lineType[, shift]]]) -> img\n. @overload\n. \n. use the `rec` parameter as an alternative specification of the drawn rectangle: `rec.tl()` and\n. `rec.br()-Point(1,1)` are opposite corners"},
+ {"reduce", CV_PY_FN_WITH_KW_(pyopencv_cv_reduce, 0), "reduce(src, dim, rtype[, dst[, dtype]]) -> dst\n. @brief Reduces a matrix to a vector.\n. \n. The function #reduce reduces the matrix to a vector by treating the matrix rows/columns as a set of\n. 1D vectors and performing the specified operation on the vectors until a single row/column is\n. obtained. For example, the function can be used to compute horizontal and vertical projections of a\n. raster image. In case of #REDUCE_MAX and #REDUCE_MIN, the output image should have the same type as the source one.\n. In case of #REDUCE_SUM and #REDUCE_AVG, the output may have a larger element bit-depth to preserve accuracy.\n. Multi-channel arrays are also supported in these two reduction modes.\n. \n. The following code demonstrates its usage for a single channel matrix.\n. @snippet snippets/core_reduce.cpp example\n. \n. And the following code demonstrates its usage for a two-channel matrix.\n. @snippet snippets/core_reduce.cpp example2\n. \n. @param src input 2D matrix.\n. @param dst output vector. Its size and type are defined by the dim and dtype parameters.\n. @param dim dimension index along which the matrix is reduced. 0 means that the matrix is reduced to\n. a single row. 1 means that the matrix is reduced to a single column.\n. @param rtype reduction operation that could be one of #ReduceTypes\n. @param dtype when negative, the output vector will have the same type as the input matrix,\n. otherwise, its type will be CV_MAKE_TYPE(CV_MAT_DEPTH(dtype), src.channels()).\n. @sa repeat"},
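+// [illustrative sketch, not generated by OpenCV] The "projection" use of reduce
+// mentioned in the docstring above, assuming stock cv2 semantics (gray is a
+// single-channel 8-bit image):
+//     import cv2
+//     col_sums = cv2.reduce(gray, 0, cv2.REDUCE_SUM, dtype=cv2.CV_32S)  # 1 x W
+//     row_sums = cv2.reduce(gray, 1, cv2.REDUCE_SUM, dtype=cv2.CV_32S)  # H x 1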
+ {"remap", CV_PY_FN_WITH_KW_(pyopencv_cv_remap, 0), "remap(src, map1, map2, interpolation[, dst[, borderMode[, borderValue]]]) -> dst\n. @brief Applies a generic geometrical transformation to an image.\n. \n. The function remap transforms the source image using the specified map:\n. \n. \\f[\\texttt{dst} (x,y) = \\texttt{src} (map_x(x,y),map_y(x,y))\\f]\n. \n. where values of pixels with non-integer coordinates are computed using one of the available\n. interpolation methods. \\f$map_x\\f$ and \\f$map_y\\f$ can be encoded as separate floating-point maps\n. in \\f$map_1\\f$ and \\f$map_2\\f$ respectively, or interleaved floating-point maps of \\f$(x,y)\\f$ in\n. \\f$map_1\\f$, or fixed-point maps created by using convertMaps. The reason you might want to\n. convert from floating to fixed-point representations of a map is that they can yield much faster\n. (\\~2x) remapping operations. In the converted case, \\f$map_1\\f$ contains pairs (cvFloor(x),\n. cvFloor(y)) and \\f$map_2\\f$ contains indices in a table of interpolation coefficients.\n. \n. This function cannot operate in-place.\n. \n. @param src Source image.\n. @param dst Destination image. It has the same size as map1 and the same type as src .\n. @param map1 The first map of either (x,y) points or just x values having the type CV_16SC2 ,\n. CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating point\n. representation to fixed-point for speed.\n. @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map\n. if map1 is (x,y) points), respectively.\n. @param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA\n. and #INTER_LINEAR_EXACT are not supported by this function.\n. @param borderMode Pixel extrapolation method (see #BorderTypes). When\n. borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that\n. correspond to the \"outliers\" in the source image are not modified by the function.\n. @param borderValue Value used in case of a constant border. By default, it is 0.\n. @note\n. Due to current implementation limitations, the sizes of the input and output images should be less than 32767x32767."},
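+// [illustrative sketch, not generated by OpenCV] remap with explicit float maps;
+// this builds a horizontal flip, assuming stock cv2 semantics:
+//     import cv2, numpy as np
+//     h, w = img.shape[:2]
+//     map_x, map_y = np.meshgrid(np.arange(w), np.arange(h))
+//     map_x = (w - 1 - map_x).astype(np.float32)   # sample mirrored x coordinate
+//     flipped = cv2.remap(img, map_x, map_y.astype(np.float32), cv2.INTER_LINEAR)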
+ {"repeat", CV_PY_FN_WITH_KW_(pyopencv_cv_repeat, 0), "repeat(src, ny, nx[, dst]) -> dst\n. @brief Fills the output array with repeated copies of the input array.\n. \n. The function cv::repeat duplicates the input array one or more times along each of the two axes:\n. \\f[\\texttt{dst} _{ij}= \\texttt{src} _{i\\mod src.rows, \\; j\\mod src.cols }\\f]\n. The second variant of the function is more convenient to use with @ref MatrixExpressions.\n. @param src input array to replicate.\n. @param ny number of times the `src` is repeated along the\n. vertical axis.\n. @param nx number of times the `src` is repeated along the\n. horizontal axis.\n. @param dst output array of the same type as `src`.\n. @sa cv::reduce"},
+ {"resize", CV_PY_FN_WITH_KW_(pyopencv_cv_resize, 0), "resize(src, dsize[, dst[, fx[, fy[, interpolation]]]]) -> dst\n. @brief Resizes an image.\n. \n. The function resize resizes the image src down to or up to the specified size. Note that the\n. initial dst type or size are not taken into account. Instead, the size and type are derived from\n. `src`, `dsize`, `fx`, and `fy`. If you want to resize src so that it fits the pre-created dst,\n. you may call the function as follows:\n. @code\n. // explicitly specify dsize=dst.size(); fx and fy will be computed from that.\n. resize(src, dst, dst.size(), 0, 0, interpolation);\n. @endcode\n. If you want to decimate the image by a factor of 2 in each direction, you can call the function this\n. way:\n. @code\n. // specify fx and fy and let the function compute the destination image size.\n. resize(src, dst, Size(), 0.5, 0.5, interpolation);\n. @endcode\n. To shrink an image, it will generally look best with #INTER_AREA interpolation, whereas to\n. enlarge an image, it will generally look best with #INTER_CUBIC (slow) or #INTER_LINEAR\n. (faster but still looks OK).\n. \n. @param src input image.\n. @param dst output image; it has the size dsize (when it is non-zero) or the size computed from\n. src.size(), fx, and fy; the type of dst is the same as of src.\n. @param dsize output image size; if it equals zero (`None` in Python), it is computed as:\n. \\f[\\texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}\\f]\n. Either dsize or both fx and fy must be non-zero.\n. @param fx scale factor along the horizontal axis; when it equals 0, it is computed as\n. \\f[\\texttt{(double)dsize.width/src.cols}\\f]\n. @param fy scale factor along the vertical axis; when it equals 0, it is computed as\n. \\f[\\texttt{(double)dsize.height/src.rows}\\f]\n. @param interpolation interpolation method, see #InterpolationFlags\n. \n. @sa warpAffine, warpPerspective, remap"},
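+// [illustrative sketch, not generated by OpenCV] The two resize call styles from
+// the docstring above, assuming stock cv2 semantics:
+//     import cv2
+//     half = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
+//     fixed = cv2.resize(img, (640, 480), interpolation=cv2.INTER_CUBIC)  # dsize=(w, h)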
+ {"rotate", CV_PY_FN_WITH_KW_(pyopencv_cv_rotate, 0), "rotate(src, rotateCode[, dst]) -> dst\n. @brief Rotates a 2D array in multiples of 90 degrees.\n. The function cv::rotate rotates the array in one of three different ways:\n. * Rotate by 90 degrees clockwise (rotateCode = ROTATE_90_CLOCKWISE).\n. * Rotate by 180 degrees clockwise (rotateCode = ROTATE_180).\n. * Rotate by 270 degrees clockwise (rotateCode = ROTATE_90_COUNTERCLOCKWISE).\n. @param src input array.\n. @param dst output array of the same type as src. The size is the same for ROTATE_180,\n. and the rows and cols are swapped for ROTATE_90_CLOCKWISE and ROTATE_90_COUNTERCLOCKWISE.\n. @param rotateCode an enum to specify how to rotate the array; see the enum #RotateFlags\n. @sa transpose , repeat , completeSymm, flip, RotateFlags"},
+ {"rotatedRectangleIntersection", CV_PY_FN_WITH_KW_(pyopencv_cv_rotatedRectangleIntersection, 0), "rotatedRectangleIntersection(rect1, rect2[, intersectingRegion]) -> retval, intersectingRegion\n. @brief Finds out if there is any intersection between two rotated rectangles.\n. \n. If there is, then the vertices of the intersecting region are returned as well.\n. \n. Below are some examples of intersection configurations. The hatched pattern indicates the\n. intersecting region and the red vertices are returned by the function.\n. \n. ![intersection examples](pics/intersection.png)\n. \n. @param rect1 First rectangle\n. @param rect2 Second rectangle\n. @param intersectingRegion The output array of the vertices of the intersecting region. It returns\n. at most 8 vertices. Stored as std::vector\\<cv::Point2f\\> or cv::Mat as Mx1 of type CV_32FC2.\n. @returns One of #RectanglesIntersectTypes"},
+ {"scaleAdd", CV_PY_FN_WITH_KW_(pyopencv_cv_scaleAdd, 0), "scaleAdd(src1, alpha, src2[, dst]) -> dst\n. @brief Calculates the sum of a scaled array and another array.\n. \n. The function scaleAdd is one of the classical primitive linear algebra operations, known as DAXPY\n. or SAXPY in [BLAS](http://en.wikipedia.org/wiki/Basic_Linear_Algebra_Subprograms). It calculates\n. the sum of a scaled array and another array:\n. \\f[\\texttt{dst} (I)= \\texttt{scale} \\cdot \\texttt{src1} (I) + \\texttt{src2} (I)\\f]\n. The function can also be emulated with a matrix expression, for example:\n. @code{.cpp}\n. Mat A(3, 3, CV_64F);\n. ...\n. A.row(0) = A.row(1)*2 + A.row(2);\n. @endcode\n. @param src1 first input array.\n. @param alpha scale factor for the first array.\n. @param src2 second input array of the same size and type as src1.\n. @param dst output array of the same size and type as src1.\n. @sa add, addWeighted, subtract, Mat::dot, Mat::convertTo"},
+ {"sepFilter2D", CV_PY_FN_WITH_KW_(pyopencv_cv_sepFilter2D, 0), "sepFilter2D(src, ddepth, kernelX, kernelY[, dst[, anchor[, delta[, borderType]]]]) -> dst\n. @brief Applies a separable linear filter to an image.\n. \n. The function applies a separable linear filter to the image. That is, first, every row of src is\n. filtered with the 1D kernel kernelX. Then, every column of the result is filtered with the 1D\n. kernel kernelY. The final result, shifted by delta, is stored in dst.\n. \n. @param src Source image.\n. @param dst Destination image of the same size and the same number of channels as src .\n. @param ddepth Destination image depth, see @ref filter_depths \"combinations\"\n. @param kernelX Coefficients for filtering each row.\n. @param kernelY Coefficients for filtering each column.\n. @param anchor Anchor position within the kernel. The default value \\f$(-1,-1)\\f$ means that the anchor\n. is at the kernel center.\n. @param delta Value added to the filtered results before storing them.\n. @param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa filter2D, Sobel, GaussianBlur, boxFilter, blur"},
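+// [illustrative sketch, not generated by OpenCV] A separable Gaussian via
+// sepFilter2D, matching GaussianBlur up to rounding, assuming stock cv2 semantics:
+//     import cv2
+//     k = cv2.getGaussianKernel(5, 1.2)         # 5x1 float64 column vector
+//     smoothed = cv2.sepFilter2D(img, -1, k, k) # row pass, then column pass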
+ {"setIdentity", CV_PY_FN_WITH_KW_(pyopencv_cv_setIdentity, 0), "setIdentity(mtx[, s]) -> mtx\n. @brief Initializes a scaled identity matrix.\n. \n. The function cv::setIdentity initializes a scaled identity matrix:\n. \\f[\\texttt{mtx} (i,j)= \\fork{\\texttt{s}}{ if \\(i=j\\)}{0}{otherwise}\\f]\n. \n. The function can also be emulated using the matrix initializers and the\n. matrix expressions:\n. @code\n. Mat A = Mat::eye(4, 3, CV_32F)*5;\n. // A will be set to [[5, 0, 0], [0, 5, 0], [0, 0, 5], [0, 0, 0]]\n. @endcode\n. @param mtx matrix to initialize (not necessarily square).\n. @param s value to assign to diagonal elements.\n. @sa Mat::zeros, Mat::ones, Mat::setTo, Mat::operator="},
+ {"setLogLevel", CV_PY_FN_WITH_KW_(pyopencv_cv_setLogLevel, 0), "setLogLevel(level) -> retval\n."},
+ {"setNumThreads", CV_PY_FN_WITH_KW_(pyopencv_cv_setNumThreads, 0), "setNumThreads(nthreads) -> None\n. @brief OpenCV will try to set the number of threads for the next parallel region.\n. \n. If threads == 0, OpenCV will disable threading optimizations and run all its functions\n. sequentially. Passing threads \\< 0 will reset the number of threads to the system default. This function must\n. be called outside of any parallel region.\n. \n. OpenCV will try to run its functions with the specified number of threads, but some behaviour differs by\n. framework:\n. - `TBB` - User-defined parallel constructions will run with the same number of threads, unless\n. another one is specified. If the user later creates their own scheduler, OpenCV will use it.\n. - `OpenMP` - No special defined behaviour.\n. - `Concurrency` - If threads == 1, OpenCV will disable threading optimizations and run its\n. functions sequentially.\n. - `GCD` - Supports only values \\<= 0.\n. - `C=` - No special defined behaviour.\n. @param nthreads Number of threads used by OpenCV.\n. @sa getNumThreads, getThreadNum"},
+ {"setRNGSeed", CV_PY_FN_WITH_KW_(pyopencv_cv_setRNGSeed, 0), "setRNGSeed(seed) -> None\n. @brief Sets state of default random number generator.\n. \n. The function cv::setRNGSeed sets state of default random number generator to custom value.\n. @param seed new state for default random number generator\n. @sa RNG, randu, randn"},
+ {"setUseOpenVX", CV_PY_FN_WITH_KW_(pyopencv_cv_setUseOpenVX, 0), "setUseOpenVX(flag) -> None\n."},
+ {"setUseOptimized", CV_PY_FN_WITH_KW_(pyopencv_cv_setUseOptimized, 0), "setUseOptimized(onoff) -> None\n. @brief Enables or disables the optimized code.\n. \n. The function can be used to dynamically turn on and off optimized dispatched code (code that uses SSE4.2, AVX/AVX2,\n. and other instructions on the platforms that support it). It sets a global flag that is further\n. checked by OpenCV functions. Since the flag is not checked in the inner OpenCV loops, it is only\n. safe to call the function at the very top level of your application, where you can be sure that no\n. other OpenCV function is currently executing.\n. \n. By default, the optimized code is enabled unless you disable it in CMake. The current status can be\n. retrieved using useOptimized.\n. @param onoff The boolean flag specifying whether the optimized code should be used (onoff=true)\n. or not (onoff=false)."},
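+// [illustrative sketch, not generated by OpenCV] setRNGSeed makes the theRNG()-based
+// functions above (randu, randn, randShuffle) reproducible:
+//     import cv2, numpy as np
+//     a = np.empty((4, 4), np.float32)
+//     b = np.empty((4, 4), np.float32)
+//     cv2.setRNGSeed(42); cv2.randu(a, 0, 1)
+//     cv2.setRNGSeed(42); cv2.randu(b, 0, 1)    # a == b element-wise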
+ {"solve", CV_PY_FN_WITH_KW_(pyopencv_cv_solve, 0), "solve(src1, src2[, dst[, flags]]) -> retval, dst\n. @brief Solves one or more linear systems or least-squares problems.\n. \n. The function cv::solve solves a linear system or least-squares problem (the\n. latter is possible with SVD or QR methods, or by specifying the flag\n. #DECOMP_NORMAL ):\n. \\f[\\texttt{dst} = \\arg \\min _X \\| \\texttt{src1} \\cdot \\texttt{X} - \\texttt{src2} \\|\\f]\n. \n. If the #DECOMP_LU or #DECOMP_CHOLESKY method is used, the function returns 1\n. if src1 (or \\f$\\texttt{src1}^T\\texttt{src1}\\f$ ) is non-singular. Otherwise,\n. it returns 0. In the latter case, dst is not valid. Other methods find a\n. pseudo-solution in case of a singular left-hand side part.\n. \n. @note If you want to find a unity-norm solution of an under-defined\n. singular system \\f$\\texttt{src1}\\cdot\\texttt{dst}=0\\f$ , the function solve\n. will not do the work. Use SVD::solveZ instead.\n. \n. @param src1 input matrix on the left-hand side of the system.\n. @param src2 input matrix on the right-hand side of the system.\n. @param dst output solution.\n. @param flags solution (matrix inversion) method (#DecompTypes)\n. @sa invert, SVD, eigen"},
+ {"solveCubic", CV_PY_FN_WITH_KW_(pyopencv_cv_solveCubic, 0), "solveCubic(coeffs[, roots]) -> retval, roots\n. @brief Finds the real roots of a cubic equation.\n. \n. The function solveCubic finds the real roots of a cubic equation:\n. - if coeffs is a 4-element vector:\n. \\f[\\texttt{coeffs} [0] x^3 + \\texttt{coeffs} [1] x^2 + \\texttt{coeffs} [2] x + \\texttt{coeffs} [3] = 0\\f]\n. - if coeffs is a 3-element vector:\n. \\f[x^3 + \\texttt{coeffs} [0] x^2 + \\texttt{coeffs} [1] x + \\texttt{coeffs} [2] = 0\\f]\n. \n. The roots are stored in the roots array.\n. @param coeffs equation coefficients, an array of 3 or 4 elements.\n. @param roots output array of real roots that has 1 or 3 elements.\n. @return number of real roots. It can be 0, 1 or 2."},
+ {"solveLP", CV_PY_FN_WITH_KW_(pyopencv_cv_solveLP, 0), "solveLP(Func, Constr[, z]) -> retval, z\n. @brief Solves the given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).\n. \n. What we mean here by \"linear programming problem\" (or LP problem, for short) can be formulated as:\n. \n. \\f[\\mbox{Maximize } c\\cdot x\\\\\n. \\mbox{Subject to:}\\\\\n. Ax\\leq b\\\\\n. x\\geq 0\\f]\n. \n. where \\f$c\\f$ is a fixed `1`-by-`n` row-vector, \\f$A\\f$ is a fixed `m`-by-`n` matrix, \\f$b\\f$ is a fixed `m`-by-`1`\n. column vector and \\f$x\\f$ is an arbitrary `n`-by-`1` column vector, which satisfies the constraints.\n. \n. The simplex algorithm is one of many algorithms that are designed to handle this sort of problem\n. efficiently. Although it is not optimal in a theoretical sense (there exist algorithms that can solve\n. any problem written as above in polynomial time, while the simplex method degenerates to exponential\n. time for some special cases), it is well-studied, easy to implement and is shown to work well for\n. real-life purposes.\n. \n. The particular implementation is taken almost verbatim from **Introduction to Algorithms, third\n. edition** by T. H. Cormen, C. E. Leiserson, R. L. Rivest and Clifford Stein. In particular,\n. Bland's rule is used to prevent cycling.\n. \n. @param Func This row-vector corresponds to \\f$c\\f$ in the LP problem formulation (see above). It should\n. contain 32- or 64-bit floating point numbers. As a convenience, a column-vector may be also submitted,\n. in the latter case it is understood to correspond to \\f$c^T\\f$.\n. @param Constr `m`-by-`n+1` matrix, whose rightmost column corresponds to \\f$b\\f$ in the formulation above\n. and the remaining columns to \\f$A\\f$. It should contain 32- or 64-bit floating point numbers.\n. @param z The solution will be returned here as a column-vector - it corresponds to \\f$x\\f$ in the\n. formulation above. It will contain 64-bit floating point numbers.\n. @return One of cv::SolveLPResult"},
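+// [illustrative sketch, not generated by OpenCV] A tiny LP in the solveLP
+// docstring's notation - maximize x0 + x1 subject to x0 <= 1, x1 <= 1, x >= 0 -
+// assuming stock cv2 semantics:
+//     import cv2, numpy as np
+//     func = np.array([[1.0, 1.0]])             # c
+//     constr = np.array([[1.0, 0.0, 1.0],       # [A | b]
+//                        [0.0, 1.0, 1.0]])
+//     ret, z = cv2.solveLP(func, constr)        # z -> [[1.], [1.]]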
+ {"solvePoly", CV_PY_FN_WITH_KW_(pyopencv_cv_solvePoly, 0), "solvePoly(coeffs[, roots[, maxIters]]) -> retval, roots\n. @brief Finds the real or complex roots of a polynomial equation.\n. \n. The function cv::solvePoly finds real and complex roots of a polynomial equation:\n. \\f[\\texttt{coeffs} [n] x^{n} + \\texttt{coeffs} [n-1] x^{n-1} + ... + \\texttt{coeffs} [1] x + \\texttt{coeffs} [0] = 0\\f]\n. @param coeffs array of polynomial coefficients.\n. @param roots output (complex) array of roots.\n. @param maxIters maximum number of iterations the algorithm does."},
+ {"sort", CV_PY_FN_WITH_KW_(pyopencv_cv_sort, 0), "sort(src, flags[, dst]) -> dst\n. @brief Sorts each row or each column of a matrix.\n. \n. The function cv::sort sorts each matrix row or each matrix column in\n. ascending or descending order. So you should pass two operation flags to\n. get the desired behaviour. If you want to sort matrix rows or columns\n. lexicographically, you can use the STL std::sort generic function with the\n. proper comparison predicate.\n. \n. @param src input single-channel array.\n. @param dst output array of the same size and type as src.\n. @param flags operation flags, a combination of #SortFlags\n. @sa sortIdx, randShuffle"},
+ {"sortIdx", CV_PY_FN_WITH_KW_(pyopencv_cv_sortIdx, 0), "sortIdx(src, flags[, dst]) -> dst\n. @brief Sorts each row or each column of a matrix.\n. \n. The function cv::sortIdx sorts each matrix row or each matrix column in\n. ascending or descending order. So you should pass two operation flags to\n. get the desired behaviour. Instead of reordering the elements themselves, it\n. stores the indices of sorted elements in the output array. For example:\n. @code\n. Mat A = Mat::eye(3,3,CV_32F), B;\n. sortIdx(A, B, SORT_EVERY_ROW + SORT_ASCENDING);\n. // B will probably contain\n. // (because of equal elements in A some permutations are possible):\n. // [[1, 2, 0], [0, 2, 1], [0, 1, 2]]\n. @endcode\n. @param src input single-channel array.\n. @param dst output integer array of the same size as src.\n. @param flags operation flags that could be a combination of cv::SortFlags\n. @sa sort, randShuffle"},
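+// [illustrative sketch, not generated by OpenCV] sortIdx returns indices rather
+// than reordered values, assuming stock cv2 semantics:
+//     import cv2, numpy as np
+//     a = np.array([[3, 1, 2]], np.float32)
+//     idx = cv2.sortIdx(a, cv2.SORT_EVERY_ROW + cv2.SORT_ASCENDING)  # [[1, 2, 0]]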
+ {"spatialGradient", CV_PY_FN_WITH_KW_(pyopencv_cv_spatialGradient, 0), "spatialGradient(src[, dx[, dy[, ksize[, borderType]]]]) -> dx, dy\n. @brief Calculates the first-order image derivative in both x and y using a Sobel operator\n. \n. Equivalent to calling:\n. \n. @code\n. Sobel( src, dx, CV_16SC1, 1, 0, 3 );\n. Sobel( src, dy, CV_16SC1, 0, 1, 3 );\n. @endcode\n. \n. @param src input image.\n. @param dx output image with first-order derivative in x.\n. @param dy output image with first-order derivative in y.\n. @param ksize size of Sobel kernel. It must be 3.\n. @param borderType pixel extrapolation method, see #BorderTypes.\n. Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.\n. \n. @sa Sobel"},
+ {"split", CV_PY_FN_WITH_KW_(pyopencv_cv_split, 0), "split(m[, mv]) -> mv\n. @overload\n. @param m input multi-channel array.\n. @param mv output vector of arrays; the arrays themselves are reallocated, if needed."},
+ {"sqrBoxFilter", CV_PY_FN_WITH_KW_(pyopencv_cv_sqrBoxFilter, 0), "sqrBoxFilter(src, ddepth, ksize[, dst[, anchor[, normalize[, borderType]]]]) -> dst\n. @brief Calculates the normalized sum of squares of the pixel values overlapping the filter.\n. \n. For every pixel \\f$ (x, y) \\f$ in the source image, the function calculates the sum of squares of those neighboring\n. pixel values which overlap the filter placed over the pixel \\f$ (x, y) \\f$.\n. \n. The unnormalized square box filter can be useful in computing local image statistics such as the local\n. variance and standard deviation around the neighborhood of a pixel.\n. \n. @param src input image\n. @param dst output image of the same size and type as src\n. @param ddepth the output image depth (-1 to use src.depth())\n. @param ksize kernel size\n. @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel\n. center.\n. @param normalize flag, specifying whether the kernel is to be normalized by its area or not.\n. @param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.\n. @sa boxFilter"},
+ {"sqrt", CV_PY_FN_WITH_KW_(pyopencv_cv_sqrt, 0), "sqrt(src[, dst]) -> dst\n. @brief Calculates a square root of array elements.\n. \n. The function cv::sqrt calculates a square root of each input array element.\n. In case of multi-channel arrays, each channel is processed\n. independently. The accuracy is approximately the same as that of the built-in\n. std::sqrt.\n. @param src input floating-point array.\n. @param dst output array of the same size and type as src."},
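+// [illustrative sketch, not generated by OpenCV] The local-variance trick from the
+// sqrBoxFilter docstring, finished off with cv::sqrt, assuming stock cv2 semantics:
+//     import cv2, numpy as np
+//     mu = cv2.boxFilter(gray, cv2.CV_32F, (5, 5))
+//     mu2 = cv2.sqrBoxFilter(gray, cv2.CV_32F, (5, 5))
+//     var = mu2 - mu * mu                       # per-pixel 5x5 variance
+//     sigma = cv2.sqrt(np.maximum(var, 0))      # clamp rounding error before sqrt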
+ {"subtract", CV_PY_FN_WITH_KW_(pyopencv_cv_subtract, 0), "subtract(src1, src2[, dst[, mask[, dtype]]]) -> dst\n. @brief Calculates the per-element difference between two arrays or an array and a scalar.\n. \n. The function subtract calculates:\n. - Difference between two arrays, when both input arrays have the same size and the same number of\n. channels:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) - \\texttt{src2}(I)) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Difference between an array and a scalar, when src2 is constructed from Scalar or has the same\n. number of elements as `src1.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1}(I) - \\texttt{src2} ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - Difference between a scalar and an array, when src1 is constructed from Scalar or has the same\n. number of elements as `src2.channels()`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src1} - \\texttt{src2}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. - The reverse difference between a scalar and an array in the case of `SubRS`:\n. \\f[\\texttt{dst}(I) = \\texttt{saturate} ( \\texttt{src2} - \\texttt{src1}(I) ) \\quad \\texttt{if mask}(I) \\ne0\\f]\n. where I is a multi-dimensional index of array elements. In case of multi-channel arrays, each\n. channel is processed independently.\n. \n. The first function in the list above can be replaced with matrix expressions:\n. @code{.cpp}\n. dst = src1 - src2;\n. dst -= src1; // equivalent to subtract(dst, src1, dst);\n. @endcode\n. The input arrays and the output array can all have the same or different depths. For example, you\n. can subtract two 8-bit unsigned arrays and store the difference in a 16-bit signed array. The depth of\n. the output array is determined by the dtype parameter. In the second and third cases above, as well as\n. in the first case, when src1.depth() == src2.depth(), dtype can be set to the default -1. In this\n. case the output array will have the same depth as the input array, be it src1, src2 or both.\n. @note Saturation is not applied when the output array has the depth CV_32S. You may even get\n. result of an incorrect sign in the case of overflow.\n. @param src1 first input array or a scalar.\n. @param src2 second input array or a scalar.\n. @param dst output array of the same size and the same number of channels as the input array.\n. @param mask optional operation mask; this is an 8-bit single channel array that specifies elements\n. of the output array to be changed.\n. @param dtype optional depth of the output array\n. @sa add, addWeighted, scaleAdd, Mat::convertTo"},
+ {"sumElems", CV_PY_FN_WITH_KW_(pyopencv_cv_sumElems, 0), "sumElems(src) -> retval\n. @brief Calculates the sum of array elements.\n. \n. The function cv::sum calculates and returns the sum of array elements,\n. independently for each channel.\n. @param src input array that must have from 1 to 4 channels.\n. @sa countNonZero, mean, meanStdDev, norm, minMaxLoc, reduce"},
+ {"threshold", CV_PY_FN_WITH_KW_(pyopencv_cv_threshold, 0), "threshold(src, thresh, maxval, type[, dst]) -> retval, dst\n. @brief Applies a fixed-level threshold to each array element.\n. \n. The function applies fixed-level thresholding to a multiple-channel array. The function is typically\n. used to get a bi-level (binary) image out of a grayscale image ( #compare could be also used for\n. this purpose) or for removing noise, that is, filtering out pixels with too small or too large\n. values. There are several types of thresholding supported by the function. They are determined by\n. the type parameter.\n. \n. Also, the special values #THRESH_OTSU or #THRESH_TRIANGLE may be combined with one of the\n. above values. In these cases, the function determines the optimal threshold value using Otsu's\n. or the Triangle algorithm and uses it instead of the specified thresh.\n. \n. @note Currently, Otsu's and the Triangle methods are implemented only for 8-bit single-channel images.\n. \n. @param src input array (multiple-channel, 8-bit or 32-bit floating point).\n. @param dst output array of the same size and type and the same number of channels as src.\n. @param thresh threshold value.\n. @param maxval maximum value to use with the #THRESH_BINARY and #THRESH_BINARY_INV thresholding\n. types.\n. @param type thresholding type (see #ThresholdTypes).\n. @return the computed threshold value if Otsu's or Triangle methods are used.\n. \n. @sa adaptiveThreshold, findContours, compare, min, max"},
+ {"trace", CV_PY_FN_WITH_KW_(pyopencv_cv_trace, 0), "trace(mtx) -> retval\n. @brief Returns the trace of a matrix.\n. \n. The function cv::trace returns the sum of the diagonal elements of the\n. matrix mtx .\n. \\f[\\mathrm{tr} ( \\texttt{mtx} ) = \\sum _i \\texttt{mtx} (i,i)\\f]\n. @param mtx input matrix."},
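+// [illustrative sketch, not generated by OpenCV] Combining #THRESH_OTSU with a base
+// thresholding type, as the threshold docstring above describes (gray must be 8-bit
+// single-channel):
+//     import cv2
+//     t, bw = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+//     # t is the threshold Otsu picked; the 0 passed as thresh is ignored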
+ {"transform", CV_PY_FN_WITH_KW_(pyopencv_cv_transform, 0), "transform(src, m[, dst]) -> dst\n. @brief Performs the matrix transformation of every array element.\n. \n. The function cv::transform performs the matrix transformation of every\n. element of the array src and stores the results in dst :\n. \\f[\\texttt{dst} (I) = \\texttt{m} \\cdot \\texttt{src} (I)\\f]\n. (when m.cols=src.channels() ), or\n. \\f[\\texttt{dst} (I) = \\texttt{m} \\cdot [ \\texttt{src} (I); 1]\\f]\n. (when m.cols=src.channels()+1 )\n. \n. Every element of the N-channel array src is interpreted as an N-element\n. vector that is transformed using the M x N or M x (N+1) matrix m to\n. an M-element vector - the corresponding element of the output array dst .\n. \n. The function may be used for geometrical transformation of\n. N-dimensional points, arbitrary linear color space transformation (such\n. as various kinds of RGB to YUV transforms), shuffling the image\n. channels, and so forth.\n. @param src input array that must have as many channels (1 to 4) as\n. m.cols or m.cols-1.\n. @param dst output array of the same size and depth as src; it has as\n. many channels as m.rows.\n. @param m transformation 2x2 or 2x3 floating-point matrix.\n. @sa perspectiveTransform, getAffineTransform, estimateAffine2D, warpAffine, warpPerspective"},
+ {"transpose", CV_PY_FN_WITH_KW_(pyopencv_cv_transpose, 0), "transpose(src[, dst]) -> dst\n. @brief Transposes a matrix.\n. \n. The function cv::transpose transposes the matrix src :\n. \\f[\\texttt{dst} (i,j) = \\texttt{src} (j,i)\\f]\n. @note No complex conjugation is done in case of a complex matrix. It\n. should be done separately if needed.\n. @param src input array.\n. @param dst output array of the same type as src."},
+ {"useOpenVX", CV_PY_FN_WITH_KW_(pyopencv_cv_useOpenVX, 0), "useOpenVX() -> retval\n."},
+ {"useOptimized", CV_PY_FN_WITH_KW_(pyopencv_cv_useOptimized, 0), "useOptimized() -> retval\n. @brief Returns the status of optimized code usage.\n. \n. The function returns true if the optimized code is enabled. Otherwise, it returns false."},
+ {"vconcat", CV_PY_FN_WITH_KW_(pyopencv_cv_vconcat, 0), "vconcat(src[, dst]) -> dst\n. @overload\n. @code{.cpp}\n. std::vector<cv::Mat> matrices = { cv::Mat(1, 4, CV_8UC1, cv::Scalar(1)),\n. cv::Mat(1, 4, CV_8UC1, cv::Scalar(2)),\n. cv::Mat(1, 4, CV_8UC1, cv::Scalar(3)),};\n. \n. cv::Mat out;\n. cv::vconcat( matrices, out );\n. //out:\n. //[1, 1, 1, 1;\n. // 2, 2, 2, 2;\n. // 3, 3, 3, 3]\n. @endcode\n. @param src input array or vector of matrices. All of the matrices must have the same number of cols and the same depth.\n. @param dst output array. It has the same number of cols and depth as the src, and the sum of rows of the src."},
+ {"warpAffine", CV_PY_FN_WITH_KW_(pyopencv_cv_warpAffine, 0), "warpAffine(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst\n. @brief Applies an affine transformation to an image.\n. \n. The function warpAffine transforms the source image using the specified matrix:\n. \n. \\f[\\texttt{dst} (x,y) = \\texttt{src} ( \\texttt{M} _{11} x + \\texttt{M} _{12} y + \\texttt{M} _{13}, \\texttt{M} _{21} x + \\texttt{M} _{22} y + \\texttt{M} _{23})\\f]\n. \n. when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted\n. with #invertAffineTransform and then put in the formula above instead of M. The function cannot\n. operate in-place.\n. \n. @param src input image.\n. @param dst output image that has the size dsize and the same type as src .\n. @param M \\f$2\\times 3\\f$ transformation matrix.\n. @param dsize size of the output image.\n. @param flags combination of interpolation methods (see #InterpolationFlags) and the optional\n. flag #WARP_INVERSE_MAP that means that M is the inverse transformation (\n. \\f$\\texttt{dst}\\rightarrow\\texttt{src}\\f$ ).\n. @param borderMode pixel extrapolation method (see #BorderTypes); when\n. borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image corresponding to\n. the \"outliers\" in the source image are not modified by the function.\n. @param borderValue value used in case of a constant border; by default, it is 0.\n. \n. @sa warpPerspective, resize, remap, getRectSubPix, transform"},
+ {"warpPerspective", CV_PY_FN_WITH_KW_(pyopencv_cv_warpPerspective, 0), "warpPerspective(src, M, dsize[, dst[, flags[, borderMode[, borderValue]]]]) -> dst\n. @brief Applies a perspective transformation to an image.\n. \n. The function warpPerspective transforms the source image using the specified matrix:\n. \n. \\f[\\texttt{dst} (x,y) = \\texttt{src} \\left ( \\frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,\n. \\frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \\right )\\f]\n. \n. when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert\n. and then put in the formula above instead of M. The function cannot operate in-place.\n. \n. @param src input image.\n. @param dst output image that has the size dsize and the same type as src .\n. @param M \\f$3\\times 3\\f$ transformation matrix.\n. @param dsize size of the output image.\n. @param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the\n. optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (\n. \\f$\\texttt{dst}\\rightarrow\\texttt{src}\\f$ ).\n. @param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).\n. @param borderValue value used in case of a constant border; by default, it equals 0.\n. \n. @sa warpAffine, resize, remap, getRectSubPix, perspectiveTransform"},
+ {"warpPolar", CV_PY_FN_WITH_KW_(pyopencv_cv_warpPolar, 0), "warpPolar(src, dsize, center, maxRadius, flags[, dst]) -> dst\n. \\brief Remaps an image to polar or semilog-polar coordinates space\n. \n. @anchor polar_remaps_reference_image\n. ![Polar remaps reference](pics/polar_remap_doc.png)\n. \n. Transform the source image using the following transformation:\n. \\f[\n. dst(\\rho , \\phi ) = src(x,y)\n. \\f]\n. \n. where\n. \\f[\n. \\begin{array}{l}\n. \\vec{I} = (x - center.x, \\;y - center.y) \\\\\n. \\phi = Kangle \\cdot \\texttt{angle} (\\vec{I}) \\\\\n. \\rho = \\left\\{\\begin{matrix}\n. Klin \\cdot \\texttt{magnitude} (\\vec{I}) & default \\\\\n. Klog \\cdot log_e(\\texttt{magnitude} (\\vec{I})) & if \\; semilog \\\\\n. \\end{matrix}\\right.\n. \\end{array}\n. \\f]\n. \n. and\n. \\f[\n. \\begin{array}{l}\n. Kangle = dsize.height / 2\\pi \\\\\n. Klin = dsize.width / maxRadius \\\\\n. Klog = dsize.width / log_e(maxRadius) \\\\\n. \\end{array}\n. \\f]\n. \n. \n. \\par Linear vs semilog mapping\n. \n. Polar mapping can be linear or semi-log. Add one of #WarpPolarMode to `flags` to specify the polar mapping mode.\n. \n. Linear is the default mode.\n. \n. The semilog mapping emulates the human \"foveal\" vision that permits very high acuity on the line of sight (central vision)\n. in contrast to peripheral vision where acuity is minor.\n. \n. \\par Option on `dsize`:\n. \n. - if both values in `dsize` are `<= 0` (default),\n. the destination image will have (almost) the same area as the source bounding circle:\n. \\f[\\begin{array}{l}\n. dsize.area \\leftarrow (maxRadius^2 \\cdot \\pi) \\\\\n. dsize.width = \\texttt{cvRound}(maxRadius) \\\\\n. dsize.height = \\texttt{cvRound}(maxRadius \\cdot \\pi) \\\\\n. \\end{array}\\f]\n. \n. \n. - if only `dsize.height <= 0`,\n. the destination image area will be proportional to the bounding circle area but scaled by `Kx * Kx`:\n. \\f[\\begin{array}{l}\n. dsize.height = \\texttt{cvRound}(dsize.width \\cdot \\pi) \\\\\n. \\end{array}\n. \\f]\n. \n. - if both values in `dsize` are `> 0`,\n. the destination image will have the given size; therefore the area of the bounding circle will be scaled to `dsize`.\n. \n. \n. \\par Reverse mapping\n. \n. You can get the reverse mapping by adding #WARP_INVERSE_MAP to `flags`\n. \\snippet polar_transforms.cpp InverseMap\n. \n. In addition, to calculate the original coordinate from a polar-mapped coordinate \\f$(\\rho, \\phi) \\rightarrow (x, y)\\f$:\n. \\snippet polar_transforms.cpp InverseCoordinate\n. \n. @param src Source image.\n. @param dst Destination image. It will have the same type as src.\n. @param dsize The destination image size (see description for valid options).\n. @param center The transformation center.\n. @param maxRadius The radius of the bounding circle to transform. It determines the inverse magnitude scale parameter too.\n. @param flags A combination of interpolation methods, #InterpolationFlags + #WarpPolarMode.\n. - Add #WARP_POLAR_LINEAR to select linear polar mapping (default)\n. - Add #WARP_POLAR_LOG to select semilog polar mapping\n. - Add #WARP_INVERSE_MAP for reverse mapping.\n. @note\n. - The function cannot operate in-place.\n. - To calculate magnitude and angle in degrees, #cartToPolar is used internally, thus angles are measured from 0 to 360 with an accuracy of about 0.3 degrees.\n. - This function uses #remap. Due to current implementation limitations, the sizes of the input and output images should be less than 32767x32767.\n. \n. @sa cv::remap"},
+ {"watershed", CV_PY_FN_WITH_KW_(pyopencv_cv_watershed, 0), "watershed(image, markers) -> markers\n. @brief Performs a marker-based image segmentation using the watershed algorithm.\n. \n. The function implements one of the variants of the watershed algorithm, a non-parametric marker-based segmentation\n. algorithm, described in @cite Meyer92 .\n. \n. Before passing the image to the function, you have to roughly outline the desired regions in the\n. image markers with positive (\\>0) indices. So, every region is represented as one or more connected\n. components with the pixel values 1, 2, 3, and so on. Such markers can be retrieved from a binary\n. mask using #findContours and #drawContours (see the watershed.cpp demo). The markers are \"seeds\" of\n. the future image regions. All the other pixels in markers, whose relation to the outlined regions\n. is not known and should be defined by the algorithm, should be set to 0's. In the function output,\n. each pixel in markers is set to a value of the \"seed\" components or to -1 at boundaries between the\n. regions.\n. \n. @note Any two neighboring connected components are not necessarily separated by a watershed boundary\n. (-1's pixels); for example, they can touch each other in the initial marker image passed to the\n. function.\n. \n. @param image Input 8-bit 3-channel image.\n. @param markers Input/output 32-bit single-channel image (map) of markers. It should have the same\n. size as image .\n. \n. @sa findContours"},
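+// [illustrative sketch, not generated by OpenCV] Seeding watershed from connected
+// components of a rough foreground mask, assuming stock cv2 semantics; real code
+// would usually also mark sure-background seeds:
+//     import cv2
+//     _, fg = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
+//     n, markers = cv2.connectedComponents(fg)  # int32 labels; 0 left "unknown"
+//     cv2.watershed(img, markers)               # img: 8-bit, 3-channel
+//     # markers now holds region labels, with -1 on the boundaries between them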
+#ifdef PYOPENCV_EXTRA_METHODS_CV
+ PYOPENCV_EXTRA_METHODS_CV
+#endif
+ {NULL, NULL}
+};
+
+static ConstDef consts_cv[] = {
+ {"ACCESS_FAST", static_cast<long>(cv::ACCESS_FAST)},
+ {"ACCESS_MASK", static_cast<long>(cv::ACCESS_MASK)},
+ {"ACCESS_READ", static_cast<long>(cv::ACCESS_READ)},
+ {"ACCESS_RW", static_cast<long>(cv::ACCESS_RW)},
+ {"ACCESS_WRITE", static_cast<long>(cv::ACCESS_WRITE)},
+ {"ADAPTIVE_THRESH_GAUSSIAN_C", static_cast<long>(cv::ADAPTIVE_THRESH_GAUSSIAN_C)},
+ {"ADAPTIVE_THRESH_MEAN_C", static_cast<long>(cv::ADAPTIVE_THRESH_MEAN_C)},
+ {"BORDER_CONSTANT", static_cast<long>(cv::BORDER_CONSTANT)},
+ {"BORDER_DEFAULT", static_cast<long>(cv::BORDER_DEFAULT)},
+ {"BORDER_ISOLATED", static_cast<long>(cv::BORDER_ISOLATED)},
+ {"BORDER_REFLECT", static_cast<long>(cv::BORDER_REFLECT)},
+ {"BORDER_REFLECT101", static_cast<long>(cv::BORDER_REFLECT101)},
+ {"BORDER_REFLECT_101", static_cast<long>(cv::BORDER_REFLECT_101)},
+ {"BORDER_REPLICATE", static_cast<long>(cv::BORDER_REPLICATE)},
+ {"BORDER_TRANSPARENT", static_cast<long>(cv::BORDER_TRANSPARENT)},
+ {"BORDER_WRAP", static_cast<long>(cv::BORDER_WRAP)},
+ {"CAP_ANDROID", static_cast<long>(cv::CAP_ANDROID)},
+ {"CAP_ANY", static_cast<long>(cv::CAP_ANY)},
+ {"CAP_ARAVIS", static_cast<long>(cv::CAP_ARAVIS)},
+ {"CAP_AVFOUNDATION", static_cast<long>(cv::CAP_AVFOUNDATION)},
+ {"CAP_CMU1394", static_cast<long>(cv::CAP_CMU1394)},
+ {"CAP_DC1394", static_cast<long>(cv::CAP_DC1394)},
+ {"CAP_DSHOW", static_cast<long>(cv::CAP_DSHOW)},
+ {"CAP_FFMPEG", static_cast<long>(cv::CAP_FFMPEG)},
+ {"CAP_FIREWARE", static_cast<long>(cv::CAP_FIREWARE)},
+ {"CAP_FIREWIRE", static_cast<long>(cv::CAP_FIREWIRE)},
+ {"CAP_GIGANETIX", static_cast<long>(cv::CAP_GIGANETIX)},
+ {"CAP_GPHOTO2", static_cast<long>(cv::CAP_GPHOTO2)},
+ {"CAP_GSTREAMER", static_cast<long>(cv::CAP_GSTREAMER)},
+ {"CAP_IEEE1394", static_cast<long>(cv::CAP_IEEE1394)},
+ {"CAP_IMAGES", static_cast<long>(cv::CAP_IMAGES)},
+ {"CAP_INTELPERC", static_cast<long>(cv::CAP_INTELPERC)},
+ {"CAP_INTELPERC_DEPTH_GENERATOR", static_cast<long>(cv::CAP_INTELPERC_DEPTH_GENERATOR)},
+ {"CAP_INTELPERC_DEPTH_MAP", static_cast<long>(cv::CAP_INTELPERC_DEPTH_MAP)},
+ {"CAP_INTELPERC_GENERATORS_MASK", static_cast<long>(cv::CAP_INTELPERC_GENERATORS_MASK)},
+ {"CAP_INTELPERC_IMAGE", static_cast<long>(cv::CAP_INTELPERC_IMAGE)},
+ {"CAP_INTELPERC_IMAGE_GENERATOR", static_cast<long>(cv::CAP_INTELPERC_IMAGE_GENERATOR)},
+ {"CAP_INTELPERC_IR_GENERATOR", static_cast<long>(cv::CAP_INTELPERC_IR_GENERATOR)},
+ {"CAP_INTELPERC_IR_MAP", static_cast<long>(cv::CAP_INTELPERC_IR_MAP)},
+ {"CAP_INTELPERC_UVDEPTH_MAP", static_cast<long>(cv::CAP_INTELPERC_UVDEPTH_MAP)},
+ {"CAP_INTEL_MFX", static_cast<long>(cv::CAP_INTEL_MFX)},
+ {"CAP_MSMF", static_cast<long>(cv::CAP_MSMF)},
+ {"CAP_OPENCV_MJPEG", static_cast<long>(cv::CAP_OPENCV_MJPEG)},
+ {"CAP_OPENNI", static_cast<long>(cv::CAP_OPENNI)},
+ {"CAP_OPENNI2", static_cast<long>(cv::CAP_OPENNI2)},
+ {"CAP_OPENNI2_ASTRA", static_cast<long>(cv::CAP_OPENNI2_ASTRA)},
+ {"CAP_OPENNI2_ASUS", static_cast<long>(cv::CAP_OPENNI2_ASUS)},
+ {"CAP_OPENNI_ASUS", static_cast<long>(cv::CAP_OPENNI_ASUS)},
+ {"CAP_OPENNI_BGR_IMAGE", static_cast<long>(cv::CAP_OPENNI_BGR_IMAGE)},
+ {"CAP_OPENNI_DEPTH_GENERATOR", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR)},
+ {"CAP_OPENNI_DEPTH_GENERATOR_BASELINE", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR_BASELINE)},
+ {"CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH)},
+ {"CAP_OPENNI_DEPTH_GENERATOR_PRESENT", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR_PRESENT)},
+ {"CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION)},
+ {"CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON", static_cast<long>(cv::CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON)},
+ {"CAP_OPENNI_DEPTH_MAP", static_cast<long>(cv::CAP_OPENNI_DEPTH_MAP)},
+ {"CAP_OPENNI_DISPARITY_MAP", static_cast<long>(cv::CAP_OPENNI_DISPARITY_MAP)},
+ {"CAP_OPENNI_DISPARITY_MAP_32F", static_cast<long>(cv::CAP_OPENNI_DISPARITY_MAP_32F)},
+ {"CAP_OPENNI_GENERATORS_MASK", static_cast<long>(cv::CAP_OPENNI_GENERATORS_MASK)},
+ {"CAP_OPENNI_GRAY_IMAGE", static_cast<long>(cv::CAP_OPENNI_GRAY_IMAGE)},
+ {"CAP_OPENNI_IMAGE_GENERATOR", static_cast<long>(cv::CAP_OPENNI_IMAGE_GENERATOR)},
+ {"CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE", static_cast<long>(cv::CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE)},
+ {"CAP_OPENNI_IMAGE_GENERATOR_PRESENT", static_cast<long>(cv::CAP_OPENNI_IMAGE_GENERATOR_PRESENT)},
+ {"CAP_OPENNI_IR_GENERATOR", static_cast<long>(cv::CAP_OPENNI_IR_GENERATOR)},
+ {"CAP_OPENNI_IR_GENERATOR_PRESENT", static_cast<long>(cv::CAP_OPENNI_IR_GENERATOR_PRESENT)},
+ {"CAP_OPENNI_IR_IMAGE", static_cast<long>(cv::CAP_OPENNI_IR_IMAGE)},
+ {"CAP_OPENNI_POINT_CLOUD_MAP", static_cast<long>(cv::CAP_OPENNI_POINT_CLOUD_MAP)},
+ {"CAP_OPENNI_QVGA_30HZ", static_cast<long>(cv::CAP_OPENNI_QVGA_30HZ)},
+ {"CAP_OPENNI_QVGA_60HZ", static_cast<long>(cv::CAP_OPENNI_QVGA_60HZ)},
+ {"CAP_OPENNI_SXGA_15HZ", static_cast<long>(cv::CAP_OPENNI_SXGA_15HZ)},
+ {"CAP_OPENNI_SXGA_30HZ", static_cast<long>(cv::CAP_OPENNI_SXGA_30HZ)},
+ {"CAP_OPENNI_VALID_DEPTH_MASK", static_cast<long>(cv::CAP_OPENNI_VALID_DEPTH_MASK)},
+ {"CAP_OPENNI_VGA_30HZ", static_cast<long>(cv::CAP_OPENNI_VGA_30HZ)},
+ {"CAP_PROP_APERTURE", static_cast<long>(cv::CAP_PROP_APERTURE)},
+ {"CAP_PROP_ARAVIS_AUTOTRIGGER", static_cast<long>(cv::CAP_PROP_ARAVIS_AUTOTRIGGER)},
+ {"CAP_PROP_AUTOFOCUS", static_cast<long>(cv::CAP_PROP_AUTOFOCUS)},
+ {"CAP_PROP_AUTO_EXPOSURE", static_cast<long>(cv::CAP_PROP_AUTO_EXPOSURE)},
+ {"CAP_PROP_AUTO_WB", static_cast<long>(cv::CAP_PROP_AUTO_WB)},
+ {"CAP_PROP_BACKEND", static_cast<long>(cv::CAP_PROP_BACKEND)},
+ {"CAP_PROP_BACKLIGHT", static_cast<long>(cv::CAP_PROP_BACKLIGHT)},
+ {"CAP_PROP_BITRATE", static_cast<long>(cv::CAP_PROP_BITRATE)},
+ {"CAP_PROP_BRIGHTNESS", static_cast<long>(cv::CAP_PROP_BRIGHTNESS)},
+ {"CAP_PROP_BUFFERSIZE", static_cast<long>(cv::CAP_PROP_BUFFERSIZE)},
+ {"CAP_PROP_CHANNEL", static_cast<long>(cv::CAP_PROP_CHANNEL)},
+ {"CAP_PROP_CODEC_PIXEL_FORMAT", static_cast<long>(cv::CAP_PROP_CODEC_PIXEL_FORMAT)},
+ {"CAP_PROP_CONTRAST", static_cast<long>(cv::CAP_PROP_CONTRAST)},
+ {"CAP_PROP_CONVERT_RGB", static_cast<long>(cv::CAP_PROP_CONVERT_RGB)},
+ {"CAP_PROP_DC1394_MAX", static_cast<long>(cv::CAP_PROP_DC1394_MAX)},
+ {"CAP_PROP_DC1394_MODE_AUTO", static_cast<long>(cv::CAP_PROP_DC1394_MODE_AUTO)},
+ {"CAP_PROP_DC1394_MODE_MANUAL", static_cast<long>(cv::CAP_PROP_DC1394_MODE_MANUAL)},
+ {"CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO", static_cast<long>(cv::CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)},
+ {"CAP_PROP_DC1394_OFF", static_cast<long>(cv::CAP_PROP_DC1394_OFF)},
+ {"CAP_PROP_EXPOSURE", static_cast<long>(cv::CAP_PROP_EXPOSURE)},
+ {"CAP_PROP_EXPOSUREPROGRAM", static_cast<long>(cv::CAP_PROP_EXPOSUREPROGRAM)},
+ {"CAP_PROP_FOCUS", static_cast<long>(cv::CAP_PROP_FOCUS)},
+ {"CAP_PROP_FORMAT", static_cast<long>(cv::CAP_PROP_FORMAT)},
+ {"CAP_PROP_FOURCC", static_cast<long>(cv::CAP_PROP_FOURCC)},
+ {"CAP_PROP_FPS", static_cast<long>(cv::CAP_PROP_FPS)},
+ {"CAP_PROP_FRAME_COUNT", static_cast<long>(cv::CAP_PROP_FRAME_COUNT)},
+ {"CAP_PROP_FRAME_HEIGHT", static_cast<long>(cv::CAP_PROP_FRAME_HEIGHT)},
+ {"CAP_PROP_FRAME_WIDTH", static_cast<long>(cv::CAP_PROP_FRAME_WIDTH)},
+ {"CAP_PROP_GAIN", static_cast<long>(cv::CAP_PROP_GAIN)},
+ {"CAP_PROP_GAMMA", static_cast<long>(cv::CAP_PROP_GAMMA)},
+ {"CAP_PROP_GIGA_FRAME_HEIGH_MAX", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_HEIGH_MAX)},
+ {"CAP_PROP_GIGA_FRAME_OFFSET_X", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_OFFSET_X)},
+ {"CAP_PROP_GIGA_FRAME_OFFSET_Y", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_OFFSET_Y)},
+ {"CAP_PROP_GIGA_FRAME_SENS_HEIGH", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_SENS_HEIGH)},
+ {"CAP_PROP_GIGA_FRAME_SENS_WIDTH", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_SENS_WIDTH)},
+ {"CAP_PROP_GIGA_FRAME_WIDTH_MAX", static_cast<long>(cv::CAP_PROP_GIGA_FRAME_WIDTH_MAX)},
+ {"CAP_PROP_GPHOTO2_COLLECT_MSGS", static_cast<long>(cv::CAP_PROP_GPHOTO2_COLLECT_MSGS)},
+ {"CAP_PROP_GPHOTO2_FLUSH_MSGS", static_cast<long>(cv::CAP_PROP_GPHOTO2_FLUSH_MSGS)},
+ {"CAP_PROP_GPHOTO2_PREVIEW", static_cast<long>(cv::CAP_PROP_GPHOTO2_PREVIEW)},
+ {"CAP_PROP_GPHOTO2_RELOAD_CONFIG", static_cast<long>(cv::CAP_PROP_GPHOTO2_RELOAD_CONFIG)},
+ {"CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE", static_cast<long>(cv::CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE)},
+ {"CAP_PROP_GPHOTO2_WIDGET_ENUMERATE", static_cast<long>(cv::CAP_PROP_GPHOTO2_WIDGET_ENUMERATE)},
+ {"CAP_PROP_GSTREAMER_QUEUE_LENGTH", static_cast<long>(cv::CAP_PROP_GSTREAMER_QUEUE_LENGTH)},
+ {"CAP_PROP_GUID", static_cast<long>(cv::CAP_PROP_GUID)},
+ {"CAP_PROP_HUE", static_cast<long>(cv::CAP_PROP_HUE)},
+ {"CAP_PROP_HW_ACCELERATION", static_cast<long>(cv::CAP_PROP_HW_ACCELERATION)},
+ {"CAP_PROP_HW_ACCELERATION_USE_OPENCL", static_cast<long>(cv::CAP_PROP_HW_ACCELERATION_USE_OPENCL)},
+ {"CAP_PROP_HW_DEVICE", static_cast<long>(cv::CAP_PROP_HW_DEVICE)},
+ {"CAP_PROP_IMAGES_BASE", static_cast<long>(cv::CAP_PROP_IMAGES_BASE)},
+ {"CAP_PROP_IMAGES_LAST", static_cast<long>(cv::CAP_PROP_IMAGES_LAST)},
+ {"CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD", static_cast<long>(cv::CAP_PROP_INTELPERC_DEPTH_CONFIDENCE_THRESHOLD)},
+ {"CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ", static_cast<long>(cv::CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_HORZ)},
+ {"CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT", static_cast<long>(cv::CAP_PROP_INTELPERC_DEPTH_FOCAL_LENGTH_VERT)},
+ {"CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE", static_cast<long>(cv::CAP_PROP_INTELPERC_DEPTH_LOW_CONFIDENCE_VALUE)},
+ {"CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE", static_cast<long>(cv::CAP_PROP_INTELPERC_DEPTH_SATURATION_VALUE)},
+ {"CAP_PROP_INTELPERC_PROFILE_COUNT", static_cast<long>(cv::CAP_PROP_INTELPERC_PROFILE_COUNT)},
+ {"CAP_PROP_INTELPERC_PROFILE_IDX", static_cast<long>(cv::CAP_PROP_INTELPERC_PROFILE_IDX)},
+ {"CAP_PROP_IOS_DEVICE_EXPOSURE", static_cast<long>(cv::CAP_PROP_IOS_DEVICE_EXPOSURE)},
+ {"CAP_PROP_IOS_DEVICE_FLASH", static_cast<long>(cv::CAP_PROP_IOS_DEVICE_FLASH)},
+ {"CAP_PROP_IOS_DEVICE_FOCUS", static_cast<long>(cv::CAP_PROP_IOS_DEVICE_FOCUS)},
+ {"CAP_PROP_IOS_DEVICE_TORCH", static_cast<long>(cv::CAP_PROP_IOS_DEVICE_TORCH)},
+ {"CAP_PROP_IOS_DEVICE_WHITEBALANCE", static_cast<long>(cv::CAP_PROP_IOS_DEVICE_WHITEBALANCE)},
+ {"CAP_PROP_IRIS", static_cast<long>(cv::CAP_PROP_IRIS)},
+ {"CAP_PROP_ISO_SPEED", static_cast<long>(cv::CAP_PROP_ISO_SPEED)},
+ {"CAP_PROP_MODE", static_cast<long>(cv::CAP_PROP_MODE)},
+ {"CAP_PROP_MONOCHROME", static_cast<long>(cv::CAP_PROP_MONOCHROME)},
+ {"CAP_PROP_OPENNI2_MIRROR", static_cast<long>(cv::CAP_PROP_OPENNI2_MIRROR)},
+ {"CAP_PROP_OPENNI2_SYNC", static_cast<long>(cv::CAP_PROP_OPENNI2_SYNC)},
+ {"CAP_PROP_OPENNI_APPROX_FRAME_SYNC", static_cast<long>(cv::CAP_PROP_OPENNI_APPROX_FRAME_SYNC)},
+ {"CAP_PROP_OPENNI_BASELINE", static_cast<long>(cv::CAP_PROP_OPENNI_BASELINE)},
+ {"CAP_PROP_OPENNI_CIRCLE_BUFFER", static_cast<long>(cv::CAP_PROP_OPENNI_CIRCLE_BUFFER)},
+ {"CAP_PROP_OPENNI_FOCAL_LENGTH", static_cast<long>(cv::CAP_PROP_OPENNI_FOCAL_LENGTH)},
+ {"CAP_PROP_OPENNI_FRAME_MAX_DEPTH", static_cast<long>(cv::CAP_PROP_OPENNI_FRAME_MAX_DEPTH)},
+ {"CAP_PROP_OPENNI_GENERATOR_PRESENT", static_cast<long>(cv::CAP_PROP_OPENNI_GENERATOR_PRESENT)},
+ {"CAP_PROP_OPENNI_MAX_BUFFER_SIZE", static_cast<long>(cv::CAP_PROP_OPENNI_MAX_BUFFER_SIZE)},
+ {"CAP_PROP_OPENNI_MAX_TIME_DURATION", static_cast<long>(cv::CAP_PROP_OPENNI_MAX_TIME_DURATION)},
+ {"CAP_PROP_OPENNI_OUTPUT_MODE", static_cast<long>(cv::CAP_PROP_OPENNI_OUTPUT_MODE)},
+ {"CAP_PROP_OPENNI_REGISTRATION", static_cast<long>(cv::CAP_PROP_OPENNI_REGISTRATION)},
+ {"CAP_PROP_OPENNI_REGISTRATION_ON", static_cast<long>(cv::CAP_PROP_OPENNI_REGISTRATION_ON)},
+ {"CAP_PROP_OPEN_TIMEOUT_MSEC", static_cast<long>(cv::CAP_PROP_OPEN_TIMEOUT_MSEC)},
+ {"CAP_PROP_ORIENTATION_AUTO", static_cast<long>(cv::CAP_PROP_ORIENTATION_AUTO)},
+ {"CAP_PROP_ORIENTATION_META", static_cast<long>(cv::CAP_PROP_ORIENTATION_META)},
+ {"CAP_PROP_PAN", static_cast<long>(cv::CAP_PROP_PAN)},
+ {"CAP_PROP_POS_AVI_RATIO", static_cast<long>(cv::CAP_PROP_POS_AVI_RATIO)},
+ {"CAP_PROP_POS_FRAMES", static_cast<long>(cv::CAP_PROP_POS_FRAMES)},
+ {"CAP_PROP_POS_MSEC", static_cast<long>(cv::CAP_PROP_POS_MSEC)},
+ {"CAP_PROP_PVAPI_BINNINGX", static_cast<long>(cv::CAP_PROP_PVAPI_BINNINGX)},
+ {"CAP_PROP_PVAPI_BINNINGY", static_cast<long>(cv::CAP_PROP_PVAPI_BINNINGY)},
+ {"CAP_PROP_PVAPI_DECIMATIONHORIZONTAL", static_cast<long>(cv::CAP_PROP_PVAPI_DECIMATIONHORIZONTAL)},
+ {"CAP_PROP_PVAPI_DECIMATIONVERTICAL", static_cast<long>(cv::CAP_PROP_PVAPI_DECIMATIONVERTICAL)},
+ {"CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE", static_cast<long>(cv::CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE)},
+ {"CAP_PROP_PVAPI_MULTICASTIP", static_cast<long>(cv::CAP_PROP_PVAPI_MULTICASTIP)},
+ {"CAP_PROP_PVAPI_PIXELFORMAT", static_cast<long>(cv::CAP_PROP_PVAPI_PIXELFORMAT)},
+ {"CAP_PROP_READ_TIMEOUT_MSEC", static_cast<long>(cv::CAP_PROP_READ_TIMEOUT_MSEC)},
+ {"CAP_PROP_RECTIFICATION", static_cast<long>(cv::CAP_PROP_RECTIFICATION)},
+ {"CAP_PROP_ROLL", static_cast<long>(cv::CAP_PROP_ROLL)},
+ {"CAP_PROP_SAR_DEN", static_cast<long>(cv::CAP_PROP_SAR_DEN)},
+ {"CAP_PROP_SAR_NUM", static_cast<long>(cv::CAP_PROP_SAR_NUM)},
+ {"CAP_PROP_SATURATION", static_cast<long>(cv::CAP_PROP_SATURATION)},
+ {"CAP_PROP_SETTINGS", static_cast<long>(cv::CAP_PROP_SETTINGS)},
+ {"CAP_PROP_SHARPNESS", static_cast<long>(cv::CAP_PROP_SHARPNESS)},
+ {"CAP_PROP_SPEED", static_cast<long>(cv::CAP_PROP_SPEED)},
+ {"CAP_PROP_STREAM_OPEN_TIME_USEC", static_cast<long>(cv::CAP_PROP_STREAM_OPEN_TIME_USEC)},
+ {"CAP_PROP_TEMPERATURE", static_cast<long>(cv::CAP_PROP_TEMPERATURE)},
+ {"CAP_PROP_TILT", static_cast<long>(cv::CAP_PROP_TILT)},
+ {"CAP_PROP_TRIGGER", static_cast<long>(cv::CAP_PROP_TRIGGER)},
+ {"CAP_PROP_TRIGGER_DELAY", static_cast<long>(cv::CAP_PROP_TRIGGER_DELAY)},
+ {"CAP_PROP_VIEWFINDER", static_cast<long>(cv::CAP_PROP_VIEWFINDER)},
+ {"CAP_PROP_WB_TEMPERATURE", static_cast<long>(cv::CAP_PROP_WB_TEMPERATURE)},
+ {"CAP_PROP_WHITE_BALANCE_BLUE_U", static_cast<long>(cv::CAP_PROP_WHITE_BALANCE_BLUE_U)},
+ {"CAP_PROP_WHITE_BALANCE_RED_V", static_cast<long>(cv::CAP_PROP_WHITE_BALANCE_RED_V)},
+ {"CAP_PROP_XI_ACQ_BUFFER_SIZE", static_cast<long>(cv::CAP_PROP_XI_ACQ_BUFFER_SIZE)},
+ {"CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT", static_cast<long>(cv::CAP_PROP_XI_ACQ_BUFFER_SIZE_UNIT)},
+ {"CAP_PROP_XI_ACQ_FRAME_BURST_COUNT", static_cast<long>(cv::CAP_PROP_XI_ACQ_FRAME_BURST_COUNT)},
+ {"CAP_PROP_XI_ACQ_TIMING_MODE", static_cast<long>(cv::CAP_PROP_XI_ACQ_TIMING_MODE)},
+ {"CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT", static_cast<long>(cv::CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_COMMIT)},
+ {"CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE", static_cast<long>(cv::CAP_PROP_XI_ACQ_TRANSPORT_BUFFER_SIZE)},
+ {"CAP_PROP_XI_AEAG", static_cast<long>(cv::CAP_PROP_XI_AEAG)},
+ {"CAP_PROP_XI_AEAG_LEVEL", static_cast<long>(cv::CAP_PROP_XI_AEAG_LEVEL)},
+ {"CAP_PROP_XI_AEAG_ROI_HEIGHT", static_cast<long>(cv::CAP_PROP_XI_AEAG_ROI_HEIGHT)},
+ {"CAP_PROP_XI_AEAG_ROI_OFFSET_X", static_cast<long>(cv::CAP_PROP_XI_AEAG_ROI_OFFSET_X)},
+ {"CAP_PROP_XI_AEAG_ROI_OFFSET_Y", static_cast<long>(cv::CAP_PROP_XI_AEAG_ROI_OFFSET_Y)},
+ {"CAP_PROP_XI_AEAG_ROI_WIDTH", static_cast<long>(cv::CAP_PROP_XI_AEAG_ROI_WIDTH)},
+ {"CAP_PROP_XI_AE_MAX_LIMIT", static_cast<long>(cv::CAP_PROP_XI_AE_MAX_LIMIT)},
+ {"CAP_PROP_XI_AG_MAX_LIMIT", static_cast<long>(cv::CAP_PROP_XI_AG_MAX_LIMIT)},
+ {"CAP_PROP_XI_APPLY_CMS", static_cast<long>(cv::CAP_PROP_XI_APPLY_CMS)},
+ {"CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION", static_cast<long>(cv::CAP_PROP_XI_AUTO_BANDWIDTH_CALCULATION)},
+ {"CAP_PROP_XI_AUTO_WB", static_cast<long>(cv::CAP_PROP_XI_AUTO_WB)},
+ {"CAP_PROP_XI_AVAILABLE_BANDWIDTH", static_cast<long>(cv::CAP_PROP_XI_AVAILABLE_BANDWIDTH)},
+ {"CAP_PROP_XI_BINNING_HORIZONTAL", static_cast<long>(cv::CAP_PROP_XI_BINNING_HORIZONTAL)},
+ {"CAP_PROP_XI_BINNING_PATTERN", static_cast<long>(cv::CAP_PROP_XI_BINNING_PATTERN)},
+ {"CAP_PROP_XI_BINNING_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_BINNING_SELECTOR)},
+ {"CAP_PROP_XI_BINNING_VERTICAL", static_cast<long>(cv::CAP_PROP_XI_BINNING_VERTICAL)},
+ {"CAP_PROP_XI_BPC", static_cast<long>(cv::CAP_PROP_XI_BPC)},
+ {"CAP_PROP_XI_BUFFERS_QUEUE_SIZE", static_cast<long>(cv::CAP_PROP_XI_BUFFERS_QUEUE_SIZE)},
+ {"CAP_PROP_XI_BUFFER_POLICY", static_cast<long>(cv::CAP_PROP_XI_BUFFER_POLICY)},
+ {"CAP_PROP_XI_CC_MATRIX_00", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_00)},
+ {"CAP_PROP_XI_CC_MATRIX_01", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_01)},
+ {"CAP_PROP_XI_CC_MATRIX_02", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_02)},
+ {"CAP_PROP_XI_CC_MATRIX_03", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_03)},
+ {"CAP_PROP_XI_CC_MATRIX_10", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_10)},
+ {"CAP_PROP_XI_CC_MATRIX_11", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_11)},
+ {"CAP_PROP_XI_CC_MATRIX_12", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_12)},
+ {"CAP_PROP_XI_CC_MATRIX_13", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_13)},
+ {"CAP_PROP_XI_CC_MATRIX_20", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_20)},
+ {"CAP_PROP_XI_CC_MATRIX_21", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_21)},
+ {"CAP_PROP_XI_CC_MATRIX_22", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_22)},
+ {"CAP_PROP_XI_CC_MATRIX_23", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_23)},
+ {"CAP_PROP_XI_CC_MATRIX_30", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_30)},
+ {"CAP_PROP_XI_CC_MATRIX_31", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_31)},
+ {"CAP_PROP_XI_CC_MATRIX_32", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_32)},
+ {"CAP_PROP_XI_CC_MATRIX_33", static_cast<long>(cv::CAP_PROP_XI_CC_MATRIX_33)},
+ {"CAP_PROP_XI_CHIP_TEMP", static_cast<long>(cv::CAP_PROP_XI_CHIP_TEMP)},
+ {"CAP_PROP_XI_CMS", static_cast<long>(cv::CAP_PROP_XI_CMS)},
+ {"CAP_PROP_XI_COLOR_FILTER_ARRAY", static_cast<long>(cv::CAP_PROP_XI_COLOR_FILTER_ARRAY)},
+ {"CAP_PROP_XI_COLUMN_FPN_CORRECTION", static_cast<long>(cv::CAP_PROP_XI_COLUMN_FPN_CORRECTION)},
+ {"CAP_PROP_XI_COOLING", static_cast<long>(cv::CAP_PROP_XI_COOLING)},
+ {"CAP_PROP_XI_COUNTER_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_COUNTER_SELECTOR)},
+ {"CAP_PROP_XI_COUNTER_VALUE", static_cast<long>(cv::CAP_PROP_XI_COUNTER_VALUE)},
+ {"CAP_PROP_XI_DATA_FORMAT", static_cast<long>(cv::CAP_PROP_XI_DATA_FORMAT)},
+ {"CAP_PROP_XI_DEBOUNCE_EN", static_cast<long>(cv::CAP_PROP_XI_DEBOUNCE_EN)},
+ {"CAP_PROP_XI_DEBOUNCE_POL", static_cast<long>(cv::CAP_PROP_XI_DEBOUNCE_POL)},
+ {"CAP_PROP_XI_DEBOUNCE_T0", static_cast<long>(cv::CAP_PROP_XI_DEBOUNCE_T0)},
+ {"CAP_PROP_XI_DEBOUNCE_T1", static_cast<long>(cv::CAP_PROP_XI_DEBOUNCE_T1)},
+ {"CAP_PROP_XI_DEBUG_LEVEL", static_cast<long>(cv::CAP_PROP_XI_DEBUG_LEVEL)},
+ {"CAP_PROP_XI_DECIMATION_HORIZONTAL", static_cast<long>(cv::CAP_PROP_XI_DECIMATION_HORIZONTAL)},
+ {"CAP_PROP_XI_DECIMATION_PATTERN", static_cast<long>(cv::CAP_PROP_XI_DECIMATION_PATTERN)},
+ {"CAP_PROP_XI_DECIMATION_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_DECIMATION_SELECTOR)},
+ {"CAP_PROP_XI_DECIMATION_VERTICAL", static_cast<long>(cv::CAP_PROP_XI_DECIMATION_VERTICAL)},
+ {"CAP_PROP_XI_DEFAULT_CC_MATRIX", static_cast<long>(cv::CAP_PROP_XI_DEFAULT_CC_MATRIX)},
+ {"CAP_PROP_XI_DEVICE_MODEL_ID", static_cast<long>(cv::CAP_PROP_XI_DEVICE_MODEL_ID)},
+ {"CAP_PROP_XI_DEVICE_RESET", static_cast<long>(cv::CAP_PROP_XI_DEVICE_RESET)},
+ {"CAP_PROP_XI_DEVICE_SN", static_cast<long>(cv::CAP_PROP_XI_DEVICE_SN)},
+ {"CAP_PROP_XI_DOWNSAMPLING", static_cast<long>(cv::CAP_PROP_XI_DOWNSAMPLING)},
+ {"CAP_PROP_XI_DOWNSAMPLING_TYPE", static_cast<long>(cv::CAP_PROP_XI_DOWNSAMPLING_TYPE)},
+ {"CAP_PROP_XI_EXPOSURE", static_cast<long>(cv::CAP_PROP_XI_EXPOSURE)},
+ {"CAP_PROP_XI_EXPOSURE_BURST_COUNT", static_cast<long>(cv::CAP_PROP_XI_EXPOSURE_BURST_COUNT)},
+ {"CAP_PROP_XI_EXP_PRIORITY", static_cast<long>(cv::CAP_PROP_XI_EXP_PRIORITY)},
+ {"CAP_PROP_XI_FFS_ACCESS_KEY", static_cast<long>(cv::CAP_PROP_XI_FFS_ACCESS_KEY)},
+ {"CAP_PROP_XI_FFS_FILE_ID", static_cast<long>(cv::CAP_PROP_XI_FFS_FILE_ID)},
+ {"CAP_PROP_XI_FFS_FILE_SIZE", static_cast<long>(cv::CAP_PROP_XI_FFS_FILE_SIZE)},
+ {"CAP_PROP_XI_FRAMERATE", static_cast<long>(cv::CAP_PROP_XI_FRAMERATE)},
+ {"CAP_PROP_XI_FREE_FFS_SIZE", static_cast<long>(cv::CAP_PROP_XI_FREE_FFS_SIZE)},
+ {"CAP_PROP_XI_GAIN", static_cast<long>(cv::CAP_PROP_XI_GAIN)},
+ {"CAP_PROP_XI_GAIN_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_GAIN_SELECTOR)},
+ {"CAP_PROP_XI_GAMMAC", static_cast<long>(cv::CAP_PROP_XI_GAMMAC)},
+ {"CAP_PROP_XI_GAMMAY", static_cast<long>(cv::CAP_PROP_XI_GAMMAY)},
+ {"CAP_PROP_XI_GPI_LEVEL", static_cast<long>(cv::CAP_PROP_XI_GPI_LEVEL)},
+ {"CAP_PROP_XI_GPI_MODE", static_cast<long>(cv::CAP_PROP_XI_GPI_MODE)},
+ {"CAP_PROP_XI_GPI_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_GPI_SELECTOR)},
+ {"CAP_PROP_XI_GPO_MODE", static_cast<long>(cv::CAP_PROP_XI_GPO_MODE)},
+ {"CAP_PROP_XI_GPO_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_GPO_SELECTOR)},
+ {"CAP_PROP_XI_HDR", static_cast<long>(cv::CAP_PROP_XI_HDR)},
+ {"CAP_PROP_XI_HDR_KNEEPOINT_COUNT", static_cast<long>(cv::CAP_PROP_XI_HDR_KNEEPOINT_COUNT)},
+ {"CAP_PROP_XI_HDR_T1", static_cast<long>(cv::CAP_PROP_XI_HDR_T1)},
+ {"CAP_PROP_XI_HDR_T2", static_cast<long>(cv::CAP_PROP_XI_HDR_T2)},
+ {"CAP_PROP_XI_HEIGHT", static_cast<long>(cv::CAP_PROP_XI_HEIGHT)},
+ {"CAP_PROP_XI_HOUS_BACK_SIDE_TEMP", static_cast<long>(cv::CAP_PROP_XI_HOUS_BACK_SIDE_TEMP)},
+ {"CAP_PROP_XI_HOUS_TEMP", static_cast<long>(cv::CAP_PROP_XI_HOUS_TEMP)},
+ {"CAP_PROP_XI_HW_REVISION", static_cast<long>(cv::CAP_PROP_XI_HW_REVISION)},
+ {"CAP_PROP_XI_IMAGE_BLACK_LEVEL", static_cast<long>(cv::CAP_PROP_XI_IMAGE_BLACK_LEVEL)},
+ {"CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH", static_cast<long>(cv::CAP_PROP_XI_IMAGE_DATA_BIT_DEPTH)},
+ {"CAP_PROP_XI_IMAGE_DATA_FORMAT", static_cast<long>(cv::CAP_PROP_XI_IMAGE_DATA_FORMAT)},
+ {"CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA", static_cast<long>(cv::CAP_PROP_XI_IMAGE_DATA_FORMAT_RGB32_ALPHA)},
+ {"CAP_PROP_XI_IMAGE_IS_COLOR", static_cast<long>(cv::CAP_PROP_XI_IMAGE_IS_COLOR)},
+ {"CAP_PROP_XI_IMAGE_PAYLOAD_SIZE", static_cast<long>(cv::CAP_PROP_XI_IMAGE_PAYLOAD_SIZE)},
+ {"CAP_PROP_XI_IS_COOLED", static_cast<long>(cv::CAP_PROP_XI_IS_COOLED)},
+ {"CAP_PROP_XI_IS_DEVICE_EXIST", static_cast<long>(cv::CAP_PROP_XI_IS_DEVICE_EXIST)},
+ {"CAP_PROP_XI_KNEEPOINT1", static_cast<long>(cv::CAP_PROP_XI_KNEEPOINT1)},
+ {"CAP_PROP_XI_KNEEPOINT2", static_cast<long>(cv::CAP_PROP_XI_KNEEPOINT2)},
+ {"CAP_PROP_XI_LED_MODE", static_cast<long>(cv::CAP_PROP_XI_LED_MODE)},
+ {"CAP_PROP_XI_LED_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_LED_SELECTOR)},
+ {"CAP_PROP_XI_LENS_APERTURE_VALUE", static_cast<long>(cv::CAP_PROP_XI_LENS_APERTURE_VALUE)},
+ {"CAP_PROP_XI_LENS_FEATURE", static_cast<long>(cv::CAP_PROP_XI_LENS_FEATURE)},
+ {"CAP_PROP_XI_LENS_FEATURE_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_LENS_FEATURE_SELECTOR)},
+ {"CAP_PROP_XI_LENS_FOCAL_LENGTH", static_cast<long>(cv::CAP_PROP_XI_LENS_FOCAL_LENGTH)},
+ {"CAP_PROP_XI_LENS_FOCUS_DISTANCE", static_cast<long>(cv::CAP_PROP_XI_LENS_FOCUS_DISTANCE)},
+ {"CAP_PROP_XI_LENS_FOCUS_MOVE", static_cast<long>(cv::CAP_PROP_XI_LENS_FOCUS_MOVE)},
+ {"CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE", static_cast<long>(cv::CAP_PROP_XI_LENS_FOCUS_MOVEMENT_VALUE)},
+ {"CAP_PROP_XI_LENS_MODE", static_cast<long>(cv::CAP_PROP_XI_LENS_MODE)},
+ {"CAP_PROP_XI_LIMIT_BANDWIDTH", static_cast<long>(cv::CAP_PROP_XI_LIMIT_BANDWIDTH)},
+ {"CAP_PROP_XI_LUT_EN", static_cast<long>(cv::CAP_PROP_XI_LUT_EN)},
+ {"CAP_PROP_XI_LUT_INDEX", static_cast<long>(cv::CAP_PROP_XI_LUT_INDEX)},
+ {"CAP_PROP_XI_LUT_VALUE", static_cast<long>(cv::CAP_PROP_XI_LUT_VALUE)},
+ {"CAP_PROP_XI_MANUAL_WB", static_cast<long>(cv::CAP_PROP_XI_MANUAL_WB)},
+ {"CAP_PROP_XI_OFFSET_X", static_cast<long>(cv::CAP_PROP_XI_OFFSET_X)},
+ {"CAP_PROP_XI_OFFSET_Y", static_cast<long>(cv::CAP_PROP_XI_OFFSET_Y)},
+ {"CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH", static_cast<long>(cv::CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH)},
+ {"CAP_PROP_XI_OUTPUT_DATA_PACKING", static_cast<long>(cv::CAP_PROP_XI_OUTPUT_DATA_PACKING)},
+ {"CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE", static_cast<long>(cv::CAP_PROP_XI_OUTPUT_DATA_PACKING_TYPE)},
+ {"CAP_PROP_XI_RECENT_FRAME", static_cast<long>(cv::CAP_PROP_XI_RECENT_FRAME)},
+ {"CAP_PROP_XI_REGION_MODE", static_cast<long>(cv::CAP_PROP_XI_REGION_MODE)},
+ {"CAP_PROP_XI_REGION_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_REGION_SELECTOR)},
+ {"CAP_PROP_XI_ROW_FPN_CORRECTION", static_cast<long>(cv::CAP_PROP_XI_ROW_FPN_CORRECTION)},
+ {"CAP_PROP_XI_SENSOR_BOARD_TEMP", static_cast<long>(cv::CAP_PROP_XI_SENSOR_BOARD_TEMP)},
+ {"CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ", static_cast<long>(cv::CAP_PROP_XI_SENSOR_CLOCK_FREQ_HZ)},
+ {"CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX", static_cast<long>(cv::CAP_PROP_XI_SENSOR_CLOCK_FREQ_INDEX)},
+ {"CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH", static_cast<long>(cv::CAP_PROP_XI_SENSOR_DATA_BIT_DEPTH)},
+ {"CAP_PROP_XI_SENSOR_FEATURE_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_SENSOR_FEATURE_SELECTOR)},
+ {"CAP_PROP_XI_SENSOR_FEATURE_VALUE", static_cast<long>(cv::CAP_PROP_XI_SENSOR_FEATURE_VALUE)},
+ {"CAP_PROP_XI_SENSOR_MODE", static_cast<long>(cv::CAP_PROP_XI_SENSOR_MODE)},
+ {"CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT", static_cast<long>(cv::CAP_PROP_XI_SENSOR_OUTPUT_CHANNEL_COUNT)},
+ {"CAP_PROP_XI_SENSOR_TAPS", static_cast<long>(cv::CAP_PROP_XI_SENSOR_TAPS)},
+ {"CAP_PROP_XI_SHARPNESS", static_cast<long>(cv::CAP_PROP_XI_SHARPNESS)},
+ {"CAP_PROP_XI_SHUTTER_TYPE", static_cast<long>(cv::CAP_PROP_XI_SHUTTER_TYPE)},
+ {"CAP_PROP_XI_TARGET_TEMP", static_cast<long>(cv::CAP_PROP_XI_TARGET_TEMP)},
+ {"CAP_PROP_XI_TEST_PATTERN", static_cast<long>(cv::CAP_PROP_XI_TEST_PATTERN)},
+ {"CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_TEST_PATTERN_GENERATOR_SELECTOR)},
+ {"CAP_PROP_XI_TIMEOUT", static_cast<long>(cv::CAP_PROP_XI_TIMEOUT)},
+ {"CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT", static_cast<long>(cv::CAP_PROP_XI_TRANSPORT_PIXEL_FORMAT)},
+ {"CAP_PROP_XI_TRG_DELAY", static_cast<long>(cv::CAP_PROP_XI_TRG_DELAY)},
+ {"CAP_PROP_XI_TRG_SELECTOR", static_cast<long>(cv::CAP_PROP_XI_TRG_SELECTOR)},
+ {"CAP_PROP_XI_TRG_SOFTWARE", static_cast<long>(cv::CAP_PROP_XI_TRG_SOFTWARE)},
+ {"CAP_PROP_XI_TRG_SOURCE", static_cast<long>(cv::CAP_PROP_XI_TRG_SOURCE)},
+ {"CAP_PROP_XI_TS_RST_MODE", static_cast<long>(cv::CAP_PROP_XI_TS_RST_MODE)},
+ {"CAP_PROP_XI_TS_RST_SOURCE", static_cast<long>(cv::CAP_PROP_XI_TS_RST_SOURCE)},
+ {"CAP_PROP_XI_USED_FFS_SIZE", static_cast<long>(cv::CAP_PROP_XI_USED_FFS_SIZE)},
+ {"CAP_PROP_XI_WB_KB", static_cast<long>(cv::CAP_PROP_XI_WB_KB)},
+ {"CAP_PROP_XI_WB_KG", static_cast<long>(cv::CAP_PROP_XI_WB_KG)},
+ {"CAP_PROP_XI_WB_KR", static_cast<long>(cv::CAP_PROP_XI_WB_KR)},
+ {"CAP_PROP_XI_WIDTH", static_cast<long>(cv::CAP_PROP_XI_WIDTH)},
+ {"CAP_PROP_ZOOM", static_cast<long>(cv::CAP_PROP_ZOOM)},
+ {"CAP_PVAPI", static_cast<long>(cv::CAP_PVAPI)},
+ {"CAP_PVAPI_DECIMATION_2OUTOF16", static_cast<long>(cv::CAP_PVAPI_DECIMATION_2OUTOF16)},
+ {"CAP_PVAPI_DECIMATION_2OUTOF4", static_cast<long>(cv::CAP_PVAPI_DECIMATION_2OUTOF4)},
+ {"CAP_PVAPI_DECIMATION_2OUTOF8", static_cast<long>(cv::CAP_PVAPI_DECIMATION_2OUTOF8)},
+ {"CAP_PVAPI_DECIMATION_OFF", static_cast<long>(cv::CAP_PVAPI_DECIMATION_OFF)},
+ {"CAP_PVAPI_FSTRIGMODE_FIXEDRATE", static_cast<long>(cv::CAP_PVAPI_FSTRIGMODE_FIXEDRATE)},
+ {"CAP_PVAPI_FSTRIGMODE_FREERUN", static_cast<long>(cv::CAP_PVAPI_FSTRIGMODE_FREERUN)},
+ {"CAP_PVAPI_FSTRIGMODE_SOFTWARE", static_cast<long>(cv::CAP_PVAPI_FSTRIGMODE_SOFTWARE)},
+ {"CAP_PVAPI_FSTRIGMODE_SYNCIN1", static_cast<long>(cv::CAP_PVAPI_FSTRIGMODE_SYNCIN1)},
+ {"CAP_PVAPI_FSTRIGMODE_SYNCIN2", static_cast<long>(cv::CAP_PVAPI_FSTRIGMODE_SYNCIN2)},
+ {"CAP_PVAPI_PIXELFORMAT_BAYER16", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_BAYER16)},
+ {"CAP_PVAPI_PIXELFORMAT_BAYER8", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_BAYER8)},
+ {"CAP_PVAPI_PIXELFORMAT_BGR24", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_BGR24)},
+ {"CAP_PVAPI_PIXELFORMAT_BGRA32", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_BGRA32)},
+ {"CAP_PVAPI_PIXELFORMAT_MONO16", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_MONO16)},
+ {"CAP_PVAPI_PIXELFORMAT_MONO8", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_MONO8)},
+ {"CAP_PVAPI_PIXELFORMAT_RGB24", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_RGB24)},
+ {"CAP_PVAPI_PIXELFORMAT_RGBA32", static_cast<long>(cv::CAP_PVAPI_PIXELFORMAT_RGBA32)},
+ {"CAP_QT", static_cast<long>(cv::CAP_QT)},
+ {"CAP_REALSENSE", static_cast<long>(cv::CAP_REALSENSE)},
+ {"CAP_UEYE", static_cast<long>(cv::CAP_UEYE)},
+ {"CAP_UNICAP", static_cast<long>(cv::CAP_UNICAP)},
+ {"CAP_V4L", static_cast<long>(cv::CAP_V4L)},
+ {"CAP_V4L2", static_cast<long>(cv::CAP_V4L2)},
+ {"CAP_VFW", static_cast<long>(cv::CAP_VFW)},
+ {"CAP_WINRT", static_cast<long>(cv::CAP_WINRT)},
+ {"CAP_XIAPI", static_cast<long>(cv::CAP_XIAPI)},
+ {"CAP_XINE", static_cast<long>(cv::CAP_XINE)},
+ {"CCL_BBDT", static_cast<long>(cv::CCL_BBDT)},
+ {"CCL_BOLELLI", static_cast<long>(cv::CCL_BOLELLI)},
+ {"CCL_DEFAULT", static_cast<long>(cv::CCL_DEFAULT)},
+ {"CCL_GRANA", static_cast<long>(cv::CCL_GRANA)},
+ {"CCL_SAUF", static_cast<long>(cv::CCL_SAUF)},
+ {"CCL_SPAGHETTI", static_cast<long>(cv::CCL_SPAGHETTI)},
+ {"CCL_WU", static_cast<long>(cv::CCL_WU)},
+ {"CC_STAT_AREA", static_cast<long>(cv::CC_STAT_AREA)},
+ {"CC_STAT_HEIGHT", static_cast<long>(cv::CC_STAT_HEIGHT)},
+ {"CC_STAT_LEFT", static_cast<long>(cv::CC_STAT_LEFT)},
+ {"CC_STAT_MAX", static_cast<long>(cv::CC_STAT_MAX)},
+ {"CC_STAT_TOP", static_cast<long>(cv::CC_STAT_TOP)},
+ {"CC_STAT_WIDTH", static_cast<long>(cv::CC_STAT_WIDTH)},
+ {"CHAIN_APPROX_NONE", static_cast<long>(cv::CHAIN_APPROX_NONE)},
+ {"CHAIN_APPROX_SIMPLE", static_cast<long>(cv::CHAIN_APPROX_SIMPLE)},
+ {"CHAIN_APPROX_TC89_KCOS", static_cast<long>(cv::CHAIN_APPROX_TC89_KCOS)},
+ {"CHAIN_APPROX_TC89_L1", static_cast<long>(cv::CHAIN_APPROX_TC89_L1)},
+ {"CMP_EQ", static_cast<long>(cv::CMP_EQ)},
+ {"CMP_GE", static_cast<long>(cv::CMP_GE)},
+ {"CMP_GT", static_cast<long>(cv::CMP_GT)},
+ {"CMP_LE", static_cast<long>(cv::CMP_LE)},
+ {"CMP_LT", static_cast<long>(cv::CMP_LT)},
+ {"CMP_NE", static_cast<long>(cv::CMP_NE)},
+ {"COLORMAP_AUTUMN", static_cast<long>(cv::COLORMAP_AUTUMN)},
+ {"COLORMAP_BONE", static_cast<long>(cv::COLORMAP_BONE)},
+ {"COLORMAP_CIVIDIS", static_cast<long>(cv::COLORMAP_CIVIDIS)},
+ {"COLORMAP_COOL", static_cast<long>(cv::COLORMAP_COOL)},
+ {"COLORMAP_DEEPGREEN", static_cast<long>(cv::COLORMAP_DEEPGREEN)},
+ {"COLORMAP_HOT", static_cast<long>(cv::COLORMAP_HOT)},
+ {"COLORMAP_HSV",
static_cast(cv::COLORMAP_HSV)}, + {"COLORMAP_INFERNO", static_cast(cv::COLORMAP_INFERNO)}, + {"COLORMAP_JET", static_cast(cv::COLORMAP_JET)}, + {"COLORMAP_MAGMA", static_cast(cv::COLORMAP_MAGMA)}, + {"COLORMAP_OCEAN", static_cast(cv::COLORMAP_OCEAN)}, + {"COLORMAP_PARULA", static_cast(cv::COLORMAP_PARULA)}, + {"COLORMAP_PINK", static_cast(cv::COLORMAP_PINK)}, + {"COLORMAP_PLASMA", static_cast(cv::COLORMAP_PLASMA)}, + {"COLORMAP_RAINBOW", static_cast(cv::COLORMAP_RAINBOW)}, + {"COLORMAP_SPRING", static_cast(cv::COLORMAP_SPRING)}, + {"COLORMAP_SUMMER", static_cast(cv::COLORMAP_SUMMER)}, + {"COLORMAP_TURBO", static_cast(cv::COLORMAP_TURBO)}, + {"COLORMAP_TWILIGHT", static_cast(cv::COLORMAP_TWILIGHT)}, + {"COLORMAP_TWILIGHT_SHIFTED", static_cast(cv::COLORMAP_TWILIGHT_SHIFTED)}, + {"COLORMAP_VIRIDIS", static_cast(cv::COLORMAP_VIRIDIS)}, + {"COLORMAP_WINTER", static_cast(cv::COLORMAP_WINTER)}, + {"COLOR_BGR2BGR555", static_cast(cv::COLOR_BGR2BGR555)}, + {"COLOR_BGR2BGR565", static_cast(cv::COLOR_BGR2BGR565)}, + {"COLOR_BGR2BGRA", static_cast(cv::COLOR_BGR2BGRA)}, + {"COLOR_BGR2GRAY", static_cast(cv::COLOR_BGR2GRAY)}, + {"COLOR_BGR2HLS", static_cast(cv::COLOR_BGR2HLS)}, + {"COLOR_BGR2HLS_FULL", static_cast(cv::COLOR_BGR2HLS_FULL)}, + {"COLOR_BGR2HSV", static_cast(cv::COLOR_BGR2HSV)}, + {"COLOR_BGR2HSV_FULL", static_cast(cv::COLOR_BGR2HSV_FULL)}, + {"COLOR_BGR2Lab", static_cast(cv::COLOR_BGR2Lab)}, + {"COLOR_BGR2LAB", static_cast(cv::COLOR_BGR2Lab)}, + {"COLOR_BGR2Luv", static_cast(cv::COLOR_BGR2Luv)}, + {"COLOR_BGR2LUV", static_cast(cv::COLOR_BGR2Luv)}, + {"COLOR_BGR2RGB", static_cast(cv::COLOR_BGR2RGB)}, + {"COLOR_BGR2RGBA", static_cast(cv::COLOR_BGR2RGBA)}, + {"COLOR_BGR2XYZ", static_cast(cv::COLOR_BGR2XYZ)}, + {"COLOR_BGR2YCrCb", static_cast(cv::COLOR_BGR2YCrCb)}, + {"COLOR_BGR2YCR_CB", static_cast(cv::COLOR_BGR2YCrCb)}, + {"COLOR_BGR2YUV", static_cast(cv::COLOR_BGR2YUV)}, + {"COLOR_BGR2YUV_I420", static_cast(cv::COLOR_BGR2YUV_I420)}, + {"COLOR_BGR2YUV_IYUV", static_cast(cv::COLOR_BGR2YUV_IYUV)}, + {"COLOR_BGR2YUV_YV12", static_cast(cv::COLOR_BGR2YUV_YV12)}, + {"COLOR_BGR5552BGR", static_cast(cv::COLOR_BGR5552BGR)}, + {"COLOR_BGR5552BGRA", static_cast(cv::COLOR_BGR5552BGRA)}, + {"COLOR_BGR5552GRAY", static_cast(cv::COLOR_BGR5552GRAY)}, + {"COLOR_BGR5552RGB", static_cast(cv::COLOR_BGR5552RGB)}, + {"COLOR_BGR5552RGBA", static_cast(cv::COLOR_BGR5552RGBA)}, + {"COLOR_BGR5652BGR", static_cast(cv::COLOR_BGR5652BGR)}, + {"COLOR_BGR5652BGRA", static_cast(cv::COLOR_BGR5652BGRA)}, + {"COLOR_BGR5652GRAY", static_cast(cv::COLOR_BGR5652GRAY)}, + {"COLOR_BGR5652RGB", static_cast(cv::COLOR_BGR5652RGB)}, + {"COLOR_BGR5652RGBA", static_cast(cv::COLOR_BGR5652RGBA)}, + {"COLOR_BGRA2BGR", static_cast(cv::COLOR_BGRA2BGR)}, + {"COLOR_BGRA2BGR555", static_cast(cv::COLOR_BGRA2BGR555)}, + {"COLOR_BGRA2BGR565", static_cast(cv::COLOR_BGRA2BGR565)}, + {"COLOR_BGRA2GRAY", static_cast(cv::COLOR_BGRA2GRAY)}, + {"COLOR_BGRA2RGB", static_cast(cv::COLOR_BGRA2RGB)}, + {"COLOR_BGRA2RGBA", static_cast(cv::COLOR_BGRA2RGBA)}, + {"COLOR_BGRA2YUV_I420", static_cast(cv::COLOR_BGRA2YUV_I420)}, + {"COLOR_BGRA2YUV_IYUV", static_cast(cv::COLOR_BGRA2YUV_IYUV)}, + {"COLOR_BGRA2YUV_YV12", static_cast(cv::COLOR_BGRA2YUV_YV12)}, + {"COLOR_BayerBG2BGR", static_cast(cv::COLOR_BayerBG2BGR)}, + {"COLOR_BAYER_BG2BGR", static_cast(cv::COLOR_BayerBG2BGR)}, + {"COLOR_BayerBG2BGRA", static_cast(cv::COLOR_BayerBG2BGRA)}, + {"COLOR_BAYER_BG2BGRA", static_cast(cv::COLOR_BayerBG2BGRA)}, + {"COLOR_BayerBG2BGR_EA", static_cast(cv::COLOR_BayerBG2BGR_EA)}, + 
{"COLOR_BAYER_BG2BGR_EA", static_cast(cv::COLOR_BayerBG2BGR_EA)}, + {"COLOR_BayerBG2BGR_VNG", static_cast(cv::COLOR_BayerBG2BGR_VNG)}, + {"COLOR_BAYER_BG2BGR_VNG", static_cast(cv::COLOR_BayerBG2BGR_VNG)}, + {"COLOR_BayerBG2GRAY", static_cast(cv::COLOR_BayerBG2GRAY)}, + {"COLOR_BAYER_BG2GRAY", static_cast(cv::COLOR_BayerBG2GRAY)}, + {"COLOR_BayerBG2RGB", static_cast(cv::COLOR_BayerBG2RGB)}, + {"COLOR_BAYER_BG2RGB", static_cast(cv::COLOR_BayerBG2RGB)}, + {"COLOR_BayerBG2RGBA", static_cast(cv::COLOR_BayerBG2RGBA)}, + {"COLOR_BAYER_BG2RGBA", static_cast(cv::COLOR_BayerBG2RGBA)}, + {"COLOR_BayerBG2RGB_EA", static_cast(cv::COLOR_BayerBG2RGB_EA)}, + {"COLOR_BAYER_BG2RGB_EA", static_cast(cv::COLOR_BayerBG2RGB_EA)}, + {"COLOR_BayerBG2RGB_VNG", static_cast(cv::COLOR_BayerBG2RGB_VNG)}, + {"COLOR_BAYER_BG2RGB_VNG", static_cast(cv::COLOR_BayerBG2RGB_VNG)}, + {"COLOR_BayerGB2BGR", static_cast(cv::COLOR_BayerGB2BGR)}, + {"COLOR_BAYER_GB2BGR", static_cast(cv::COLOR_BayerGB2BGR)}, + {"COLOR_BayerGB2BGRA", static_cast(cv::COLOR_BayerGB2BGRA)}, + {"COLOR_BAYER_GB2BGRA", static_cast(cv::COLOR_BayerGB2BGRA)}, + {"COLOR_BayerGB2BGR_EA", static_cast(cv::COLOR_BayerGB2BGR_EA)}, + {"COLOR_BAYER_GB2BGR_EA", static_cast(cv::COLOR_BayerGB2BGR_EA)}, + {"COLOR_BayerGB2BGR_VNG", static_cast(cv::COLOR_BayerGB2BGR_VNG)}, + {"COLOR_BAYER_GB2BGR_VNG", static_cast(cv::COLOR_BayerGB2BGR_VNG)}, + {"COLOR_BayerGB2GRAY", static_cast(cv::COLOR_BayerGB2GRAY)}, + {"COLOR_BAYER_GB2GRAY", static_cast(cv::COLOR_BayerGB2GRAY)}, + {"COLOR_BayerGB2RGB", static_cast(cv::COLOR_BayerGB2RGB)}, + {"COLOR_BAYER_GB2RGB", static_cast(cv::COLOR_BayerGB2RGB)}, + {"COLOR_BayerGB2RGBA", static_cast(cv::COLOR_BayerGB2RGBA)}, + {"COLOR_BAYER_GB2RGBA", static_cast(cv::COLOR_BayerGB2RGBA)}, + {"COLOR_BayerGB2RGB_EA", static_cast(cv::COLOR_BayerGB2RGB_EA)}, + {"COLOR_BAYER_GB2RGB_EA", static_cast(cv::COLOR_BayerGB2RGB_EA)}, + {"COLOR_BayerGB2RGB_VNG", static_cast(cv::COLOR_BayerGB2RGB_VNG)}, + {"COLOR_BAYER_GB2RGB_VNG", static_cast(cv::COLOR_BayerGB2RGB_VNG)}, + {"COLOR_BayerGR2BGR", static_cast(cv::COLOR_BayerGR2BGR)}, + {"COLOR_BAYER_GR2BGR", static_cast(cv::COLOR_BayerGR2BGR)}, + {"COLOR_BayerGR2BGRA", static_cast(cv::COLOR_BayerGR2BGRA)}, + {"COLOR_BAYER_GR2BGRA", static_cast(cv::COLOR_BayerGR2BGRA)}, + {"COLOR_BayerGR2BGR_EA", static_cast(cv::COLOR_BayerGR2BGR_EA)}, + {"COLOR_BAYER_GR2BGR_EA", static_cast(cv::COLOR_BayerGR2BGR_EA)}, + {"COLOR_BayerGR2BGR_VNG", static_cast(cv::COLOR_BayerGR2BGR_VNG)}, + {"COLOR_BAYER_GR2BGR_VNG", static_cast(cv::COLOR_BayerGR2BGR_VNG)}, + {"COLOR_BayerGR2GRAY", static_cast(cv::COLOR_BayerGR2GRAY)}, + {"COLOR_BAYER_GR2GRAY", static_cast(cv::COLOR_BayerGR2GRAY)}, + {"COLOR_BayerGR2RGB", static_cast(cv::COLOR_BayerGR2RGB)}, + {"COLOR_BAYER_GR2RGB", static_cast(cv::COLOR_BayerGR2RGB)}, + {"COLOR_BayerGR2RGBA", static_cast(cv::COLOR_BayerGR2RGBA)}, + {"COLOR_BAYER_GR2RGBA", static_cast(cv::COLOR_BayerGR2RGBA)}, + {"COLOR_BayerGR2RGB_EA", static_cast(cv::COLOR_BayerGR2RGB_EA)}, + {"COLOR_BAYER_GR2RGB_EA", static_cast(cv::COLOR_BayerGR2RGB_EA)}, + {"COLOR_BayerGR2RGB_VNG", static_cast(cv::COLOR_BayerGR2RGB_VNG)}, + {"COLOR_BAYER_GR2RGB_VNG", static_cast(cv::COLOR_BayerGR2RGB_VNG)}, + {"COLOR_BayerRG2BGR", static_cast(cv::COLOR_BayerRG2BGR)}, + {"COLOR_BAYER_RG2BGR", static_cast(cv::COLOR_BayerRG2BGR)}, + {"COLOR_BayerRG2BGRA", static_cast(cv::COLOR_BayerRG2BGRA)}, + {"COLOR_BAYER_RG2BGRA", static_cast(cv::COLOR_BayerRG2BGRA)}, + {"COLOR_BayerRG2BGR_EA", static_cast(cv::COLOR_BayerRG2BGR_EA)}, + {"COLOR_BAYER_RG2BGR_EA", 
static_cast(cv::COLOR_BayerRG2BGR_EA)}, + {"COLOR_BayerRG2BGR_VNG", static_cast(cv::COLOR_BayerRG2BGR_VNG)}, + {"COLOR_BAYER_RG2BGR_VNG", static_cast(cv::COLOR_BayerRG2BGR_VNG)}, + {"COLOR_BayerRG2GRAY", static_cast(cv::COLOR_BayerRG2GRAY)}, + {"COLOR_BAYER_RG2GRAY", static_cast(cv::COLOR_BayerRG2GRAY)}, + {"COLOR_BayerRG2RGB", static_cast(cv::COLOR_BayerRG2RGB)}, + {"COLOR_BAYER_RG2RGB", static_cast(cv::COLOR_BayerRG2RGB)}, + {"COLOR_BayerRG2RGBA", static_cast(cv::COLOR_BayerRG2RGBA)}, + {"COLOR_BAYER_RG2RGBA", static_cast(cv::COLOR_BayerRG2RGBA)}, + {"COLOR_BayerRG2RGB_EA", static_cast(cv::COLOR_BayerRG2RGB_EA)}, + {"COLOR_BAYER_RG2RGB_EA", static_cast(cv::COLOR_BayerRG2RGB_EA)}, + {"COLOR_BayerRG2RGB_VNG", static_cast(cv::COLOR_BayerRG2RGB_VNG)}, + {"COLOR_BAYER_RG2RGB_VNG", static_cast(cv::COLOR_BayerRG2RGB_VNG)}, + {"COLOR_COLORCVT_MAX", static_cast(cv::COLOR_COLORCVT_MAX)}, + {"COLOR_GRAY2BGR", static_cast(cv::COLOR_GRAY2BGR)}, + {"COLOR_GRAY2BGR555", static_cast(cv::COLOR_GRAY2BGR555)}, + {"COLOR_GRAY2BGR565", static_cast(cv::COLOR_GRAY2BGR565)}, + {"COLOR_GRAY2BGRA", static_cast(cv::COLOR_GRAY2BGRA)}, + {"COLOR_GRAY2RGB", static_cast(cv::COLOR_GRAY2RGB)}, + {"COLOR_GRAY2RGBA", static_cast(cv::COLOR_GRAY2RGBA)}, + {"COLOR_HLS2BGR", static_cast(cv::COLOR_HLS2BGR)}, + {"COLOR_HLS2BGR_FULL", static_cast(cv::COLOR_HLS2BGR_FULL)}, + {"COLOR_HLS2RGB", static_cast(cv::COLOR_HLS2RGB)}, + {"COLOR_HLS2RGB_FULL", static_cast(cv::COLOR_HLS2RGB_FULL)}, + {"COLOR_HSV2BGR", static_cast(cv::COLOR_HSV2BGR)}, + {"COLOR_HSV2BGR_FULL", static_cast(cv::COLOR_HSV2BGR_FULL)}, + {"COLOR_HSV2RGB", static_cast(cv::COLOR_HSV2RGB)}, + {"COLOR_HSV2RGB_FULL", static_cast(cv::COLOR_HSV2RGB_FULL)}, + {"COLOR_LBGR2Lab", static_cast(cv::COLOR_LBGR2Lab)}, + {"COLOR_LBGR2LAB", static_cast(cv::COLOR_LBGR2Lab)}, + {"COLOR_LBGR2Luv", static_cast(cv::COLOR_LBGR2Luv)}, + {"COLOR_LBGR2LUV", static_cast(cv::COLOR_LBGR2Luv)}, + {"COLOR_LRGB2Lab", static_cast(cv::COLOR_LRGB2Lab)}, + {"COLOR_LRGB2LAB", static_cast(cv::COLOR_LRGB2Lab)}, + {"COLOR_LRGB2Luv", static_cast(cv::COLOR_LRGB2Luv)}, + {"COLOR_LRGB2LUV", static_cast(cv::COLOR_LRGB2Luv)}, + {"COLOR_Lab2BGR", static_cast(cv::COLOR_Lab2BGR)}, + {"COLOR_LAB2BGR", static_cast(cv::COLOR_Lab2BGR)}, + {"COLOR_Lab2LBGR", static_cast(cv::COLOR_Lab2LBGR)}, + {"COLOR_LAB2LBGR", static_cast(cv::COLOR_Lab2LBGR)}, + {"COLOR_Lab2LRGB", static_cast(cv::COLOR_Lab2LRGB)}, + {"COLOR_LAB2LRGB", static_cast(cv::COLOR_Lab2LRGB)}, + {"COLOR_Lab2RGB", static_cast(cv::COLOR_Lab2RGB)}, + {"COLOR_LAB2RGB", static_cast(cv::COLOR_Lab2RGB)}, + {"COLOR_Luv2BGR", static_cast(cv::COLOR_Luv2BGR)}, + {"COLOR_LUV2BGR", static_cast(cv::COLOR_Luv2BGR)}, + {"COLOR_Luv2LBGR", static_cast(cv::COLOR_Luv2LBGR)}, + {"COLOR_LUV2LBGR", static_cast(cv::COLOR_Luv2LBGR)}, + {"COLOR_Luv2LRGB", static_cast(cv::COLOR_Luv2LRGB)}, + {"COLOR_LUV2LRGB", static_cast(cv::COLOR_Luv2LRGB)}, + {"COLOR_Luv2RGB", static_cast(cv::COLOR_Luv2RGB)}, + {"COLOR_LUV2RGB", static_cast(cv::COLOR_Luv2RGB)}, + {"COLOR_RGB2BGR", static_cast(cv::COLOR_RGB2BGR)}, + {"COLOR_RGB2BGR555", static_cast(cv::COLOR_RGB2BGR555)}, + {"COLOR_RGB2BGR565", static_cast(cv::COLOR_RGB2BGR565)}, + {"COLOR_RGB2BGRA", static_cast(cv::COLOR_RGB2BGRA)}, + {"COLOR_RGB2GRAY", static_cast(cv::COLOR_RGB2GRAY)}, + {"COLOR_RGB2HLS", static_cast(cv::COLOR_RGB2HLS)}, + {"COLOR_RGB2HLS_FULL", static_cast(cv::COLOR_RGB2HLS_FULL)}, + {"COLOR_RGB2HSV", static_cast(cv::COLOR_RGB2HSV)}, + {"COLOR_RGB2HSV_FULL", static_cast(cv::COLOR_RGB2HSV_FULL)}, + {"COLOR_RGB2Lab", 
static_cast(cv::COLOR_RGB2Lab)}, + {"COLOR_RGB2LAB", static_cast(cv::COLOR_RGB2Lab)}, + {"COLOR_RGB2Luv", static_cast(cv::COLOR_RGB2Luv)}, + {"COLOR_RGB2LUV", static_cast(cv::COLOR_RGB2Luv)}, + {"COLOR_RGB2RGBA", static_cast(cv::COLOR_RGB2RGBA)}, + {"COLOR_RGB2XYZ", static_cast(cv::COLOR_RGB2XYZ)}, + {"COLOR_RGB2YCrCb", static_cast(cv::COLOR_RGB2YCrCb)}, + {"COLOR_RGB2YCR_CB", static_cast(cv::COLOR_RGB2YCrCb)}, + {"COLOR_RGB2YUV", static_cast(cv::COLOR_RGB2YUV)}, + {"COLOR_RGB2YUV_I420", static_cast(cv::COLOR_RGB2YUV_I420)}, + {"COLOR_RGB2YUV_IYUV", static_cast(cv::COLOR_RGB2YUV_IYUV)}, + {"COLOR_RGB2YUV_YV12", static_cast(cv::COLOR_RGB2YUV_YV12)}, + {"COLOR_RGBA2BGR", static_cast(cv::COLOR_RGBA2BGR)}, + {"COLOR_RGBA2BGR555", static_cast(cv::COLOR_RGBA2BGR555)}, + {"COLOR_RGBA2BGR565", static_cast(cv::COLOR_RGBA2BGR565)}, + {"COLOR_RGBA2BGRA", static_cast(cv::COLOR_RGBA2BGRA)}, + {"COLOR_RGBA2GRAY", static_cast(cv::COLOR_RGBA2GRAY)}, + {"COLOR_RGBA2RGB", static_cast(cv::COLOR_RGBA2RGB)}, + {"COLOR_RGBA2YUV_I420", static_cast(cv::COLOR_RGBA2YUV_I420)}, + {"COLOR_RGBA2YUV_IYUV", static_cast(cv::COLOR_RGBA2YUV_IYUV)}, + {"COLOR_RGBA2YUV_YV12", static_cast(cv::COLOR_RGBA2YUV_YV12)}, + {"COLOR_RGBA2mRGBA", static_cast(cv::COLOR_RGBA2mRGBA)}, + {"COLOR_RGBA2M_RGBA", static_cast(cv::COLOR_RGBA2mRGBA)}, + {"COLOR_XYZ2BGR", static_cast(cv::COLOR_XYZ2BGR)}, + {"COLOR_XYZ2RGB", static_cast(cv::COLOR_XYZ2RGB)}, + {"COLOR_YCrCb2BGR", static_cast(cv::COLOR_YCrCb2BGR)}, + {"COLOR_YCR_CB2BGR", static_cast(cv::COLOR_YCrCb2BGR)}, + {"COLOR_YCrCb2RGB", static_cast(cv::COLOR_YCrCb2RGB)}, + {"COLOR_YCR_CB2RGB", static_cast(cv::COLOR_YCrCb2RGB)}, + {"COLOR_YUV2BGR", static_cast(cv::COLOR_YUV2BGR)}, + {"COLOR_YUV2BGRA_I420", static_cast(cv::COLOR_YUV2BGRA_I420)}, + {"COLOR_YUV2BGRA_IYUV", static_cast(cv::COLOR_YUV2BGRA_IYUV)}, + {"COLOR_YUV2BGRA_NV12", static_cast(cv::COLOR_YUV2BGRA_NV12)}, + {"COLOR_YUV2BGRA_NV21", static_cast(cv::COLOR_YUV2BGRA_NV21)}, + {"COLOR_YUV2BGRA_UYNV", static_cast(cv::COLOR_YUV2BGRA_UYNV)}, + {"COLOR_YUV2BGRA_UYVY", static_cast(cv::COLOR_YUV2BGRA_UYVY)}, + {"COLOR_YUV2BGRA_Y422", static_cast(cv::COLOR_YUV2BGRA_Y422)}, + {"COLOR_YUV2BGRA_YUNV", static_cast(cv::COLOR_YUV2BGRA_YUNV)}, + {"COLOR_YUV2BGRA_YUY2", static_cast(cv::COLOR_YUV2BGRA_YUY2)}, + {"COLOR_YUV2BGRA_YUYV", static_cast(cv::COLOR_YUV2BGRA_YUYV)}, + {"COLOR_YUV2BGRA_YV12", static_cast(cv::COLOR_YUV2BGRA_YV12)}, + {"COLOR_YUV2BGRA_YVYU", static_cast(cv::COLOR_YUV2BGRA_YVYU)}, + {"COLOR_YUV2BGR_I420", static_cast(cv::COLOR_YUV2BGR_I420)}, + {"COLOR_YUV2BGR_IYUV", static_cast(cv::COLOR_YUV2BGR_IYUV)}, + {"COLOR_YUV2BGR_NV12", static_cast(cv::COLOR_YUV2BGR_NV12)}, + {"COLOR_YUV2BGR_NV21", static_cast(cv::COLOR_YUV2BGR_NV21)}, + {"COLOR_YUV2BGR_UYNV", static_cast(cv::COLOR_YUV2BGR_UYNV)}, + {"COLOR_YUV2BGR_UYVY", static_cast(cv::COLOR_YUV2BGR_UYVY)}, + {"COLOR_YUV2BGR_Y422", static_cast(cv::COLOR_YUV2BGR_Y422)}, + {"COLOR_YUV2BGR_YUNV", static_cast(cv::COLOR_YUV2BGR_YUNV)}, + {"COLOR_YUV2BGR_YUY2", static_cast(cv::COLOR_YUV2BGR_YUY2)}, + {"COLOR_YUV2BGR_YUYV", static_cast(cv::COLOR_YUV2BGR_YUYV)}, + {"COLOR_YUV2BGR_YV12", static_cast(cv::COLOR_YUV2BGR_YV12)}, + {"COLOR_YUV2BGR_YVYU", static_cast(cv::COLOR_YUV2BGR_YVYU)}, + {"COLOR_YUV2GRAY_420", static_cast(cv::COLOR_YUV2GRAY_420)}, + {"COLOR_YUV2GRAY_I420", static_cast(cv::COLOR_YUV2GRAY_I420)}, + {"COLOR_YUV2GRAY_IYUV", static_cast(cv::COLOR_YUV2GRAY_IYUV)}, + {"COLOR_YUV2GRAY_NV12", static_cast(cv::COLOR_YUV2GRAY_NV12)}, + {"COLOR_YUV2GRAY_NV21", 
static_cast(cv::COLOR_YUV2GRAY_NV21)}, + {"COLOR_YUV2GRAY_UYNV", static_cast(cv::COLOR_YUV2GRAY_UYNV)}, + {"COLOR_YUV2GRAY_UYVY", static_cast(cv::COLOR_YUV2GRAY_UYVY)}, + {"COLOR_YUV2GRAY_Y422", static_cast(cv::COLOR_YUV2GRAY_Y422)}, + {"COLOR_YUV2GRAY_YUNV", static_cast(cv::COLOR_YUV2GRAY_YUNV)}, + {"COLOR_YUV2GRAY_YUY2", static_cast(cv::COLOR_YUV2GRAY_YUY2)}, + {"COLOR_YUV2GRAY_YUYV", static_cast(cv::COLOR_YUV2GRAY_YUYV)}, + {"COLOR_YUV2GRAY_YV12", static_cast(cv::COLOR_YUV2GRAY_YV12)}, + {"COLOR_YUV2GRAY_YVYU", static_cast(cv::COLOR_YUV2GRAY_YVYU)}, + {"COLOR_YUV2RGB", static_cast(cv::COLOR_YUV2RGB)}, + {"COLOR_YUV2RGBA_I420", static_cast(cv::COLOR_YUV2RGBA_I420)}, + {"COLOR_YUV2RGBA_IYUV", static_cast(cv::COLOR_YUV2RGBA_IYUV)}, + {"COLOR_YUV2RGBA_NV12", static_cast(cv::COLOR_YUV2RGBA_NV12)}, + {"COLOR_YUV2RGBA_NV21", static_cast(cv::COLOR_YUV2RGBA_NV21)}, + {"COLOR_YUV2RGBA_UYNV", static_cast(cv::COLOR_YUV2RGBA_UYNV)}, + {"COLOR_YUV2RGBA_UYVY", static_cast(cv::COLOR_YUV2RGBA_UYVY)}, + {"COLOR_YUV2RGBA_Y422", static_cast(cv::COLOR_YUV2RGBA_Y422)}, + {"COLOR_YUV2RGBA_YUNV", static_cast(cv::COLOR_YUV2RGBA_YUNV)}, + {"COLOR_YUV2RGBA_YUY2", static_cast(cv::COLOR_YUV2RGBA_YUY2)}, + {"COLOR_YUV2RGBA_YUYV", static_cast(cv::COLOR_YUV2RGBA_YUYV)}, + {"COLOR_YUV2RGBA_YV12", static_cast(cv::COLOR_YUV2RGBA_YV12)}, + {"COLOR_YUV2RGBA_YVYU", static_cast(cv::COLOR_YUV2RGBA_YVYU)}, + {"COLOR_YUV2RGB_I420", static_cast(cv::COLOR_YUV2RGB_I420)}, + {"COLOR_YUV2RGB_IYUV", static_cast(cv::COLOR_YUV2RGB_IYUV)}, + {"COLOR_YUV2RGB_NV12", static_cast(cv::COLOR_YUV2RGB_NV12)}, + {"COLOR_YUV2RGB_NV21", static_cast(cv::COLOR_YUV2RGB_NV21)}, + {"COLOR_YUV2RGB_UYNV", static_cast(cv::COLOR_YUV2RGB_UYNV)}, + {"COLOR_YUV2RGB_UYVY", static_cast(cv::COLOR_YUV2RGB_UYVY)}, + {"COLOR_YUV2RGB_Y422", static_cast(cv::COLOR_YUV2RGB_Y422)}, + {"COLOR_YUV2RGB_YUNV", static_cast(cv::COLOR_YUV2RGB_YUNV)}, + {"COLOR_YUV2RGB_YUY2", static_cast(cv::COLOR_YUV2RGB_YUY2)}, + {"COLOR_YUV2RGB_YUYV", static_cast(cv::COLOR_YUV2RGB_YUYV)}, + {"COLOR_YUV2RGB_YV12", static_cast(cv::COLOR_YUV2RGB_YV12)}, + {"COLOR_YUV2RGB_YVYU", static_cast(cv::COLOR_YUV2RGB_YVYU)}, + {"COLOR_YUV420p2BGR", static_cast(cv::COLOR_YUV420p2BGR)}, + {"COLOR_YUV420P2BGR", static_cast(cv::COLOR_YUV420p2BGR)}, + {"COLOR_YUV420p2BGRA", static_cast(cv::COLOR_YUV420p2BGRA)}, + {"COLOR_YUV420P2BGRA", static_cast(cv::COLOR_YUV420p2BGRA)}, + {"COLOR_YUV420p2GRAY", static_cast(cv::COLOR_YUV420p2GRAY)}, + {"COLOR_YUV420P2GRAY", static_cast(cv::COLOR_YUV420p2GRAY)}, + {"COLOR_YUV420p2RGB", static_cast(cv::COLOR_YUV420p2RGB)}, + {"COLOR_YUV420P2RGB", static_cast(cv::COLOR_YUV420p2RGB)}, + {"COLOR_YUV420p2RGBA", static_cast(cv::COLOR_YUV420p2RGBA)}, + {"COLOR_YUV420P2RGBA", static_cast(cv::COLOR_YUV420p2RGBA)}, + {"COLOR_YUV420sp2BGR", static_cast(cv::COLOR_YUV420sp2BGR)}, + {"COLOR_YUV420SP2BGR", static_cast(cv::COLOR_YUV420sp2BGR)}, + {"COLOR_YUV420sp2BGRA", static_cast(cv::COLOR_YUV420sp2BGRA)}, + {"COLOR_YUV420SP2BGRA", static_cast(cv::COLOR_YUV420sp2BGRA)}, + {"COLOR_YUV420sp2GRAY", static_cast(cv::COLOR_YUV420sp2GRAY)}, + {"COLOR_YUV420SP2GRAY", static_cast(cv::COLOR_YUV420sp2GRAY)}, + {"COLOR_YUV420sp2RGB", static_cast(cv::COLOR_YUV420sp2RGB)}, + {"COLOR_YUV420SP2RGB", static_cast(cv::COLOR_YUV420sp2RGB)}, + {"COLOR_YUV420sp2RGBA", static_cast(cv::COLOR_YUV420sp2RGBA)}, + {"COLOR_YUV420SP2RGBA", static_cast(cv::COLOR_YUV420sp2RGBA)}, + {"COLOR_mRGBA2RGBA", static_cast(cv::COLOR_mRGBA2RGBA)}, + {"COLOR_M_RGBA2RGBA", static_cast(cv::COLOR_mRGBA2RGBA)}, + {"CONTOURS_MATCH_I1", 
static_cast(cv::CONTOURS_MATCH_I1)}, + {"CONTOURS_MATCH_I2", static_cast(cv::CONTOURS_MATCH_I2)}, + {"CONTOURS_MATCH_I3", static_cast(cv::CONTOURS_MATCH_I3)}, + {"COVAR_COLS", static_cast(cv::COVAR_COLS)}, + {"COVAR_NORMAL", static_cast(cv::COVAR_NORMAL)}, + {"COVAR_ROWS", static_cast(cv::COVAR_ROWS)}, + {"COVAR_SCALE", static_cast(cv::COVAR_SCALE)}, + {"COVAR_SCRAMBLED", static_cast(cv::COVAR_SCRAMBLED)}, + {"COVAR_USE_AVG", static_cast(cv::COVAR_USE_AVG)}, + {"DCT_INVERSE", static_cast(cv::DCT_INVERSE)}, + {"DCT_ROWS", static_cast(cv::DCT_ROWS)}, + {"DECOMP_CHOLESKY", static_cast(cv::DECOMP_CHOLESKY)}, + {"DECOMP_EIG", static_cast(cv::DECOMP_EIG)}, + {"DECOMP_LU", static_cast(cv::DECOMP_LU)}, + {"DECOMP_NORMAL", static_cast(cv::DECOMP_NORMAL)}, + {"DECOMP_QR", static_cast(cv::DECOMP_QR)}, + {"DECOMP_SVD", static_cast(cv::DECOMP_SVD)}, + {"DFT_COMPLEX_INPUT", static_cast(cv::DFT_COMPLEX_INPUT)}, + {"DFT_COMPLEX_OUTPUT", static_cast(cv::DFT_COMPLEX_OUTPUT)}, + {"DFT_INVERSE", static_cast(cv::DFT_INVERSE)}, + {"DFT_REAL_OUTPUT", static_cast(cv::DFT_REAL_OUTPUT)}, + {"DFT_ROWS", static_cast(cv::DFT_ROWS)}, + {"DFT_SCALE", static_cast(cv::DFT_SCALE)}, + {"DIST_C", static_cast(cv::DIST_C)}, + {"DIST_FAIR", static_cast(cv::DIST_FAIR)}, + {"DIST_HUBER", static_cast(cv::DIST_HUBER)}, + {"DIST_L1", static_cast(cv::DIST_L1)}, + {"DIST_L12", static_cast(cv::DIST_L12)}, + {"DIST_L2", static_cast(cv::DIST_L2)}, + {"DIST_LABEL_CCOMP", static_cast(cv::DIST_LABEL_CCOMP)}, + {"DIST_LABEL_PIXEL", static_cast(cv::DIST_LABEL_PIXEL)}, + {"DIST_MASK_3", static_cast(cv::DIST_MASK_3)}, + {"DIST_MASK_5", static_cast(cv::DIST_MASK_5)}, + {"DIST_MASK_PRECISE", static_cast(cv::DIST_MASK_PRECISE)}, + {"DIST_USER", static_cast(cv::DIST_USER)}, + {"DIST_WELSCH", static_cast(cv::DIST_WELSCH)}, + {"FILLED", static_cast(cv::FILLED)}, + {"FILTER_SCHARR", static_cast(cv::FILTER_SCHARR)}, + {"FLOODFILL_FIXED_RANGE", static_cast(cv::FLOODFILL_FIXED_RANGE)}, + {"FLOODFILL_MASK_ONLY", static_cast(cv::FLOODFILL_MASK_ONLY)}, + {"FONT_HERSHEY_COMPLEX", static_cast(cv::FONT_HERSHEY_COMPLEX)}, + {"FONT_HERSHEY_COMPLEX_SMALL", static_cast(cv::FONT_HERSHEY_COMPLEX_SMALL)}, + {"FONT_HERSHEY_DUPLEX", static_cast(cv::FONT_HERSHEY_DUPLEX)}, + {"FONT_HERSHEY_PLAIN", static_cast(cv::FONT_HERSHEY_PLAIN)}, + {"FONT_HERSHEY_SCRIPT_COMPLEX", static_cast(cv::FONT_HERSHEY_SCRIPT_COMPLEX)}, + {"FONT_HERSHEY_SCRIPT_SIMPLEX", static_cast(cv::FONT_HERSHEY_SCRIPT_SIMPLEX)}, + {"FONT_HERSHEY_SIMPLEX", static_cast(cv::FONT_HERSHEY_SIMPLEX)}, + {"FONT_HERSHEY_TRIPLEX", static_cast(cv::FONT_HERSHEY_TRIPLEX)}, + {"FONT_ITALIC", static_cast(cv::FONT_ITALIC)}, + {"FileNode_EMPTY", static_cast(cv::FileNode::EMPTY)}, + {"FILE_NODE_EMPTY", static_cast(cv::FileNode::EMPTY)}, + {"FileNode_FLOAT", static_cast(cv::FileNode::FLOAT)}, + {"FILE_NODE_FLOAT", static_cast(cv::FileNode::FLOAT)}, + {"FileNode_FLOW", static_cast(cv::FileNode::FLOW)}, + {"FILE_NODE_FLOW", static_cast(cv::FileNode::FLOW)}, + {"FileNode_INT", static_cast(cv::FileNode::INT)}, + {"FILE_NODE_INT", static_cast(cv::FileNode::INT)}, + {"FileNode_MAP", static_cast(cv::FileNode::MAP)}, + {"FILE_NODE_MAP", static_cast(cv::FileNode::MAP)}, + {"FileNode_NAMED", static_cast(cv::FileNode::NAMED)}, + {"FILE_NODE_NAMED", static_cast(cv::FileNode::NAMED)}, + {"FileNode_NONE", static_cast(cv::FileNode::NONE)}, + {"FILE_NODE_NONE", static_cast(cv::FileNode::NONE)}, + {"FileNode_REAL", static_cast(cv::FileNode::REAL)}, + {"FILE_NODE_REAL", static_cast(cv::FileNode::REAL)}, + {"FileNode_SEQ", 
static_cast(cv::FileNode::SEQ)}, + {"FILE_NODE_SEQ", static_cast(cv::FileNode::SEQ)}, + {"FileNode_STR", static_cast(cv::FileNode::STR)}, + {"FILE_NODE_STR", static_cast(cv::FileNode::STR)}, + {"FileNode_STRING", static_cast(cv::FileNode::STRING)}, + {"FILE_NODE_STRING", static_cast(cv::FileNode::STRING)}, + {"FileNode_TYPE_MASK", static_cast(cv::FileNode::TYPE_MASK)}, + {"FILE_NODE_TYPE_MASK", static_cast(cv::FileNode::TYPE_MASK)}, + {"FileNode_UNIFORM", static_cast(cv::FileNode::UNIFORM)}, + {"FILE_NODE_UNIFORM", static_cast(cv::FileNode::UNIFORM)}, + {"FileStorage_APPEND", static_cast(cv::FileStorage::APPEND)}, + {"FILE_STORAGE_APPEND", static_cast(cv::FileStorage::APPEND)}, + {"FileStorage_BASE64", static_cast(cv::FileStorage::BASE64)}, + {"FILE_STORAGE_BASE64", static_cast(cv::FileStorage::BASE64)}, + {"FileStorage_FORMAT_AUTO", static_cast(cv::FileStorage::FORMAT_AUTO)}, + {"FILE_STORAGE_FORMAT_AUTO", static_cast(cv::FileStorage::FORMAT_AUTO)}, + {"FileStorage_FORMAT_JSON", static_cast(cv::FileStorage::FORMAT_JSON)}, + {"FILE_STORAGE_FORMAT_JSON", static_cast(cv::FileStorage::FORMAT_JSON)}, + {"FileStorage_FORMAT_MASK", static_cast(cv::FileStorage::FORMAT_MASK)}, + {"FILE_STORAGE_FORMAT_MASK", static_cast(cv::FileStorage::FORMAT_MASK)}, + {"FileStorage_FORMAT_XML", static_cast(cv::FileStorage::FORMAT_XML)}, + {"FILE_STORAGE_FORMAT_XML", static_cast(cv::FileStorage::FORMAT_XML)}, + {"FileStorage_FORMAT_YAML", static_cast(cv::FileStorage::FORMAT_YAML)}, + {"FILE_STORAGE_FORMAT_YAML", static_cast(cv::FileStorage::FORMAT_YAML)}, + {"FileStorage_INSIDE_MAP", static_cast(cv::FileStorage::INSIDE_MAP)}, + {"FILE_STORAGE_INSIDE_MAP", static_cast(cv::FileStorage::INSIDE_MAP)}, + {"FileStorage_MEMORY", static_cast(cv::FileStorage::MEMORY)}, + {"FILE_STORAGE_MEMORY", static_cast(cv::FileStorage::MEMORY)}, + {"FileStorage_NAME_EXPECTED", static_cast(cv::FileStorage::NAME_EXPECTED)}, + {"FILE_STORAGE_NAME_EXPECTED", static_cast(cv::FileStorage::NAME_EXPECTED)}, + {"FileStorage_READ", static_cast(cv::FileStorage::READ)}, + {"FILE_STORAGE_READ", static_cast(cv::FileStorage::READ)}, + {"FileStorage_UNDEFINED", static_cast(cv::FileStorage::UNDEFINED)}, + {"FILE_STORAGE_UNDEFINED", static_cast(cv::FileStorage::UNDEFINED)}, + {"FileStorage_VALUE_EXPECTED", static_cast(cv::FileStorage::VALUE_EXPECTED)}, + {"FILE_STORAGE_VALUE_EXPECTED", static_cast(cv::FileStorage::VALUE_EXPECTED)}, + {"FileStorage_WRITE", static_cast(cv::FileStorage::WRITE)}, + {"FILE_STORAGE_WRITE", static_cast(cv::FileStorage::WRITE)}, + {"FileStorage_WRITE_BASE64", static_cast(cv::FileStorage::WRITE_BASE64)}, + {"FILE_STORAGE_WRITE_BASE64", static_cast(cv::FileStorage::WRITE_BASE64)}, + {"Formatter_FMT_C", static_cast(cv::Formatter::FMT_C)}, + {"FORMATTER_FMT_C", static_cast(cv::Formatter::FMT_C)}, + {"Formatter_FMT_CSV", static_cast(cv::Formatter::FMT_CSV)}, + {"FORMATTER_FMT_CSV", static_cast(cv::Formatter::FMT_CSV)}, + {"Formatter_FMT_DEFAULT", static_cast(cv::Formatter::FMT_DEFAULT)}, + {"FORMATTER_FMT_DEFAULT", static_cast(cv::Formatter::FMT_DEFAULT)}, + {"Formatter_FMT_MATLAB", static_cast(cv::Formatter::FMT_MATLAB)}, + {"FORMATTER_FMT_MATLAB", static_cast(cv::Formatter::FMT_MATLAB)}, + {"Formatter_FMT_NUMPY", static_cast(cv::Formatter::FMT_NUMPY)}, + {"FORMATTER_FMT_NUMPY", static_cast(cv::Formatter::FMT_NUMPY)}, + {"Formatter_FMT_PYTHON", static_cast(cv::Formatter::FMT_PYTHON)}, + {"FORMATTER_FMT_PYTHON", static_cast(cv::Formatter::FMT_PYTHON)}, + {"GC_BGD", static_cast(cv::GC_BGD)}, + {"GC_EVAL", static_cast(cv::GC_EVAL)}, + 
{"GC_EVAL_FREEZE_MODEL", static_cast(cv::GC_EVAL_FREEZE_MODEL)}, + {"GC_FGD", static_cast(cv::GC_FGD)}, + {"GC_INIT_WITH_MASK", static_cast(cv::GC_INIT_WITH_MASK)}, + {"GC_INIT_WITH_RECT", static_cast(cv::GC_INIT_WITH_RECT)}, + {"GC_PR_BGD", static_cast(cv::GC_PR_BGD)}, + {"GC_PR_FGD", static_cast(cv::GC_PR_FGD)}, + {"GEMM_1_T", static_cast(cv::GEMM_1_T)}, + {"GEMM_2_T", static_cast(cv::GEMM_2_T)}, + {"GEMM_3_T", static_cast(cv::GEMM_3_T)}, + {"HISTCMP_BHATTACHARYYA", static_cast(cv::HISTCMP_BHATTACHARYYA)}, + {"HISTCMP_CHISQR", static_cast(cv::HISTCMP_CHISQR)}, + {"HISTCMP_CHISQR_ALT", static_cast(cv::HISTCMP_CHISQR_ALT)}, + {"HISTCMP_CORREL", static_cast(cv::HISTCMP_CORREL)}, + {"HISTCMP_HELLINGER", static_cast(cv::HISTCMP_HELLINGER)}, + {"HISTCMP_INTERSECT", static_cast(cv::HISTCMP_INTERSECT)}, + {"HISTCMP_KL_DIV", static_cast(cv::HISTCMP_KL_DIV)}, + {"HOUGH_GRADIENT", static_cast(cv::HOUGH_GRADIENT)}, + {"HOUGH_GRADIENT_ALT", static_cast(cv::HOUGH_GRADIENT_ALT)}, + {"HOUGH_MULTI_SCALE", static_cast(cv::HOUGH_MULTI_SCALE)}, + {"HOUGH_PROBABILISTIC", static_cast(cv::HOUGH_PROBABILISTIC)}, + {"HOUGH_STANDARD", static_cast(cv::HOUGH_STANDARD)}, + {"IMREAD_ANYCOLOR", static_cast(cv::IMREAD_ANYCOLOR)}, + {"IMREAD_ANYDEPTH", static_cast(cv::IMREAD_ANYDEPTH)}, + {"IMREAD_COLOR", static_cast(cv::IMREAD_COLOR)}, + {"IMREAD_GRAYSCALE", static_cast(cv::IMREAD_GRAYSCALE)}, + {"IMREAD_IGNORE_ORIENTATION", static_cast(cv::IMREAD_IGNORE_ORIENTATION)}, + {"IMREAD_LOAD_GDAL", static_cast(cv::IMREAD_LOAD_GDAL)}, + {"IMREAD_REDUCED_COLOR_2", static_cast(cv::IMREAD_REDUCED_COLOR_2)}, + {"IMREAD_REDUCED_COLOR_4", static_cast(cv::IMREAD_REDUCED_COLOR_4)}, + {"IMREAD_REDUCED_COLOR_8", static_cast(cv::IMREAD_REDUCED_COLOR_8)}, + {"IMREAD_REDUCED_GRAYSCALE_2", static_cast(cv::IMREAD_REDUCED_GRAYSCALE_2)}, + {"IMREAD_REDUCED_GRAYSCALE_4", static_cast(cv::IMREAD_REDUCED_GRAYSCALE_4)}, + {"IMREAD_REDUCED_GRAYSCALE_8", static_cast(cv::IMREAD_REDUCED_GRAYSCALE_8)}, + {"IMREAD_UNCHANGED", static_cast(cv::IMREAD_UNCHANGED)}, + {"IMWRITE_EXR_COMPRESSION", static_cast(cv::IMWRITE_EXR_COMPRESSION)}, + {"IMWRITE_EXR_COMPRESSION_B44", static_cast(cv::IMWRITE_EXR_COMPRESSION_B44)}, + {"IMWRITE_EXR_COMPRESSION_B44A", static_cast(cv::IMWRITE_EXR_COMPRESSION_B44A)}, + {"IMWRITE_EXR_COMPRESSION_DWAA", static_cast(cv::IMWRITE_EXR_COMPRESSION_DWAA)}, + {"IMWRITE_EXR_COMPRESSION_DWAB", static_cast(cv::IMWRITE_EXR_COMPRESSION_DWAB)}, + {"IMWRITE_EXR_COMPRESSION_NO", static_cast(cv::IMWRITE_EXR_COMPRESSION_NO)}, + {"IMWRITE_EXR_COMPRESSION_PIZ", static_cast(cv::IMWRITE_EXR_COMPRESSION_PIZ)}, + {"IMWRITE_EXR_COMPRESSION_PXR24", static_cast(cv::IMWRITE_EXR_COMPRESSION_PXR24)}, + {"IMWRITE_EXR_COMPRESSION_RLE", static_cast(cv::IMWRITE_EXR_COMPRESSION_RLE)}, + {"IMWRITE_EXR_COMPRESSION_ZIP", static_cast(cv::IMWRITE_EXR_COMPRESSION_ZIP)}, + {"IMWRITE_EXR_COMPRESSION_ZIPS", static_cast(cv::IMWRITE_EXR_COMPRESSION_ZIPS)}, + {"IMWRITE_EXR_TYPE", static_cast(cv::IMWRITE_EXR_TYPE)}, + {"IMWRITE_EXR_TYPE_FLOAT", static_cast(cv::IMWRITE_EXR_TYPE_FLOAT)}, + {"IMWRITE_EXR_TYPE_HALF", static_cast(cv::IMWRITE_EXR_TYPE_HALF)}, + {"IMWRITE_JPEG2000_COMPRESSION_X1000", static_cast(cv::IMWRITE_JPEG2000_COMPRESSION_X1000)}, + {"IMWRITE_JPEG_CHROMA_QUALITY", static_cast(cv::IMWRITE_JPEG_CHROMA_QUALITY)}, + {"IMWRITE_JPEG_LUMA_QUALITY", static_cast(cv::IMWRITE_JPEG_LUMA_QUALITY)}, + {"IMWRITE_JPEG_OPTIMIZE", static_cast(cv::IMWRITE_JPEG_OPTIMIZE)}, + {"IMWRITE_JPEG_PROGRESSIVE", static_cast(cv::IMWRITE_JPEG_PROGRESSIVE)}, + {"IMWRITE_JPEG_QUALITY", 
static_cast(cv::IMWRITE_JPEG_QUALITY)}, + {"IMWRITE_JPEG_RST_INTERVAL", static_cast(cv::IMWRITE_JPEG_RST_INTERVAL)}, + {"IMWRITE_PAM_FORMAT_BLACKANDWHITE", static_cast(cv::IMWRITE_PAM_FORMAT_BLACKANDWHITE)}, + {"IMWRITE_PAM_FORMAT_GRAYSCALE", static_cast(cv::IMWRITE_PAM_FORMAT_GRAYSCALE)}, + {"IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA", static_cast(cv::IMWRITE_PAM_FORMAT_GRAYSCALE_ALPHA)}, + {"IMWRITE_PAM_FORMAT_NULL", static_cast(cv::IMWRITE_PAM_FORMAT_NULL)}, + {"IMWRITE_PAM_FORMAT_RGB", static_cast(cv::IMWRITE_PAM_FORMAT_RGB)}, + {"IMWRITE_PAM_FORMAT_RGB_ALPHA", static_cast(cv::IMWRITE_PAM_FORMAT_RGB_ALPHA)}, + {"IMWRITE_PAM_TUPLETYPE", static_cast(cv::IMWRITE_PAM_TUPLETYPE)}, + {"IMWRITE_PNG_BILEVEL", static_cast(cv::IMWRITE_PNG_BILEVEL)}, + {"IMWRITE_PNG_COMPRESSION", static_cast(cv::IMWRITE_PNG_COMPRESSION)}, + {"IMWRITE_PNG_STRATEGY", static_cast(cv::IMWRITE_PNG_STRATEGY)}, + {"IMWRITE_PNG_STRATEGY_DEFAULT", static_cast(cv::IMWRITE_PNG_STRATEGY_DEFAULT)}, + {"IMWRITE_PNG_STRATEGY_FILTERED", static_cast(cv::IMWRITE_PNG_STRATEGY_FILTERED)}, + {"IMWRITE_PNG_STRATEGY_FIXED", static_cast(cv::IMWRITE_PNG_STRATEGY_FIXED)}, + {"IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY", static_cast(cv::IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY)}, + {"IMWRITE_PNG_STRATEGY_RLE", static_cast(cv::IMWRITE_PNG_STRATEGY_RLE)}, + {"IMWRITE_PXM_BINARY", static_cast(cv::IMWRITE_PXM_BINARY)}, + {"IMWRITE_TIFF_COMPRESSION", static_cast(cv::IMWRITE_TIFF_COMPRESSION)}, + {"IMWRITE_TIFF_RESUNIT", static_cast(cv::IMWRITE_TIFF_RESUNIT)}, + {"IMWRITE_TIFF_XDPI", static_cast(cv::IMWRITE_TIFF_XDPI)}, + {"IMWRITE_TIFF_YDPI", static_cast(cv::IMWRITE_TIFF_YDPI)}, + {"IMWRITE_WEBP_QUALITY", static_cast(cv::IMWRITE_WEBP_QUALITY)}, + {"INTERSECT_FULL", static_cast(cv::INTERSECT_FULL)}, + {"INTERSECT_NONE", static_cast(cv::INTERSECT_NONE)}, + {"INTERSECT_PARTIAL", static_cast(cv::INTERSECT_PARTIAL)}, + {"INTER_AREA", static_cast(cv::INTER_AREA)}, + {"INTER_BITS", static_cast(cv::INTER_BITS)}, + {"INTER_BITS2", static_cast(cv::INTER_BITS2)}, + {"INTER_CUBIC", static_cast(cv::INTER_CUBIC)}, + {"INTER_LANCZOS4", static_cast(cv::INTER_LANCZOS4)}, + {"INTER_LINEAR", static_cast(cv::INTER_LINEAR)}, + {"INTER_LINEAR_EXACT", static_cast(cv::INTER_LINEAR_EXACT)}, + {"INTER_MAX", static_cast(cv::INTER_MAX)}, + {"INTER_NEAREST", static_cast(cv::INTER_NEAREST)}, + {"INTER_NEAREST_EXACT", static_cast(cv::INTER_NEAREST_EXACT)}, + {"INTER_TAB_SIZE", static_cast(cv::INTER_TAB_SIZE)}, + {"INTER_TAB_SIZE2", static_cast(cv::INTER_TAB_SIZE2)}, + {"KMEANS_PP_CENTERS", static_cast(cv::KMEANS_PP_CENTERS)}, + {"KMEANS_RANDOM_CENTERS", static_cast(cv::KMEANS_RANDOM_CENTERS)}, + {"KMEANS_USE_INITIAL_LABELS", static_cast(cv::KMEANS_USE_INITIAL_LABELS)}, + {"LINE_4", static_cast(cv::LINE_4)}, + {"LINE_8", static_cast(cv::LINE_8)}, + {"LINE_AA", static_cast(cv::LINE_AA)}, + {"LSD_REFINE_ADV", static_cast(cv::LSD_REFINE_ADV)}, + {"LSD_REFINE_NONE", static_cast(cv::LSD_REFINE_NONE)}, + {"LSD_REFINE_STD", static_cast(cv::LSD_REFINE_STD)}, + {"MARKER_CROSS", static_cast(cv::MARKER_CROSS)}, + {"MARKER_DIAMOND", static_cast(cv::MARKER_DIAMOND)}, + {"MARKER_SQUARE", static_cast(cv::MARKER_SQUARE)}, + {"MARKER_STAR", static_cast(cv::MARKER_STAR)}, + {"MARKER_TILTED_CROSS", static_cast(cv::MARKER_TILTED_CROSS)}, + {"MARKER_TRIANGLE_DOWN", static_cast(cv::MARKER_TRIANGLE_DOWN)}, + {"MARKER_TRIANGLE_UP", static_cast(cv::MARKER_TRIANGLE_UP)}, + {"MORPH_BLACKHAT", static_cast(cv::MORPH_BLACKHAT)}, + {"MORPH_CLOSE", static_cast(cv::MORPH_CLOSE)}, + {"MORPH_CROSS", static_cast(cv::MORPH_CROSS)}, + 
{"MORPH_DILATE", static_cast(cv::MORPH_DILATE)}, + {"MORPH_ELLIPSE", static_cast(cv::MORPH_ELLIPSE)}, + {"MORPH_ERODE", static_cast(cv::MORPH_ERODE)}, + {"MORPH_GRADIENT", static_cast(cv::MORPH_GRADIENT)}, + {"MORPH_HITMISS", static_cast(cv::MORPH_HITMISS)}, + {"MORPH_OPEN", static_cast(cv::MORPH_OPEN)}, + {"MORPH_RECT", static_cast(cv::MORPH_RECT)}, + {"MORPH_TOPHAT", static_cast(cv::MORPH_TOPHAT)}, + {"Mat_AUTO_STEP", static_cast(cv::Mat::AUTO_STEP)}, + {"MAT_AUTO_STEP", static_cast(cv::Mat::AUTO_STEP)}, + {"Mat_CONTINUOUS_FLAG", static_cast(cv::Mat::CONTINUOUS_FLAG)}, + {"MAT_CONTINUOUS_FLAG", static_cast(cv::Mat::CONTINUOUS_FLAG)}, + {"Mat_DEPTH_MASK", static_cast(cv::Mat::DEPTH_MASK)}, + {"MAT_DEPTH_MASK", static_cast(cv::Mat::DEPTH_MASK)}, + {"Mat_MAGIC_MASK", static_cast(cv::Mat::MAGIC_MASK)}, + {"MAT_MAGIC_MASK", static_cast(cv::Mat::MAGIC_MASK)}, + {"Mat_MAGIC_VAL", static_cast(cv::Mat::MAGIC_VAL)}, + {"MAT_MAGIC_VAL", static_cast(cv::Mat::MAGIC_VAL)}, + {"Mat_SUBMATRIX_FLAG", static_cast(cv::Mat::SUBMATRIX_FLAG)}, + {"MAT_SUBMATRIX_FLAG", static_cast(cv::Mat::SUBMATRIX_FLAG)}, + {"Mat_TYPE_MASK", static_cast(cv::Mat::TYPE_MASK)}, + {"MAT_TYPE_MASK", static_cast(cv::Mat::TYPE_MASK)}, + {"NORM_HAMMING", static_cast(cv::NORM_HAMMING)}, + {"NORM_HAMMING2", static_cast(cv::NORM_HAMMING2)}, + {"NORM_INF", static_cast(cv::NORM_INF)}, + {"NORM_L1", static_cast(cv::NORM_L1)}, + {"NORM_L2", static_cast(cv::NORM_L2)}, + {"NORM_L2SQR", static_cast(cv::NORM_L2SQR)}, + {"NORM_MINMAX", static_cast(cv::NORM_MINMAX)}, + {"NORM_RELATIVE", static_cast(cv::NORM_RELATIVE)}, + {"NORM_TYPE_MASK", static_cast(cv::NORM_TYPE_MASK)}, + {"PCA_DATA_AS_COL", static_cast(cv::PCA::DATA_AS_COL)}, + {"PCA_DATA_AS_ROW", static_cast(cv::PCA::DATA_AS_ROW)}, + {"PCA_USE_AVG", static_cast(cv::PCA::USE_AVG)}, + {"Param_ALGORITHM", static_cast(cv::Param::ALGORITHM)}, + {"PARAM_ALGORITHM", static_cast(cv::Param::ALGORITHM)}, + {"Param_BOOLEAN", static_cast(cv::Param::BOOLEAN)}, + {"PARAM_BOOLEAN", static_cast(cv::Param::BOOLEAN)}, + {"Param_FLOAT", static_cast(cv::Param::FLOAT)}, + {"PARAM_FLOAT", static_cast(cv::Param::FLOAT)}, + {"Param_INT", static_cast(cv::Param::INT)}, + {"PARAM_INT", static_cast(cv::Param::INT)}, + {"Param_MAT", static_cast(cv::Param::MAT)}, + {"PARAM_MAT", static_cast(cv::Param::MAT)}, + {"Param_MAT_VECTOR", static_cast(cv::Param::MAT_VECTOR)}, + {"PARAM_MAT_VECTOR", static_cast(cv::Param::MAT_VECTOR)}, + {"Param_REAL", static_cast(cv::Param::REAL)}, + {"PARAM_REAL", static_cast(cv::Param::REAL)}, + {"Param_SCALAR", static_cast(cv::Param::SCALAR)}, + {"PARAM_SCALAR", static_cast(cv::Param::SCALAR)}, + {"Param_STRING", static_cast(cv::Param::STRING)}, + {"PARAM_STRING", static_cast(cv::Param::STRING)}, + {"Param_UCHAR", static_cast(cv::Param::UCHAR)}, + {"PARAM_UCHAR", static_cast(cv::Param::UCHAR)}, + {"Param_UINT64", static_cast(cv::Param::UINT64)}, + {"PARAM_UINT64", static_cast(cv::Param::UINT64)}, + {"Param_UNSIGNED_INT", static_cast(cv::Param::UNSIGNED_INT)}, + {"PARAM_UNSIGNED_INT", static_cast(cv::Param::UNSIGNED_INT)}, + {"QUAT_ASSUME_NOT_UNIT", static_cast(cv::QUAT_ASSUME_NOT_UNIT)}, + {"QUAT_ASSUME_UNIT", static_cast(cv::QUAT_ASSUME_UNIT)}, + {"QuatEnum_EULER_ANGLES_MAX_VALUE", static_cast(cv::QuatEnum::EULER_ANGLES_MAX_VALUE)}, + {"QUAT_ENUM_EULER_ANGLES_MAX_VALUE", static_cast(cv::QuatEnum::EULER_ANGLES_MAX_VALUE)}, + {"QuatEnum_EXT_XYX", static_cast(cv::QuatEnum::EXT_XYX)}, + {"QUAT_ENUM_EXT_XYX", static_cast(cv::QuatEnum::EXT_XYX)}, + {"QuatEnum_EXT_XYZ", 
static_cast(cv::QuatEnum::EXT_XYZ)}, + {"QUAT_ENUM_EXT_XYZ", static_cast(cv::QuatEnum::EXT_XYZ)}, + {"QuatEnum_EXT_XZX", static_cast(cv::QuatEnum::EXT_XZX)}, + {"QUAT_ENUM_EXT_XZX", static_cast(cv::QuatEnum::EXT_XZX)}, + {"QuatEnum_EXT_XZY", static_cast(cv::QuatEnum::EXT_XZY)}, + {"QUAT_ENUM_EXT_XZY", static_cast(cv::QuatEnum::EXT_XZY)}, + {"QuatEnum_EXT_YXY", static_cast(cv::QuatEnum::EXT_YXY)}, + {"QUAT_ENUM_EXT_YXY", static_cast(cv::QuatEnum::EXT_YXY)}, + {"QuatEnum_EXT_YXZ", static_cast(cv::QuatEnum::EXT_YXZ)}, + {"QUAT_ENUM_EXT_YXZ", static_cast(cv::QuatEnum::EXT_YXZ)}, + {"QuatEnum_EXT_YZX", static_cast(cv::QuatEnum::EXT_YZX)}, + {"QUAT_ENUM_EXT_YZX", static_cast(cv::QuatEnum::EXT_YZX)}, + {"QuatEnum_EXT_YZY", static_cast(cv::QuatEnum::EXT_YZY)}, + {"QUAT_ENUM_EXT_YZY", static_cast(cv::QuatEnum::EXT_YZY)}, + {"QuatEnum_EXT_ZXY", static_cast(cv::QuatEnum::EXT_ZXY)}, + {"QUAT_ENUM_EXT_ZXY", static_cast(cv::QuatEnum::EXT_ZXY)}, + {"QuatEnum_EXT_ZXZ", static_cast(cv::QuatEnum::EXT_ZXZ)}, + {"QUAT_ENUM_EXT_ZXZ", static_cast(cv::QuatEnum::EXT_ZXZ)}, + {"QuatEnum_EXT_ZYX", static_cast(cv::QuatEnum::EXT_ZYX)}, + {"QUAT_ENUM_EXT_ZYX", static_cast(cv::QuatEnum::EXT_ZYX)}, + {"QuatEnum_EXT_ZYZ", static_cast(cv::QuatEnum::EXT_ZYZ)}, + {"QUAT_ENUM_EXT_ZYZ", static_cast(cv::QuatEnum::EXT_ZYZ)}, + {"QuatEnum_INT_XYX", static_cast(cv::QuatEnum::INT_XYX)}, + {"QUAT_ENUM_INT_XYX", static_cast(cv::QuatEnum::INT_XYX)}, + {"QuatEnum_INT_XYZ", static_cast(cv::QuatEnum::INT_XYZ)}, + {"QUAT_ENUM_INT_XYZ", static_cast(cv::QuatEnum::INT_XYZ)}, + {"QuatEnum_INT_XZX", static_cast(cv::QuatEnum::INT_XZX)}, + {"QUAT_ENUM_INT_XZX", static_cast(cv::QuatEnum::INT_XZX)}, + {"QuatEnum_INT_XZY", static_cast(cv::QuatEnum::INT_XZY)}, + {"QUAT_ENUM_INT_XZY", static_cast(cv::QuatEnum::INT_XZY)}, + {"QuatEnum_INT_YXY", static_cast(cv::QuatEnum::INT_YXY)}, + {"QUAT_ENUM_INT_YXY", static_cast(cv::QuatEnum::INT_YXY)}, + {"QuatEnum_INT_YXZ", static_cast(cv::QuatEnum::INT_YXZ)}, + {"QUAT_ENUM_INT_YXZ", static_cast(cv::QuatEnum::INT_YXZ)}, + {"QuatEnum_INT_YZX", static_cast(cv::QuatEnum::INT_YZX)}, + {"QUAT_ENUM_INT_YZX", static_cast(cv::QuatEnum::INT_YZX)}, + {"QuatEnum_INT_YZY", static_cast(cv::QuatEnum::INT_YZY)}, + {"QUAT_ENUM_INT_YZY", static_cast(cv::QuatEnum::INT_YZY)}, + {"QuatEnum_INT_ZXY", static_cast(cv::QuatEnum::INT_ZXY)}, + {"QUAT_ENUM_INT_ZXY", static_cast(cv::QuatEnum::INT_ZXY)}, + {"QuatEnum_INT_ZXZ", static_cast(cv::QuatEnum::INT_ZXZ)}, + {"QUAT_ENUM_INT_ZXZ", static_cast(cv::QuatEnum::INT_ZXZ)}, + {"QuatEnum_INT_ZYX", static_cast(cv::QuatEnum::INT_ZYX)}, + {"QUAT_ENUM_INT_ZYX", static_cast(cv::QuatEnum::INT_ZYX)}, + {"QuatEnum_INT_ZYZ", static_cast(cv::QuatEnum::INT_ZYZ)}, + {"QUAT_ENUM_INT_ZYZ", static_cast(cv::QuatEnum::INT_ZYZ)}, + {"REDUCE_AVG", static_cast(cv::REDUCE_AVG)}, + {"REDUCE_MAX", static_cast(cv::REDUCE_MAX)}, + {"REDUCE_MIN", static_cast(cv::REDUCE_MIN)}, + {"REDUCE_SUM", static_cast(cv::REDUCE_SUM)}, + {"RETR_CCOMP", static_cast(cv::RETR_CCOMP)}, + {"RETR_EXTERNAL", static_cast(cv::RETR_EXTERNAL)}, + {"RETR_FLOODFILL", static_cast(cv::RETR_FLOODFILL)}, + {"RETR_LIST", static_cast(cv::RETR_LIST)}, + {"RETR_TREE", static_cast(cv::RETR_TREE)}, + {"RNG_NORMAL", static_cast(cv::RNG::NORMAL)}, + {"RNG_UNIFORM", static_cast(cv::RNG::UNIFORM)}, + {"ROTATE_180", static_cast(cv::ROTATE_180)}, + {"ROTATE_90_CLOCKWISE", static_cast(cv::ROTATE_90_CLOCKWISE)}, + {"ROTATE_90_COUNTERCLOCKWISE", static_cast(cv::ROTATE_90_COUNTERCLOCKWISE)}, + {"SOLVELP_MULTI", static_cast(cv::SOLVELP_MULTI)}, + 
{"SOLVELP_SINGLE", static_cast(cv::SOLVELP_SINGLE)}, + {"SOLVELP_UNBOUNDED", static_cast(cv::SOLVELP_UNBOUNDED)}, + {"SOLVELP_UNFEASIBLE", static_cast(cv::SOLVELP_UNFEASIBLE)}, + {"SORT_ASCENDING", static_cast(cv::SORT_ASCENDING)}, + {"SORT_DESCENDING", static_cast(cv::SORT_DESCENDING)}, + {"SORT_EVERY_COLUMN", static_cast(cv::SORT_EVERY_COLUMN)}, + {"SORT_EVERY_ROW", static_cast(cv::SORT_EVERY_ROW)}, + {"SVD_FULL_UV", static_cast(cv::SVD::FULL_UV)}, + {"SVD_MODIFY_A", static_cast(cv::SVD::MODIFY_A)}, + {"SVD_NO_UV", static_cast(cv::SVD::NO_UV)}, + {"SparseMat_HASH_BIT", static_cast(cv::SparseMat::HASH_BIT)}, + {"SPARSE_MAT_HASH_BIT", static_cast(cv::SparseMat::HASH_BIT)}, + {"SparseMat_HASH_SCALE", static_cast(cv::SparseMat::HASH_SCALE)}, + {"SPARSE_MAT_HASH_SCALE", static_cast(cv::SparseMat::HASH_SCALE)}, + {"SparseMat_MAGIC_VAL", static_cast(cv::SparseMat::MAGIC_VAL)}, + {"SPARSE_MAT_MAGIC_VAL", static_cast(cv::SparseMat::MAGIC_VAL)}, + {"SparseMat_MAX_DIM", static_cast(cv::SparseMat::MAX_DIM)}, + {"SPARSE_MAT_MAX_DIM", static_cast(cv::SparseMat::MAX_DIM)}, + {"Subdiv2D_NEXT_AROUND_DST", static_cast(cv::Subdiv2D::NEXT_AROUND_DST)}, + {"SUBDIV2D_NEXT_AROUND_DST", static_cast(cv::Subdiv2D::NEXT_AROUND_DST)}, + {"Subdiv2D_NEXT_AROUND_LEFT", static_cast(cv::Subdiv2D::NEXT_AROUND_LEFT)}, + {"SUBDIV2D_NEXT_AROUND_LEFT", static_cast(cv::Subdiv2D::NEXT_AROUND_LEFT)}, + {"Subdiv2D_NEXT_AROUND_ORG", static_cast(cv::Subdiv2D::NEXT_AROUND_ORG)}, + {"SUBDIV2D_NEXT_AROUND_ORG", static_cast(cv::Subdiv2D::NEXT_AROUND_ORG)}, + {"Subdiv2D_NEXT_AROUND_RIGHT", static_cast(cv::Subdiv2D::NEXT_AROUND_RIGHT)}, + {"SUBDIV2D_NEXT_AROUND_RIGHT", static_cast(cv::Subdiv2D::NEXT_AROUND_RIGHT)}, + {"Subdiv2D_PREV_AROUND_DST", static_cast(cv::Subdiv2D::PREV_AROUND_DST)}, + {"SUBDIV2D_PREV_AROUND_DST", static_cast(cv::Subdiv2D::PREV_AROUND_DST)}, + {"Subdiv2D_PREV_AROUND_LEFT", static_cast(cv::Subdiv2D::PREV_AROUND_LEFT)}, + {"SUBDIV2D_PREV_AROUND_LEFT", static_cast(cv::Subdiv2D::PREV_AROUND_LEFT)}, + {"Subdiv2D_PREV_AROUND_ORG", static_cast(cv::Subdiv2D::PREV_AROUND_ORG)}, + {"SUBDIV2D_PREV_AROUND_ORG", static_cast(cv::Subdiv2D::PREV_AROUND_ORG)}, + {"Subdiv2D_PREV_AROUND_RIGHT", static_cast(cv::Subdiv2D::PREV_AROUND_RIGHT)}, + {"SUBDIV2D_PREV_AROUND_RIGHT", static_cast(cv::Subdiv2D::PREV_AROUND_RIGHT)}, + {"Subdiv2D_PTLOC_ERROR", static_cast(cv::Subdiv2D::PTLOC_ERROR)}, + {"SUBDIV2D_PTLOC_ERROR", static_cast(cv::Subdiv2D::PTLOC_ERROR)}, + {"Subdiv2D_PTLOC_INSIDE", static_cast(cv::Subdiv2D::PTLOC_INSIDE)}, + {"SUBDIV2D_PTLOC_INSIDE", static_cast(cv::Subdiv2D::PTLOC_INSIDE)}, + {"Subdiv2D_PTLOC_ON_EDGE", static_cast(cv::Subdiv2D::PTLOC_ON_EDGE)}, + {"SUBDIV2D_PTLOC_ON_EDGE", static_cast(cv::Subdiv2D::PTLOC_ON_EDGE)}, + {"Subdiv2D_PTLOC_OUTSIDE_RECT", static_cast(cv::Subdiv2D::PTLOC_OUTSIDE_RECT)}, + {"SUBDIV2D_PTLOC_OUTSIDE_RECT", static_cast(cv::Subdiv2D::PTLOC_OUTSIDE_RECT)}, + {"Subdiv2D_PTLOC_VERTEX", static_cast(cv::Subdiv2D::PTLOC_VERTEX)}, + {"SUBDIV2D_PTLOC_VERTEX", static_cast(cv::Subdiv2D::PTLOC_VERTEX)}, + {"THRESH_BINARY", static_cast(cv::THRESH_BINARY)}, + {"THRESH_BINARY_INV", static_cast(cv::THRESH_BINARY_INV)}, + {"THRESH_MASK", static_cast(cv::THRESH_MASK)}, + {"THRESH_OTSU", static_cast(cv::THRESH_OTSU)}, + {"THRESH_TOZERO", static_cast(cv::THRESH_TOZERO)}, + {"THRESH_TOZERO_INV", static_cast(cv::THRESH_TOZERO_INV)}, + {"THRESH_TRIANGLE", static_cast(cv::THRESH_TRIANGLE)}, + {"THRESH_TRUNC", static_cast(cv::THRESH_TRUNC)}, + {"TM_CCOEFF", static_cast(cv::TM_CCOEFF)}, + {"TM_CCOEFF_NORMED", 
static_cast(cv::TM_CCOEFF_NORMED)}, + {"TM_CCORR", static_cast(cv::TM_CCORR)}, + {"TM_CCORR_NORMED", static_cast(cv::TM_CCORR_NORMED)}, + {"TM_SQDIFF", static_cast(cv::TM_SQDIFF)}, + {"TM_SQDIFF_NORMED", static_cast(cv::TM_SQDIFF_NORMED)}, + {"TermCriteria_COUNT", static_cast(cv::TermCriteria::COUNT)}, + {"TERM_CRITERIA_COUNT", static_cast(cv::TermCriteria::COUNT)}, + {"TermCriteria_EPS", static_cast(cv::TermCriteria::EPS)}, + {"TERM_CRITERIA_EPS", static_cast(cv::TermCriteria::EPS)}, + {"TermCriteria_MAX_ITER", static_cast(cv::TermCriteria::MAX_ITER)}, + {"TERM_CRITERIA_MAX_ITER", static_cast(cv::TermCriteria::MAX_ITER)}, + {"UMatData_ASYNC_CLEANUP", static_cast(cv::UMatData::ASYNC_CLEANUP)}, + {"UMAT_DATA_ASYNC_CLEANUP", static_cast(cv::UMatData::ASYNC_CLEANUP)}, + {"UMatData_COPY_ON_MAP", static_cast(cv::UMatData::COPY_ON_MAP)}, + {"UMAT_DATA_COPY_ON_MAP", static_cast(cv::UMatData::COPY_ON_MAP)}, + {"UMatData_DEVICE_COPY_OBSOLETE", static_cast(cv::UMatData::DEVICE_COPY_OBSOLETE)}, + {"UMAT_DATA_DEVICE_COPY_OBSOLETE", static_cast(cv::UMatData::DEVICE_COPY_OBSOLETE)}, + {"UMatData_DEVICE_MEM_MAPPED", static_cast(cv::UMatData::DEVICE_MEM_MAPPED)}, + {"UMAT_DATA_DEVICE_MEM_MAPPED", static_cast(cv::UMatData::DEVICE_MEM_MAPPED)}, + {"UMatData_HOST_COPY_OBSOLETE", static_cast(cv::UMatData::HOST_COPY_OBSOLETE)}, + {"UMAT_DATA_HOST_COPY_OBSOLETE", static_cast(cv::UMatData::HOST_COPY_OBSOLETE)}, + {"UMatData_TEMP_COPIED_UMAT", static_cast(cv::UMatData::TEMP_COPIED_UMAT)}, + {"UMAT_DATA_TEMP_COPIED_UMAT", static_cast(cv::UMatData::TEMP_COPIED_UMAT)}, + {"UMatData_TEMP_UMAT", static_cast(cv::UMatData::TEMP_UMAT)}, + {"UMAT_DATA_TEMP_UMAT", static_cast(cv::UMatData::TEMP_UMAT)}, + {"UMatData_USER_ALLOCATED", static_cast(cv::UMatData::USER_ALLOCATED)}, + {"UMAT_DATA_USER_ALLOCATED", static_cast(cv::UMatData::USER_ALLOCATED)}, + {"UMat_AUTO_STEP", static_cast(cv::UMat::AUTO_STEP)}, + {"UMAT_AUTO_STEP", static_cast(cv::UMat::AUTO_STEP)}, + {"UMat_CONTINUOUS_FLAG", static_cast(cv::UMat::CONTINUOUS_FLAG)}, + {"UMAT_CONTINUOUS_FLAG", static_cast(cv::UMat::CONTINUOUS_FLAG)}, + {"UMat_DEPTH_MASK", static_cast(cv::UMat::DEPTH_MASK)}, + {"UMAT_DEPTH_MASK", static_cast(cv::UMat::DEPTH_MASK)}, + {"UMat_MAGIC_MASK", static_cast(cv::UMat::MAGIC_MASK)}, + {"UMAT_MAGIC_MASK", static_cast(cv::UMat::MAGIC_MASK)}, + {"UMat_MAGIC_VAL", static_cast(cv::UMat::MAGIC_VAL)}, + {"UMAT_MAGIC_VAL", static_cast(cv::UMat::MAGIC_VAL)}, + {"UMat_SUBMATRIX_FLAG", static_cast(cv::UMat::SUBMATRIX_FLAG)}, + {"UMAT_SUBMATRIX_FLAG", static_cast(cv::UMat::SUBMATRIX_FLAG)}, + {"UMat_TYPE_MASK", static_cast(cv::UMat::TYPE_MASK)}, + {"UMAT_TYPE_MASK", static_cast(cv::UMat::TYPE_MASK)}, + {"USAGE_ALLOCATE_DEVICE_MEMORY", static_cast(cv::USAGE_ALLOCATE_DEVICE_MEMORY)}, + {"USAGE_ALLOCATE_HOST_MEMORY", static_cast(cv::USAGE_ALLOCATE_HOST_MEMORY)}, + {"USAGE_ALLOCATE_SHARED_MEMORY", static_cast(cv::USAGE_ALLOCATE_SHARED_MEMORY)}, + {"USAGE_DEFAULT", static_cast(cv::USAGE_DEFAULT)}, + {"VIDEOWRITER_PROP_DEPTH", static_cast(cv::VIDEOWRITER_PROP_DEPTH)}, + {"VIDEOWRITER_PROP_FRAMEBYTES", static_cast(cv::VIDEOWRITER_PROP_FRAMEBYTES)}, + {"VIDEOWRITER_PROP_HW_ACCELERATION", static_cast(cv::VIDEOWRITER_PROP_HW_ACCELERATION)}, + {"VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL", static_cast(cv::VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL)}, + {"VIDEOWRITER_PROP_HW_DEVICE", static_cast(cv::VIDEOWRITER_PROP_HW_DEVICE)}, + {"VIDEOWRITER_PROP_IS_COLOR", static_cast(cv::VIDEOWRITER_PROP_IS_COLOR)}, + {"VIDEOWRITER_PROP_NSTRIPES", 
static_cast(cv::VIDEOWRITER_PROP_NSTRIPES)}, + {"VIDEOWRITER_PROP_QUALITY", static_cast(cv::VIDEOWRITER_PROP_QUALITY)}, + {"VIDEO_ACCELERATION_ANY", static_cast(cv::VIDEO_ACCELERATION_ANY)}, + {"VIDEO_ACCELERATION_D3D11", static_cast(cv::VIDEO_ACCELERATION_D3D11)}, + {"VIDEO_ACCELERATION_MFX", static_cast(cv::VIDEO_ACCELERATION_MFX)}, + {"VIDEO_ACCELERATION_NONE", static_cast(cv::VIDEO_ACCELERATION_NONE)}, + {"VIDEO_ACCELERATION_VAAPI", static_cast(cv::VIDEO_ACCELERATION_VAAPI)}, + {"WARP_FILL_OUTLIERS", static_cast(cv::WARP_FILL_OUTLIERS)}, + {"WARP_INVERSE_MAP", static_cast(cv::WARP_INVERSE_MAP)}, + {"WARP_POLAR_LINEAR", static_cast(cv::WARP_POLAR_LINEAR)}, + {"WARP_POLAR_LOG", static_cast(cv::WARP_POLAR_LOG)}, + {"_InputArray_CUDA_GPU_MAT", static_cast(cv::_InputArray::CUDA_GPU_MAT)}, + {"_INPUT_ARRAY_CUDA_GPU_MAT", static_cast(cv::_InputArray::CUDA_GPU_MAT)}, + {"_InputArray_CUDA_HOST_MEM", static_cast(cv::_InputArray::CUDA_HOST_MEM)}, + {"_INPUT_ARRAY_CUDA_HOST_MEM", static_cast(cv::_InputArray::CUDA_HOST_MEM)}, + {"_InputArray_EXPR", static_cast(cv::_InputArray::EXPR)}, + {"_INPUT_ARRAY_EXPR", static_cast(cv::_InputArray::EXPR)}, + {"_InputArray_FIXED_SIZE", static_cast(cv::_InputArray::FIXED_SIZE)}, + {"_INPUT_ARRAY_FIXED_SIZE", static_cast(cv::_InputArray::FIXED_SIZE)}, + {"_InputArray_FIXED_TYPE", static_cast(cv::_InputArray::FIXED_TYPE)}, + {"_INPUT_ARRAY_FIXED_TYPE", static_cast(cv::_InputArray::FIXED_TYPE)}, + {"_InputArray_KIND_MASK", static_cast(cv::_InputArray::KIND_MASK)}, + {"_INPUT_ARRAY_KIND_MASK", static_cast(cv::_InputArray::KIND_MASK)}, + {"_InputArray_KIND_SHIFT", static_cast(cv::_InputArray::KIND_SHIFT)}, + {"_INPUT_ARRAY_KIND_SHIFT", static_cast(cv::_InputArray::KIND_SHIFT)}, + {"_InputArray_MAT", static_cast(cv::_InputArray::MAT)}, + {"_INPUT_ARRAY_MAT", static_cast(cv::_InputArray::MAT)}, + {"_InputArray_MATX", static_cast(cv::_InputArray::MATX)}, + {"_INPUT_ARRAY_MATX", static_cast(cv::_InputArray::MATX)}, + {"_InputArray_NONE", static_cast(cv::_InputArray::NONE)}, + {"_INPUT_ARRAY_NONE", static_cast(cv::_InputArray::NONE)}, + {"_InputArray_OPENGL_BUFFER", static_cast(cv::_InputArray::OPENGL_BUFFER)}, + {"_INPUT_ARRAY_OPENGL_BUFFER", static_cast(cv::_InputArray::OPENGL_BUFFER)}, + {"_InputArray_STD_ARRAY", static_cast(cv::_InputArray::STD_ARRAY)}, + {"_INPUT_ARRAY_STD_ARRAY", static_cast(cv::_InputArray::STD_ARRAY)}, + {"_InputArray_STD_ARRAY_MAT", static_cast(cv::_InputArray::STD_ARRAY_MAT)}, + {"_INPUT_ARRAY_STD_ARRAY_MAT", static_cast(cv::_InputArray::STD_ARRAY_MAT)}, + {"_InputArray_STD_BOOL_VECTOR", static_cast(cv::_InputArray::STD_BOOL_VECTOR)}, + {"_INPUT_ARRAY_STD_BOOL_VECTOR", static_cast(cv::_InputArray::STD_BOOL_VECTOR)}, + {"_InputArray_STD_VECTOR", static_cast(cv::_InputArray::STD_VECTOR)}, + {"_INPUT_ARRAY_STD_VECTOR", static_cast(cv::_InputArray::STD_VECTOR)}, + {"_InputArray_STD_VECTOR_CUDA_GPU_MAT", static_cast(cv::_InputArray::STD_VECTOR_CUDA_GPU_MAT)}, + {"_INPUT_ARRAY_STD_VECTOR_CUDA_GPU_MAT", static_cast(cv::_InputArray::STD_VECTOR_CUDA_GPU_MAT)}, + {"_InputArray_STD_VECTOR_MAT", static_cast(cv::_InputArray::STD_VECTOR_MAT)}, + {"_INPUT_ARRAY_STD_VECTOR_MAT", static_cast(cv::_InputArray::STD_VECTOR_MAT)}, + {"_InputArray_STD_VECTOR_UMAT", static_cast(cv::_InputArray::STD_VECTOR_UMAT)}, + {"_INPUT_ARRAY_STD_VECTOR_UMAT", static_cast(cv::_InputArray::STD_VECTOR_UMAT)}, + {"_InputArray_STD_VECTOR_VECTOR", static_cast(cv::_InputArray::STD_VECTOR_VECTOR)}, + {"_INPUT_ARRAY_STD_VECTOR_VECTOR", static_cast(cv::_InputArray::STD_VECTOR_VECTOR)}, + 
{"_InputArray_UMAT", static_cast(cv::_InputArray::UMAT)}, + {"_INPUT_ARRAY_UMAT", static_cast(cv::_InputArray::UMAT)}, + {"_OutputArray_DEPTH_MASK_16F", static_cast(cv::_OutputArray::DEPTH_MASK_16F)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_16F", static_cast(cv::_OutputArray::DEPTH_MASK_16F)}, + {"_OutputArray_DEPTH_MASK_16S", static_cast(cv::_OutputArray::DEPTH_MASK_16S)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_16S", static_cast(cv::_OutputArray::DEPTH_MASK_16S)}, + {"_OutputArray_DEPTH_MASK_16U", static_cast(cv::_OutputArray::DEPTH_MASK_16U)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_16U", static_cast(cv::_OutputArray::DEPTH_MASK_16U)}, + {"_OutputArray_DEPTH_MASK_32F", static_cast(cv::_OutputArray::DEPTH_MASK_32F)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_32F", static_cast(cv::_OutputArray::DEPTH_MASK_32F)}, + {"_OutputArray_DEPTH_MASK_32S", static_cast(cv::_OutputArray::DEPTH_MASK_32S)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_32S", static_cast(cv::_OutputArray::DEPTH_MASK_32S)}, + {"_OutputArray_DEPTH_MASK_64F", static_cast(cv::_OutputArray::DEPTH_MASK_64F)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_64F", static_cast(cv::_OutputArray::DEPTH_MASK_64F)}, + {"_OutputArray_DEPTH_MASK_8S", static_cast(cv::_OutputArray::DEPTH_MASK_8S)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_8S", static_cast(cv::_OutputArray::DEPTH_MASK_8S)}, + {"_OutputArray_DEPTH_MASK_8U", static_cast(cv::_OutputArray::DEPTH_MASK_8U)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_8U", static_cast(cv::_OutputArray::DEPTH_MASK_8U)}, + {"_OutputArray_DEPTH_MASK_ALL", static_cast(cv::_OutputArray::DEPTH_MASK_ALL)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_ALL", static_cast(cv::_OutputArray::DEPTH_MASK_ALL)}, + {"_OutputArray_DEPTH_MASK_ALL_16F", static_cast(cv::_OutputArray::DEPTH_MASK_ALL_16F)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_ALL_16F", static_cast(cv::_OutputArray::DEPTH_MASK_ALL_16F)}, + {"_OutputArray_DEPTH_MASK_ALL_BUT_8S", static_cast(cv::_OutputArray::DEPTH_MASK_ALL_BUT_8S)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_ALL_BUT_8S", static_cast(cv::_OutputArray::DEPTH_MASK_ALL_BUT_8S)}, + {"_OutputArray_DEPTH_MASK_FLT", static_cast(cv::_OutputArray::DEPTH_MASK_FLT)}, + {"_OUTPUT_ARRAY_DEPTH_MASK_FLT", static_cast(cv::_OutputArray::DEPTH_MASK_FLT)}, + {"__UMAT_USAGE_FLAGS_32BIT", static_cast(cv::__UMAT_USAGE_FLAGS_32BIT)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_CV + PYOPENCV_EXTRA_CONSTANTS_CV +#endif + {NULL, 0} +}; + +static PyMethodDef methods_Error[] = { +#ifdef PYOPENCV_EXTRA_METHODS_ERROR + PYOPENCV_EXTRA_METHODS_ERROR +#endif + {NULL, NULL} +}; + +static ConstDef consts_Error[] = { + {"BadAlign", static_cast(cv::Error::BadAlign)}, + {"BAD_ALIGN", static_cast(cv::Error::BadAlign)}, + {"BadAlphaChannel", static_cast(cv::Error::BadAlphaChannel)}, + {"BAD_ALPHA_CHANNEL", static_cast(cv::Error::BadAlphaChannel)}, + {"BadCOI", static_cast(cv::Error::BadCOI)}, + {"BAD_COI", static_cast(cv::Error::BadCOI)}, + {"BadCallBack", static_cast(cv::Error::BadCallBack)}, + {"BAD_CALL_BACK", static_cast(cv::Error::BadCallBack)}, + {"BadDataPtr", static_cast(cv::Error::BadDataPtr)}, + {"BAD_DATA_PTR", static_cast(cv::Error::BadDataPtr)}, + {"BadDepth", static_cast(cv::Error::BadDepth)}, + {"BAD_DEPTH", static_cast(cv::Error::BadDepth)}, + {"BadImageSize", static_cast(cv::Error::BadImageSize)}, + {"BAD_IMAGE_SIZE", static_cast(cv::Error::BadImageSize)}, + {"BadModelOrChSeq", static_cast(cv::Error::BadModelOrChSeq)}, + {"BAD_MODEL_OR_CH_SEQ", static_cast(cv::Error::BadModelOrChSeq)}, + {"BadNumChannel1U", static_cast(cv::Error::BadNumChannel1U)}, + {"BAD_NUM_CHANNEL1U", static_cast(cv::Error::BadNumChannel1U)}, + {"BadNumChannels", 
static_cast(cv::Error::BadNumChannels)}, + {"BAD_NUM_CHANNELS", static_cast(cv::Error::BadNumChannels)}, + {"BadOffset", static_cast(cv::Error::BadOffset)}, + {"BAD_OFFSET", static_cast(cv::Error::BadOffset)}, + {"BadOrder", static_cast(cv::Error::BadOrder)}, + {"BAD_ORDER", static_cast(cv::Error::BadOrder)}, + {"BadOrigin", static_cast(cv::Error::BadOrigin)}, + {"BAD_ORIGIN", static_cast(cv::Error::BadOrigin)}, + {"BadROISize", static_cast(cv::Error::BadROISize)}, + {"BAD_ROISIZE", static_cast(cv::Error::BadROISize)}, + {"BadStep", static_cast(cv::Error::BadStep)}, + {"BAD_STEP", static_cast(cv::Error::BadStep)}, + {"BadTileSize", static_cast(cv::Error::BadTileSize)}, + {"BAD_TILE_SIZE", static_cast(cv::Error::BadTileSize)}, + {"GpuApiCallError", static_cast(cv::Error::GpuApiCallError)}, + {"GPU_API_CALL_ERROR", static_cast(cv::Error::GpuApiCallError)}, + {"GpuNotSupported", static_cast(cv::Error::GpuNotSupported)}, + {"GPU_NOT_SUPPORTED", static_cast(cv::Error::GpuNotSupported)}, + {"HeaderIsNull", static_cast(cv::Error::HeaderIsNull)}, + {"HEADER_IS_NULL", static_cast(cv::Error::HeaderIsNull)}, + {"MaskIsTiled", static_cast(cv::Error::MaskIsTiled)}, + {"MASK_IS_TILED", static_cast(cv::Error::MaskIsTiled)}, + {"OpenCLApiCallError", static_cast(cv::Error::OpenCLApiCallError)}, + {"OPEN_CLAPI_CALL_ERROR", static_cast(cv::Error::OpenCLApiCallError)}, + {"OpenCLDoubleNotSupported", static_cast(cv::Error::OpenCLDoubleNotSupported)}, + {"OPEN_CLDOUBLE_NOT_SUPPORTED", static_cast(cv::Error::OpenCLDoubleNotSupported)}, + {"OpenCLInitError", static_cast(cv::Error::OpenCLInitError)}, + {"OPEN_CLINIT_ERROR", static_cast(cv::Error::OpenCLInitError)}, + {"OpenCLNoAMDBlasFft", static_cast(cv::Error::OpenCLNoAMDBlasFft)}, + {"OPEN_CLNO_AMDBLAS_FFT", static_cast(cv::Error::OpenCLNoAMDBlasFft)}, + {"OpenGlApiCallError", static_cast(cv::Error::OpenGlApiCallError)}, + {"OPEN_GL_API_CALL_ERROR", static_cast(cv::Error::OpenGlApiCallError)}, + {"OpenGlNotSupported", static_cast(cv::Error::OpenGlNotSupported)}, + {"OPEN_GL_NOT_SUPPORTED", static_cast(cv::Error::OpenGlNotSupported)}, + {"StsAssert", static_cast(cv::Error::StsAssert)}, + {"STS_ASSERT", static_cast(cv::Error::StsAssert)}, + {"StsAutoTrace", static_cast(cv::Error::StsAutoTrace)}, + {"STS_AUTO_TRACE", static_cast(cv::Error::StsAutoTrace)}, + {"StsBackTrace", static_cast(cv::Error::StsBackTrace)}, + {"STS_BACK_TRACE", static_cast(cv::Error::StsBackTrace)}, + {"StsBadArg", static_cast(cv::Error::StsBadArg)}, + {"STS_BAD_ARG", static_cast(cv::Error::StsBadArg)}, + {"StsBadFlag", static_cast(cv::Error::StsBadFlag)}, + {"STS_BAD_FLAG", static_cast(cv::Error::StsBadFlag)}, + {"StsBadFunc", static_cast(cv::Error::StsBadFunc)}, + {"STS_BAD_FUNC", static_cast(cv::Error::StsBadFunc)}, + {"StsBadMask", static_cast(cv::Error::StsBadMask)}, + {"STS_BAD_MASK", static_cast(cv::Error::StsBadMask)}, + {"StsBadMemBlock", static_cast(cv::Error::StsBadMemBlock)}, + {"STS_BAD_MEM_BLOCK", static_cast(cv::Error::StsBadMemBlock)}, + {"StsBadPoint", static_cast(cv::Error::StsBadPoint)}, + {"STS_BAD_POINT", static_cast(cv::Error::StsBadPoint)}, + {"StsBadSize", static_cast(cv::Error::StsBadSize)}, + {"STS_BAD_SIZE", static_cast(cv::Error::StsBadSize)}, + {"StsDivByZero", static_cast(cv::Error::StsDivByZero)}, + {"STS_DIV_BY_ZERO", static_cast(cv::Error::StsDivByZero)}, + {"StsError", static_cast(cv::Error::StsError)}, + {"STS_ERROR", static_cast(cv::Error::StsError)}, + {"StsFilterOffsetErr", static_cast(cv::Error::StsFilterOffsetErr)}, + {"STS_FILTER_OFFSET_ERR", 
static_cast(cv::Error::StsFilterOffsetErr)}, + {"StsFilterStructContentErr", static_cast(cv::Error::StsFilterStructContentErr)}, + {"STS_FILTER_STRUCT_CONTENT_ERR", static_cast(cv::Error::StsFilterStructContentErr)}, + {"StsInplaceNotSupported", static_cast(cv::Error::StsInplaceNotSupported)}, + {"STS_INPLACE_NOT_SUPPORTED", static_cast(cv::Error::StsInplaceNotSupported)}, + {"StsInternal", static_cast(cv::Error::StsInternal)}, + {"STS_INTERNAL", static_cast(cv::Error::StsInternal)}, + {"StsKernelStructContentErr", static_cast(cv::Error::StsKernelStructContentErr)}, + {"STS_KERNEL_STRUCT_CONTENT_ERR", static_cast(cv::Error::StsKernelStructContentErr)}, + {"StsNoConv", static_cast(cv::Error::StsNoConv)}, + {"STS_NO_CONV", static_cast(cv::Error::StsNoConv)}, + {"StsNoMem", static_cast(cv::Error::StsNoMem)}, + {"STS_NO_MEM", static_cast(cv::Error::StsNoMem)}, + {"StsNotImplemented", static_cast(cv::Error::StsNotImplemented)}, + {"STS_NOT_IMPLEMENTED", static_cast(cv::Error::StsNotImplemented)}, + {"StsNullPtr", static_cast(cv::Error::StsNullPtr)}, + {"STS_NULL_PTR", static_cast(cv::Error::StsNullPtr)}, + {"StsObjectNotFound", static_cast(cv::Error::StsObjectNotFound)}, + {"STS_OBJECT_NOT_FOUND", static_cast(cv::Error::StsObjectNotFound)}, + {"StsOk", static_cast(cv::Error::StsOk)}, + {"STS_OK", static_cast(cv::Error::StsOk)}, + {"StsOutOfRange", static_cast(cv::Error::StsOutOfRange)}, + {"STS_OUT_OF_RANGE", static_cast(cv::Error::StsOutOfRange)}, + {"StsParseError", static_cast(cv::Error::StsParseError)}, + {"STS_PARSE_ERROR", static_cast(cv::Error::StsParseError)}, + {"StsUnmatchedFormats", static_cast(cv::Error::StsUnmatchedFormats)}, + {"STS_UNMATCHED_FORMATS", static_cast(cv::Error::StsUnmatchedFormats)}, + {"StsUnmatchedSizes", static_cast(cv::Error::StsUnmatchedSizes)}, + {"STS_UNMATCHED_SIZES", static_cast(cv::Error::StsUnmatchedSizes)}, + {"StsUnsupportedFormat", static_cast(cv::Error::StsUnsupportedFormat)}, + {"STS_UNSUPPORTED_FORMAT", static_cast(cv::Error::StsUnsupportedFormat)}, + {"StsVecLengthErr", static_cast(cv::Error::StsVecLengthErr)}, + {"STS_VEC_LENGTH_ERR", static_cast(cv::Error::StsVecLengthErr)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_ERROR + PYOPENCV_EXTRA_CONSTANTS_ERROR +#endif + {NULL, 0} +}; + +static PyMethodDef methods_cuda[] = { + {"Event_elapsedTime", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_Event_elapsedTime, 0), "Event_elapsedTime(start, end) -> retval\n."}, + {"GpuMat_defaultAllocator", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_GpuMat_defaultAllocator, 0), "GpuMat_defaultAllocator() -> retval\n."}, + {"GpuMat_setDefaultAllocator", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_GpuMat_setDefaultAllocator, 0), "GpuMat_setDefaultAllocator(allocator) -> None\n."}, + {"Stream_Null", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_Stream_Null, 0), "Stream_Null() -> retval\n. @brief Adds a callback to be called on the host after all currently enqueued items in the stream have\n. completed.\n. \n. @note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization\n. that may depend on outstanding device work or other callbacks that are not mandated to run earlier.\n. Callbacks without a mandated order (in independent streams) execute in undefined order and may be\n. serialized."}, + {"TargetArchs_has", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_has, 0), "TargetArchs_has(major, minor) -> retval\n. @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA\n. code for the given architecture(s):\n. \n. 
@param major Major compute capability version.\n. @param minor Minor compute capability version."}, + {"TargetArchs_hasBin", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasBin, 0), "TargetArchs_hasBin(major, minor) -> retval\n."}, + {"TargetArchs_hasEqualOrGreater", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasEqualOrGreater, 0), "TargetArchs_hasEqualOrGreater(major, minor) -> retval\n."}, + {"TargetArchs_hasEqualOrGreaterBin", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasEqualOrGreaterBin, 0), "TargetArchs_hasEqualOrGreaterBin(major, minor) -> retval\n."}, + {"TargetArchs_hasEqualOrGreaterPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasEqualOrGreaterPtx, 0), "TargetArchs_hasEqualOrGreaterPtx(major, minor) -> retval\n."}, + {"TargetArchs_hasEqualOrLessPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasEqualOrLessPtx, 0), "TargetArchs_hasEqualOrLessPtx(major, minor) -> retval\n."}, + {"TargetArchs_hasPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_TargetArchs_hasPtx, 0), "TargetArchs_hasPtx(major, minor) -> retval\n."}, + {"createContinuous", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_createContinuous, 0), "createContinuous(rows, cols, type[, arr]) -> arr\n. @brief Creates a continuous matrix.\n. \n. @param rows Row count.\n. @param cols Column count.\n. @param type Type of the matrix.\n. @param arr Destination matrix. This parameter changes only if it has a proper type and area (\n. \\f$\\texttt{rows} \\times \\texttt{cols}\\f$ ).\n. \n. Matrix is called continuous if its elements are stored continuously, that is, without gaps at the\n. end of each row."}, + {"ensureSizeIsEnough", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_ensureSizeIsEnough, 0), "ensureSizeIsEnough(rows, cols, type[, arr]) -> arr\n. @brief Ensures that the size of a matrix is big enough and the matrix has a proper type.\n. \n. @param rows Minimum desired number of rows.\n. @param cols Minimum desired number of columns.\n. @param type Desired matrix type.\n. @param arr Destination matrix.\n. \n. The function does not reallocate memory if the matrix has proper attributes already."}, + {"getCudaEnabledDeviceCount", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_getCudaEnabledDeviceCount, 0), "getCudaEnabledDeviceCount() -> retval\n. @brief Returns the number of installed CUDA-enabled devices.\n. \n. Use this function before any other CUDA functions calls. If OpenCV is compiled without CUDA support,\n. this function returns 0. If the CUDA driver is not installed, or is incompatible, this function\n. returns -1."}, + {"getDevice", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_getDevice, 0), "getDevice() -> retval\n. @brief Returns the current device index set by cuda::setDevice or initialized by default."}, + {"printCudaDeviceInfo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_printCudaDeviceInfo, 0), "printCudaDeviceInfo(device) -> None\n."}, + {"printShortCudaDeviceInfo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_printShortCudaDeviceInfo, 0), "printShortCudaDeviceInfo(device) -> None\n."}, + {"registerPageLocked", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_registerPageLocked, 0), "registerPageLocked(m) -> None\n. @brief Page-locks the memory of matrix and maps it for the device(s).\n. \n. @param m Input matrix."}, + {"resetDevice", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_resetDevice, 0), "resetDevice() -> None\n. @brief Explicitly destroys and cleans up all resources associated with the current device in the current\n. process.\n. \n. 
Any subsequent API call to this device will reinitialize the device."}, + {"setBufferPoolConfig", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_setBufferPoolConfig, 0), "setBufferPoolConfig(deviceId, stackSize, stackCount) -> None\n."}, + {"setBufferPoolUsage", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_setBufferPoolUsage, 0), "setBufferPoolUsage(on) -> None\n."}, + {"setDevice", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_setDevice, 0), "setDevice(device) -> None\n. @brief Sets a device and initializes it for the current thread.\n. \n. @param device System index of a CUDA device starting with 0.\n. \n. If the call of this function is omitted, a default device is initialized at the first CUDA usage."}, + {"unregisterPageLocked", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_unregisterPageLocked, 0), "unregisterPageLocked(m) -> None\n. @brief Unmaps the memory of matrix and makes it pageable again.\n. \n. @param m Input matrix."}, +#ifdef PYOPENCV_EXTRA_METHODS_CUDA + PYOPENCV_EXTRA_METHODS_CUDA +#endif + {NULL, NULL} +}; + +static ConstDef consts_cuda[] = { + {"DYNAMIC_PARALLELISM", static_cast<long>(cv::cuda::DYNAMIC_PARALLELISM)}, + {"DeviceInfo_ComputeModeDefault", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeDefault)}, + {"DEVICE_INFO_COMPUTE_MODE_DEFAULT", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeDefault)}, + {"DeviceInfo_ComputeModeExclusive", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeExclusive)}, + {"DEVICE_INFO_COMPUTE_MODE_EXCLUSIVE", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeExclusive)}, + {"DeviceInfo_ComputeModeExclusiveProcess", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeExclusiveProcess)}, + {"DEVICE_INFO_COMPUTE_MODE_EXCLUSIVE_PROCESS", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeExclusiveProcess)}, + {"DeviceInfo_ComputeModeProhibited", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeProhibited)}, + {"DEVICE_INFO_COMPUTE_MODE_PROHIBITED", static_cast<long>(cv::cuda::DeviceInfo::ComputeModeProhibited)}, + {"Event_BLOCKING_SYNC", static_cast<long>(cv::cuda::Event::BLOCKING_SYNC)}, + {"EVENT_BLOCKING_SYNC", static_cast<long>(cv::cuda::Event::BLOCKING_SYNC)}, + {"Event_DEFAULT", static_cast<long>(cv::cuda::Event::DEFAULT)}, + {"EVENT_DEFAULT", static_cast<long>(cv::cuda::Event::DEFAULT)}, + {"Event_DISABLE_TIMING", static_cast<long>(cv::cuda::Event::DISABLE_TIMING)}, + {"EVENT_DISABLE_TIMING", static_cast<long>(cv::cuda::Event::DISABLE_TIMING)}, + {"Event_INTERPROCESS", static_cast<long>(cv::cuda::Event::INTERPROCESS)}, + {"EVENT_INTERPROCESS", static_cast<long>(cv::cuda::Event::INTERPROCESS)}, + {"FEATURE_SET_COMPUTE_10", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_10)}, + {"FEATURE_SET_COMPUTE_11", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_11)}, + {"FEATURE_SET_COMPUTE_12", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_12)}, + {"FEATURE_SET_COMPUTE_13", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_13)}, + {"FEATURE_SET_COMPUTE_20", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_20)}, + {"FEATURE_SET_COMPUTE_21", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_21)}, + {"FEATURE_SET_COMPUTE_30", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_30)}, + {"FEATURE_SET_COMPUTE_32", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_32)}, + {"FEATURE_SET_COMPUTE_35", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_35)}, + {"FEATURE_SET_COMPUTE_50", static_cast<long>(cv::cuda::FEATURE_SET_COMPUTE_50)}, + {"GLOBAL_ATOMICS", static_cast<long>(cv::cuda::GLOBAL_ATOMICS)}, + {"HostMem_PAGE_LOCKED", static_cast<long>(cv::cuda::HostMem::PAGE_LOCKED)}, + {"HOST_MEM_PAGE_LOCKED", static_cast<long>(cv::cuda::HostMem::PAGE_LOCKED)}, + {"HostMem_SHARED", static_cast<long>(cv::cuda::HostMem::SHARED)}, + {"HOST_MEM_SHARED", 
static_cast(cv::cuda::HostMem::SHARED)}, + {"HostMem_WRITE_COMBINED", static_cast(cv::cuda::HostMem::WRITE_COMBINED)}, + {"HOST_MEM_WRITE_COMBINED", static_cast(cv::cuda::HostMem::WRITE_COMBINED)}, + {"NATIVE_DOUBLE", static_cast(cv::cuda::NATIVE_DOUBLE)}, + {"SHARED_ATOMICS", static_cast(cv::cuda::SHARED_ATOMICS)}, + {"WARP_SHUFFLE_FUNCTIONS", static_cast(cv::cuda::WARP_SHUFFLE_FUNCTIONS)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_CUDA + PYOPENCV_EXTRA_CONSTANTS_CUDA +#endif + {NULL, 0} +}; + +static PyMethodDef methods_detail[] = { +#ifdef PYOPENCV_EXTRA_METHODS_DETAIL + PYOPENCV_EXTRA_METHODS_DETAIL +#endif + {NULL, NULL} +}; + +static ConstDef consts_detail[] = { + {"TEST_CUSTOM", static_cast(cv::detail::TEST_CUSTOM)}, + {"TEST_EQ", static_cast(cv::detail::TEST_EQ)}, + {"TEST_GE", static_cast(cv::detail::TEST_GE)}, + {"TEST_GT", static_cast(cv::detail::TEST_GT)}, + {"TEST_LE", static_cast(cv::detail::TEST_LE)}, + {"TEST_LT", static_cast(cv::detail::TEST_LT)}, + {"TEST_NE", static_cast(cv::detail::TEST_NE)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_DETAIL + PYOPENCV_EXTRA_CONSTANTS_DETAIL +#endif + {NULL, 0} +}; + +static PyMethodDef methods_ipp[] = { + {"getIppVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_ipp_getIppVersion, 0), "getIppVersion() -> retval\n."}, + {"setUseIPP", CV_PY_FN_WITH_KW_(pyopencv_cv_ipp_setUseIPP, 0), "setUseIPP(flag) -> None\n."}, + {"setUseIPP_NotExact", CV_PY_FN_WITH_KW_(pyopencv_cv_ipp_setUseIPP_NotExact, 0), "setUseIPP_NotExact(flag) -> None\n."}, + {"useIPP", CV_PY_FN_WITH_KW_(pyopencv_cv_ipp_useIPP, 0), "useIPP() -> retval\n. proxy for hal::Cholesky"}, + {"useIPP_NotExact", CV_PY_FN_WITH_KW_(pyopencv_cv_ipp_useIPP_NotExact, 0), "useIPP_NotExact() -> retval\n."}, +#ifdef PYOPENCV_EXTRA_METHODS_IPP + PYOPENCV_EXTRA_METHODS_IPP +#endif + {NULL, NULL} +}; + +static ConstDef consts_ipp[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_IPP + PYOPENCV_EXTRA_CONSTANTS_IPP +#endif + {NULL, 0} +}; + +static PyMethodDef methods_ocl[] = { + {"Device_getDefault", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_Device_getDefault, 0), "Device_getDefault() -> retval\n."}, + {"finish", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_finish, 0), "finish() -> None\n."}, + {"haveAmdBlas", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_haveAmdBlas, 0), "haveAmdBlas() -> retval\n."}, + {"haveAmdFft", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_haveAmdFft, 0), "haveAmdFft() -> retval\n."}, + {"haveOpenCL", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_haveOpenCL, 0), "haveOpenCL() -> retval\n."}, + {"setUseOpenCL", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_setUseOpenCL, 0), "setUseOpenCL(flag) -> None\n."}, + {"useOpenCL", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_useOpenCL, 0), "useOpenCL() -> retval\n."}, +#ifdef PYOPENCV_EXTRA_METHODS_OCL + PYOPENCV_EXTRA_METHODS_OCL +#endif + {NULL, NULL} +}; + +static ConstDef consts_ocl[] = { + {"Device_EXEC_KERNEL", static_cast(cv::ocl::Device::EXEC_KERNEL)}, + {"DEVICE_EXEC_KERNEL", static_cast(cv::ocl::Device::EXEC_KERNEL)}, + {"Device_EXEC_NATIVE_KERNEL", static_cast(cv::ocl::Device::EXEC_NATIVE_KERNEL)}, + {"DEVICE_EXEC_NATIVE_KERNEL", static_cast(cv::ocl::Device::EXEC_NATIVE_KERNEL)}, + {"Device_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT", static_cast(cv::ocl::Device::FP_CORRECTLY_ROUNDED_DIVIDE_SQRT)}, + {"DEVICE_FP_CORRECTLY_ROUNDED_DIVIDE_SQRT", static_cast(cv::ocl::Device::FP_CORRECTLY_ROUNDED_DIVIDE_SQRT)}, + {"Device_FP_DENORM", static_cast(cv::ocl::Device::FP_DENORM)}, + {"DEVICE_FP_DENORM", static_cast(cv::ocl::Device::FP_DENORM)}, + {"Device_FP_FMA", static_cast(cv::ocl::Device::FP_FMA)}, + {"DEVICE_FP_FMA", 
static_cast(cv::ocl::Device::FP_FMA)}, + {"Device_FP_INF_NAN", static_cast(cv::ocl::Device::FP_INF_NAN)}, + {"DEVICE_FP_INF_NAN", static_cast(cv::ocl::Device::FP_INF_NAN)}, + {"Device_FP_ROUND_TO_INF", static_cast(cv::ocl::Device::FP_ROUND_TO_INF)}, + {"DEVICE_FP_ROUND_TO_INF", static_cast(cv::ocl::Device::FP_ROUND_TO_INF)}, + {"Device_FP_ROUND_TO_NEAREST", static_cast(cv::ocl::Device::FP_ROUND_TO_NEAREST)}, + {"DEVICE_FP_ROUND_TO_NEAREST", static_cast(cv::ocl::Device::FP_ROUND_TO_NEAREST)}, + {"Device_FP_ROUND_TO_ZERO", static_cast(cv::ocl::Device::FP_ROUND_TO_ZERO)}, + {"DEVICE_FP_ROUND_TO_ZERO", static_cast(cv::ocl::Device::FP_ROUND_TO_ZERO)}, + {"Device_FP_SOFT_FLOAT", static_cast(cv::ocl::Device::FP_SOFT_FLOAT)}, + {"DEVICE_FP_SOFT_FLOAT", static_cast(cv::ocl::Device::FP_SOFT_FLOAT)}, + {"Device_LOCAL_IS_GLOBAL", static_cast(cv::ocl::Device::LOCAL_IS_GLOBAL)}, + {"DEVICE_LOCAL_IS_GLOBAL", static_cast(cv::ocl::Device::LOCAL_IS_GLOBAL)}, + {"Device_LOCAL_IS_LOCAL", static_cast(cv::ocl::Device::LOCAL_IS_LOCAL)}, + {"DEVICE_LOCAL_IS_LOCAL", static_cast(cv::ocl::Device::LOCAL_IS_LOCAL)}, + {"Device_NO_CACHE", static_cast(cv::ocl::Device::NO_CACHE)}, + {"DEVICE_NO_CACHE", static_cast(cv::ocl::Device::NO_CACHE)}, + {"Device_NO_LOCAL_MEM", static_cast(cv::ocl::Device::NO_LOCAL_MEM)}, + {"DEVICE_NO_LOCAL_MEM", static_cast(cv::ocl::Device::NO_LOCAL_MEM)}, + {"Device_READ_ONLY_CACHE", static_cast(cv::ocl::Device::READ_ONLY_CACHE)}, + {"DEVICE_READ_ONLY_CACHE", static_cast(cv::ocl::Device::READ_ONLY_CACHE)}, + {"Device_READ_WRITE_CACHE", static_cast(cv::ocl::Device::READ_WRITE_CACHE)}, + {"DEVICE_READ_WRITE_CACHE", static_cast(cv::ocl::Device::READ_WRITE_CACHE)}, + {"Device_TYPE_ACCELERATOR", static_cast(cv::ocl::Device::TYPE_ACCELERATOR)}, + {"DEVICE_TYPE_ACCELERATOR", static_cast(cv::ocl::Device::TYPE_ACCELERATOR)}, + {"Device_TYPE_ALL", static_cast(cv::ocl::Device::TYPE_ALL)}, + {"DEVICE_TYPE_ALL", static_cast(cv::ocl::Device::TYPE_ALL)}, + {"Device_TYPE_CPU", static_cast(cv::ocl::Device::TYPE_CPU)}, + {"DEVICE_TYPE_CPU", static_cast(cv::ocl::Device::TYPE_CPU)}, + {"Device_TYPE_DEFAULT", static_cast(cv::ocl::Device::TYPE_DEFAULT)}, + {"DEVICE_TYPE_DEFAULT", static_cast(cv::ocl::Device::TYPE_DEFAULT)}, + {"Device_TYPE_DGPU", static_cast(cv::ocl::Device::TYPE_DGPU)}, + {"DEVICE_TYPE_DGPU", static_cast(cv::ocl::Device::TYPE_DGPU)}, + {"Device_TYPE_GPU", static_cast(cv::ocl::Device::TYPE_GPU)}, + {"DEVICE_TYPE_GPU", static_cast(cv::ocl::Device::TYPE_GPU)}, + {"Device_TYPE_IGPU", static_cast(cv::ocl::Device::TYPE_IGPU)}, + {"DEVICE_TYPE_IGPU", static_cast(cv::ocl::Device::TYPE_IGPU)}, + {"Device_UNKNOWN_VENDOR", static_cast(cv::ocl::Device::UNKNOWN_VENDOR)}, + {"DEVICE_UNKNOWN_VENDOR", static_cast(cv::ocl::Device::UNKNOWN_VENDOR)}, + {"Device_VENDOR_AMD", static_cast(cv::ocl::Device::VENDOR_AMD)}, + {"DEVICE_VENDOR_AMD", static_cast(cv::ocl::Device::VENDOR_AMD)}, + {"Device_VENDOR_INTEL", static_cast(cv::ocl::Device::VENDOR_INTEL)}, + {"DEVICE_VENDOR_INTEL", static_cast(cv::ocl::Device::VENDOR_INTEL)}, + {"Device_VENDOR_NVIDIA", static_cast(cv::ocl::Device::VENDOR_NVIDIA)}, + {"DEVICE_VENDOR_NVIDIA", static_cast(cv::ocl::Device::VENDOR_NVIDIA)}, + {"KernelArg_CONSTANT", static_cast(cv::ocl::KernelArg::CONSTANT)}, + {"KERNEL_ARG_CONSTANT", static_cast(cv::ocl::KernelArg::CONSTANT)}, + {"KernelArg_LOCAL", static_cast(cv::ocl::KernelArg::LOCAL)}, + {"KERNEL_ARG_LOCAL", static_cast(cv::ocl::KernelArg::LOCAL)}, + {"KernelArg_NO_SIZE", static_cast(cv::ocl::KernelArg::NO_SIZE)}, + {"KERNEL_ARG_NO_SIZE", 
static_cast(cv::ocl::KernelArg::NO_SIZE)}, + {"KernelArg_PTR_ONLY", static_cast(cv::ocl::KernelArg::PTR_ONLY)}, + {"KERNEL_ARG_PTR_ONLY", static_cast(cv::ocl::KernelArg::PTR_ONLY)}, + {"KernelArg_READ_ONLY", static_cast(cv::ocl::KernelArg::READ_ONLY)}, + {"KERNEL_ARG_READ_ONLY", static_cast(cv::ocl::KernelArg::READ_ONLY)}, + {"KernelArg_READ_WRITE", static_cast(cv::ocl::KernelArg::READ_WRITE)}, + {"KERNEL_ARG_READ_WRITE", static_cast(cv::ocl::KernelArg::READ_WRITE)}, + {"KernelArg_WRITE_ONLY", static_cast(cv::ocl::KernelArg::WRITE_ONLY)}, + {"KERNEL_ARG_WRITE_ONLY", static_cast(cv::ocl::KernelArg::WRITE_ONLY)}, + {"OCL_VECTOR_DEFAULT", static_cast(cv::ocl::OCL_VECTOR_DEFAULT)}, + {"OCL_VECTOR_MAX", static_cast(cv::ocl::OCL_VECTOR_MAX)}, + {"OCL_VECTOR_OWN", static_cast(cv::ocl::OCL_VECTOR_OWN)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_OCL + PYOPENCV_EXTRA_CONSTANTS_OCL +#endif + {NULL, 0} +}; + +static PyMethodDef methods_ogl[] = { +#ifdef PYOPENCV_EXTRA_METHODS_OGL + PYOPENCV_EXTRA_METHODS_OGL +#endif + {NULL, NULL} +}; + +static ConstDef consts_ogl[] = { + {"Buffer_ARRAY_BUFFER", static_cast(cv::ogl::Buffer::ARRAY_BUFFER)}, + {"BUFFER_ARRAY_BUFFER", static_cast(cv::ogl::Buffer::ARRAY_BUFFER)}, + {"Buffer_ELEMENT_ARRAY_BUFFER", static_cast(cv::ogl::Buffer::ELEMENT_ARRAY_BUFFER)}, + {"BUFFER_ELEMENT_ARRAY_BUFFER", static_cast(cv::ogl::Buffer::ELEMENT_ARRAY_BUFFER)}, + {"Buffer_PIXEL_PACK_BUFFER", static_cast(cv::ogl::Buffer::PIXEL_PACK_BUFFER)}, + {"BUFFER_PIXEL_PACK_BUFFER", static_cast(cv::ogl::Buffer::PIXEL_PACK_BUFFER)}, + {"Buffer_PIXEL_UNPACK_BUFFER", static_cast(cv::ogl::Buffer::PIXEL_UNPACK_BUFFER)}, + {"BUFFER_PIXEL_UNPACK_BUFFER", static_cast(cv::ogl::Buffer::PIXEL_UNPACK_BUFFER)}, + {"Buffer_READ_ONLY", static_cast(cv::ogl::Buffer::READ_ONLY)}, + {"BUFFER_READ_ONLY", static_cast(cv::ogl::Buffer::READ_ONLY)}, + {"Buffer_READ_WRITE", static_cast(cv::ogl::Buffer::READ_WRITE)}, + {"BUFFER_READ_WRITE", static_cast(cv::ogl::Buffer::READ_WRITE)}, + {"Buffer_WRITE_ONLY", static_cast(cv::ogl::Buffer::WRITE_ONLY)}, + {"BUFFER_WRITE_ONLY", static_cast(cv::ogl::Buffer::WRITE_ONLY)}, + {"LINES", static_cast(cv::ogl::LINES)}, + {"LINE_LOOP", static_cast(cv::ogl::LINE_LOOP)}, + {"LINE_STRIP", static_cast(cv::ogl::LINE_STRIP)}, + {"POINTS", static_cast(cv::ogl::POINTS)}, + {"POLYGON", static_cast(cv::ogl::POLYGON)}, + {"QUADS", static_cast(cv::ogl::QUADS)}, + {"QUAD_STRIP", static_cast(cv::ogl::QUAD_STRIP)}, + {"TRIANGLES", static_cast(cv::ogl::TRIANGLES)}, + {"TRIANGLE_FAN", static_cast(cv::ogl::TRIANGLE_FAN)}, + {"TRIANGLE_STRIP", static_cast(cv::ogl::TRIANGLE_STRIP)}, + {"Texture2D_DEPTH_COMPONENT", static_cast(cv::ogl::Texture2D::DEPTH_COMPONENT)}, + {"TEXTURE2D_DEPTH_COMPONENT", static_cast(cv::ogl::Texture2D::DEPTH_COMPONENT)}, + {"Texture2D_NONE", static_cast(cv::ogl::Texture2D::NONE)}, + {"TEXTURE2D_NONE", static_cast(cv::ogl::Texture2D::NONE)}, + {"Texture2D_RGB", static_cast(cv::ogl::Texture2D::RGB)}, + {"TEXTURE2D_RGB", static_cast(cv::ogl::Texture2D::RGB)}, + {"Texture2D_RGBA", static_cast(cv::ogl::Texture2D::RGBA)}, + {"TEXTURE2D_RGBA", static_cast(cv::ogl::Texture2D::RGBA)}, +#ifdef PYOPENCV_EXTRA_CONSTANTS_OGL + PYOPENCV_EXTRA_CONSTANTS_OGL +#endif + {NULL, 0} +}; + +static PyMethodDef methods_parallel[] = { + {"setParallelForBackend", CV_PY_FN_WITH_KW_(pyopencv_cv_parallel_setParallelForBackend, 0), "setParallelForBackend(backendName[, propagateNumThreads]) -> retval\n. @brief Change OpenCV parallel_for backend\n. *\n. * @note This call is not thread-safe. 
Consider calling this function from the `main()` before any other OpenCV processing functions (and without any other created threads)."}, +#ifdef PYOPENCV_EXTRA_METHODS_PARALLEL + PYOPENCV_EXTRA_METHODS_PARALLEL +#endif + {NULL, NULL} +}; + +static ConstDef consts_parallel[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_PARALLEL + PYOPENCV_EXTRA_CONSTANTS_PARALLEL +#endif + {NULL, 0} +}; + +static PyMethodDef methods_samples[] = { + {"addSamplesDataSearchPath", CV_PY_FN_WITH_KW_(pyopencv_cv_samples_addSamplesDataSearchPath, 0), "addSamplesDataSearchPath(path) -> None\n. @brief Override search data path by adding new search location\n. \n. Use this only to override default behavior\n. Passed paths are used in LIFO order.\n. \n. @param path Path to used samples data"}, + {"addSamplesDataSearchSubDirectory", CV_PY_FN_WITH_KW_(pyopencv_cv_samples_addSamplesDataSearchSubDirectory, 0), "addSamplesDataSearchSubDirectory(subdir) -> None\n. @brief Append samples search data sub directory\n. \n. General usage is to add OpenCV modules name (`<opencv_contrib>/modules/<name>/samples/data` -> `<name>/samples/data` + `modules/<name>/samples/data`).\n. Passed subdirectories are used in LIFO order.\n. \n. @param subdir samples data sub directory"}, + {"findFile", CV_PY_FN_WITH_KW_(pyopencv_cv_samples_findFile, 0), "findFile(relative_path[, required[, silentMode]]) -> retval\n. @brief Try to find requested data file\n. \n. Search directories:\n. \n. 1. Directories passed via `addSamplesDataSearchPath()`\n. 2. OPENCV_SAMPLES_DATA_PATH_HINT environment variable\n. 3. OPENCV_SAMPLES_DATA_PATH environment variable\n. If parameter value is not empty and nothing is found then stop searching.\n. 4. Detects build/install path based on:\n. a. current working directory (CWD)\n. b. and/or binary module location (opencv_core/opencv_world, doesn't work with static linkage)\n. 5. Scan `<source>/{,data,samples/data}` directories if build directory is detected or the current directory is in source tree.\n. 6. Scan `<install>/share/OpenCV` directory if install directory is detected.\n. \n. @see cv::utils::findDataFile\n. \n. @param relative_path Relative path to data file\n. @param required Specify \"file not found\" handling.\n. If true, function prints information message and raises cv::Exception.\n. If false, function returns empty result\n. @param silentMode Disables messages\n. 
@return Returns path (absolute or relative to the current directory) or empty string if file is not found"}, + {"findFileOrKeep", CV_PY_FN_WITH_KW_(pyopencv_cv_samples_findFileOrKeep, 0), "findFileOrKeep(relative_path[, silentMode]) -> retval\n."}, +#ifdef PYOPENCV_EXTRA_METHODS_SAMPLES + PYOPENCV_EXTRA_METHODS_SAMPLES +#endif + {NULL, NULL} +}; + +static ConstDef consts_samples[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_SAMPLES + PYOPENCV_EXTRA_CONSTANTS_SAMPLES +#endif + {NULL, 0} +}; + +static PyMethodDef methods_segmentation[] = { +#ifdef PYOPENCV_EXTRA_METHODS_SEGMENTATION + PYOPENCV_EXTRA_METHODS_SEGMENTATION +#endif + {NULL, NULL} +}; + +static ConstDef consts_segmentation[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_SEGMENTATION + PYOPENCV_EXTRA_CONSTANTS_SEGMENTATION +#endif + {NULL, 0} +}; + +static PyMethodDef methods_utils[] = { + {"dumpBool", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpBool, 0), "dumpBool(argument) -> retval\n."}, + {"dumpCString", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpCString, 0), "dumpCString(argument) -> retval\n."}, + {"dumpDouble", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpDouble, 0), "dumpDouble(argument) -> retval\n."}, + {"dumpFloat", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpFloat, 0), "dumpFloat(argument) -> retval\n."}, + {"dumpInputArray", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpInputArray, 0), "dumpInputArray(argument) -> retval\n."}, + {"dumpInputArrayOfArrays", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpInputArrayOfArrays, 0), "dumpInputArrayOfArrays(argument) -> retval\n."}, + {"dumpInputOutputArray", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpInputOutputArray, 0), "dumpInputOutputArray(argument) -> retval, argument\n."}, + {"dumpInputOutputArrayOfArrays", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpInputOutputArrayOfArrays, 0), "dumpInputOutputArrayOfArrays(argument) -> retval, argument\n."}, + {"dumpInt", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpInt, 0), "dumpInt(argument) -> retval\n."}, + {"dumpRange", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpRange, 0), "dumpRange(argument) -> retval\n."}, + {"dumpRect", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpRect, 0), "dumpRect(argument) -> retval\n."}, + {"dumpRotatedRect", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpRotatedRect, 0), "dumpRotatedRect(argument) -> retval\n."}, + {"dumpSizeT", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpSizeT, 0), "dumpSizeT(argument) -> retval\n."}, + {"dumpString", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpString, 0), "dumpString(argument) -> retval\n."}, + {"dumpTermCriteria", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpTermCriteria, 0), "dumpTermCriteria(argument) -> retval\n."}, + {"dumpVectorOfDouble", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpVectorOfDouble, 0), "dumpVectorOfDouble(vec) -> retval\n."}, + {"dumpVectorOfInt", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpVectorOfInt, 0), "dumpVectorOfInt(vec) -> retval\n."}, + {"dumpVectorOfRect", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_dumpVectorOfRect, 0), "dumpVectorOfRect(vec) -> retval\n."}, + {"generateVectorOfInt", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_generateVectorOfInt, 0), "generateVectorOfInt(len) -> vec\n."}, + {"generateVectorOfMat", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_generateVectorOfMat, 0), "generateVectorOfMat(len, rows, cols, dtype[, vec]) -> vec\n."}, + {"generateVectorOfRect", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_generateVectorOfRect, 0), "generateVectorOfRect(len) -> vec\n."}, + {"testAsyncArray", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testAsyncArray, 0), "testAsyncArray(argument) -> retval\n."}, + {"testAsyncException", 
CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testAsyncException, 0), "testAsyncException() -> retval\n."}, + {"testOverloadResolution", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testOverloadResolution, 0), "testOverloadResolution(value[, point]) -> retval\n. \n\n\n\ntestOverloadResolution(rect) -> retval\n."}, + {"testOverwriteNativeMethod", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testOverwriteNativeMethod, 0), "testOverwriteNativeMethod(argument) -> retval\n."}, + {"testRaiseGeneralException", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testRaiseGeneralException, 0), "testRaiseGeneralException() -> None\n."}, + {"testReservedKeywordConversion", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_testReservedKeywordConversion, 0), "testReservedKeywordConversion(positional_argument[, lambda_[, from_]]) -> retval\n."}, +#ifdef PYOPENCV_EXTRA_METHODS_UTILS + PYOPENCV_EXTRA_METHODS_UTILS +#endif + {NULL, NULL} +}; + +static ConstDef consts_utils[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_UTILS + PYOPENCV_EXTRA_CONSTANTS_UTILS +#endif + {NULL, 0} +}; + +static PyMethodDef methods_utils_fs[] = { + {"getCacheDirectoryForDownloads", CV_PY_FN_WITH_KW_(pyopencv_cv_utils_fs_getCacheDirectoryForDownloads, 0), "getCacheDirectoryForDownloads() -> retval\n."}, +#ifdef PYOPENCV_EXTRA_METHODS_UTILS_FS + PYOPENCV_EXTRA_METHODS_UTILS_FS +#endif + {NULL, NULL} +}; + +static ConstDef consts_utils_fs[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_UTILS_FS + PYOPENCV_EXTRA_CONSTANTS_UTILS_FS +#endif + {NULL, 0} +}; + +static PyMethodDef methods_videoio_registry[] = { + {"getBackendName", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getBackendName, 0), "getBackendName(api) -> retval\n. @brief Returns backend API name or \"UnknownVideoAPI(xxx)\"\n. @param api backend ID (#VideoCaptureAPIs)"}, + {"getBackends", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getBackends, 0), "getBackends() -> retval\n. @brief Returns list of all available backends"}, + {"getCameraBackendPluginVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getCameraBackendPluginVersion, 0), "getCameraBackendPluginVersion(api) -> retval, version_ABI, version_API\n. @brief Returns description and ABI/API version of videoio plugin's camera interface"}, + {"getCameraBackends", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getCameraBackends, 0), "getCameraBackends() -> retval\n. @brief Returns list of available backends which works via `cv::VideoCapture(int index)`"}, + {"getStreamBackendPluginVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getStreamBackendPluginVersion, 0), "getStreamBackendPluginVersion(api) -> retval, version_ABI, version_API\n. @brief Returns description and ABI/API version of videoio plugin's stream capture interface"}, + {"getStreamBackends", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getStreamBackends, 0), "getStreamBackends() -> retval\n. @brief Returns list of available backends which works via `cv::VideoCapture(filename)`"}, + {"getWriterBackendPluginVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getWriterBackendPluginVersion, 0), "getWriterBackendPluginVersion(api) -> retval, version_ABI, version_API\n. @brief Returns description and ABI/API version of videoio plugin's writer interface"}, + {"getWriterBackends", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_getWriterBackends, 0), "getWriterBackends() -> retval\n. @brief Returns list of available backends which works via `cv::VideoWriter()`"}, + {"hasBackend", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_hasBackend, 0), "hasBackend(api) -> retval\n. 
@brief Returns true if backend is available"}, + {"isBackendBuiltIn", CV_PY_FN_WITH_KW_(pyopencv_cv_videoio_registry_isBackendBuiltIn, 0), "isBackendBuiltIn(api) -> retval\n. @brief Returns true if backend is built in (false if backend is used as plugin)"}, +#ifdef PYOPENCV_EXTRA_METHODS_VIDEOIO_REGISTRY + PYOPENCV_EXTRA_METHODS_VIDEOIO_REGISTRY +#endif + {NULL, NULL} +}; + +static ConstDef consts_videoio_registry[] = { +#ifdef PYOPENCV_EXTRA_CONSTANTS_VIDEOIO_REGISTRY + PYOPENCV_EXTRA_CONSTANTS_VIDEOIO_REGISTRY +#endif + {NULL, 0} +}; + diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_types.h b/generated/modules/python_bindings_generator/pyopencv_generated_types.h new file mode 100644 index 0000000..13e0f49 --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_types.h @@ -0,0 +1,29 @@ +CVPY_TYPE(Algorithm, Algorithm, Ptr<cv::Algorithm>, Ptr, NoBase, 0); +CVPY_TYPE(AsyncArray, AsyncArray, Ptr<cv::AsyncArray>, Ptr, NoBase, pyopencv_cv_AsyncArray_AsyncArray); +CVPY_TYPE(cuda_GpuMat, cuda_GpuMat, Ptr<cv::cuda::GpuMat>, Ptr, NoBase, pyopencv_cv_cuda_cuda_GpuMat_GpuMat); +CVPY_TYPE(cuda_GpuMat_Allocator, cuda_GpuMat_Allocator, Ptr<cv::cuda::GpuMat::Allocator>, Ptr, NoBase, 0); +CVPY_TYPE(cuda_GpuData, cuda_GpuData, Ptr<cv::cuda::GpuData>, Ptr, NoBase, 0); +CVPY_TYPE(cuda_GpuMatND, cuda_GpuMatND, Ptr<cv::cuda::GpuMatND>, Ptr, NoBase, 0); +CVPY_TYPE(cuda_BufferPool, cuda_BufferPool, Ptr<cv::cuda::BufferPool>, Ptr, NoBase, 0); +CVPY_TYPE(cuda_HostMem, cuda_HostMem, Ptr<cv::cuda::HostMem>, Ptr, NoBase, pyopencv_cv_cuda_cuda_HostMem_HostMem); +CVPY_TYPE(cuda_Stream, cuda_Stream, Ptr<cv::cuda::Stream>, Ptr, NoBase, pyopencv_cv_cuda_cuda_Stream_Stream); +CVPY_TYPE(cuda_Event, cuda_Event, Ptr<cv::cuda::Event>, Ptr, NoBase, pyopencv_cv_cuda_cuda_Event_Event); +CVPY_TYPE(cuda_TargetArchs, cuda_TargetArchs, Ptr<cv::cuda::TargetArchs>, Ptr, NoBase, 0); +CVPY_TYPE(cuda_DeviceInfo, cuda_DeviceInfo, Ptr<cv::cuda::DeviceInfo>, Ptr, NoBase, pyopencv_cv_cuda_cuda_DeviceInfo_DeviceInfo); +CVPY_TYPE(ocl_Device, ocl_Device, cv::ocl::Device, Device, NoBase, pyopencv_cv_ocl_ocl_Device_Device); +CVPY_TYPE(ocl_OpenCLExecutionContext, ocl_OpenCLExecutionContext, Ptr<cv::ocl::OpenCLExecutionContext>, Ptr, NoBase, 0); +CVPY_TYPE(FileStorage, FileStorage, Ptr<cv::FileStorage>, Ptr, NoBase, pyopencv_cv_FileStorage_FileStorage); +CVPY_TYPE(FileNode, FileNode, cv::FileNode, FileNode, NoBase, pyopencv_cv_FileNode_FileNode); +CVPY_TYPE(KeyPoint, KeyPoint, cv::KeyPoint, KeyPoint, NoBase, pyopencv_cv_KeyPoint_KeyPoint); +CVPY_TYPE(DMatch, DMatch, cv::DMatch, DMatch, NoBase, pyopencv_cv_DMatch_DMatch); +CVPY_TYPE(TickMeter, TickMeter, Ptr<cv::TickMeter>, Ptr, NoBase, pyopencv_cv_TickMeter_TickMeter); +CVPY_TYPE(UMat, UMat, Ptr<cv::UMat>, Ptr, NoBase, pyopencv_cv_UMat_UMat); +CVPY_TYPE(GeneralizedHough, GeneralizedHough, Ptr<cv::GeneralizedHough>, Ptr, Algorithm, 0); +CVPY_TYPE(GeneralizedHoughBallard, GeneralizedHoughBallard, Ptr<cv::GeneralizedHoughBallard>, Ptr, GeneralizedHough, 0); +CVPY_TYPE(GeneralizedHoughGuil, GeneralizedHoughGuil, Ptr<cv::GeneralizedHoughGuil>, Ptr, GeneralizedHough, 0); +CVPY_TYPE(CLAHE, CLAHE, Ptr<cv::CLAHE>, Ptr, Algorithm, 0); +CVPY_TYPE(Subdiv2D, Subdiv2D, Ptr<cv::Subdiv2D>, Ptr, NoBase, pyopencv_cv_Subdiv2D_Subdiv2D); +CVPY_TYPE(LineSegmentDetector, LineSegmentDetector, Ptr<cv::LineSegmentDetector>, Ptr, Algorithm, 0); +CVPY_TYPE(segmentation_IntelligentScissorsMB, segmentation_IntelligentScissorsMB, cv::segmentation::IntelligentScissorsMB, IntelligentScissorsMB, NoBase, pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_IntelligentScissorsMB); +CVPY_TYPE(VideoCapture, VideoCapture, Ptr<cv::VideoCapture>, Ptr, NoBase, pyopencv_cv_VideoCapture_VideoCapture); +CVPY_TYPE(VideoWriter, VideoWriter, Ptr<cv::VideoWriter>, Ptr, NoBase, pyopencv_cv_VideoWriter_VideoWriter); diff --git a/generated/modules/python_bindings_generator/pyopencv_generated_types_content.h 
b/generated/modules/python_bindings_generator/pyopencv_generated_types_content.h new file mode 100644 index 0000000..5dd7e8d --- /dev/null +++ b/generated/modules/python_bindings_generator/pyopencv_generated_types_content.h @@ -0,0 +1,12635 @@ +//================================================================================ +// Algorithm (Generic) +//================================================================================ + +// GetSet (Algorithm) + + + +// Methods (Algorithm) + +static PyObject* pyopencv_cv_Algorithm_clear(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->clear()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_Algorithm_empty(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->empty()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Algorithm_getDefaultName(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getDefaultName()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Algorithm_read(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_fn = NULL; + FileNode fn; + + const char* keywords[] = { "fn", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Algorithm.read", (char**)keywords, &pyobj_fn) && + pyopencv_to_safe(pyobj_fn, fn, ArgInfo("fn", 0)) ) + { + ERRWRAP2(_self_->read(fn)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_Algorithm_save(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_filename = NULL; + String filename; + + const char* keywords[] = { "filename", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Algorithm.save", (char**)keywords, &pyobj_filename) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) ) + { + ERRWRAP2(_self_->save(filename)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_Algorithm_write(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Algorithm_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Algorithm' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_fs = NULL; + Ptr fs; + PyObject* 
pyobj_name = NULL; + String name; + + const char* keywords[] = { "fs", "name", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:Algorithm.write", (char**)keywords, &pyobj_fs, &pyobj_name) && + pyopencv_to_safe(pyobj_fs, fs, ArgInfo("fs", 0)) && + pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) ) + { + ERRWRAP2(_self_->write(fs, name)); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (Algorithm) + +static PyGetSetDef pyopencv_Algorithm_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_Algorithm_methods[] = +{ + {"clear", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_clear, 0), "clear() -> None\n. @brief Clears the algorithm state"}, + {"empty", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_empty, 0), "empty() -> retval\n. @brief Returns true if the Algorithm is empty (e.g. in the very beginning or after unsuccessful read"}, + {"getDefaultName", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_getDefaultName, 0), "getDefaultName() -> retval\n. Returns the algorithm string identifier.\n. This string is used as top level xml/yml node tag when the object is saved to a file or string."}, + {"read", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_read, 0), "read(fn) -> None\n. @brief Reads algorithm parameters from a file storage"}, + {"save", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_save, 0), "save(filename) -> None\n. Saves the algorithm to a file.\n. In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs)."}, + {"write", CV_PY_FN_WITH_KW_(pyopencv_cv_Algorithm_write, 0), "write(fs[, name]) -> None\n. @brief simplified API for language bindings\n. * @overload"}, + + {NULL, NULL} +}; + +// Converter (Algorithm) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_Algorithm_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_Algorithm_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// AsyncArray (Generic) +//================================================================================ + +// GetSet (AsyncArray) + + + +// Methods (AsyncArray) + +static int pyopencv_cv_AsyncArray_AsyncArray(pyopencv_AsyncArray_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::AsyncArray())); + return 0; + } + + return -1; +} + +static PyObject* pyopencv_cv_AsyncArray_get(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_AsyncArray_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'AsyncArray' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:AsyncArray.get", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->get(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "dst", NULL }; + 
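+ /* Overload dispatch pattern used by these generated wrappers: pyPrepareArgumentConversionErrorsStorage(4) above reserves room for the four candidate signatures of AsyncArray::get(); each candidate succeeds only if PyArg_ParseTupleAndKeywords() and every pyopencv_to_safe() conversion accept the Python arguments, otherwise the failure is recorded via pyPopulateArgumentConversionErrors() and the next candidate is tried; only after all candidates fail does pyRaiseCVOverloadException("get") raise the collected conversion errors to Python. */ 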
if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:AsyncArray.get", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->get(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_timeoutNs = NULL; + double timeoutNs=0; + bool retval; + + const char* keywords[] = { "timeoutNs", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:AsyncArray.get", (char**)keywords, &pyobj_timeoutNs, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_timeoutNs, timeoutNs, ArgInfo("timeoutNs", 0)) ) + { + ERRWRAP2(retval = _self_->get(dst, timeoutNs)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_timeoutNs = NULL; + double timeoutNs=0; + bool retval; + + const char* keywords[] = { "timeoutNs", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:AsyncArray.get", (char**)keywords, &pyobj_timeoutNs, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_timeoutNs, timeoutNs, ArgInfo("timeoutNs", 0)) ) + { + ERRWRAP2(retval = _self_->get(dst, timeoutNs)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dst)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("get"); + + return NULL; +} + +static PyObject* pyopencv_cv_AsyncArray_release(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_AsyncArray_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'AsyncArray' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->release()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_AsyncArray_valid(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_AsyncArray_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'AsyncArray' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->valid()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_AsyncArray_wait_for(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_AsyncArray_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'AsyncArray' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_timeoutNs = NULL; + double timeoutNs=0; + bool retval; + + const char* keywords[] = { "timeoutNs", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:AsyncArray.wait_for", (char**)keywords, &pyobj_timeoutNs) && + pyopencv_to_safe(pyobj_timeoutNs, timeoutNs, ArgInfo("timeoutNs", 0)) ) + { + ERRWRAP2(retval = _self_->wait_for(timeoutNs)); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (AsyncArray) + +static PyGetSetDef pyopencv_AsyncArray_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_AsyncArray_methods[] = +{ + {"get", CV_PY_FN_WITH_KW_(pyopencv_cv_AsyncArray_get, 0), "get([, dst]) -> dst\n. Fetch the result.\n. @param[out] dst destination array\n. \n. 
Waits for result until container has valid result.\n. Throws exception if exception was stored as a result.\n. \n. Throws exception on invalid container state.\n. \n. @note Result or stored exception can be fetched only once.\n\n\n\nget(timeoutNs[, dst]) -> retval, dst\n. Retrieving the result with timeout\n. @param[out] dst destination array\n. @param[in] timeoutNs timeout in nanoseconds, -1 for infinite wait\n. \n. @returns true if result is ready, false if the timeout has expired\n. \n. @note Result or stored exception can be fetched only once."}, + {"release", CV_PY_FN_WITH_KW_(pyopencv_cv_AsyncArray_release, 0), "release() -> None\n."}, + {"valid", CV_PY_FN_WITH_KW_(pyopencv_cv_AsyncArray_valid, 0), "valid() -> retval\n."}, + {"wait_for", CV_PY_FN_WITH_KW_(pyopencv_cv_AsyncArray_wait_for, 0), "wait_for(timeoutNs) -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (AsyncArray) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_AsyncArray_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_AsyncArray_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// CLAHE (Generic) +//================================================================================ + +// GetSet (CLAHE) + + + +// Methods (CLAHE) + +static PyObject* pyopencv_cv_CLAHE_apply(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_src = NULL; + Mat src; + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:CLAHE.apply", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->apply(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_src = NULL; + UMat src; + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "src", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:CLAHE.apply", (char**)keywords, &pyobj_src, &pyobj_dst) && + pyopencv_to_safe(pyobj_src, src, ArgInfo("src", 0)) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->apply(src, dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("apply"); + + return NULL; +} + +static PyObject* pyopencv_cv_CLAHE_collectGarbage(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->collectGarbage()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_CLAHE_getClipLimit(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if 
(!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getClipLimit()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_CLAHE_getTilesGridSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + Size retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getTilesGridSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_CLAHE_setClipLimit(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_clipLimit = NULL; + double clipLimit=0; + + const char* keywords[] = { "clipLimit", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:CLAHE.setClipLimit", (char**)keywords, &pyobj_clipLimit) && + pyopencv_to_safe(pyobj_clipLimit, clipLimit, ArgInfo("clipLimit", 0)) ) + { + ERRWRAP2(_self_->setClipLimit(clipLimit)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_CLAHE_setTilesGridSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_CLAHE_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'CLAHE' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_tileGridSize = NULL; + Size tileGridSize; + + const char* keywords[] = { "tileGridSize", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:CLAHE.setTilesGridSize", (char**)keywords, &pyobj_tileGridSize) && + pyopencv_to_safe(pyobj_tileGridSize, tileGridSize, ArgInfo("tileGridSize", 0)) ) + { + ERRWRAP2(_self_->setTilesGridSize(tileGridSize)); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (CLAHE) + +static PyGetSetDef pyopencv_CLAHE_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_CLAHE_methods[] = +{ + {"apply", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_apply, 0), "apply(src[, dst]) -> dst\n. @brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.\n. \n. @param src Source image of type CV_8UC1 or CV_16UC1.\n. @param dst Destination image."}, + {"collectGarbage", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_collectGarbage, 0), "collectGarbage() -> None\n."}, + {"getClipLimit", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_getClipLimit, 0), "getClipLimit() -> retval\n."}, + {"getTilesGridSize", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_getTilesGridSize, 0), "getTilesGridSize() -> retval\n."}, + {"setClipLimit", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_setClipLimit, 0), "setClipLimit(clipLimit) -> None\n. @brief Sets threshold for contrast limiting.\n. \n. @param clipLimit threshold value."}, + {"setTilesGridSize", CV_PY_FN_WITH_KW_(pyopencv_cv_CLAHE_setTilesGridSize, 0), "setTilesGridSize(tileGridSize) -> None\n. @brief Sets size of grid for histogram equalization. Input image will be divided into\n. equally sized rectangular tiles.\n. \n. 
@param tileGridSize defines the number of tiles in row and column."}, + + {NULL, NULL} +}; + +// Converter (CLAHE) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_CLAHE_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_CLAHE_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// DMatch (Generic) +//================================================================================ + +// GetSet (DMatch) + + +static PyObject* pyopencv_DMatch_get_distance(pyopencv_DMatch_t* p, void *closure) +{ + return pyopencv_from(p->v.distance); +} + +static int pyopencv_DMatch_set_distance(pyopencv_DMatch_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the distance attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.distance, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_DMatch_get_imgIdx(pyopencv_DMatch_t* p, void *closure) +{ + return pyopencv_from(p->v.imgIdx); +} + +static int pyopencv_DMatch_set_imgIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the imgIdx attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.imgIdx, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_DMatch_get_queryIdx(pyopencv_DMatch_t* p, void *closure) +{ + return pyopencv_from(p->v.queryIdx); +} + +static int pyopencv_DMatch_set_queryIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the queryIdx attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.queryIdx, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_DMatch_get_trainIdx(pyopencv_DMatch_t* p, void *closure) +{ + return pyopencv_from(p->v.trainIdx); +} + +static int pyopencv_DMatch_set_trainIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the trainIdx attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.trainIdx, ArgInfo("value", false)) ? 
+
+//================================================================================
+// DMatch (Generic)
+//================================================================================
+
+// GetSet (DMatch)
+
+
+static PyObject* pyopencv_DMatch_get_distance(pyopencv_DMatch_t* p, void *closure)
+{
+    return pyopencv_from(p->v.distance);
+}
+
+static int pyopencv_DMatch_set_distance(pyopencv_DMatch_t* p, PyObject *value, void *closure)
+{
+    if (!value)
+    {
+        PyErr_SetString(PyExc_TypeError, "Cannot delete the distance attribute");
+        return -1;
+    }
+    return pyopencv_to_safe(value, p->v.distance, ArgInfo("value", false)) ? 0 : -1;
+}
+
+static PyObject* pyopencv_DMatch_get_imgIdx(pyopencv_DMatch_t* p, void *closure)
+{
+    return pyopencv_from(p->v.imgIdx);
+}
+
+static int pyopencv_DMatch_set_imgIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure)
+{
+    if (!value)
+    {
+        PyErr_SetString(PyExc_TypeError, "Cannot delete the imgIdx attribute");
+        return -1;
+    }
+    return pyopencv_to_safe(value, p->v.imgIdx, ArgInfo("value", false)) ? 0 : -1;
+}
+
+static PyObject* pyopencv_DMatch_get_queryIdx(pyopencv_DMatch_t* p, void *closure)
+{
+    return pyopencv_from(p->v.queryIdx);
+}
+
+static int pyopencv_DMatch_set_queryIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure)
+{
+    if (!value)
+    {
+        PyErr_SetString(PyExc_TypeError, "Cannot delete the queryIdx attribute");
+        return -1;
+    }
+    return pyopencv_to_safe(value, p->v.queryIdx, ArgInfo("value", false)) ? 0 : -1;
+}
+
+static PyObject* pyopencv_DMatch_get_trainIdx(pyopencv_DMatch_t* p, void *closure)
+{
+    return pyopencv_from(p->v.trainIdx);
+}
+
+static int pyopencv_DMatch_set_trainIdx(pyopencv_DMatch_t* p, PyObject *value, void *closure)
+{
+    if (!value)
+    {
+        PyErr_SetString(PyExc_TypeError, "Cannot delete the trainIdx attribute");
+        return -1;
+    }
+    return pyopencv_to_safe(value, p->v.trainIdx, ArgInfo("value", false)) ? 0 : -1;
+}
+
+
+// Methods (DMatch)
+
+static int pyopencv_cv_DMatch_DMatch(pyopencv_DMatch_t* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+    pyPrepareArgumentConversionErrorsStorage(3);
+
+    {
+
+        if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+        {
+            if(self) ERRWRAP2(new (&(self->v)) cv::DMatch());
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj__queryIdx = NULL;
+        int _queryIdx=0;
+        PyObject* pyobj__trainIdx = NULL;
+        int _trainIdx=0;
+        PyObject* pyobj__distance = NULL;
+        float _distance=0.f;
+
+        const char* keywords[] = { "_queryIdx", "_trainIdx", "_distance", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:DMatch", (char**)keywords, &pyobj__queryIdx, &pyobj__trainIdx, &pyobj__distance) &&
+            pyopencv_to_safe(pyobj__queryIdx, _queryIdx, ArgInfo("_queryIdx", 0)) &&
+            pyopencv_to_safe(pyobj__trainIdx, _trainIdx, ArgInfo("_trainIdx", 0)) &&
+            pyopencv_to_safe(pyobj__distance, _distance, ArgInfo("_distance", 0)) )
+        {
+            if(self) ERRWRAP2(new (&(self->v)) cv::DMatch(_queryIdx, _trainIdx, _distance));
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj__queryIdx = NULL;
+        int _queryIdx=0;
+        PyObject* pyobj__trainIdx = NULL;
+        int _trainIdx=0;
+        PyObject* pyobj__imgIdx = NULL;
+        int _imgIdx=0;
+        PyObject* pyobj__distance = NULL;
+        float _distance=0.f;
+
+        const char* keywords[] = { "_queryIdx", "_trainIdx", "_imgIdx", "_distance", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:DMatch", (char**)keywords, &pyobj__queryIdx, &pyobj__trainIdx, &pyobj__imgIdx, &pyobj__distance) &&
+            pyopencv_to_safe(pyobj__queryIdx, _queryIdx, ArgInfo("_queryIdx", 0)) &&
+            pyopencv_to_safe(pyobj__trainIdx, _trainIdx, ArgInfo("_trainIdx", 0)) &&
+            pyopencv_to_safe(pyobj__imgIdx, _imgIdx, ArgInfo("_imgIdx", 0)) &&
+            pyopencv_to_safe(pyobj__distance, _distance, ArgInfo("_distance", 0)) )
+        {
+            if(self) ERRWRAP2(new (&(self->v)) cv::DMatch(_queryIdx, _trainIdx, _imgIdx, _distance));
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("DMatch");
+
+    return -1;
+}
+
+
+
+// Tables (DMatch)
+
+static PyGetSetDef pyopencv_DMatch_getseters[] =
+{
+    {(char*)"distance", (getter)pyopencv_DMatch_get_distance, (setter)pyopencv_DMatch_set_distance, (char*)"distance", NULL},
+    {(char*)"imgIdx", (getter)pyopencv_DMatch_get_imgIdx, (setter)pyopencv_DMatch_set_imgIdx, (char*)"imgIdx", NULL},
+    {(char*)"queryIdx", (getter)pyopencv_DMatch_get_queryIdx, (setter)pyopencv_DMatch_set_queryIdx, (char*)"queryIdx", NULL},
+    {(char*)"trainIdx", (getter)pyopencv_DMatch_get_trainIdx, (setter)pyopencv_DMatch_set_trainIdx, (char*)"trainIdx", NULL},
+    {NULL} /* Sentinel */
+};
+
+static PyMethodDef pyopencv_DMatch_methods[] =
+{
+
+    {NULL, NULL}
+};
+
+// Converter (DMatch)
+
+template<>
+struct PyOpenCV_Converter< cv::DMatch >
+{
+    static PyObject* from(const cv::DMatch& r)
+    {
+        return pyopencv_DMatch_Instance(r);
+    }
+    static bool to(PyObject* src, cv::DMatch& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        cv::DMatch * dst_;
+        if (pyopencv_DMatch_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected cv::DMatch for argument '%s'", info.name);
+        return false;
+    }
+};
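+
+// Illustration only (not part of the generated bindings): on the Python
+// side the DMatch wrapper behaves as a small mutable record.
+//
+//     import cv2
+//     m = cv2.DMatch(0, 5, 0.25)      # _queryIdx, _trainIdx, _distance
+//     m.imgIdx = 1                    # attributes are read/write
+//     print(m.queryIdx, m.trainIdx, m.distance)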
+
+//================================================================================
+// FileNode (Generic)
+//================================================================================
+
+// GetSet (FileNode)
+
+
+
+// Methods (FileNode)
+
+static int pyopencv_cv_FileNode_FileNode(pyopencv_FileNode_t* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        if(self) ERRWRAP2(new (&(self->v)) cv::FileNode());
+        return 0;
+    }
+
+    return -1;
+}
+
+static PyObject* pyopencv_cv_FileNode_at(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    PyObject* pyobj_i = NULL;
+    int i=0;
+    FileNode retval;
+
+    const char* keywords[] = { "i", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:FileNode.at", (char**)keywords, &pyobj_i) &&
+        pyopencv_to_safe(pyobj_i, i, ArgInfo("i", 0)) )
+    {
+        ERRWRAP2(retval = _self_->operator[](i));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_empty(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->empty());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_getNode(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    char* nodename=(char*)"";
+    FileNode retval;
+
+    const char* keywords[] = { "nodename", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "s:FileNode.getNode", (char**)keywords, &nodename) )
+    {
+        ERRWRAP2(retval = _self_->operator[](nodename));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isInt(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isInt());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isMap(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isMap());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isNamed(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isNamed());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isNone(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isNone());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isReal(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isReal());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isSeq(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isSeq());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_isString(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isString());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_keys(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    std::vector<String> retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->keys());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_mat(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    Mat retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->mat());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_name(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    std::string retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->name());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_rawSize(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    size_t retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->rawSize());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_real(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->real());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_size(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    size_t retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->size());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_string(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    std::string retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->string());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileNode_type(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    cv::FileNode * self1 = 0;
+    if (!pyopencv_FileNode_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileNode' or its derivative)");
+    cv::FileNode* _self_ = (self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->type());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+
+
+// Tables (FileNode)
+
+static PyGetSetDef pyopencv_FileNode_getseters[] =
+{
+    {NULL} /* Sentinel */
+};
+
+static PyMethodDef pyopencv_FileNode_methods[] =
+{
+    {"at", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_at, 0), "at(i) -> retval\n. @overload\n. @param i Index of an element in the sequence node."},
+    {"empty", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_empty, 0), "empty() -> retval\n."},
+    {"getNode", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_getNode, 0), "getNode(nodename) -> retval\n. @overload\n. @param nodename Name of an element in the mapping node."},
+    {"isInt", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isInt, 0), "isInt() -> retval\n."},
+    {"isMap", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isMap, 0), "isMap() -> retval\n."},
+    {"isNamed", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isNamed, 0), "isNamed() -> retval\n."},
+    {"isNone", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isNone, 0), "isNone() -> retval\n."},
+    {"isReal", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isReal, 0), "isReal() -> retval\n."},
+    {"isSeq", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isSeq, 0), "isSeq() -> retval\n."},
+    {"isString", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_isString, 0), "isString() -> retval\n."},
+    {"keys", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_keys, 0), "keys() -> retval\n. @brief Returns keys of a mapping node.\n. @returns Keys of a mapping node."},
+    {"mat", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_mat, 0), "mat() -> retval\n."},
+    {"name", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_name, 0), "name() -> retval\n."},
+    {"rawSize", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_rawSize, 0), "rawSize() -> retval\n."},
+    {"real", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_real, 0), "real() -> retval\n. Internal method used when reading FileStorage.\n. Sets the type (int, real or string) and value of the previously created node."},
+    {"size", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_size, 0), "size() -> retval\n."},
+    {"string", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_string, 0), "string() -> retval\n."},
+    {"type", CV_PY_FN_WITH_KW_(pyopencv_cv_FileNode_type, 0), "type() -> retval\n. @brief Returns type of the node.\n. @returns Type of the node. See FileNode::Type"},
+
+    {NULL, NULL}
+};
+
+// Converter (FileNode)
+
+template<>
+struct PyOpenCV_Converter< cv::FileNode >
+{
+    static PyObject* from(const cv::FileNode& r)
+    {
+        return pyopencv_FileNode_Instance(r);
+    }
+    static bool to(PyObject* src, cv::FileNode& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        cv::FileNode * dst_;
+        if (pyopencv_FileNode_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected cv::FileNode for argument '%s'", info.name);
+        return false;
+    }
+};
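+
+// Illustration only (not part of the generated bindings): FileNode objects
+// are normally obtained from a FileStorage and queried via the wrappers
+// above; the file and key names below are hypothetical.
+//
+//     import cv2
+//     fs = cv2.FileStorage("params.yml", cv2.FILE_STORAGE_READ)
+//     node = fs.getNode("camera_matrix")
+//     if not node.empty():
+//         M = node.mat()              # matrix-valued node
+//     fs.release()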
+
+//================================================================================
+// FileStorage (Generic)
+//================================================================================
+
+// GetSet (FileStorage)
+
+
+
+// Methods (FileStorage)
+
+static int pyopencv_cv_FileStorage_FileStorage(pyopencv_FileStorage_t* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+    pyPrepareArgumentConversionErrorsStorage(2);
+
+    {
+
+        if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+        {
+            new (&(self->v)) Ptr<cv::FileStorage>(); // init Ptr with placement new
+            if(self) ERRWRAP2(self->v.reset(new cv::FileStorage()));
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_filename = NULL;
+        String filename;
+        PyObject* pyobj_flags = NULL;
+        int flags=0;
+        PyObject* pyobj_encoding = NULL;
+        String encoding;
+
+        const char* keywords[] = { "filename", "flags", "encoding", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:FileStorage", (char**)keywords, &pyobj_filename, &pyobj_flags, &pyobj_encoding) &&
+            pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) &&
+            pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) &&
+            pyopencv_to_safe(pyobj_encoding, encoding, ArgInfo("encoding", 0)) )
+        {
+            new (&(self->v)) Ptr<cv::FileStorage>(); // init Ptr with placement new
+            if(self) ERRWRAP2(self->v.reset(new cv::FileStorage(filename, flags, encoding)));
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("FileStorage");
+
+    return -1;
+}
+
+static PyObject* pyopencv_cv_FileStorage_endWriteStruct(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(_self_->endWriteStruct());
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_getFirstTopLevelNode(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    FileNode retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getFirstTopLevelNode());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_getFormat(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getFormat());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_getNode(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    char* nodename=(char*)"";
+    FileNode retval;
+
+    const char* keywords[] = { "nodename", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "s:FileStorage.getNode", (char**)keywords, &nodename) )
+    {
+        ERRWRAP2(retval = _self_->operator[](nodename));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_isOpened(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    bool retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->isOpened());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_open(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    PyObject* pyobj_filename = NULL;
+    String filename;
+    PyObject* pyobj_flags = NULL;
+    int flags=0;
+    PyObject* pyobj_encoding = NULL;
+    String encoding;
+    bool retval;
+
+    const char* keywords[] = { "filename", "flags", "encoding", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:FileStorage.open", (char**)keywords, &pyobj_filename, &pyobj_flags, &pyobj_encoding) &&
+        pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) &&
+        pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) &&
+        pyopencv_to_safe(pyobj_encoding, encoding, ArgInfo("encoding", 0)) )
+    {
+        ERRWRAP2(retval = _self_->open(filename, flags, encoding));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_release(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(_self_->release());
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_releaseAndGetString(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    String retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->releaseAndGetString());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_root(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    PyObject* pyobj_streamidx = NULL;
+    int streamidx=0;
+    FileNode retval;
+
+    const char* keywords[] = { "streamidx", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:FileStorage.root", (char**)keywords, &pyobj_streamidx) &&
+        pyopencv_to_safe(pyobj_streamidx, streamidx, ArgInfo("streamidx", 0)) )
+    {
+        ERRWRAP2(retval = _self_->root(streamidx));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_startWriteStruct(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    PyObject* pyobj_name = NULL;
+    String name;
+    PyObject* pyobj_flags = NULL;
+    int flags=0;
+    PyObject* pyobj_typeName = NULL;
+    String typeName;
+
+    const char* keywords[] = { "name", "flags", "typeName", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:FileStorage.startWriteStruct", (char**)keywords, &pyobj_name, &pyobj_flags, &pyobj_typeName) &&
+        pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+        pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) &&
+        pyopencv_to_safe(pyobj_typeName, typeName, ArgInfo("typeName", 0)) )
+    {
+        ERRWRAP2(_self_->startWriteStruct(name, flags, typeName));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_write(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    pyPrepareArgumentConversionErrorsStorage(5);
+
+    {
+        PyObject* pyobj_name = NULL;
+        String name;
+        PyObject* pyobj_val = NULL;
+        int val=0;
+
+        const char* keywords[] = { "name", "val", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:FileStorage.write", (char**)keywords, &pyobj_name, &pyobj_val) &&
+            pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+            pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) )
+        {
+            ERRWRAP2(_self_->write(name, val));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_name = NULL;
+        String name;
+        PyObject* pyobj_val = NULL;
+        double val=0;
+
+        const char* keywords[] = { "name", "val", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:FileStorage.write", (char**)keywords, &pyobj_name, &pyobj_val) &&
+            pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+            pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) )
+        {
+            ERRWRAP2(_self_->write(name, val));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_name = NULL;
+        String name;
+        PyObject* pyobj_val = NULL;
+        String val;
+
+        const char* keywords[] = { "name", "val", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:FileStorage.write", (char**)keywords, &pyobj_name, &pyobj_val) &&
+            pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+            pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) )
+        {
+            ERRWRAP2(_self_->write(name, val));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_name = NULL;
+        String name;
+        PyObject* pyobj_val = NULL;
+        Mat val;
+
+        const char* keywords[] = { "name", "val", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:FileStorage.write", (char**)keywords, &pyobj_name, &pyobj_val) &&
+            pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+            pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) )
+        {
+            ERRWRAP2(_self_->write(name, val));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_name = NULL;
+        String name;
+        PyObject* pyobj_val = NULL;
+        vector_String val;
+
+        const char* keywords[] = { "name", "val", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:FileStorage.write", (char**)keywords, &pyobj_name, &pyobj_val) &&
+            pyopencv_to_safe(pyobj_name, name, ArgInfo("name", 0)) &&
+            pyopencv_to_safe(pyobj_val, val, ArgInfo("val", 0)) )
+        {
+            ERRWRAP2(_self_->write(name, val));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("write");
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_FileStorage_writeComment(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::FileStorage> * self1 = 0;
+    if (!pyopencv_FileStorage_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'FileStorage' or its derivative)");
+    Ptr<cv::FileStorage> _self_ = *(self1);
+    PyObject* pyobj_comment = NULL;
+    String comment;
+    PyObject* pyobj_append = NULL;
+    bool append=false;
+
+    const char* keywords[] = { "comment", "append", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:FileStorage.writeComment", (char**)keywords, &pyobj_comment, &pyobj_append) &&
+        pyopencv_to_safe(pyobj_comment, comment, ArgInfo("comment", 0)) &&
+        pyopencv_to_safe(pyobj_append, append, ArgInfo("append", 0)) )
+    {
+        ERRWRAP2(_self_->writeComment(comment, append));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+
+
+// Tables (FileStorage)
+
+static PyGetSetDef pyopencv_FileStorage_getseters[] =
+{
+    {NULL} /* Sentinel */
+};
+
+static PyMethodDef pyopencv_FileStorage_methods[] =
+{
+    {"endWriteStruct", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_endWriteStruct, 0), "endWriteStruct() -> None\n. @brief Finishes writing nested structure (should pair startWriteStruct())"},
+    {"getFirstTopLevelNode", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_getFirstTopLevelNode, 0), "getFirstTopLevelNode() -> retval\n. @brief Returns the first element of the top-level mapping.\n. @returns The first element of the top-level mapping."},
+    {"getFormat", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_getFormat, 0), "getFormat() -> retval\n. @brief Returns the current format.\n. * @returns The current format, see FileStorage::Mode"},
+    {"getNode", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_getNode, 0), "getNode(nodename) -> retval\n. @overload"},
+    {"isOpened", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_isOpened, 0), "isOpened() -> retval\n. @brief Checks whether the file is opened.\n. \n. @returns true if the object is associated with the current file and false otherwise. It is a\n. good practice to call this method after you tried to open a file."},
+    {"open", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_open, 0), "open(filename, flags[, encoding]) -> retval\n. @brief Opens a file.\n. \n. See description of parameters in FileStorage::FileStorage. The method calls FileStorage::release\n. before opening the file.\n. @param filename Name of the file to open or the text string to read the data from.\n. Extension of the file (.xml, .yml/.yaml or .json) determines its format (XML, YAML or JSON\n. respectively). Also you can append .gz to work with compressed files, for example myHugeMatrix.xml.gz. If both\n. FileStorage::WRITE and FileStorage::MEMORY flags are specified, source is used just to specify\n. the output file format (e.g. mydata.xml, .yml etc.). A file name can also contain parameters.\n. You can use this format, \"*?base64\" (e.g. \"file.json?base64\" (case sensitive)), as an alternative to\n. FileStorage::BASE64 flag.\n. @param flags Mode of operation. One of FileStorage::Mode\n. @param encoding Encoding of the file. Note that UTF-16 XML encoding is not supported currently and\n. you should use 8-bit encoding instead of it."},
+    {"release", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_release, 0), "release() -> None\n. @brief Closes the file and releases all the memory buffers.\n. \n. Call this method after all I/O operations with the storage are finished."},
+    {"releaseAndGetString", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_releaseAndGetString, 0), "releaseAndGetString() -> retval\n. @brief Closes the file and releases all the memory buffers.\n. \n. Call this method after all I/O operations with the storage are finished. If the storage was\n. opened for writing data and FileStorage::WRITE was specified"},
+    {"root", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_root, 0), "root([, streamidx]) -> retval\n. @brief Returns the top-level mapping\n. @param streamidx Zero-based index of the stream. In most cases there is only one stream in the file.\n. However, YAML supports multiple streams and so there can be several.\n. @returns The top-level mapping."},
+    {"startWriteStruct", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_startWriteStruct, 0), "startWriteStruct(name, flags[, typeName]) -> None\n. @brief Starts to write a nested structure (sequence or a mapping).\n. @param name name of the structure. When writing to sequences (a.k.a. \"arrays\"), pass an empty string.\n. @param flags type of the structure (FileNode::MAP or FileNode::SEQ (both with optional FileNode::FLOW)).\n. @param typeName optional name of the type you store. The effect of setting this depends on the storage format.\n. I.e. if the format has a specification for storing type information, this parameter is used."},
+    {"write", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_write, 0), "write(name, val) -> None\n. * @brief Simplified writing API to use with bindings.\n. * @param name Name of the written object. When writing to sequences (a.k.a. \"arrays\"), pass an empty string.\n. * @param val Value of the written object."},
+    {"writeComment", CV_PY_FN_WITH_KW_(pyopencv_cv_FileStorage_writeComment, 0), "writeComment(comment[, append]) -> None\n. @brief Writes a comment.\n. \n. The function writes a comment into file storage. The comments are skipped when the storage is read.\n. @param comment The written comment, single-line or multi-line\n. @param append If true, the function tries to put the comment at the end of current line.\n. Else if the comment is multi-line, or if it does not fit at the end of the current\n. line, the comment starts a new line."},
+
+    {NULL, NULL}
+};
+
+// Converter (FileStorage)
+
+template<>
+struct PyOpenCV_Converter< Ptr<cv::FileStorage> >
+{
+    static PyObject* from(const Ptr<cv::FileStorage>& r)
+    {
+        return pyopencv_FileStorage_Instance(r);
+    }
+    static bool to(PyObject* src, Ptr<cv::FileStorage>& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        Ptr<cv::FileStorage> * dst_;
+        if (pyopencv_FileStorage_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected Ptr<cv::FileStorage> for argument '%s'", info.name);
+        return false;
+    }
+};
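+
+// Illustration only (not part of the generated bindings): a write/read
+// round trip through the FileStorage wrappers above; the file name is
+// hypothetical. write() dispatches on the Python value's type to one of
+// the int/double/String/Mat/vector_String overloads.
+//
+//     import cv2
+//     import numpy as np
+//     fs = cv2.FileStorage("data.yml", cv2.FILE_STORAGE_WRITE)
+//     fs.write("M", np.eye(3))        # Mat overload
+//     fs.write("label", "calib")      # String overload
+//     fs.release()
+//     fs = cv2.FileStorage("data.yml", cv2.FILE_STORAGE_READ)
+//     M = fs.getNode("M").mat()
+//     label = fs.getNode("label").string()
+//     fs.release()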
+
+//================================================================================
+// GeneralizedHough (Generic)
+//================================================================================
+
+// GetSet (GeneralizedHough)
+
+
+
+// Methods (GeneralizedHough)
+
+static PyObject* pyopencv_cv_GeneralizedHough_detect(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    pyPrepareArgumentConversionErrorsStorage(4);
+
+    {
+        PyObject* pyobj_image = NULL;
+        Mat image;
+        PyObject* pyobj_positions = NULL;
+        Mat positions;
+        PyObject* pyobj_votes = NULL;
+        Mat votes;
+
+        const char* keywords[] = { "image", "positions", "votes", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:GeneralizedHough.detect", (char**)keywords, &pyobj_image, &pyobj_positions, &pyobj_votes) &&
+            pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) &&
+            pyopencv_to_safe(pyobj_positions, positions, ArgInfo("positions", 1)) &&
+            pyopencv_to_safe(pyobj_votes, votes, ArgInfo("votes", 1)) )
+        {
+            ERRWRAP2(_self_->detect(image, positions, votes));
+            return Py_BuildValue("(NN)", pyopencv_from(positions), pyopencv_from(votes));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_image = NULL;
+        UMat image;
+        PyObject* pyobj_positions = NULL;
+        UMat positions;
+        PyObject* pyobj_votes = NULL;
+        UMat votes;
+
+        const char* keywords[] = { "image", "positions", "votes", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:GeneralizedHough.detect", (char**)keywords, &pyobj_image, &pyobj_positions, &pyobj_votes) &&
+            pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) &&
+            pyopencv_to_safe(pyobj_positions, positions, ArgInfo("positions", 1)) &&
+            pyopencv_to_safe(pyobj_votes, votes, ArgInfo("votes", 1)) )
+        {
+            ERRWRAP2(_self_->detect(image, positions, votes));
+            return Py_BuildValue("(NN)", pyopencv_from(positions), pyopencv_from(votes));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_edges = NULL;
+        Mat edges;
+        PyObject* pyobj_dx = NULL;
+        Mat dx;
+        PyObject* pyobj_dy = NULL;
+        Mat dy;
+        PyObject* pyobj_positions = NULL;
+        Mat positions;
+        PyObject* pyobj_votes = NULL;
+        Mat votes;
+
+        const char* keywords[] = { "edges", "dx", "dy", "positions", "votes", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:GeneralizedHough.detect", (char**)keywords, &pyobj_edges, &pyobj_dx, &pyobj_dy, &pyobj_positions, &pyobj_votes) &&
+            pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 0)) &&
+            pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) &&
+            pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) &&
+            pyopencv_to_safe(pyobj_positions, positions, ArgInfo("positions", 1)) &&
+            pyopencv_to_safe(pyobj_votes, votes, ArgInfo("votes", 1)) )
+        {
+            ERRWRAP2(_self_->detect(edges, dx, dy, positions, votes));
+            return Py_BuildValue("(NN)", pyopencv_from(positions), pyopencv_from(votes));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_edges = NULL;
+        UMat edges;
+        PyObject* pyobj_dx = NULL;
+        UMat dx;
+        PyObject* pyobj_dy = NULL;
+        UMat dy;
+        PyObject* pyobj_positions = NULL;
+        UMat positions;
+        PyObject* pyobj_votes = NULL;
+        UMat votes;
+
+        const char* keywords[] = { "edges", "dx", "dy", "positions", "votes", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OO:GeneralizedHough.detect", (char**)keywords, &pyobj_edges, &pyobj_dx, &pyobj_dy, &pyobj_positions, &pyobj_votes) &&
+            pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 0)) &&
+            pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) &&
+            pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) &&
+            pyopencv_to_safe(pyobj_positions, positions, ArgInfo("positions", 1)) &&
+            pyopencv_to_safe(pyobj_votes, votes, ArgInfo("votes", 1)) )
+        {
+            ERRWRAP2(_self_->detect(edges, dx, dy, positions, votes));
+            return Py_BuildValue("(NN)", pyopencv_from(positions), pyopencv_from(votes));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("detect");
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_getCannyHighThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getCannyHighThresh());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_getCannyLowThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getCannyLowThresh());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_getDp(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getDp());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_getMaxBufferSize(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMaxBufferSize());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_getMinDist(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMinDist());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setCannyHighThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    PyObject* pyobj_cannyHighThresh = NULL;
+    int cannyHighThresh=0;
+
+    const char* keywords[] = { "cannyHighThresh", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHough.setCannyHighThresh", (char**)keywords, &pyobj_cannyHighThresh) &&
+        pyopencv_to_safe(pyobj_cannyHighThresh, cannyHighThresh, ArgInfo("cannyHighThresh", 0)) )
+    {
+        ERRWRAP2(_self_->setCannyHighThresh(cannyHighThresh));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setCannyLowThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    PyObject* pyobj_cannyLowThresh = NULL;
+    int cannyLowThresh=0;
+
+    const char* keywords[] = { "cannyLowThresh", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHough.setCannyLowThresh", (char**)keywords, &pyobj_cannyLowThresh) &&
+        pyopencv_to_safe(pyobj_cannyLowThresh, cannyLowThresh, ArgInfo("cannyLowThresh", 0)) )
+    {
+        ERRWRAP2(_self_->setCannyLowThresh(cannyLowThresh));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setDp(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    PyObject* pyobj_dp = NULL;
+    double dp=0;
+
+    const char* keywords[] = { "dp", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHough.setDp", (char**)keywords, &pyobj_dp) &&
+        pyopencv_to_safe(pyobj_dp, dp, ArgInfo("dp", 0)) )
+    {
+        ERRWRAP2(_self_->setDp(dp));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setMaxBufferSize(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    PyObject* pyobj_maxBufferSize = NULL;
+    int maxBufferSize=0;
+
+    const char* keywords[] = { "maxBufferSize", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHough.setMaxBufferSize", (char**)keywords, &pyobj_maxBufferSize) &&
+        pyopencv_to_safe(pyobj_maxBufferSize, maxBufferSize, ArgInfo("maxBufferSize", 0)) )
+    {
+        ERRWRAP2(_self_->setMaxBufferSize(maxBufferSize));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setMinDist(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    PyObject* pyobj_minDist = NULL;
+    double minDist=0;
+
+    const char* keywords[] = { "minDist", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHough.setMinDist", (char**)keywords, &pyobj_minDist) &&
+        pyopencv_to_safe(pyobj_minDist, minDist, ArgInfo("minDist", 0)) )
+    {
+        ERRWRAP2(_self_->setMinDist(minDist));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHough_setTemplate(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHough> * self1 = 0;
+    if (!pyopencv_GeneralizedHough_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHough' or its derivative)");
+    Ptr<cv::GeneralizedHough> _self_ = *(self1);
+    pyPrepareArgumentConversionErrorsStorage(4);
+
+    {
+        PyObject* pyobj_templ = NULL;
+        Mat templ;
+        PyObject* pyobj_templCenter = NULL;
+        Point templCenter=Point(-1, -1);
+
+        const char* keywords[] = { "templ", "templCenter", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:GeneralizedHough.setTemplate", (char**)keywords, &pyobj_templ, &pyobj_templCenter) &&
+            pyopencv_to_safe(pyobj_templ, templ, ArgInfo("templ", 0)) &&
+            pyopencv_to_safe(pyobj_templCenter, templCenter, ArgInfo("templCenter", 0)) )
+        {
+            ERRWRAP2(_self_->setTemplate(templ, templCenter));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_templ = NULL;
+        UMat templ;
+        PyObject* pyobj_templCenter = NULL;
+        Point templCenter=Point(-1, -1);
+
+        const char* keywords[] = { "templ", "templCenter", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:GeneralizedHough.setTemplate", (char**)keywords, &pyobj_templ, &pyobj_templCenter) &&
+            pyopencv_to_safe(pyobj_templ, templ, ArgInfo("templ", 0)) &&
+            pyopencv_to_safe(pyobj_templCenter, templCenter, ArgInfo("templCenter", 0)) )
+        {
+            ERRWRAP2(_self_->setTemplate(templ, templCenter));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_edges = NULL;
+        Mat edges;
+        PyObject* pyobj_dx = NULL;
+        Mat dx;
+        PyObject* pyobj_dy = NULL;
+        Mat dy;
+        PyObject* pyobj_templCenter = NULL;
+        Point templCenter=Point(-1, -1);
+
+        const char* keywords[] = { "edges", "dx", "dy", "templCenter", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:GeneralizedHough.setTemplate", (char**)keywords, &pyobj_edges, &pyobj_dx, &pyobj_dy, &pyobj_templCenter) &&
+            pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 0)) &&
+            pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) &&
+            pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) &&
+            pyopencv_to_safe(pyobj_templCenter, templCenter, ArgInfo("templCenter", 0)) )
+        {
+            ERRWRAP2(_self_->setTemplate(edges, dx, dy, templCenter));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_edges = NULL;
+        UMat edges;
+        PyObject* pyobj_dx = NULL;
+        UMat dx;
+        PyObject* pyobj_dy = NULL;
+        UMat dy;
+        PyObject* pyobj_templCenter = NULL;
+        Point templCenter=Point(-1, -1);
+
+        const char* keywords[] = { "edges", "dx", "dy", "templCenter", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:GeneralizedHough.setTemplate", (char**)keywords, &pyobj_edges, &pyobj_dx, &pyobj_dy, &pyobj_templCenter) &&
+            pyopencv_to_safe(pyobj_edges, edges, ArgInfo("edges", 0)) &&
+            pyopencv_to_safe(pyobj_dx, dx, ArgInfo("dx", 0)) &&
+            pyopencv_to_safe(pyobj_dy, dy, ArgInfo("dy", 0)) &&
+            pyopencv_to_safe(pyobj_templCenter, templCenter, ArgInfo("templCenter", 0)) )
+        {
+            ERRWRAP2(_self_->setTemplate(edges, dx, dy, templCenter));
+            Py_RETURN_NONE;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("setTemplate");
+
+    return NULL;
+}
+
+
+
+// Tables (GeneralizedHough)
+
+static PyGetSetDef pyopencv_GeneralizedHough_getseters[] =
+{
+    {NULL} /* Sentinel */
+};
+
+static PyMethodDef pyopencv_GeneralizedHough_methods[] =
+{
+    {"detect", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_detect, 0), "detect(image[, positions[, votes]]) -> positions, votes\n. \n\n\n\ndetect(edges, dx, dy[, positions[, votes]]) -> positions, votes\n."},
+    {"getCannyHighThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_getCannyHighThresh, 0), "getCannyHighThresh() -> retval\n."},
+    {"getCannyLowThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_getCannyLowThresh, 0), "getCannyLowThresh() -> retval\n."},
+    {"getDp", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_getDp, 0), "getDp() -> retval\n."},
+    {"getMaxBufferSize", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_getMaxBufferSize, 0), "getMaxBufferSize() -> retval\n."},
+    {"getMinDist", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_getMinDist, 0), "getMinDist() -> retval\n."},
+    {"setCannyHighThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setCannyHighThresh, 0), "setCannyHighThresh(cannyHighThresh) -> None\n."},
+    {"setCannyLowThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setCannyLowThresh, 0), "setCannyLowThresh(cannyLowThresh) -> None\n."},
+    {"setDp", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setDp, 0), "setDp(dp) -> None\n."},
+    {"setMaxBufferSize", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setMaxBufferSize, 0), "setMaxBufferSize(maxBufferSize) -> None\n."},
+    {"setMinDist", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setMinDist, 0), "setMinDist(minDist) -> None\n."},
+    {"setTemplate", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHough_setTemplate, 0), "setTemplate(templ[, templCenter]) -> None\n. \n\n\n\nsetTemplate(edges, dx, dy[, templCenter]) -> None\n."},
+
+    {NULL, NULL}
+};
+
+// Converter (GeneralizedHough)
+
+template<>
+struct PyOpenCV_Converter< Ptr<cv::GeneralizedHough> >
+{
+    static PyObject* from(const Ptr<cv::GeneralizedHough>& r)
+    {
+        return pyopencv_GeneralizedHough_Instance(r);
+    }
+    static bool to(PyObject* src, Ptr<cv::GeneralizedHough>& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        Ptr<cv::GeneralizedHough> * dst_;
+        if (pyopencv_GeneralizedHough_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected Ptr<cv::GeneralizedHough> for argument '%s'", info.name);
+        return false;
+    }
+};
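+
+// Illustration only (not part of the generated bindings): GeneralizedHough
+// is abstract; the Ballard and Guil factories return concrete instances.
+// A minimal sketch, assuming `templ` and `image` are preloaded 8-bit
+// grayscale arrays:
+//
+//     import cv2
+//     gh = cv2.createGeneralizedHoughBallard()
+//     gh.setTemplate(templ)
+//     positions, votes = gh.detect(image)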
+
+//================================================================================
+// GeneralizedHoughBallard (Generic)
+//================================================================================
+
+// GetSet (GeneralizedHoughBallard)
+
+
+
+// Methods (GeneralizedHoughBallard)
+
+static PyObject* pyopencv_cv_GeneralizedHoughBallard_getLevels(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughBallard> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughBallard_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughBallard' or its derivative)");
+    Ptr<cv::GeneralizedHoughBallard> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getLevels());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughBallard_getVotesThreshold(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughBallard> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughBallard_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughBallard' or its derivative)");
+    Ptr<cv::GeneralizedHoughBallard> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getVotesThreshold());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughBallard_setLevels(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughBallard> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughBallard_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughBallard' or its derivative)");
+    Ptr<cv::GeneralizedHoughBallard> _self_ = *(self1);
+    PyObject* pyobj_levels = NULL;
+    int levels=0;
+
+    const char* keywords[] = { "levels", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughBallard.setLevels", (char**)keywords, &pyobj_levels) &&
+        pyopencv_to_safe(pyobj_levels, levels, ArgInfo("levels", 0)) )
+    {
+        ERRWRAP2(_self_->setLevels(levels));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughBallard_setVotesThreshold(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughBallard> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughBallard_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughBallard' or its derivative)");
+    Ptr<cv::GeneralizedHoughBallard> _self_ = *(self1);
+    PyObject* pyobj_votesThreshold = NULL;
+    int votesThreshold=0;
+
+    const char* keywords[] = { "votesThreshold", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughBallard.setVotesThreshold", (char**)keywords, &pyobj_votesThreshold) &&
+        pyopencv_to_safe(pyobj_votesThreshold, votesThreshold, ArgInfo("votesThreshold", 0)) )
+    {
+        ERRWRAP2(_self_->setVotesThreshold(votesThreshold));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+
+
+// Tables (GeneralizedHoughBallard)
+
+static PyGetSetDef pyopencv_GeneralizedHoughBallard_getseters[] =
+{
+    {NULL} /* Sentinel */
+};
+
+static PyMethodDef pyopencv_GeneralizedHoughBallard_methods[] =
+{
+    {"getLevels", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughBallard_getLevels, 0), "getLevels() -> retval\n."},
+    {"getVotesThreshold", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughBallard_getVotesThreshold, 0), "getVotesThreshold() -> retval\n."},
+    {"setLevels", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughBallard_setLevels, 0), "setLevels(levels) -> None\n."},
+    {"setVotesThreshold", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughBallard_setVotesThreshold, 0), "setVotesThreshold(votesThreshold) -> None\n."},
+
+    {NULL, NULL}
+};
+
+// Converter (GeneralizedHoughBallard)
+
+template<>
+struct PyOpenCV_Converter< Ptr<cv::GeneralizedHoughBallard> >
+{
+    static PyObject* from(const Ptr<cv::GeneralizedHoughBallard>& r)
+    {
+        return pyopencv_GeneralizedHoughBallard_Instance(r);
+    }
+    static bool to(PyObject* src, Ptr<cv::GeneralizedHoughBallard>& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        Ptr<cv::GeneralizedHoughBallard> * dst_;
+        if (pyopencv_GeneralizedHoughBallard_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected Ptr<cv::GeneralizedHoughBallard> for argument '%s'", info.name);
+        return false;
+    }
+};
+
+//================================================================================
+// GeneralizedHoughGuil (Generic)
+//================================================================================
+
+// GetSet (GeneralizedHoughGuil)
+
+
+
+// Methods (GeneralizedHoughGuil)
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getAngleEpsilon(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getAngleEpsilon());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getAngleStep(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getAngleStep());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getAngleThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getAngleThresh());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getLevels(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getLevels());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getMaxAngle(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMaxAngle());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getMaxScale(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMaxScale());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getMinAngle(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMinAngle());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getMinScale(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getMinScale());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getPosThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getPosThresh());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getScaleStep(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getScaleStep());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getScaleThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    int retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getScaleThresh());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_getXi(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    double retval;
+
+    if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0))
+    {
+        ERRWRAP2(retval = _self_->getXi());
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setAngleEpsilon(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_angleEpsilon = NULL;
+    double angleEpsilon=0;
+
+    const char* keywords[] = { "angleEpsilon", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setAngleEpsilon", (char**)keywords, &pyobj_angleEpsilon) &&
+        pyopencv_to_safe(pyobj_angleEpsilon, angleEpsilon, ArgInfo("angleEpsilon", 0)) )
+    {
+        ERRWRAP2(_self_->setAngleEpsilon(angleEpsilon));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setAngleStep(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_angleStep = NULL;
+    double angleStep=0;
+
+    const char* keywords[] = { "angleStep", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setAngleStep", (char**)keywords, &pyobj_angleStep) &&
+        pyopencv_to_safe(pyobj_angleStep, angleStep, ArgInfo("angleStep", 0)) )
+    {
+        ERRWRAP2(_self_->setAngleStep(angleStep));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setAngleThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_angleThresh = NULL;
+    int angleThresh=0;
+
+    const char* keywords[] = { "angleThresh", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setAngleThresh", (char**)keywords, &pyobj_angleThresh) &&
+        pyopencv_to_safe(pyobj_angleThresh, angleThresh, ArgInfo("angleThresh", 0)) )
+    {
+        ERRWRAP2(_self_->setAngleThresh(angleThresh));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setLevels(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_levels = NULL;
+    int levels=0;
+
+    const char* keywords[] = { "levels", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setLevels", (char**)keywords, &pyobj_levels) &&
+        pyopencv_to_safe(pyobj_levels, levels, ArgInfo("levels", 0)) )
+    {
+        ERRWRAP2(_self_->setLevels(levels));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setMaxAngle(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_maxAngle = NULL;
+    double maxAngle=0;
+
+    const char* keywords[] = { "maxAngle", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setMaxAngle", (char**)keywords, &pyobj_maxAngle) &&
+        pyopencv_to_safe(pyobj_maxAngle, maxAngle, ArgInfo("maxAngle", 0)) )
+    {
+        ERRWRAP2(_self_->setMaxAngle(maxAngle));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setMaxScale(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_maxScale = NULL;
+    double maxScale=0;
+
+    const char* keywords[] = { "maxScale", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setMaxScale", (char**)keywords, &pyobj_maxScale) &&
+        pyopencv_to_safe(pyobj_maxScale, maxScale, ArgInfo("maxScale", 0)) )
+    {
+        ERRWRAP2(_self_->setMaxScale(maxScale));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setMinAngle(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_minAngle = NULL;
+    double minAngle=0;
+
+    const char* keywords[] = { "minAngle", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setMinAngle", (char**)keywords, &pyobj_minAngle) &&
+        pyopencv_to_safe(pyobj_minAngle, minAngle, ArgInfo("minAngle", 0)) )
+    {
+        ERRWRAP2(_self_->setMinAngle(minAngle));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setMinScale(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_minScale = NULL;
+    double minScale=0;
+
+    const char* keywords[] = { "minScale", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setMinScale", (char**)keywords, &pyobj_minScale) &&
+        pyopencv_to_safe(pyobj_minScale, minScale, ArgInfo("minScale", 0)) )
+    {
+        ERRWRAP2(_self_->setMinScale(minScale));
+        Py_RETURN_NONE;
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_GeneralizedHoughGuil_setPosThresh(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::GeneralizedHoughGuil> * self1 = 0;
+    if (!pyopencv_GeneralizedHoughGuil_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)");
+    Ptr<cv::GeneralizedHoughGuil> _self_ = *(self1);
+    PyObject* pyobj_posThresh = NULL;
+    int posThresh=0;
+
+    const char* keywords[] = { "posThresh", NULL };
+    if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setPosThresh", (char**)keywords, &pyobj_posThresh) && + pyopencv_to_safe(pyobj_posThresh, posThresh, ArgInfo("posThresh", 0)) ) + { + ERRWRAP2(_self_->setPosThresh(posThresh)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_GeneralizedHoughGuil_setScaleStep(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_GeneralizedHoughGuil_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_scaleStep = NULL; + double scaleStep=0; + + const char* keywords[] = { "scaleStep", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setScaleStep", (char**)keywords, &pyobj_scaleStep) && + pyopencv_to_safe(pyobj_scaleStep, scaleStep, ArgInfo("scaleStep", 0)) ) + { + ERRWRAP2(_self_->setScaleStep(scaleStep)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_GeneralizedHoughGuil_setScaleThresh(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_GeneralizedHoughGuil_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_scaleThresh = NULL; + int scaleThresh=0; + + const char* keywords[] = { "scaleThresh", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setScaleThresh", (char**)keywords, &pyobj_scaleThresh) && + pyopencv_to_safe(pyobj_scaleThresh, scaleThresh, ArgInfo("scaleThresh", 0)) ) + { + ERRWRAP2(_self_->setScaleThresh(scaleThresh)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_GeneralizedHoughGuil_setXi(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_GeneralizedHoughGuil_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'GeneralizedHoughGuil' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_xi = NULL; + double xi=0; + + const char* keywords[] = { "xi", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GeneralizedHoughGuil.setXi", (char**)keywords, &pyobj_xi) && + pyopencv_to_safe(pyobj_xi, xi, ArgInfo("xi", 0)) ) + { + ERRWRAP2(_self_->setXi(xi)); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (GeneralizedHoughGuil) + +static PyGetSetDef pyopencv_GeneralizedHoughGuil_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_GeneralizedHoughGuil_methods[] = +{ + {"getAngleEpsilon", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getAngleEpsilon, 0), "getAngleEpsilon() -> retval\n."}, + {"getAngleStep", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getAngleStep, 0), "getAngleStep() -> retval\n."}, + {"getAngleThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getAngleThresh, 0), "getAngleThresh() -> retval\n."}, + {"getLevels", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getLevels, 0), "getLevels() -> retval\n."}, + {"getMaxAngle", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getMaxAngle, 0), "getMaxAngle() -> retval\n."}, + {"getMaxScale", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getMaxScale, 0), "getMaxScale() -> retval\n."}, + {"getMinAngle", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getMinAngle, 0), "getMinAngle() -> retval\n."}, + {"getMinScale", 
CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getMinScale, 0), "getMinScale() -> retval\n."}, + {"getPosThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getPosThresh, 0), "getPosThresh() -> retval\n."}, + {"getScaleStep", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getScaleStep, 0), "getScaleStep() -> retval\n."}, + {"getScaleThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getScaleThresh, 0), "getScaleThresh() -> retval\n."}, + {"getXi", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_getXi, 0), "getXi() -> retval\n."}, + {"setAngleEpsilon", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setAngleEpsilon, 0), "setAngleEpsilon(angleEpsilon) -> None\n."}, + {"setAngleStep", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setAngleStep, 0), "setAngleStep(angleStep) -> None\n."}, + {"setAngleThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setAngleThresh, 0), "setAngleThresh(angleThresh) -> None\n."}, + {"setLevels", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setLevels, 0), "setLevels(levels) -> None\n."}, + {"setMaxAngle", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setMaxAngle, 0), "setMaxAngle(maxAngle) -> None\n."}, + {"setMaxScale", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setMaxScale, 0), "setMaxScale(maxScale) -> None\n."}, + {"setMinAngle", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setMinAngle, 0), "setMinAngle(minAngle) -> None\n."}, + {"setMinScale", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setMinScale, 0), "setMinScale(minScale) -> None\n."}, + {"setPosThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setPosThresh, 0), "setPosThresh(posThresh) -> None\n."}, + {"setScaleStep", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setScaleStep, 0), "setScaleStep(scaleStep) -> None\n."}, + {"setScaleThresh", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setScaleThresh, 0), "setScaleThresh(scaleThresh) -> None\n."}, + {"setXi", CV_PY_FN_WITH_KW_(pyopencv_cv_GeneralizedHoughGuil_setXi, 0), "setXi(xi) -> None\n."}, + + {NULL, NULL} +}; + +// Converter (GeneralizedHoughGuil) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_GeneralizedHoughGuil_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_GeneralizedHoughGuil_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// KeyPoint (Generic) +//================================================================================ + +// GetSet (KeyPoint) + + +static PyObject* pyopencv_KeyPoint_get_angle(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.angle); +} + +static int pyopencv_KeyPoint_set_angle(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the angle attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.angle, ArgInfo("value", false)) ? 
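+// Conversion note: every Ptr<T> converter in this header (Ballard and Guil
+// above, the detector classes below) treats Py_None as an empty Ptr and
+// otherwise unwraps the stored smart pointer in place, so Python callers may
+// pass either a wrapped object or None wherever a Ptr<T> argument is
+// expected. Sketch with a hypothetical bound function fn():
+//
+//   fn(detector)   # wrapped object -> non-empty Ptr<T>
+//   fn(None)       # None -> empty Ptr<T>, conversion still succeeds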
0 : -1; +} + +static PyObject* pyopencv_KeyPoint_get_class_id(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.class_id); +} + +static int pyopencv_KeyPoint_set_class_id(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the class_id attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.class_id, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_KeyPoint_get_octave(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.octave); +} + +static int pyopencv_KeyPoint_set_octave(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the octave attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.octave, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_KeyPoint_get_pt(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.pt); +} + +static int pyopencv_KeyPoint_set_pt(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the pt attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.pt, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_KeyPoint_get_response(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.response); +} + +static int pyopencv_KeyPoint_set_response(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the response attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.response, ArgInfo("value", false)) ? 0 : -1; +} + +static PyObject* pyopencv_KeyPoint_get_size(pyopencv_KeyPoint_t* p, void *closure) +{ + return pyopencv_from(p->v.size); +} + +static int pyopencv_KeyPoint_set_size(pyopencv_KeyPoint_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the size attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v.size, ArgInfo("value", false)) ? 
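+// The KeyPoint field accessors in this block map straight onto attributes of
+// cv2.KeyPoint; a minimal Python sketch (constructor signature as parsed by
+// the binding below):
+//
+//   kp = cv2.KeyPoint(10.0, 20.0, 5.0)   # x, y, size; angle/response/octave/class_id optional
+//   kp.angle = 90.0
+//   x, y = kp.pt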
0 : -1; +} + + +// Methods (KeyPoint) + +static int pyopencv_cv_KeyPoint_KeyPoint(pyopencv_KeyPoint_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + if(self) ERRWRAP2(new (&(self->v)) cv::KeyPoint()); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_x = NULL; + float x=0.f; + PyObject* pyobj_y = NULL; + float y=0.f; + PyObject* pyobj_size = NULL; + float size=0.f; + PyObject* pyobj_angle = NULL; + float angle=-1; + PyObject* pyobj_response = NULL; + float response=0; + PyObject* pyobj_octave = NULL; + int octave=0; + PyObject* pyobj_class_id = NULL; + int class_id=-1; + + const char* keywords[] = { "x", "y", "size", "angle", "response", "octave", "class_id", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|OOOO:KeyPoint", (char**)keywords, &pyobj_x, &pyobj_y, &pyobj_size, &pyobj_angle, &pyobj_response, &pyobj_octave, &pyobj_class_id) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_angle, angle, ArgInfo("angle", 0)) && + pyopencv_to_safe(pyobj_response, response, ArgInfo("response", 0)) && + pyopencv_to_safe(pyobj_octave, octave, ArgInfo("octave", 0)) && + pyopencv_to_safe(pyobj_class_id, class_id, ArgInfo("class_id", 0)) ) + { + if(self) ERRWRAP2(new (&(self->v)) cv::KeyPoint(x, y, size, angle, response, octave, class_id)); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("KeyPoint"); + + return -1; +} + +static PyObject* pyopencv_cv_KeyPoint_convert_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_keypoints = NULL; + vector_KeyPoint keypoints; + vector_Point2f points2f; + PyObject* pyobj_keypointIndexes = NULL; + vector_int keypointIndexes=std::vector(); + + const char* keywords[] = { "keypoints", "keypointIndexes", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:KeyPoint.convert", (char**)keywords, &pyobj_keypoints, &pyobj_keypointIndexes) && + pyopencv_to_safe(pyobj_keypoints, keypoints, ArgInfo("keypoints", 0)) && + pyopencv_to_safe(pyobj_keypointIndexes, keypointIndexes, ArgInfo("keypointIndexes", 0)) ) + { + ERRWRAP2(cv::KeyPoint::convert(keypoints, points2f, keypointIndexes)); + return pyopencv_from(points2f); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_points2f = NULL; + vector_Point2f points2f; + vector_KeyPoint keypoints; + PyObject* pyobj_size = NULL; + float size=1; + PyObject* pyobj_response = NULL; + float response=1; + PyObject* pyobj_octave = NULL; + int octave=0; + PyObject* pyobj_class_id = NULL; + int class_id=-1; + + const char* keywords[] = { "points2f", "size", "response", "octave", "class_id", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:KeyPoint.convert", (char**)keywords, &pyobj_points2f, &pyobj_size, &pyobj_response, &pyobj_octave, &pyobj_class_id) && + pyopencv_to_safe(pyobj_points2f, points2f, ArgInfo("points2f", 0)) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_response, response, ArgInfo("response", 0)) && + pyopencv_to_safe(pyobj_octave, octave, ArgInfo("octave", 0)) && + pyopencv_to_safe(pyobj_class_id, class_id, ArgInfo("class_id", 0)) ) + { + 
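+// Both convert() overloads are exposed under a single static Python method;
+// the overload machinery picks whichever branch parses. Sketch:
+//
+//   pts = cv2.KeyPoint_convert(keypoints)        # keypoints -> Nx2 points
+//   kps = cv2.KeyPoint_convert(pts, size=1.0)    # points -> keypoints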
ERRWRAP2(cv::KeyPoint::convert(points2f, keypoints, size, response, octave, class_id)); + return pyopencv_from(keypoints); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convert"); + + return NULL; +} + +static PyObject* pyopencv_cv_KeyPoint_overlap_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_kp1 = NULL; + KeyPoint kp1; + PyObject* pyobj_kp2 = NULL; + KeyPoint kp2; + float retval; + + const char* keywords[] = { "kp1", "kp2", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:KeyPoint.overlap", (char**)keywords, &pyobj_kp1, &pyobj_kp2) && + pyopencv_to_safe(pyobj_kp1, kp1, ArgInfo("kp1", 0)) && + pyopencv_to_safe(pyobj_kp2, kp2, ArgInfo("kp2", 0)) ) + { + ERRWRAP2(retval = cv::KeyPoint::overlap(kp1, kp2)); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (KeyPoint) + +static PyGetSetDef pyopencv_KeyPoint_getseters[] = +{ + {(char*)"angle", (getter)pyopencv_KeyPoint_get_angle, (setter)pyopencv_KeyPoint_set_angle, (char*)"angle", NULL}, + {(char*)"class_id", (getter)pyopencv_KeyPoint_get_class_id, (setter)pyopencv_KeyPoint_set_class_id, (char*)"class_id", NULL}, + {(char*)"octave", (getter)pyopencv_KeyPoint_get_octave, (setter)pyopencv_KeyPoint_set_octave, (char*)"octave", NULL}, + {(char*)"pt", (getter)pyopencv_KeyPoint_get_pt, (setter)pyopencv_KeyPoint_set_pt, (char*)"pt", NULL}, + {(char*)"response", (getter)pyopencv_KeyPoint_get_response, (setter)pyopencv_KeyPoint_set_response, (char*)"response", NULL}, + {(char*)"size", (getter)pyopencv_KeyPoint_get_size, (setter)pyopencv_KeyPoint_set_size, (char*)"size", NULL}, + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_KeyPoint_methods[] = +{ + {"convert", CV_PY_FN_WITH_KW_(pyopencv_cv_KeyPoint_convert_static, METH_STATIC), "convert(keypoints[, keypointIndexes]) -> points2f\n. This method converts vector of keypoints to vector of points or the reverse, where each keypoint is\n. assigned the same size and the same orientation.\n. \n. @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n. @param points2f Array of (x,y) coordinates of each keypoint\n. @param keypointIndexes Array of indexes of keypoints to be converted to points. (Acts like a mask to\n. convert only specified keypoints)\n\n\n\nconvert(points2f[, size[, response[, octave[, class_id]]]]) -> keypoints\n. @overload\n. @param points2f Array of (x,y) coordinates of each keypoint\n. @param keypoints Keypoints obtained from any feature detection algorithm like SIFT/SURF/ORB\n. @param size keypoint diameter\n. @param response keypoint detector response on the keypoint (that is, strength of the keypoint)\n. @param octave pyramid octave in which the keypoint has been detected\n. @param class_id object id"}, + {"overlap", CV_PY_FN_WITH_KW_(pyopencv_cv_KeyPoint_overlap_static, METH_STATIC), "overlap(kp1, kp2) -> retval\n. This method computes overlap for pair of keypoints. Overlap is the ratio between area of keypoint\n. regions' intersection and area of keypoint regions' union (considering keypoint region as circle).\n. If they don't overlap, we get zero. If they coincide at same location with same size, we get 1.\n. @param kp1 First keypoint\n. 
@param kp2 Second keypoint"},
+
+    {NULL, NULL}
+};
+
+// Converter (KeyPoint)
+
+template<>
+struct PyOpenCV_Converter< cv::KeyPoint >
+{
+    static PyObject* from(const cv::KeyPoint& r)
+    {
+        return pyopencv_KeyPoint_Instance(r);
+    }
+    static bool to(PyObject* src, cv::KeyPoint& dst, const ArgInfo& info)
+    {
+        if(!src || src == Py_None)
+            return true;
+        cv::KeyPoint * dst_;
+        if (pyopencv_KeyPoint_getp(src, dst_))
+        {
+            dst = *dst_;
+            return true;
+        }
+
+        failmsg("Expected cv::KeyPoint for argument '%s'", info.name);
+        return false;
+    }
+};
+
+//================================================================================
+// LineSegmentDetector (Generic)
+//================================================================================
+
+// GetSet (LineSegmentDetector)
+
+
+
+// Methods (LineSegmentDetector)
+
+static PyObject* pyopencv_cv_LineSegmentDetector_compareSegments(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::LineSegmentDetector> * self1 = 0;
+    if (!pyopencv_LineSegmentDetector_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'LineSegmentDetector' or its derivative)");
+    Ptr<cv::LineSegmentDetector> _self_ = *(self1);
+    pyPrepareArgumentConversionErrorsStorage(2);
+
+    {
+        PyObject* pyobj_size = NULL;
+        Size size;
+        PyObject* pyobj_lines1 = NULL;
+        Mat lines1;
+        PyObject* pyobj_lines2 = NULL;
+        Mat lines2;
+        PyObject* pyobj_image = NULL;
+        Mat image;
+        int retval;
+
+        const char* keywords[] = { "size", "lines1", "lines2", "image", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:LineSegmentDetector.compareSegments", (char**)keywords, &pyobj_size, &pyobj_lines1, &pyobj_lines2, &pyobj_image) &&
+            pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) &&
+            pyopencv_to_safe(pyobj_lines1, lines1, ArgInfo("lines1", 0)) &&
+            pyopencv_to_safe(pyobj_lines2, lines2, ArgInfo("lines2", 0)) &&
+            pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) )
+        {
+            ERRWRAP2(retval = _self_->compareSegments(size, lines1, lines2, image));
+            return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+
+
+    {
+        PyObject* pyobj_size = NULL;
+        Size size;
+        PyObject* pyobj_lines1 = NULL;
+        UMat lines1;
+        PyObject* pyobj_lines2 = NULL;
+        UMat lines2;
+        PyObject* pyobj_image = NULL;
+        UMat image;
+        int retval;
+
+        const char* keywords[] = { "size", "lines1", "lines2", "image", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:LineSegmentDetector.compareSegments", (char**)keywords, &pyobj_size, &pyobj_lines1, &pyobj_lines2, &pyobj_image) &&
+            pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) &&
+            pyopencv_to_safe(pyobj_lines1, lines1, ArgInfo("lines1", 0)) &&
+            pyopencv_to_safe(pyobj_lines2, lines2, ArgInfo("lines2", 0)) &&
+            pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) )
+        {
+            ERRWRAP2(retval = _self_->compareSegments(size, lines1, lines2, image));
+            return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image));
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("compareSegments");
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_LineSegmentDetector_detect(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::LineSegmentDetector> * self1 = 0;
+    if (!pyopencv_LineSegmentDetector_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'LineSegmentDetector' or its derivative)");
+    Ptr<cv::LineSegmentDetector> _self_ = *(self1);
+    pyPrepareArgumentConversionErrorsStorage(2);
+
+    {
+        PyObject* pyobj_image = NULL;
+        Mat image;
+        PyObject*
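+// Generator pattern: each LineSegmentDetector method is emitted twice, once
+// over cv::Mat and once over cv::UMat, so cv2.UMat inputs keep the
+// transparent-API (OpenCL) path instead of being copied to host memory,
+// e.g. lsd.detect(cv2.UMat(gray)) from Python.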
pyobj_lines = NULL; + Mat lines; + PyObject* pyobj_width = NULL; + Mat width; + PyObject* pyobj_prec = NULL; + Mat prec; + PyObject* pyobj_nfa = NULL; + Mat nfa; + + const char* keywords[] = { "image", "lines", "width", "prec", "nfa", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:LineSegmentDetector.detect", (char**)keywords, &pyobj_image, &pyobj_lines, &pyobj_width, &pyobj_prec, &pyobj_nfa) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_width, width, ArgInfo("width", 1)) && + pyopencv_to_safe(pyobj_prec, prec, ArgInfo("prec", 1)) && + pyopencv_to_safe(pyobj_nfa, nfa, ArgInfo("nfa", 1)) ) + { + ERRWRAP2(_self_->detect(image, lines, width, prec, nfa)); + return Py_BuildValue("(NNNN)", pyopencv_from(lines), pyopencv_from(width), pyopencv_from(prec), pyopencv_from(nfa)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_lines = NULL; + UMat lines; + PyObject* pyobj_width = NULL; + UMat width; + PyObject* pyobj_prec = NULL; + UMat prec; + PyObject* pyobj_nfa = NULL; + UMat nfa; + + const char* keywords[] = { "image", "lines", "width", "prec", "nfa", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OOOO:LineSegmentDetector.detect", (char**)keywords, &pyobj_image, &pyobj_lines, &pyobj_width, &pyobj_prec, &pyobj_nfa) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 1)) && + pyopencv_to_safe(pyobj_width, width, ArgInfo("width", 1)) && + pyopencv_to_safe(pyobj_prec, prec, ArgInfo("prec", 1)) && + pyopencv_to_safe(pyobj_nfa, nfa, ArgInfo("nfa", 1)) ) + { + ERRWRAP2(_self_->detect(image, lines, width, prec, nfa)); + return Py_BuildValue("(NNNN)", pyopencv_from(lines), pyopencv_from(width), pyopencv_from(prec), pyopencv_from(nfa)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("detect"); + + return NULL; +} + +static PyObject* pyopencv_cv_LineSegmentDetector_drawSegments(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_LineSegmentDetector_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'LineSegmentDetector' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_lines = NULL; + Mat lines; + + const char* keywords[] = { "image", "lines", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:LineSegmentDetector.drawSegments", (char**)keywords, &pyobj_image, &pyobj_lines) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 0)) ) + { + ERRWRAP2(_self_->drawSegments(image, lines)); + return pyopencv_from(image); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_lines = NULL; + UMat lines; + + const char* keywords[] = { "image", "lines", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:LineSegmentDetector.drawSegments", (char**)keywords, &pyobj_image, &pyobj_lines) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_lines, lines, ArgInfo("lines", 0)) ) + { + ERRWRAP2(_self_->drawSegments(image, lines)); + return pyopencv_from(image); + } + + + pyPopulateArgumentConversionErrors(); + } + 
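+// End-to-end Python sketch for the detector bound here (assuming the
+// cv2.createLineSegmentDetector factory is enabled in this build):
+//
+//   lsd = cv2.createLineSegmentDetector()
+//   lines, width, prec, nfa = lsd.detect(gray)
+//   vis = lsd.drawSegments(img, lines)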
pyRaiseCVOverloadException("drawSegments"); + + return NULL; +} + + + +// Tables (LineSegmentDetector) + +static PyGetSetDef pyopencv_LineSegmentDetector_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_LineSegmentDetector_methods[] = +{ + {"compareSegments", CV_PY_FN_WITH_KW_(pyopencv_cv_LineSegmentDetector_compareSegments, 0), "compareSegments(size, lines1, lines2[, image]) -> retval, image\n. @brief Draws two groups of lines in blue and red, counting the non overlapping (mismatching) pixels.\n. \n. @param size The size of the image, where lines1 and lines2 were found.\n. @param lines1 The first group of lines that needs to be drawn. It is visualized in blue color.\n. @param lines2 The second group of lines. They visualized in red color.\n. @param image Optional image, where the lines will be drawn. The image should be color(3-channel)\n. in order for lines1 and lines2 to be drawn in the above mentioned colors."}, + {"detect", CV_PY_FN_WITH_KW_(pyopencv_cv_LineSegmentDetector_detect, 0), "detect(image[, lines[, width[, prec[, nfa]]]]) -> lines, width, prec, nfa\n. @brief Finds lines in the input image.\n. \n. This is the output of the default parameters of the algorithm on the above shown image.\n. \n. ![image](pics/building_lsd.png)\n. \n. @param image A grayscale (CV_8UC1) input image. If only a roi needs to be selected, use:\n. `lsd_ptr-\\>detect(image(roi), lines, ...); lines += Scalar(roi.x, roi.y, roi.x, roi.y);`\n. @param lines A vector of Vec4f elements specifying the beginning and ending point of a line. Where\n. Vec4f is (x1, y1, x2, y2), point 1 is the start, point 2 - end. Returned lines are strictly\n. oriented depending on the gradient.\n. @param width Vector of widths of the regions, where the lines are found. E.g. Width of line.\n. @param prec Vector of precisions with which the lines are found.\n. @param nfa Vector containing number of false alarms in the line region, with precision of 10%. The\n. bigger the value, logarithmically better the detection.\n. - -1 corresponds to 10 mean false alarms\n. - 0 corresponds to 1 mean false alarm\n. - 1 corresponds to 0.1 mean false alarms\n. This vector will be calculated only when the objects type is #LSD_REFINE_ADV."}, + {"drawSegments", CV_PY_FN_WITH_KW_(pyopencv_cv_LineSegmentDetector_drawSegments, 0), "drawSegments(image, lines) -> image\n. @brief Draws the line segments on a given image.\n. @param image The image, where the lines will be drawn. Should be bigger or equal to the image,\n. where the lines were found.\n. 
@param lines A vector of the lines that needed to be drawn."}, + + {NULL, NULL} +}; + +// Converter (LineSegmentDetector) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_LineSegmentDetector_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_LineSegmentDetector_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// Moments (Map) +//================================================================================ +static bool pyopencv_to(PyObject* src, cv::Moments& dst, const ArgInfo& info) +{ + PyObject* tmp; + bool ok; + + if( PyMapping_HasKeyString(src, (char*)"m00") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m00"); + ok = tmp && pyopencv_to_safe(tmp, dst.m00, ArgInfo("m00", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m10") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m10"); + ok = tmp && pyopencv_to_safe(tmp, dst.m10, ArgInfo("m10", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m01") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m01"); + ok = tmp && pyopencv_to_safe(tmp, dst.m01, ArgInfo("m01", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m20") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m20"); + ok = tmp && pyopencv_to_safe(tmp, dst.m20, ArgInfo("m20", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m11") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m11"); + ok = tmp && pyopencv_to_safe(tmp, dst.m11, ArgInfo("m11", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m02") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m02"); + ok = tmp && pyopencv_to_safe(tmp, dst.m02, ArgInfo("m02", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m30") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m30"); + ok = tmp && pyopencv_to_safe(tmp, dst.m30, ArgInfo("m30", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m21") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m21"); + ok = tmp && pyopencv_to_safe(tmp, dst.m21, ArgInfo("m21", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m12") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m12"); + ok = tmp && pyopencv_to_safe(tmp, dst.m12, ArgInfo("m12", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"m03") ) + { + tmp = PyMapping_GetItemString(src, (char*)"m03"); + ok = tmp && pyopencv_to_safe(tmp, dst.m03, ArgInfo("m03", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu20") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu20"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu20, ArgInfo("mu20", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu11") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu11"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu11, ArgInfo("mu11", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( 
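+// This Moments converter is map-based rather than class-based: any Python
+// mapping with the right keys converts back to cv::Moments, and cv2.moments()
+// hands Python a plain dict. Sketch:
+//
+//   m = cv2.moments(contour)        # {'m00': ..., 'm10': ..., 'mu20': ...}
+//   cx = m['m10'] / m['m00']        # centroid x from raw moments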
PyMapping_HasKeyString(src, (char*)"mu02") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu02"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu02, ArgInfo("mu02", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu30") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu30"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu30, ArgInfo("mu30", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu21") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu21"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu21, ArgInfo("mu21", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu12") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu12"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu12, ArgInfo("mu12", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"mu03") ) + { + tmp = PyMapping_GetItemString(src, (char*)"mu03"); + ok = tmp && pyopencv_to_safe(tmp, dst.mu03, ArgInfo("mu03", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu20") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu20"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu20, ArgInfo("nu20", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu11") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu11"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu11, ArgInfo("nu11", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu02") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu02"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu02, ArgInfo("nu02", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu30") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu30"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu30, ArgInfo("nu30", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu21") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu21"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu21, ArgInfo("nu21", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu12") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu12"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu12, ArgInfo("nu12", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + if( PyMapping_HasKeyString(src, (char*)"nu03") ) + { + tmp = PyMapping_GetItemString(src, (char*)"nu03"); + ok = tmp && pyopencv_to_safe(tmp, dst.nu03, ArgInfo("nu03", false)); + Py_DECREF(tmp); + if(!ok) return false; + } + return true; +} + +template<> bool pyopencv_to(PyObject* src, cv::Moments& dst, const ArgInfo& info); + +//================================================================================ +// Subdiv2D (Generic) +//================================================================================ + +// GetSet (Subdiv2D) + + + +// Methods (Subdiv2D) + +static int pyopencv_cv_Subdiv2D_Subdiv2D(pyopencv_Subdiv2D_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::Subdiv2D())); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rect = NULL; + Rect rect; + + const char* 
keywords[] = { "rect", NULL };
+        if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D", (char**)keywords, &pyobj_rect) &&
+            pyopencv_to_safe(pyobj_rect, rect, ArgInfo("rect", 0)) )
+        {
+            new (&(self->v)) Ptr<cv::Subdiv2D>(); // init Ptr with placement new
+            if(self) ERRWRAP2(self->v.reset(new cv::Subdiv2D(rect)));
+            return 0;
+        }
+
+
+        pyPopulateArgumentConversionErrors();
+    }
+    pyRaiseCVOverloadException("Subdiv2D");
+
+    return -1;
+}
+
+static PyObject* pyopencv_cv_Subdiv2D_edgeDst(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::Subdiv2D> * self1 = 0;
+    if (!pyopencv_Subdiv2D_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)");
+    Ptr<cv::Subdiv2D> _self_ = *(self1);
+    PyObject* pyobj_edge = NULL;
+    int edge=0;
+    Point2f dstpt;
+    int retval;
+
+    const char* keywords[] = { "edge", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.edgeDst", (char**)keywords, &pyobj_edge) &&
+        pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) )
+    {
+        ERRWRAP2(retval = _self_->edgeDst(edge, &dstpt));
+        return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(dstpt));
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_Subdiv2D_edgeOrg(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::Subdiv2D> * self1 = 0;
+    if (!pyopencv_Subdiv2D_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)");
+    Ptr<cv::Subdiv2D> _self_ = *(self1);
+    PyObject* pyobj_edge = NULL;
+    int edge=0;
+    Point2f orgpt;
+    int retval;
+
+    const char* keywords[] = { "edge", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.edgeOrg", (char**)keywords, &pyobj_edge) &&
+        pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) )
+    {
+        ERRWRAP2(retval = _self_->edgeOrg(edge, &orgpt));
+        return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(orgpt));
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_Subdiv2D_findNearest(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::Subdiv2D> * self1 = 0;
+    if (!pyopencv_Subdiv2D_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)");
+    Ptr<cv::Subdiv2D> _self_ = *(self1);
+    PyObject* pyobj_pt = NULL;
+    Point2f pt;
+    Point2f nearestPt;
+    int retval;
+
+    const char* keywords[] = { "pt", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.findNearest", (char**)keywords, &pyobj_pt) &&
+        pyopencv_to_safe(pyobj_pt, pt, ArgInfo("pt", 0)) )
+    {
+        ERRWRAP2(retval = _self_->findNearest(pt, &nearestPt));
+        return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(nearestPt));
+    }
+
+    return NULL;
+}
+
+static PyObject* pyopencv_cv_Subdiv2D_getEdge(PyObject* self, PyObject* py_args, PyObject* kw)
+{
+    using namespace cv;
+
+
+    Ptr<cv::Subdiv2D> * self1 = 0;
+    if (!pyopencv_Subdiv2D_getp(self, self1))
+        return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)");
+    Ptr<cv::Subdiv2D> _self_ = *(self1);
+    PyObject* pyobj_edge = NULL;
+    int edge=0;
+    PyObject* pyobj_nextEdgeType = NULL;
+    int nextEdgeType=0;
+    int retval;
+
+    const char* keywords[] = { "edge", "nextEdgeType", NULL };
+    if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:Subdiv2D.getEdge", (char**)keywords, &pyobj_edge, &pyobj_nextEdgeType) &&
+        pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) &&
+        pyopencv_to_safe(pyobj_nextEdgeType, nextEdgeType, ArgInfo("nextEdgeType", 0)) )
+    {
+        ERRWRAP2(retval = _self_->getEdge(edge, nextEdgeType));
+        return pyopencv_from(retval);
+    }
+
+    return NULL;
+}
+
+static PyObject*
pyopencv_cv_Subdiv2D_getEdgeList(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + vector_Vec4f edgeList; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->getEdgeList(edgeList)); + return pyopencv_from(edgeList); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_getLeadingEdgeList(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + vector_int leadingEdgeList; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->getLeadingEdgeList(leadingEdgeList)); + return pyopencv_from(leadingEdgeList); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_getTriangleList(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + vector_Vec6f triangleList; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->getTriangleList(triangleList)); + return pyopencv_from(triangleList); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_getVertex(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_vertex = NULL; + int vertex=0; + int firstEdge; + Point2f retval; + + const char* keywords[] = { "vertex", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.getVertex", (char**)keywords, &pyobj_vertex) && + pyopencv_to_safe(pyobj_vertex, vertex, ArgInfo("vertex", 0)) ) + { + ERRWRAP2(retval = _self_->getVertex(vertex, &firstEdge)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(firstEdge)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_getVoronoiFacetList(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_idx = NULL; + vector_int idx; + vector_vector_Point2f facetList; + vector_Point2f facetCenters; + + const char* keywords[] = { "idx", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.getVoronoiFacetList", (char**)keywords, &pyobj_idx) && + pyopencv_to_safe(pyobj_idx, idx, ArgInfo("idx", 0)) ) + { + ERRWRAP2(_self_->getVoronoiFacetList(idx, facetList, facetCenters)); + return Py_BuildValue("(NN)", pyopencv_from(facetList), pyopencv_from(facetCenters)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_initDelaunay(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_rect = NULL; + Rect rect; + + const char* keywords[] = { "rect", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.initDelaunay", (char**)keywords, &pyobj_rect) && + pyopencv_to_safe(pyobj_rect, rect, ArgInfo("rect", 0)) ) + { + ERRWRAP2(_self_->initDelaunay(rect)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_insert(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_pt = NULL; + Point2f pt; + int retval; + + const char* keywords[] = { "pt", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.insert", (char**)keywords, &pyobj_pt) && + pyopencv_to_safe(pyobj_pt, pt, ArgInfo("pt", 0)) ) + { + ERRWRAP2(retval = _self_->insert(pt)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_ptvec = NULL; + vector_Point2f ptvec; + + const char* keywords[] = { "ptvec", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.insert", (char**)keywords, &pyobj_ptvec) && + pyopencv_to_safe(pyobj_ptvec, ptvec, ArgInfo("ptvec", 0)) ) + { + ERRWRAP2(_self_->insert(ptvec)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("insert"); + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_locate(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_pt = NULL; + Point2f pt; + int edge; + int vertex; + int retval; + + const char* keywords[] = { "pt", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.locate", (char**)keywords, &pyobj_pt) && + pyopencv_to_safe(pyobj_pt, pt, ArgInfo("pt", 0)) ) + { + ERRWRAP2(retval = _self_->locate(pt, edge, vertex)); + return Py_BuildValue("(NNN)", pyopencv_from(retval), pyopencv_from(edge), pyopencv_from(vertex)); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_nextEdge(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_edge = NULL; + int edge=0; + int retval; + + const char* keywords[] = { "edge", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.nextEdge", (char**)keywords, &pyobj_edge) && + pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) ) + { + ERRWRAP2(retval = _self_->nextEdge(edge)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_rotateEdge(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_edge = NULL; + int edge=0; + PyObject* pyobj_rotate = NULL; + int rotate=0; + int retval; + + const char* keywords[] = { "edge", "rotate", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:Subdiv2D.rotateEdge", (char**)keywords, &pyobj_edge, &pyobj_rotate) && + pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) && + pyopencv_to_safe(pyobj_rotate, rotate, 
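+// Typical Python use of the Subdiv2D surface bound in this section:
+//
+//   sub = cv2.Subdiv2D((0, 0, w, h))   # bounding rect of the triangulation
+//   sub.insert((x, y))                 # a single point, or a list of points
+//   tris = sub.getTriangleList()       # Nx6: (p1x, p1y, p2x, p2y, p3x, p3y)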
ArgInfo("rotate", 0)) ) + { + ERRWRAP2(retval = _self_->rotateEdge(edge, rotate)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_Subdiv2D_symEdge(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_Subdiv2D_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'Subdiv2D' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_edge = NULL; + int edge=0; + int retval; + + const char* keywords[] = { "edge", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Subdiv2D.symEdge", (char**)keywords, &pyobj_edge) && + pyopencv_to_safe(pyobj_edge, edge, ArgInfo("edge", 0)) ) + { + ERRWRAP2(retval = _self_->symEdge(edge)); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (Subdiv2D) + +static PyGetSetDef pyopencv_Subdiv2D_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_Subdiv2D_methods[] = +{ + {"edgeDst", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_edgeDst, 0), "edgeDst(edge) -> retval, dstpt\n. @brief Returns the edge destination.\n. \n. @param edge Subdivision edge ID.\n. @param dstpt Output vertex location.\n. \n. @returns vertex ID."}, + {"edgeOrg", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_edgeOrg, 0), "edgeOrg(edge) -> retval, orgpt\n. @brief Returns the edge origin.\n. \n. @param edge Subdivision edge ID.\n. @param orgpt Output vertex location.\n. \n. @returns vertex ID."}, + {"findNearest", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_findNearest, 0), "findNearest(pt) -> retval, nearestPt\n. @brief Finds the subdivision vertex closest to the given point.\n. \n. @param pt Input point.\n. @param nearestPt Output subdivision vertex point.\n. \n. The function is another function that locates the input point within the subdivision. It finds the\n. subdivision vertex that is the closest to the input point. It is not necessarily one of vertices\n. of the facet containing the input point, though the facet (located using locate() ) is used as a\n. starting point.\n. \n. @returns vertex ID."}, + {"getEdge", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getEdge, 0), "getEdge(edge, nextEdgeType) -> retval\n. @brief Returns one of the edges related to the given edge.\n. \n. @param edge Subdivision edge ID.\n. @param nextEdgeType Parameter specifying which of the related edges to return.\n. The following values are possible:\n. - NEXT_AROUND_ORG next around the edge origin ( eOnext on the picture below if e is the input edge)\n. - NEXT_AROUND_DST next around the edge vertex ( eDnext )\n. - PREV_AROUND_ORG previous around the edge origin (reversed eRnext )\n. - PREV_AROUND_DST previous around the edge destination (reversed eLnext )\n. - NEXT_AROUND_LEFT next around the left facet ( eLnext )\n. - NEXT_AROUND_RIGHT next around the right facet ( eRnext )\n. - PREV_AROUND_LEFT previous around the left facet (reversed eOnext )\n. - PREV_AROUND_RIGHT previous around the right facet (reversed eDnext )\n. \n. ![sample output](pics/quadedge.png)\n. \n. @returns edge ID related to the input edge."}, + {"getEdgeList", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getEdgeList, 0), "getEdgeList() -> edgeList\n. @brief Returns a list of all edges.\n. \n. @param edgeList Output vector.\n. \n. The function gives each edge as a 4 numbers vector, where each two are one of the edge\n. vertices. i.e. 
org_x = v[0], org_y = v[1], dst_x = v[2], dst_y = v[3]."}, + {"getLeadingEdgeList", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getLeadingEdgeList, 0), "getLeadingEdgeList() -> leadingEdgeList\n. @brief Returns a list of the leading edge ID connected to each triangle.\n. \n. @param leadingEdgeList Output vector.\n. \n. The function gives one edge ID for each triangle."}, + {"getTriangleList", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getTriangleList, 0), "getTriangleList() -> triangleList\n. @brief Returns a list of all triangles.\n. \n. @param triangleList Output vector.\n. \n. The function gives each triangle as a 6 numbers vector, where each two are one of the triangle\n. vertices. i.e. p1_x = v[0], p1_y = v[1], p2_x = v[2], p2_y = v[3], p3_x = v[4], p3_y = v[5]."}, + {"getVertex", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getVertex, 0), "getVertex(vertex) -> retval, firstEdge\n. @brief Returns vertex location from vertex ID.\n. \n. @param vertex vertex ID.\n. @param firstEdge Optional. The first edge ID which is connected to the vertex.\n. @returns vertex (x,y)"}, + {"getVoronoiFacetList", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_getVoronoiFacetList, 0), "getVoronoiFacetList(idx) -> facetList, facetCenters\n. @brief Returns a list of all Voronoi facets.\n. \n. @param idx Vector of vertices IDs to consider. For all vertices you can pass empty vector.\n. @param facetList Output vector of the Voronoi facets.\n. @param facetCenters Output vector of the Voronoi facets center points."}, + {"initDelaunay", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_initDelaunay, 0), "initDelaunay(rect) -> None\n. @brief Creates a new empty Delaunay subdivision\n. \n. @param rect Rectangle that includes all of the 2D points that are to be added to the subdivision."}, + {"insert", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_insert, 0), "insert(pt) -> retval\n. @brief Insert a single point into a Delaunay triangulation.\n. \n. @param pt Point to insert.\n. \n. The function inserts a single point into a subdivision and modifies the subdivision topology\n. appropriately. If a point with the same coordinates exists already, no new point is added.\n. @returns the ID of the point.\n. \n. @note If the point is outside of the triangulation specified rect a runtime error is raised.\n\n\n\ninsert(ptvec) -> None\n. @brief Insert multiple points into a Delaunay triangulation.\n. \n. @param ptvec Points to insert.\n. \n. The function inserts a vector of points into a subdivision and modifies the subdivision topology\n. appropriately."}, + {"locate", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_locate, 0), "locate(pt) -> retval, edge, vertex\n. @brief Returns the location of a point within a Delaunay triangulation.\n. \n. @param pt Point to locate.\n. @param edge Output edge that the point belongs to or is located to the right of it.\n. @param vertex Optional output vertex the input point coincides with.\n. \n. The function locates the input point within the subdivision and gives one of the triangle edges\n. or vertices.\n. \n. @returns an integer which specify one of the following five cases for point location:\n. - The point falls into some facet. The function returns #PTLOC_INSIDE and edge will contain one of\n. edges of the facet.\n. - The point falls onto the edge. The function returns #PTLOC_ON_EDGE and edge will contain this edge.\n. - The point coincides with one of the subdivision vertices. The function returns #PTLOC_VERTEX and\n. vertex will contain a pointer to the vertex.\n. - The point is outside the subdivision reference rectangle. 
The function returns #PTLOC_OUTSIDE_RECT\n. and no pointers are filled.\n. - One of input arguments is invalid. A runtime error is raised or, if silent or \"parent\" error\n. processing mode is selected, #PTLOC_ERROR is returned."}, + {"nextEdge", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_nextEdge, 0), "nextEdge(edge) -> retval\n. @brief Returns next edge around the edge origin.\n. \n. @param edge Subdivision edge ID.\n. \n. @returns an integer which is next edge ID around the edge origin: eOnext on the\n. picture above if e is the input edge)."}, + {"rotateEdge", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_rotateEdge, 0), "rotateEdge(edge, rotate) -> retval\n. @brief Returns another edge of the same quad-edge.\n. \n. @param edge Subdivision edge ID.\n. @param rotate Parameter specifying which of the edges of the same quad-edge as the input\n. one to return. The following values are possible:\n. - 0 - the input edge ( e on the picture below if e is the input edge)\n. - 1 - the rotated edge ( eRot )\n. - 2 - the reversed edge (reversed e (in green))\n. - 3 - the reversed rotated edge (reversed eRot (in green))\n. \n. @returns one of the edges ID of the same quad-edge as the input edge."}, + {"symEdge", CV_PY_FN_WITH_KW_(pyopencv_cv_Subdiv2D_symEdge, 0), "symEdge(edge) -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (Subdiv2D) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_Subdiv2D_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_Subdiv2D_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// TickMeter (Generic) +//================================================================================ + +// GetSet (TickMeter) + + + +// Methods (TickMeter) + +static int pyopencv_cv_TickMeter_TickMeter(pyopencv_TickMeter_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::TickMeter())); + return 0; + } + + return -1; +} + +static PyObject* pyopencv_cv_TickMeter_getAvgTimeMilli(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getAvgTimeMilli()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getAvgTimeSec(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getAvgTimeSec()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getCounter(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if 
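+/* Illustrative sketch (not generated output): the Subdiv2D bindings above
+ * wrap cv::Subdiv2D's Delaunay API; a minimal C++ usage, assuming an
+ * arbitrary 640x480 reference rectangle:
+ *
+ *     cv::Subdiv2D subdiv(cv::Rect(0, 0, 640, 480));   // initDelaunay(rect)
+ *     subdiv.insert(cv::Point2f(100.f, 100.f));        // insert(pt) -> vertex ID
+ *     subdiv.insert(cv::Point2f(300.f, 200.f));
+ *     subdiv.insert(cv::Point2f(200.f, 350.f));
+ *     std::vector<cv::Vec6f> tris;
+ *     subdiv.getTriangleList(tris);  // each Vec6f packs (p1_x,p1_y, p2_x,p2_y, p3_x,p3_y)
+ */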
(!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + int64 retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getCounter()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getFPS(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getFPS()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getTimeMicro(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getTimeMicro()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getTimeMilli(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getTimeMilli()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getTimeSec(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + double retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getTimeSec()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_getTimeTicks(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + int64 retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getTimeTicks()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_reset(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->reset()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_start(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr 
<cv::TickMeter> _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->start()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_TickMeter_stop(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::TickMeter> * self1 = 0; + if (!pyopencv_TickMeter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'TickMeter' or its derivative)"); + Ptr<cv::TickMeter> _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->stop()); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (TickMeter) + +static PyGetSetDef pyopencv_TickMeter_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_TickMeter_methods[] = +{ + {"getAvgTimeMilli", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getAvgTimeMilli, 0), "getAvgTimeMilli() -> retval\n."}, + {"getAvgTimeSec", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getAvgTimeSec, 0), "getAvgTimeSec() -> retval\n."}, + {"getCounter", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getCounter, 0), "getCounter() -> retval\n."}, + {"getFPS", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getFPS, 0), "getFPS() -> retval\n."}, + {"getTimeMicro", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getTimeMicro, 0), "getTimeMicro() -> retval\n."}, + {"getTimeMilli", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getTimeMilli, 0), "getTimeMilli() -> retval\n."}, + {"getTimeSec", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getTimeSec, 0), "getTimeSec() -> retval\n."}, + {"getTimeTicks", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_getTimeTicks, 0), "getTimeTicks() -> retval\n."}, + {"reset", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_reset, 0), "reset() -> None\n."}, + {"start", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_start, 0), "start() -> None\n."}, + {"stop", CV_PY_FN_WITH_KW_(pyopencv_cv_TickMeter_stop, 0), "stop() -> None\n."}, + + {NULL, NULL} +}; + +// Converter (TickMeter) + +template<> +struct PyOpenCV_Converter< Ptr<cv::TickMeter> > +{ + static PyObject* from(const Ptr<cv::TickMeter>& r) + { + return pyopencv_TickMeter_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::TickMeter>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::TickMeter> * dst_; + if (pyopencv_TickMeter_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::TickMeter> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// UMat (Generic) +//================================================================================ + +// GetSet (UMat) + + +static PyObject* pyopencv_UMat_get_offset(pyopencv_UMat_t* p, void *closure) +{ + return pyopencv_from(p->v->offset); +} + +static int pyopencv_UMat_set_offset(pyopencv_UMat_t* p, PyObject *value, void *closure) +{ + if (!value) + { + PyErr_SetString(PyExc_TypeError, "Cannot delete the offset attribute"); + return -1; + } + return pyopencv_to_safe(value, p->v->offset, ArgInfo("value", false)) ?
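+/* Illustrative sketch (not generated output): the TickMeter methods bound
+ * above time a code section; the accessors derive everything from the
+ * accumulated time and the start/stop counter:
+ *
+ *     cv::TickMeter tm;
+ *     tm.start();
+ *     // ... workload ...
+ *     tm.stop();
+ *     double ms  = tm.getTimeMilli();     // total time over all start/stop pairs
+ *     double avg = tm.getAvgTimeMilli();  // total time / getCounter()
+ *     double fps = tm.getFPS();           // getCounter() / total seconds
+ */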
0 : -1; +} + + +// Methods (UMat) + +static int pyopencv_cv_UMat_UMat(pyopencv_UMat_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(9); + + { + PyObject* pyobj_usageFlags = NULL; + UMatUsageFlags usageFlags=USAGE_DEFAULT; + + const char* keywords[] = { "usageFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:UMat", (char**)keywords, &pyobj_usageFlags) && + pyopencv_to_safe(pyobj_usageFlags, usageFlags, ArgInfo("usageFlags", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(usageFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_usageFlags = NULL; + UMatUsageFlags usageFlags=USAGE_DEFAULT; + + const char* keywords[] = { "rows", "cols", "type", "usageFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:UMat", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_usageFlags) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_usageFlags, usageFlags, ArgInfo("usageFlags", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(rows, cols, type, usageFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_usageFlags = NULL; + UMatUsageFlags usageFlags=USAGE_DEFAULT; + + const char* keywords[] = { "size", "type", "usageFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:UMat", (char**)keywords, &pyobj_size, &pyobj_type, &pyobj_usageFlags) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_usageFlags, usageFlags, ArgInfo("usageFlags", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(size, type, usageFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_usageFlags = NULL; + UMatUsageFlags usageFlags=USAGE_DEFAULT; + + const char* keywords[] = { "rows", "cols", "type", "s", "usageFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:UMat", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_s, &pyobj_usageFlags) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_usageFlags, usageFlags, ArgInfo("usageFlags", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(rows, cols, type, s, usageFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_usageFlags = 
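+/* Illustrative sketch (not generated output): the overload dispatch above
+ * tries each cv::UMat constructor signature in turn; the equivalent C++
+ * constructions are, e.g.:
+ *
+ *     cv::UMat a(480, 640, CV_8UC3);                          // rows, cols, type
+ *     cv::UMat b(cv::Size(640, 480), CV_32F, cv::Scalar(0));  // size, type, fill value
+ *     cv::UMat roi(a, cv::Rect(10, 10, 100, 100));            // submatrix view of a
+ */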
NULL; + UMatUsageFlags usageFlags=USAGE_DEFAULT; + + const char* keywords[] = { "size", "type", "s", "usageFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:UMat", (char**)keywords, &pyobj_size, &pyobj_type, &pyobj_s, &pyobj_usageFlags) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_usageFlags, usageFlags, ArgInfo("usageFlags", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(size, type, s, usageFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + + const char* keywords[] = { "m", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:UMat", (char**)keywords, &pyobj_m) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(m))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + PyObject* pyobj_rowRange = NULL; + Range rowRange; + PyObject* pyobj_colRange = NULL; + Range colRange=Range::all(); + + const char* keywords[] = { "m", "rowRange", "colRange", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:UMat", (char**)keywords, &pyobj_m, &pyobj_rowRange, &pyobj_colRange) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_rowRange, rowRange, ArgInfo("rowRange", 0)) && + pyopencv_to_safe(pyobj_colRange, colRange, ArgInfo("colRange", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(m, rowRange, colRange))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + PyObject* pyobj_roi = NULL; + Rect roi; + + const char* keywords[] = { "m", "roi", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:UMat", (char**)keywords, &pyobj_m, &pyobj_roi) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_roi, roi, ArgInfo("roi", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(m, roi))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + UMat m; + PyObject* pyobj_ranges = NULL; + vector_Range ranges; + + const char* keywords[] = { "m", "ranges", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:UMat", (char**)keywords, &pyobj_m, &pyobj_ranges) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_ranges, ranges, ArgInfo("ranges", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::UMat(m, ranges))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("UMat"); + + return -1; +} + +static PyObject* pyopencv_cv_UMat_context_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv_UMat_context()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_get(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_UMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must 
be 'UMat' or its derivative)"); + Ptr<cv::UMat> _self_ = *(self1); + Mat retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv_UMat_get(_self_)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_handle(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::UMat> * self1 = 0; + if (!pyopencv_UMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'UMat' or its derivative)"); + Ptr<cv::UMat> _self_ = *(self1); + PyObject* pyobj_accessFlags = NULL; + AccessFlag accessFlags=static_cast<AccessFlag>(0); + void* retval; + + const char* keywords[] = { "accessFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:UMat.handle", (char**)keywords, &pyobj_accessFlags) && + pyopencv_to_safe(pyobj_accessFlags, accessFlags, ArgInfo("accessFlags", 0)) ) + { + ERRWRAP2(retval = _self_->handle(accessFlags)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_isContinuous(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::UMat> * self1 = 0; + if (!pyopencv_UMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'UMat' or its derivative)"); + Ptr<cv::UMat> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isContinuous()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_isSubmatrix(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::UMat> * self1 = 0; + if (!pyopencv_UMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'UMat' or its derivative)"); + Ptr<cv::UMat> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isSubmatrix()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_UMat_queue_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv_UMat_queue()); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (UMat) + +static PyGetSetDef pyopencv_UMat_getseters[] = +{ + {(char*)"offset", (getter)pyopencv_UMat_get_offset, (setter)pyopencv_UMat_set_offset, (char*)"offset", NULL}, + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_UMat_methods[] = +{ + {"context", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_context_static, METH_STATIC), "context() -> retval\n."}, + {"get", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_get, 0), "get() -> retval\n."}, + {"handle", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_handle, 0), "handle(accessFlags) -> retval\n."}, + {"isContinuous", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_isContinuous, 0), "isContinuous() -> retval\n."}, + {"isSubmatrix", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_isSubmatrix, 0), "isSubmatrix() -> retval\n."}, + {"queue", CV_PY_FN_WITH_KW_(pyopencv_cv_UMat_queue_static, METH_STATIC), "queue() -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (UMat) + +template<> +struct PyOpenCV_Converter< Ptr<cv::UMat> > +{ + static PyObject* from(const Ptr<cv::UMat>& r) + { + return pyopencv_UMat_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::UMat>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::UMat> * dst_; + if (pyopencv_UMat_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + { + Ptr<cv::Mat> _src; + if (pyopencv_to_safe(src, _src, info)) + { + return
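+/* Illustrative sketch (not generated output): UMat.get() above downloads the
+ * (possibly device-side) buffer into a cv::Mat; the C++ equivalent is the
+ * explicit map/copy via getMat():
+ *
+ *     cv::UMat u(480, 640, CV_8UC1, cv::Scalar(0));
+ *     cv::Mat  m = u.getMat(cv::ACCESS_READ);  // host view/copy of the data
+ *     bool contiguous = u.isContinuous();
+ */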
cv_mappable_to(_src, dst); + } + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// VideoCapture (Generic) +//================================================================================ + +// GetSet (VideoCapture) + + + +// Methods (VideoCapture) + +static int pyopencv_cv_VideoCapture_VideoCapture(pyopencv_VideoCapture_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(5); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoCapture())); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=CAP_ANY; + + const char* keywords[] = { "filename", "apiPreference", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:VideoCapture", (char**)keywords, &pyobj_filename, &pyobj_apiPreference) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoCapture(filename, apiPreference))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_params = NULL; + vector_int params; + + const char* keywords[] = { "filename", "apiPreference", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:VideoCapture", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoCapture(filename, apiPreference, params))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_index = NULL; + int index=0; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=CAP_ANY; + + const char* keywords[] = { "index", "apiPreference", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:VideoCapture", (char**)keywords, &pyobj_index, &pyobj_apiPreference) && + pyopencv_to_safe(pyobj_index, index, ArgInfo("index", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoCapture(index, apiPreference))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_index = NULL; + int index=0; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_params = NULL; + vector_int params; + + const char* keywords[] = { "index", "apiPreference", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:VideoCapture", (char**)keywords, &pyobj_index, &pyobj_apiPreference, &pyobj_params) && + pyopencv_to_safe(pyobj_index, index, ArgInfo("index", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 
0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoCapture(index, apiPreference, params))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("VideoCapture"); + + return -1; +} + +static PyObject* pyopencv_cv_VideoCapture_get(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_propId = NULL; + int propId=0; + double retval; + + const char* keywords[] = { "propId", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:VideoCapture.get", (char**)keywords, &pyobj_propId) && + pyopencv_to_safe(pyobj_propId, propId, ArgInfo("propId", 0)) ) + { + ERRWRAP2(retval = _self_->get(propId)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_getBackendName(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getBackendName()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_getExceptionMode(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getExceptionMode()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_grab(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->grab()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_isOpened(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isOpened()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_open(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=CAP_ANY; + bool 
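+/* Illustrative sketch (not generated output): the (index, apiPreference,
+ * params) overload parsed above forwards (propertyId, value) pairs to the
+ * backend at open time, e.g. requesting a capture resolution:
+ *
+ *     std::vector<int> params = { cv::CAP_PROP_FRAME_WIDTH,  1280,
+ *                                 cv::CAP_PROP_FRAME_HEIGHT, 720 };
+ *     cv::VideoCapture cap(0, cv::CAP_ANY, params);
+ */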
retval; + + const char* keywords[] = { "filename", "apiPreference", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:VideoCapture.open", (char**)keywords, &pyobj_filename, &pyobj_apiPreference) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, apiPreference)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_params = NULL; + vector_int params; + bool retval; + + const char* keywords[] = { "filename", "apiPreference", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:VideoCapture.open", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, apiPreference, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_index = NULL; + int index=0; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=CAP_ANY; + bool retval; + + const char* keywords[] = { "index", "apiPreference", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:VideoCapture.open", (char**)keywords, &pyobj_index, &pyobj_apiPreference) && + pyopencv_to_safe(pyobj_index, index, ArgInfo("index", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) ) + { + ERRWRAP2(retval = _self_->open(index, apiPreference)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_index = NULL; + int index=0; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_params = NULL; + vector_int params; + bool retval; + + const char* keywords[] = { "index", "apiPreference", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:VideoCapture.open", (char**)keywords, &pyobj_index, &pyobj_apiPreference, &pyobj_params) && + pyopencv_to_safe(pyobj_index, index, ArgInfo("index", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = _self_->open(index, apiPreference, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("open"); + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_read(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + bool retval; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:VideoCapture.read", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) ) + { + ERRWRAP2(retval = _self_->read(image)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image)); + } + + + 
pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + bool retval; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:VideoCapture.read", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) ) + { + ERRWRAP2(retval = _self_->read(image)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("read"); + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_release(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::VideoCapture> * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr<cv::VideoCapture> _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->release()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_retrieve(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::VideoCapture> * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr<cv::VideoCapture> _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + PyObject* pyobj_flag = NULL; + int flag=0; + bool retval; + + const char* keywords[] = { "image", "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|OO:VideoCapture.retrieve", (char**)keywords, &pyobj_image, &pyobj_flag) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(retval = _self_->retrieve(image, flag)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image)); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + PyObject* pyobj_flag = NULL; + int flag=0; + bool retval; + + const char* keywords[] = { "image", "flag", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|OO:VideoCapture.retrieve", (char**)keywords, &pyobj_image, &pyobj_flag) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 1)) && + pyopencv_to_safe(pyobj_flag, flag, ArgInfo("flag", 0)) ) + { + ERRWRAP2(retval = _self_->retrieve(image, flag)); + return Py_BuildValue("(NN)", pyopencv_from(retval), pyopencv_from(image)); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("retrieve"); + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_set(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr<cv::VideoCapture> * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr<cv::VideoCapture> _self_ = *(self1); + PyObject* pyobj_propId = NULL; + int propId=0; + PyObject* pyobj_value = NULL; + double value=0; + bool retval; + + const char* keywords[] = { "propId", "value", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:VideoCapture.set", (char**)keywords, &pyobj_propId, &pyobj_value) && + pyopencv_to_safe(pyobj_propId, propId, ArgInfo("propId", 0)) && + pyopencv_to_safe(pyobj_value, value, ArgInfo("value", 0)) ) + { + ERRWRAP2(retval = _self_->set(propId, value)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoCapture_setExceptionMode(PyObject* self,
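+/* Illustrative sketch (not generated output): grab()/retrieve(), bound above,
+ * split frame capture from decoding so frames taken from multiple
+ * unsynchronized cameras stay close in time:
+ *
+ *     cap0.grab(); cap1.grab();   // fast: latch one frame on each device
+ *     cv::Mat f0, f1;
+ *     cap0.retrieve(f0);          // slower: decode the latched frames
+ *     cap1.retrieve(f1);
+ */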
PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoCapture_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoCapture' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_enable = NULL; + bool enable=0; + + const char* keywords[] = { "enable", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:VideoCapture.setExceptionMode", (char**)keywords, &pyobj_enable) && + pyopencv_to_safe(pyobj_enable, enable, ArgInfo("enable", 0)) ) + { + ERRWRAP2(_self_->setExceptionMode(enable)); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (VideoCapture) + +static PyGetSetDef pyopencv_VideoCapture_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_VideoCapture_methods[] = +{ + {"get", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_get, 0), "get(propId) -> retval\n. @brief Returns the specified VideoCapture property\n. \n. @param propId Property identifier from cv::VideoCaptureProperties (eg. cv::CAP_PROP_POS_MSEC, cv::CAP_PROP_POS_FRAMES, ...)\n. or one from @ref videoio_flags_others\n. @return Value for the specified property. Value 0 is returned when querying a property that is\n. not supported by the backend used by the VideoCapture instance.\n. \n. @note Reading / writing properties involves many layers. Some unexpected result might happens\n. along this chain.\n. @code{.txt}\n. VideoCapture -> API Backend -> Operating System -> Device Driver -> Device Hardware\n. @endcode\n. The returned value might be different from what really used by the device or it could be encoded\n. using device dependent rules (eg. steps or percentage). Effective behaviour depends from device\n. driver and API Backend"}, + {"getBackendName", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_getBackendName, 0), "getBackendName() -> retval\n. @brief Returns used backend API name\n. \n. @note Stream should be opened."}, + {"getExceptionMode", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_getExceptionMode, 0), "getExceptionMode() -> retval\n."}, + {"grab", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_grab, 0), "grab() -> retval\n. @brief Grabs the next frame from video file or capturing device.\n. \n. @return `true` (non-zero) in the case of success.\n. \n. The method/function grabs the next frame from video file or camera and returns true (non-zero) in\n. the case of success.\n. \n. The primary use of the function is in multi-camera environments, especially when the cameras do not\n. have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that\n. call the slower method VideoCapture::retrieve() to decode and get frame from each camera. This way\n. the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames\n. from different cameras will be closer in time.\n. \n. Also, when a connected camera is multi-head (for example, a stereo camera or a Kinect device), the\n. correct way of retrieving data from it is to call VideoCapture::grab() first and then call\n. VideoCapture::retrieve() one or more times with different values of the channel parameter.\n. \n. @ref tutorial_kinect_openni"}, + {"isOpened", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_isOpened, 0), "isOpened() -> retval\n. @brief Returns true if video capturing has been initialized already.\n. \n. If the previous call to VideoCapture constructor or VideoCapture::open() succeeded, the method returns\n. 
true."}, + {"open", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_open, 0), "open(filename[, apiPreference]) -> retval\n. @brief Opens a video file or a capturing device or an IP video stream for video capturing.\n. \n. @overload\n. \n. Parameters are same as the constructor VideoCapture(const String& filename, int apiPreference = CAP_ANY)\n. @return `true` if the file has been successfully opened\n. \n. The method first calls VideoCapture::release to close the already opened file or camera.\n\n\n\nopen(filename, apiPreference, params) -> retval\n. @brief Opens a camera for video capturing\n. \n. @overload\n. \n. The `params` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`.\n. See cv::VideoCaptureProperties\n. \n. @return `true` if the file has been successfully opened\n. \n. The method first calls VideoCapture::release to close the already opened file or camera.\n\n\n\nopen(index[, apiPreference]) -> retval\n. @brief Opens a camera for video capturing\n. \n. @overload\n. \n. Parameters are same as the constructor VideoCapture(int index, int apiPreference = CAP_ANY)\n. @return `true` if the camera has been successfully opened.\n. \n. The method first calls VideoCapture::release to close the already opened file or camera.\n\n\n\nopen(index, apiPreference, params) -> retval\n. @brief Returns true if video capturing has been initialized already.\n. \n. @overload\n. \n. The `params` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`.\n. See cv::VideoCaptureProperties\n. \n. @return `true` if the camera has been successfully opened.\n. \n. The method first calls VideoCapture::release to close the already opened file or camera."}, + {"read", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_read, 0), "read([, image]) -> retval, image\n. @brief Grabs, decodes and returns the next video frame.\n. \n. @param [out] image the video frame is returned here. If no frames has been grabbed the image will be empty.\n. @return `false` if no frames has been grabbed\n. \n. The method/function combines VideoCapture::grab() and VideoCapture::retrieve() in one call. This is the\n. most convenient method for reading video files or capturing data from decode and returns the just\n. grabbed frame. If no frames has been grabbed (camera has been disconnected, or there are no more\n. frames in video file), the method returns false and the function returns empty image (with %cv::Mat, test it with Mat::empty()).\n. \n. @note In @ref videoio_c \"C API\", functions cvRetrieveFrame() and cv.RetrieveFrame() return image stored inside the video\n. capturing structure. It is not allowed to modify or release the image! You can copy the frame using\n. cvCloneImage and then do whatever you want with the copy."}, + {"release", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_release, 0), "release() -> None\n. @brief Closes video file or capturing device.\n. \n. The method is automatically called by subsequent VideoCapture::open and by VideoCapture\n. destructor.\n. \n. The C function also deallocates memory and clears \\*capture pointer."}, + {"retrieve", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_retrieve, 0), "retrieve([, image[, flag]]) -> retval, image\n. @brief Decodes and returns the grabbed video frame.\n. \n. @param [out] image the video frame is returned here. If no frames has been grabbed the image will be empty.\n. @param flag it could be a frame index or a driver specific flag\n. 
@return `false` if no frames has been grabbed\n. \n. The method decodes and returns the just grabbed frame. If no frames has been grabbed\n. (camera has been disconnected, or there are no more frames in video file), the method returns false\n. and the function returns an empty image (with %cv::Mat, test it with Mat::empty()).\n. \n. @sa read()\n. \n. @note In @ref videoio_c \"C API\", functions cvRetrieveFrame() and cv.RetrieveFrame() return image stored inside the video\n. capturing structure. It is not allowed to modify or release the image! You can copy the frame using\n. cvCloneImage and then do whatever you want with the copy."}, + {"set", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_set, 0), "set(propId, value) -> retval\n. @brief Sets a property in the VideoCapture.\n. \n. @param propId Property identifier from cv::VideoCaptureProperties (eg. cv::CAP_PROP_POS_MSEC, cv::CAP_PROP_POS_FRAMES, ...)\n. or one from @ref videoio_flags_others\n. @param value Value of the property.\n. @return `true` if the property is supported by backend used by the VideoCapture instance.\n. @note Even if it returns `true` this doesn't ensure that the property\n. value has been accepted by the capture device. See note in VideoCapture::get()"}, + {"setExceptionMode", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoCapture_setExceptionMode, 0), "setExceptionMode(enable) -> None\n. Switches exceptions mode\n. *\n. * methods raise exceptions if not successful instead of returning an error code"}, + + {NULL, NULL} +}; + +// Converter (VideoCapture) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_VideoCapture_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_VideoCapture_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// VideoWriter (Generic) +//================================================================================ + +// GetSet (VideoWriter) + + + +// Methods (VideoWriter) + +static int pyopencv_cv_VideoWriter_VideoWriter(pyopencv_VideoWriter_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + pyPrepareArgumentConversionErrorsStorage(5); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoWriter())); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_isColor = NULL; + bool isColor=true; + + const char* keywords[] = { "filename", "fourcc", "fps", "frameSize", "isColor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:VideoWriter", (char**)keywords, &pyobj_filename, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_isColor) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_isColor, isColor, ArgInfo("isColor", 0)) ) + { + new 
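+/* Illustrative sketch (not generated output): a typical capture loop over the
+ * VideoCapture API documented above; read() combines grab() and retrieve()
+ * and yields an empty frame at end of stream:
+ *
+ *     cv::VideoCapture cap("input.avi");
+ *     cv::Mat frame;
+ *     while (cap.isOpened() && cap.read(frame))
+ *         process(frame);   // process() is a placeholder for user code
+ *     cap.release();
+ */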
(&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoWriter(filename, fourcc, fps, frameSize, isColor))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_isColor = NULL; + bool isColor=true; + + const char* keywords[] = { "filename", "apiPreference", "fourcc", "fps", "frameSize", "isColor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:VideoWriter", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_isColor) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_isColor, isColor, ArgInfo("isColor", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoWriter(filename, apiPreference, fourcc, fps, frameSize, isColor))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_params = NULL; + vector_int params; + + const char* keywords[] = { "filename", "fourcc", "fps", "frameSize", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO:VideoWriter", (char**)keywords, &pyobj_filename, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoWriter(filename, fourcc, fps, frameSize, params))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_params = NULL; + vector_int params; + + const char* keywords[] = { "filename", "apiPreference", "fourcc", "fps", "frameSize", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO:VideoWriter", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, 
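+/* Illustrative sketch (not generated output): the (filename, fourcc, fps,
+ * frameSize, params) overload parsed here passes encoder properties as
+ * (propertyId, value) pairs; whether a given property (e.g.
+ * VIDEOWRITER_PROP_QUALITY) is honored depends on the backend:
+ *
+ *     std::vector<int> params = { cv::VIDEOWRITER_PROP_QUALITY, 95 };
+ *     cv::VideoWriter w("out.avi",
+ *                       cv::VideoWriter::fourcc('M', 'J', 'P', 'G'),
+ *                       30.0, cv::Size(640, 480), params);
+ */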
ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::VideoWriter(filename, apiPreference, fourcc, fps, frameSize, params))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("VideoWriter"); + + return -1; +} + +static PyObject* pyopencv_cv_VideoWriter_fourcc_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + PyObject* pyobj_c1 = NULL; + char c1; + PyObject* pyobj_c2 = NULL; + char c2; + PyObject* pyobj_c3 = NULL; + char c3; + PyObject* pyobj_c4 = NULL; + char c4; + int retval; + + const char* keywords[] = { "c1", "c2", "c3", "c4", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:VideoWriter.fourcc", (char**)keywords, &pyobj_c1, &pyobj_c2, &pyobj_c3, &pyobj_c4) && + convert_to_char(pyobj_c1, &c1, ArgInfo("c1", 0)) && + convert_to_char(pyobj_c2, &c2, ArgInfo("c2", 0)) && + convert_to_char(pyobj_c3, &c3, ArgInfo("c3", 0)) && + convert_to_char(pyobj_c4, &c4, ArgInfo("c4", 0)) ) + { + ERRWRAP2(retval = cv::VideoWriter::fourcc(c1, c2, c3, c4)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_get(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_propId = NULL; + int propId=0; + double retval; + + const char* keywords[] = { "propId", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:VideoWriter.get", (char**)keywords, &pyobj_propId) && + pyopencv_to_safe(pyobj_propId, propId, ArgInfo("propId", 0)) ) + { + ERRWRAP2(retval = _self_->get(propId)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_getBackendName(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getBackendName()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_isOpened(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isOpened()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_open(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(4); + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_isColor = NULL; + bool isColor=true; 
+ bool retval; + + const char* keywords[] = { "filename", "fourcc", "fps", "frameSize", "isColor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:VideoWriter.open", (char**)keywords, &pyobj_filename, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_isColor) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_isColor, isColor, ArgInfo("isColor", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, fourcc, fps, frameSize, isColor)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_isColor = NULL; + bool isColor=true; + bool retval; + + const char* keywords[] = { "filename", "apiPreference", "fourcc", "fps", "frameSize", "isColor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO|O:VideoWriter.open", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_isColor) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_isColor, isColor, ArgInfo("isColor", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, apiPreference, fourcc, fps, frameSize, isColor)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_params = NULL; + vector_int params; + bool retval; + + const char* keywords[] = { "filename", "fourcc", "fps", "frameSize", "params", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOO:VideoWriter.open", (char**)keywords, &pyobj_filename, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, fourcc, fps, frameSize, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_filename = NULL; + String filename; + PyObject* pyobj_apiPreference = NULL; + int apiPreference=0; + PyObject* pyobj_fourcc = NULL; + int fourcc=0; + PyObject* pyobj_fps = NULL; + double fps=0; + PyObject* pyobj_frameSize = NULL; + Size frameSize; + PyObject* pyobj_params = NULL; + vector_int params; + bool retval; + + const char* keywords[] = { "filename", "apiPreference", "fourcc", "fps", "frameSize", "params", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOOOOO:VideoWriter.open", (char**)keywords, &pyobj_filename, &pyobj_apiPreference, &pyobj_fourcc, &pyobj_fps, &pyobj_frameSize, &pyobj_params) && + pyopencv_to_safe(pyobj_filename, filename, ArgInfo("filename", 0)) && + pyopencv_to_safe(pyobj_apiPreference, apiPreference, ArgInfo("apiPreference", 0)) && + pyopencv_to_safe(pyobj_fourcc, fourcc, ArgInfo("fourcc", 0)) && + pyopencv_to_safe(pyobj_fps, fps, ArgInfo("fps", 0)) && + pyopencv_to_safe(pyobj_frameSize, frameSize, ArgInfo("frameSize", 0)) && + pyopencv_to_safe(pyobj_params, params, ArgInfo("params", 0)) ) + { + ERRWRAP2(retval = _self_->open(filename, apiPreference, fourcc, fps, frameSize, params)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("open"); + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_release(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->release()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_set(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_propId = NULL; + int propId=0; + PyObject* pyobj_value = NULL; + double value=0; + bool retval; + + const char* keywords[] = { "propId", "value", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:VideoWriter.set", (char**)keywords, &pyobj_propId, &pyobj_value) && + pyopencv_to_safe(pyobj_propId, propId, ArgInfo("propId", 0)) && + pyopencv_to_safe(pyobj_value, value, ArgInfo("value", 0)) ) + { + ERRWRAP2(retval = _self_->set(propId, value)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_VideoWriter_write(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv; + + + Ptr * self1 = 0; + if (!pyopencv_VideoWriter_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'VideoWriter' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:VideoWriter.write", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(_self_->write(image)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:VideoWriter.write", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(_self_->write(image)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("write"); + + return NULL; +} + + + +// Tables (VideoWriter) + +static PyGetSetDef pyopencv_VideoWriter_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_VideoWriter_methods[] = +{ + {"fourcc", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_fourcc_static, 
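+/* Illustrative sketch (not generated output): fourcc() packs four characters
+ * into the integer codec code the writer expects; write() then appends BGR
+ * frames of the declared size:
+ *
+ *     int fourcc = cv::VideoWriter::fourcc('M', 'J', 'P', 'G');
+ *     cv::VideoWriter w("out.avi", fourcc, 30.0, cv::Size(640, 480));
+ *     w.write(frame);   // frame: CV_8UC3, 640x480, BGR order
+ *     w.release();
+ */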
METH_STATIC), "fourcc(c1, c2, c3, c4) -> retval\n. @brief Concatenates 4 chars to a fourcc code\n. \n. @return a fourcc code\n. \n. This static method constructs the fourcc code of the codec to be used in the constructor\n. VideoWriter::VideoWriter or VideoWriter::open."}, + {"get", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_get, 0), "get(propId) -> retval\n. @brief Returns the specified VideoWriter property\n. \n. @param propId Property identifier from cv::VideoWriterProperties (eg. cv::VIDEOWRITER_PROP_QUALITY)\n. or one of @ref videoio_flags_others\n. \n. @return Value for the specified property. Value 0 is returned when querying a property that is\n. not supported by the backend used by the VideoWriter instance."}, + {"getBackendName", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_getBackendName, 0), "getBackendName() -> retval\n. @brief Returns used backend API name\n. \n. @note Stream should be opened."}, + {"isOpened", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_isOpened, 0), "isOpened() -> retval\n. @brief Returns true if video writer has been successfully initialized."}, + {"open", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_open, 0), "open(filename, fourcc, fps, frameSize[, isColor]) -> retval\n. @brief Initializes or reinitializes video writer.\n. \n. The method opens video writer. Parameters are the same as in the constructor\n. VideoWriter::VideoWriter.\n. @return `true` if video writer has been successfully initialized\n. \n. The method first calls VideoWriter::release to close the already opened file.\n\n\n\nopen(filename, apiPreference, fourcc, fps, frameSize[, isColor]) -> retval\n. @overload\n\n\n\nopen(filename, fourcc, fps, frameSize, params) -> retval\n. @overload\n\n\n\nopen(filename, apiPreference, fourcc, fps, frameSize, params) -> retval\n. @overload"}, + {"release", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_release, 0), "release() -> None\n. @brief Closes the video writer.\n. \n. The method is automatically called by subsequent VideoWriter::open and by the VideoWriter\n. destructor."}, + {"set", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_set, 0), "set(propId, value) -> retval\n. @brief Sets a property in the VideoWriter.\n. \n. @param propId Property identifier from cv::VideoWriterProperties (eg. cv::VIDEOWRITER_PROP_QUALITY)\n. or one of @ref videoio_flags_others\n. \n. @param value Value of the property.\n. @return `true` if the property is supported by the backend used by the VideoWriter instance."}, + {"write", CV_PY_FN_WITH_KW_(pyopencv_cv_VideoWriter_write, 0), "write(image) -> None\n. @brief Writes the next video frame\n. \n. @param image The written frame. In general, color images are expected in BGR format.\n. \n. The function/method writes the specified image to video file. It must have the same size as has\n. 
been specified when opening the video writer."}, + + {NULL, NULL} +}; + +// Converter (VideoWriter) + +template<> +struct PyOpenCV_Converter< Ptr<cv::VideoWriter> > +{ + static PyObject* from(const Ptr<cv::VideoWriter>& r) + { + return pyopencv_VideoWriter_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::VideoWriter>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::VideoWriter> * dst_; + if (pyopencv_VideoWriter_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::VideoWriter> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_BufferPool (Generic) +//================================================================================ + +// GetSet (cuda_BufferPool) + + + +// Methods (cuda_BufferPool) + +static PyObject* pyopencv_cv_cuda_cuda_BufferPool_getAllocator(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::BufferPool> * self1 = 0; + if (!pyopencv_cuda_BufferPool_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_BufferPool' or its derivative)"); + Ptr<cv::cuda::BufferPool> _self_ = *(self1); + Ptr<GpuMat::Allocator> retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->getAllocator()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_BufferPool_getBuffer(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::BufferPool> * self1 = 0; + if (!pyopencv_cuda_BufferPool_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_BufferPool' or its derivative)"); + Ptr<cv::cuda::BufferPool> _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + GpuMat retval; + + const char* keywords[] = { "rows", "cols", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_BufferPool.getBuffer", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(retval = _self_->getBuffer(rows, cols, type)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + GpuMat retval; + + const char* keywords[] = { "size", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_BufferPool.getBuffer", (char**)keywords, &pyobj_size, &pyobj_type) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(retval = _self_->getBuffer(size, type)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getBuffer"); + + return NULL; +} + + + +// Tables (cuda_BufferPool) + +static PyGetSetDef pyopencv_cuda_BufferPool_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_BufferPool_methods[] = +{ + {"getAllocator", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_BufferPool_getAllocator, 0), "getAllocator() -> retval\n."}, + {"getBuffer", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_BufferPool_getBuffer, 0), "getBuffer(rows, cols, type) -> retval\n. 
\n\n\n\ngetBuffer(size, type) -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (cuda_BufferPool) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_cuda_BufferPool_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_cuda_BufferPool_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_DeviceInfo (Generic) +//================================================================================ + +// GetSet (cuda_DeviceInfo) + + + +// Methods (cuda_DeviceInfo) + +static int pyopencv_cv_cuda_cuda_DeviceInfo_DeviceInfo(pyopencv_cuda_DeviceInfo_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(2); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::DeviceInfo())); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_device_id = NULL; + int device_id=0; + + const char* keywords[] = { "device_id", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:DeviceInfo", (char**)keywords, &pyobj_device_id) && + pyopencv_to_safe(pyobj_device_id, device_id, ArgInfo("device_id", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::DeviceInfo(device_id))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("DeviceInfo"); + + return -1; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_ECCEnabled(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->ECCEnabled()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_asyncEngineCount(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->asyncEngineCount()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_canMapHostMemory(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->canMapHostMemory()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_clockRate(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace 
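+// cuda_BufferPool wraps cv::cuda::BufferPool, a per-stream allocator; the
+// converter pattern above (from/to, with Py_None mapping to an empty Ptr) is
+// repeated verbatim for every generated class. Native sketch of the wrapped
+// flow (requires a CUDA-enabled OpenCV build, which is an assumption here,
+// not something this port guarantees):
+//
+//     cv::cuda::setBufferPoolUsage(true);        // before any Stream is created
+//     cv::cuda::Stream stream;
+//     cv::cuda::BufferPool pool(stream);
+//     cv::cuda::GpuMat buf = pool.getBuffer(480, 640, CV_8UC1);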
cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->clockRate()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_computeMode(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + DeviceInfo::ComputeMode retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->computeMode()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_concurrentKernels(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->concurrentKernels()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_deviceID(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->deviceID()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_freeMemory(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->freeMemory()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_integrated(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->integrated()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_isCompatible(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isCompatible()); + return pyopencv_from(retval); + } + + return 
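+// cuda_DeviceInfo has two constructor branches (default = current device,
+// int = explicit device id); every method after it is a zero-argument query.
+// Native sketch (hypothetical device id 0, assuming a CUDA-enabled build):
+//
+//     cv::cuda::DeviceInfo info(0);
+//     if (info.isCompatible()) {
+//         size_t freeBytes = info.freeMemory();   // bytes currently available
+//         int sm = info.multiProcessorCount();
+//     }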
NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_kernelExecTimeoutEnabled(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->kernelExecTimeoutEnabled()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_l2CacheSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->l2CacheSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_majorVersion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->majorVersion()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxGridSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxGridSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface1D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurface1D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface1DLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurface1DLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface2D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i 
retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurface2D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface2DLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurface2DLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface3D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurface3D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurfaceCubemap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurfaceCubemap()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxSurfaceCubemapLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSurfaceCubemapLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture1D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture1DLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DLinear(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace 
cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture1DLinear()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DMipmap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture1DMipmap()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture2D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DGather(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture2DGather()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture2DLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DLinear(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture2DLinear()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DMipmap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = 
_self_->maxTexture2DMipmap()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture3D(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTexture3D()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTextureCubemap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTextureCubemap()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxTextureCubemapLayered(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec2i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxTextureCubemapLayered()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsDim(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + Vec3i retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxThreadsDim()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsPerBlock(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxThreadsPerBlock()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsPerMultiProcessor(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxThreadsPerMultiProcessor()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_memPitch(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return 
failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->memPitch()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_memoryBusWidth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->memoryBusWidth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_memoryClockRate(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->memoryClockRate()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_minorVersion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->minorVersion()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_multiProcessorCount(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->multiProcessorCount()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_pciBusID(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->pciBusID()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_pciDeviceID(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->pciDeviceID()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_pciDomainID(PyObject* self, 
PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->pciDomainID()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_queryMemory(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_totalMemory = NULL; + size_t totalMemory=0; + PyObject* pyobj_freeMemory = NULL; + size_t freeMemory=0; + + const char* keywords[] = { "totalMemory", "freeMemory", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_DeviceInfo.queryMemory", (char**)keywords, &pyobj_totalMemory, &pyobj_freeMemory) && + pyopencv_to_safe(pyobj_totalMemory, totalMemory, ArgInfo("totalMemory", 0)) && + pyopencv_to_safe(pyobj_freeMemory, freeMemory, ArgInfo("freeMemory", 0)) ) + { + ERRWRAP2(_self_->queryMemory(totalMemory, freeMemory)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_regsPerBlock(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->regsPerBlock()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_sharedMemPerBlock(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->sharedMemPerBlock()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_surfaceAlignment(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->surfaceAlignment()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_tccDriver(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->tccDriver()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* 
pyopencv_cv_cuda_cuda_DeviceInfo_textureAlignment(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->textureAlignment()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_texturePitchAlignment(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->texturePitchAlignment()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_totalConstMem(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->totalConstMem()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_totalGlobalMem(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->totalGlobalMem()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_totalMemory(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->totalMemory()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_unifiedAddressing(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->unifiedAddressing()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_DeviceInfo_warpSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_DeviceInfo_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_DeviceInfo' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + 
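+// Each property wrapper in this long run is stamped from one template: fetch
+// the Ptr via pyopencv_cuda_DeviceInfo_getp(), require an empty argument
+// tuple, evaluate the getter inside ERRWRAP2, and box the result with
+// pyopencv_from(). Reduced to its core (GETTER standing in for any method
+// here; not a macro that exists in this file):
+//
+//     if (PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) {
+//         ERRWRAP2(retval = _self_->GETTER());
+//         return pyopencv_from(retval);
+//     }
+//     return NULL;   // no matching call signature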
if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->warpSize()); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (cuda_DeviceInfo) + +static PyGetSetDef pyopencv_cuda_DeviceInfo_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_DeviceInfo_methods[] = +{ + {"ECCEnabled", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_ECCEnabled, 0), "ECCEnabled() -> retval\n."}, + {"asyncEngineCount", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_asyncEngineCount, 0), "asyncEngineCount() -> retval\n."}, + {"canMapHostMemory", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_canMapHostMemory, 0), "canMapHostMemory() -> retval\n."}, + {"clockRate", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_clockRate, 0), "clockRate() -> retval\n."}, + {"computeMode", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_computeMode, 0), "computeMode() -> retval\n."}, + {"concurrentKernels", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_concurrentKernels, 0), "concurrentKernels() -> retval\n."}, + {"deviceID", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_deviceID, 0), "deviceID() -> retval\n. @brief Returns system index of the CUDA device starting with 0."}, + {"freeMemory", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_freeMemory, 0), "freeMemory() -> retval\n."}, + {"integrated", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_integrated, 0), "integrated() -> retval\n."}, + {"isCompatible", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_isCompatible, 0), "isCompatible() -> retval\n. @brief Checks the CUDA module and device compatibility.\n. \n. This function returns true if the CUDA module can be run on the specified device. Otherwise, it\n. returns false ."}, + {"kernelExecTimeoutEnabled", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_kernelExecTimeoutEnabled, 0), "kernelExecTimeoutEnabled() -> retval\n."}, + {"l2CacheSize", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_l2CacheSize, 0), "l2CacheSize() -> retval\n."}, + {"majorVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_majorVersion, 0), "majorVersion() -> retval\n."}, + {"maxGridSize", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxGridSize, 0), "maxGridSize() -> retval\n."}, + {"maxSurface1D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface1D, 0), "maxSurface1D() -> retval\n."}, + {"maxSurface1DLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface1DLayered, 0), "maxSurface1DLayered() -> retval\n."}, + {"maxSurface2D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface2D, 0), "maxSurface2D() -> retval\n."}, + {"maxSurface2DLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface2DLayered, 0), "maxSurface2DLayered() -> retval\n."}, + {"maxSurface3D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurface3D, 0), "maxSurface3D() -> retval\n."}, + {"maxSurfaceCubemap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurfaceCubemap, 0), "maxSurfaceCubemap() -> retval\n."}, + {"maxSurfaceCubemapLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxSurfaceCubemapLayered, 0), "maxSurfaceCubemapLayered() -> retval\n."}, + {"maxTexture1D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1D, 0), "maxTexture1D() -> retval\n."}, + {"maxTexture1DLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DLayered, 0), "maxTexture1DLayered() -> retval\n."}, + {"maxTexture1DLinear", 
CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DLinear, 0), "maxTexture1DLinear() -> retval\n."}, + {"maxTexture1DMipmap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture1DMipmap, 0), "maxTexture1DMipmap() -> retval\n."}, + {"maxTexture2D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2D, 0), "maxTexture2D() -> retval\n."}, + {"maxTexture2DGather", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DGather, 0), "maxTexture2DGather() -> retval\n."}, + {"maxTexture2DLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DLayered, 0), "maxTexture2DLayered() -> retval\n."}, + {"maxTexture2DLinear", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DLinear, 0), "maxTexture2DLinear() -> retval\n."}, + {"maxTexture2DMipmap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture2DMipmap, 0), "maxTexture2DMipmap() -> retval\n."}, + {"maxTexture3D", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTexture3D, 0), "maxTexture3D() -> retval\n."}, + {"maxTextureCubemap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTextureCubemap, 0), "maxTextureCubemap() -> retval\n."}, + {"maxTextureCubemapLayered", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxTextureCubemapLayered, 0), "maxTextureCubemapLayered() -> retval\n."}, + {"maxThreadsDim", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsDim, 0), "maxThreadsDim() -> retval\n."}, + {"maxThreadsPerBlock", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsPerBlock, 0), "maxThreadsPerBlock() -> retval\n."}, + {"maxThreadsPerMultiProcessor", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_maxThreadsPerMultiProcessor, 0), "maxThreadsPerMultiProcessor() -> retval\n."}, + {"memPitch", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_memPitch, 0), "memPitch() -> retval\n."}, + {"memoryBusWidth", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_memoryBusWidth, 0), "memoryBusWidth() -> retval\n."}, + {"memoryClockRate", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_memoryClockRate, 0), "memoryClockRate() -> retval\n."}, + {"minorVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_minorVersion, 0), "minorVersion() -> retval\n."}, + {"multiProcessorCount", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_multiProcessorCount, 0), "multiProcessorCount() -> retval\n."}, + {"pciBusID", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_pciBusID, 0), "pciBusID() -> retval\n."}, + {"pciDeviceID", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_pciDeviceID, 0), "pciDeviceID() -> retval\n."}, + {"pciDomainID", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_pciDomainID, 0), "pciDomainID() -> retval\n."}, + {"queryMemory", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_queryMemory, 0), "queryMemory(totalMemory, freeMemory) -> None\n."}, + {"regsPerBlock", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_regsPerBlock, 0), "regsPerBlock() -> retval\n."}, + {"sharedMemPerBlock", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_sharedMemPerBlock, 0), "sharedMemPerBlock() -> retval\n."}, + {"surfaceAlignment", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_surfaceAlignment, 0), "surfaceAlignment() -> retval\n."}, + {"tccDriver", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_tccDriver, 0), "tccDriver() -> retval\n."}, + {"textureAlignment", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_textureAlignment, 0), "textureAlignment() -> retval\n."}, + {"texturePitchAlignment", 
CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_texturePitchAlignment, 0), "texturePitchAlignment() -> retval\n."}, + {"totalConstMem", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_totalConstMem, 0), "totalConstMem() -> retval\n."}, + {"totalGlobalMem", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_totalGlobalMem, 0), "totalGlobalMem() -> retval\n."}, + {"totalMemory", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_totalMemory, 0), "totalMemory() -> retval\n."}, + {"unifiedAddressing", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_unifiedAddressing, 0), "unifiedAddressing() -> retval\n."}, + {"warpSize", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_DeviceInfo_warpSize, 0), "warpSize() -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (cuda_DeviceInfo) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::DeviceInfo> > +{ + static PyObject* from(const Ptr<cv::cuda::DeviceInfo>& r) + { + return pyopencv_cuda_DeviceInfo_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::DeviceInfo>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::DeviceInfo> * dst_; + if (pyopencv_cuda_DeviceInfo_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::DeviceInfo> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_Event (Generic) +//================================================================================ + +// GetSet (cuda_Event) + + + +// Methods (cuda_Event) + +static int pyopencv_cv_cuda_cuda_Event_Event(pyopencv_cuda_Event_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_flags = NULL; + Event_CreateFlags flags=Event::CreateFlags::DEFAULT; + + const char* keywords[] = { "flags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:Event", (char**)keywords, &pyobj_flags) && + pyopencv_to_safe(pyobj_flags, flags, ArgInfo("flags", 0)) ) + { + new (&(self->v)) Ptr<cv::cuda::Event>(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::Event(flags))); + return 0; + } + + return -1; +} + +static PyObject* pyopencv_cv_cuda_cuda_Event_elapsedTime_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_start = NULL; + Event start; + PyObject* pyobj_end = NULL; + Event end; + float retval; + + const char* keywords[] = { "start", "end", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_Event.elapsedTime", (char**)keywords, &pyobj_start, &pyobj_end) && + pyopencv_to_safe(pyobj_start, start, ArgInfo("start", 0)) && + pyopencv_to_safe(pyobj_end, end, ArgInfo("end", 0)) ) + { + ERRWRAP2(retval = cv::cuda::Event::elapsedTime(start, end)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Event_queryIfComplete(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Event> * self1 = 0; + if (!pyopencv_cuda_Event_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Event' or its derivative)"); + Ptr<cv::cuda::Event> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->queryIfComplete()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Event_record(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Event> * self1 = 0; + if (!pyopencv_cuda_Event_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Event' or its derivative)"); + Ptr<cv::cuda::Event> 
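+// cuda_Event wraps cv::cuda::Event; note that elapsedTime is registered with
+// METH_STATIC in the table below. A native timing sketch (assumes asynchronous
+// work is queued between the two record() calls):
+//
+//     cv::cuda::Event start, stop;
+//     start.record();
+//     // ... enqueue GPU work on the default stream ...
+//     stop.record();
+//     stop.waitForCompletion();
+//     float ms = cv::cuda::Event::elapsedTime(start, stop);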
_self_ = *(self1); + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_Event.record", (char**)keywords, &pyobj_stream) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->record(stream)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Event_waitForCompletion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_Event_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Event' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->waitForCompletion()); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (cuda_Event) + +static PyGetSetDef pyopencv_cuda_Event_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_Event_methods[] = +{ + {"elapsedTime", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Event_elapsedTime_static, METH_STATIC), "elapsedTime(start, end) -> retval\n."}, + {"queryIfComplete", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Event_queryIfComplete, 0), "queryIfComplete() -> retval\n."}, + {"record", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Event_record, 0), "record([, stream]) -> None\n."}, + {"waitForCompletion", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Event_waitForCompletion, 0), "waitForCompletion() -> None\n."}, + + {NULL, NULL} +}; + +// Converter (cuda_Event) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_cuda_Event_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_cuda_Event_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_GpuData (Generic) +//================================================================================ + +// GetSet (cuda_GpuData) + + + +// Methods (cuda_GpuData) + + + +// Tables (cuda_GpuData) + +static PyGetSetDef pyopencv_cuda_GpuData_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_GpuData_methods[] = +{ + + {NULL, NULL} +}; + +// Converter (cuda_GpuData) + +template<> +struct PyOpenCV_Converter< Ptr > +{ + static PyObject* from(const Ptr& r) + { + return pyopencv_cuda_GpuData_Instance(r); + } + static bool to(PyObject* src, Ptr& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr * dst_; + if (pyopencv_cuda_GpuData_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_GpuMat (Generic) +//================================================================================ + +// GetSet (cuda_GpuMat) + + +static PyObject* pyopencv_cuda_GpuMat_get_step(pyopencv_cuda_GpuMat_t* p, void *closure) +{ + return pyopencv_from(p->v->step); +} + + +// Methods (cuda_GpuMat) + +static int pyopencv_cv_cuda_cuda_GpuMat_GpuMat(pyopencv_cuda_GpuMat_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(11); + + { + PyObject* 
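+// cuda_GpuMat is the device-side analogue of Mat; the getter above exposes
+// 'step' (row stride in bytes) as a read-only Python attribute, and the
+// constructor below dispatches across eleven branches. Sketch of the most
+// common native forms (assuming a CUDA device is available):
+//
+//     cv::cuda::GpuMat a(480, 640, CV_32FC1);             // rows, cols, type
+//     cv::cuda::GpuMat b(cv::Size(640, 480), CV_8UC3, cv::Scalar::all(0));
+//     size_t stride = a.step;                             // >= 640 * a.elemSize()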
pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:GpuMat", (char**)keywords, &pyobj_allocator) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "rows", "cols", "type", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:GpuMat", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_allocator) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(rows, cols, type, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "size", "type", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:GpuMat", (char**)keywords, &pyobj_size, &pyobj_type, &pyobj_allocator) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(size, type, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "rows", "cols", "type", "s", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:GpuMat", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_s, &pyobj_allocator) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(rows, cols, type, s, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "size", "type", "s", "allocator", NULL }; + if( 
PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:GpuMat", (char**)keywords, &pyobj_size, &pyobj_type, &pyobj_s, &pyobj_allocator) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(size, type, s, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + GpuMat m; + + const char* keywords[] = { "m", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:GpuMat", (char**)keywords, &pyobj_m) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(m))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + GpuMat m; + PyObject* pyobj_rowRange = NULL; + Range rowRange; + PyObject* pyobj_colRange = NULL; + Range colRange; + + const char* keywords[] = { "m", "rowRange", "colRange", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:GpuMat", (char**)keywords, &pyobj_m, &pyobj_rowRange, &pyobj_colRange) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_rowRange, rowRange, ArgInfo("rowRange", 0)) && + pyopencv_to_safe(pyobj_colRange, colRange, ArgInfo("colRange", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(m, rowRange, colRange))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_m = NULL; + GpuMat m; + PyObject* pyobj_roi = NULL; + Rect roi; + + const char* keywords[] = { "m", "roi", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:GpuMat", (char**)keywords, &pyobj_m, &pyobj_roi) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_roi, roi, ArgInfo("roi", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(m, roi))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + Mat arr; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "arr", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:GpuMat", (char**)keywords, &pyobj_arr, &pyobj_allocator) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(arr, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "arr", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:GpuMat", (char**)keywords, &pyobj_arr, &pyobj_allocator) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(arr, allocator))); + return 0; + } + + + 
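+// The remaining constructor branches build on existing data: GpuMat(m) shares
+// the source's device memory, GpuMat(m, roi) and GpuMat(m, rowRange, colRange)
+// are sub-views, and constructing from a host Mat performs an upload (device
+// copy). Sketch (hypothetical ROI, reusing 'b' from the sketch above):
+//
+//     cv::cuda::GpuMat roi(b, cv::Rect(0, 0, 320, 240));  // view, no copy
+//     cv::Mat host(480, 640, CV_8UC3);
+//     cv::cuda::GpuMat uploaded(host);                    // host -> device copy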
pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + UMat arr; + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator=GpuMat::defaultAllocator(); + + const char* keywords[] = { "arr", "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:GpuMat", (char**)keywords, &pyobj_arr, &pyobj_allocator) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::GpuMat(arr, allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("GpuMat"); + + return -1; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_adjustROI(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_dtop = NULL; + int dtop=0; + PyObject* pyobj_dbottom = NULL; + int dbottom=0; + PyObject* pyobj_dleft = NULL; + int dleft=0; + PyObject* pyobj_dright = NULL; + int dright=0; + GpuMat retval; + + const char* keywords[] = { "dtop", "dbottom", "dleft", "dright", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO:cuda_GpuMat.adjustROI", (char**)keywords, &pyobj_dtop, &pyobj_dbottom, &pyobj_dleft, &pyobj_dright) && + pyopencv_to_safe(pyobj_dtop, dtop, ArgInfo("dtop", 0)) && + pyopencv_to_safe(pyobj_dbottom, dbottom, ArgInfo("dbottom", 0)) && + pyopencv_to_safe(pyobj_dleft, dleft, ArgInfo("dleft", 0)) && + pyopencv_to_safe(pyobj_dright, dright, ArgInfo("dright", 0)) ) + { + ERRWRAP2(retval = _self_->adjustROI(dtop, dbottom, dleft, dright)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_assignTo(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_m = NULL; + GpuMat m; + PyObject* pyobj_type = NULL; + int type=-1; + + const char* keywords[] = { "m", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.assignTo", (char**)keywords, &pyobj_m, &pyobj_type) && + pyopencv_to_safe(pyobj_m, m, ArgInfo("m", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(_self_->assignTo(m, type)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_channels(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->channels()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_clone(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + GpuMat retval; + + if(PyObject_Size(py_args) == 0 && (!kw || 
PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->clone()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_col(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_x = NULL; + int x=0; + GpuMat retval; + + const char* keywords[] = { "x", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.col", (char**)keywords, &pyobj_x) && + pyopencv_to_safe(pyobj_x, x, ArgInfo("x", 0)) ) + { + ERRWRAP2(retval = _self_->col(x)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_colRange(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_startcol = NULL; + int startcol=0; + PyObject* pyobj_endcol = NULL; + int endcol=0; + GpuMat retval; + + const char* keywords[] = { "startcol", "endcol", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.colRange", (char**)keywords, &pyobj_startcol, &pyobj_endcol) && + pyopencv_to_safe(pyobj_startcol, startcol, ArgInfo("startcol", 0)) && + pyopencv_to_safe(pyobj_endcol, endcol, ArgInfo("endcol", 0)) ) + { + ERRWRAP2(retval = _self_->colRange(startcol, endcol)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_r = NULL; + Range r; + GpuMat retval; + + const char* keywords[] = { "r", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.colRange", (char**)keywords, &pyobj_r) && + pyopencv_to_safe(pyobj_r, r, ArgInfo("r", 0)) ) + { + ERRWRAP2(retval = _self_->colRange(r)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("colRange"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_convertTo(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(15); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + + const char* keywords[] = { "rtype", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + + const char* keywords[] = { "rtype", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype)); + return 
pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + + const char* keywords[] = { "rtype", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0.0; + + const char* keywords[] = { "rtype", "alpha", "dst", "beta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_dst, &pyobj_beta) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_rtype = NULL; 
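+ // Overload: convertTo(rtype, alpha[, dst[, beta]]) with a device
+ // dst; semantically dst = saturate_cast<rtype>(alpha*src + beta),
+ // mirroring Mat::convertTo.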
+ int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0.0; + + const char* keywords[] = { "rtype", "alpha", "dst", "beta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_dst, &pyobj_beta) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0.0; + + const char* keywords[] = { "rtype", "alpha", "dst", "beta", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_dst, &pyobj_beta) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "beta", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_beta, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "beta", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_beta, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_rtype = NULL; + int rtype=0; + PyObject* pyobj_alpha = NULL; + double alpha=0; + PyObject* pyobj_beta = NULL; + double beta=0; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "rtype", "alpha", "beta", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOOO|O:cuda_GpuMat.convertTo", (char**)keywords, &pyobj_rtype, &pyobj_alpha, &pyobj_beta, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_rtype, rtype, ArgInfo("rtype", 0)) && + pyopencv_to_safe(pyobj_alpha, alpha, ArgInfo("alpha", 0)) && + pyopencv_to_safe(pyobj_beta, beta, ArgInfo("beta", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->convertTo(dst, rtype, alpha, beta, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("convertTo"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_copyTo(PyObject* self, PyObject* 
py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(12); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->copyTo(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->copyTo(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->copyTo(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + + const char* keywords[] = { "mask", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { 
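+ // Overload: copyTo(mask[, dst]) with both arguments resident on the
+ // device; only elements where the mask is non-zero are copied.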
+ PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_mask = NULL; + cuda::GpuMat mask; + + const char* keywords[] = { "mask", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + + const char* keywords[] = { "mask", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "mask", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_mask = NULL; + cuda::GpuMat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "mask", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "mask", "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:cuda_GpuMat.copyTo", (char**)keywords, &pyobj_mask, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->copyTo(dst, mask, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("copyTo"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_create(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = 
NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + + const char* keywords[] = { "rows", "cols", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_GpuMat.create", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(_self_->create(rows, cols, type)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + + const char* keywords[] = { "size", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.create", (char**)keywords, &pyobj_size, &pyobj_type) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(_self_->create(size, type)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("create"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_cudaPtr(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->cudaPtr()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_defaultAllocator_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + GpuMat::Allocator* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::GpuMat::defaultAllocator()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_depth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->depth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_download(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(6); + + { + PyObject* pyobj_dst = NULL; + Mat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.download", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->download(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.download", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->download(dst)); + 
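+ // Blocking form: the docstring below guarantees the copy has
+ // finished by the time download() returns, so dst is immediately
+ // safe to use.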
return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + + const char* keywords[] = { "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:cuda_GpuMat.download", (char**)keywords, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) ) + { + ERRWRAP2(_self_->download(dst)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + Mat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.download", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->download(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + cuda::GpuMat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.download", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->download(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_dst = NULL; + UMat dst; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "stream", "dst", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.download", (char**)keywords, &pyobj_stream, &pyobj_dst) && + pyopencv_to_safe(pyobj_dst, dst, ArgInfo("dst", 1)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->download(dst, stream)); + return pyopencv_from(dst); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("download"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_elemSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->elemSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_elemSize1(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->elemSize1()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_empty(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw 
|| PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->empty()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_isContinuous(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isContinuous()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_locateROI(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_wholeSize = NULL; + Size wholeSize; + PyObject* pyobj_ofs = NULL; + Point ofs; + + const char* keywords[] = { "wholeSize", "ofs", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.locateROI", (char**)keywords, &pyobj_wholeSize, &pyobj_ofs) && + pyopencv_to_safe(pyobj_wholeSize, wholeSize, ArgInfo("wholeSize", 0)) && + pyopencv_to_safe(pyobj_ofs, ofs, ArgInfo("ofs", 0)) ) + { + ERRWRAP2(_self_->locateROI(wholeSize, ofs)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_reshape(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_cn = NULL; + int cn=0; + PyObject* pyobj_rows = NULL; + int rows=0; + GpuMat retval; + + const char* keywords[] = { "cn", "rows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_GpuMat.reshape", (char**)keywords, &pyobj_cn, &pyobj_rows) && + pyopencv_to_safe(pyobj_cn, cn, ArgInfo("cn", 0)) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) ) + { + ERRWRAP2(retval = _self_->reshape(cn, rows)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_row(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_y = NULL; + int y=0; + GpuMat retval; + + const char* keywords[] = { "y", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.row", (char**)keywords, &pyobj_y) && + pyopencv_to_safe(pyobj_y, y, ArgInfo("y", 0)) ) + { + ERRWRAP2(retval = _self_->row(y)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_rowRange(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_startrow = NULL; + int startrow=0; + PyObject* pyobj_endrow = NULL; + int endrow=0; + GpuMat retval; + + const char* keywords[] = { "startrow", "endrow", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, 
"OO:cuda_GpuMat.rowRange", (char**)keywords, &pyobj_startrow, &pyobj_endrow) && + pyopencv_to_safe(pyobj_startrow, startrow, ArgInfo("startrow", 0)) && + pyopencv_to_safe(pyobj_endrow, endrow, ArgInfo("endrow", 0)) ) + { + ERRWRAP2(retval = _self_->rowRange(startrow, endrow)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_r = NULL; + Range r; + GpuMat retval; + + const char* keywords[] = { "r", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.rowRange", (char**)keywords, &pyobj_r) && + pyopencv_to_safe(pyobj_r, r, ArgInfo("r", 0)) ) + { + ERRWRAP2(retval = _self_->rowRange(r)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("rowRange"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_setDefaultAllocator_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_allocator = NULL; + GpuMat_Allocator* allocator; + + const char* keywords[] = { "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.setDefaultAllocator", (char**)keywords, &pyobj_allocator) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + ERRWRAP2(cv::cuda::GpuMat::setDefaultAllocator(allocator)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_setTo(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(8); + + { + PyObject* pyobj_s = NULL; + Scalar s; + GpuMat retval; + + const char* keywords[] = { "s", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + GpuMat retval; + + const char* keywords[] = { "s", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_stream) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, stream)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + Mat mask; + GpuMat retval; + + const char* keywords[] = { "s", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + cuda::GpuMat mask; + GpuMat retval; + + const char* keywords[] = { "s", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + 
pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + UMat mask; + GpuMat retval; + + const char* keywords[] = { "s", "mask", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + Mat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + GpuMat retval; + + const char* keywords[] = { "s", "mask", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask, &pyobj_stream) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask, stream)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + cuda::GpuMat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + GpuMat retval; + + const char* keywords[] = { "s", "mask", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask, &pyobj_stream) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask, stream)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_s = NULL; + Scalar s; + PyObject* pyobj_mask = NULL; + UMat mask; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + GpuMat retval; + + const char* keywords[] = { "s", "mask", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_GpuMat.setTo", (char**)keywords, &pyobj_s, &pyobj_mask, &pyobj_stream) && + pyopencv_to_safe(pyobj_s, s, ArgInfo("s", 0)) && + pyopencv_to_safe(pyobj_mask, mask, ArgInfo("mask", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(retval = _self_->setTo(s, mask, stream)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("setTo"); + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_size(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + Size retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->size()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_step1(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return 
failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->step1()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_swap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + PyObject* pyobj_mat = NULL; + GpuMat mat; + + const char* keywords[] = { "mat", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.swap", (char**)keywords, &pyobj_mat) && + pyopencv_to_safe(pyobj_mat, mat, ArgInfo("mat", 0)) ) + { + ERRWRAP2(_self_->swap(mat)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_type(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->type()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_updateContinuityFlag(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->updateContinuityFlag()); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_GpuMat_upload(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr * self1 = 0; + if (!pyopencv_cuda_GpuMat_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_GpuMat' or its derivative)"); + Ptr _self_ = *(self1); + pyPrepareArgumentConversionErrorsStorage(6); + + { + PyObject* pyobj_arr = NULL; + Mat arr; + + const char* keywords[] = { "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) ) + { + ERRWRAP2(_self_->upload(arr)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + + const char* keywords[] = { "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) ) + { + ERRWRAP2(_self_->upload(arr)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + UMat arr; + + const char* keywords[] = { "arr", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) ) + { + ERRWRAP2(_self_->upload(arr)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + Mat arr; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "arr", "stream", NULL }; + 
if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr, &pyobj_stream) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->upload(arr, stream)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "arr", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr, &pyobj_stream) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->upload(arr, stream)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + UMat arr; + PyObject* pyobj_stream = NULL; + Stream stream=Stream::Null(); + + const char* keywords[] = { "arr", "stream", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_GpuMat.upload", (char**)keywords, &pyobj_arr, &pyobj_stream) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_stream, stream, ArgInfo("stream", 0)) ) + { + ERRWRAP2(_self_->upload(arr, stream)); + Py_RETURN_NONE; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("upload"); + + return NULL; +} + + + +// Tables (cuda_GpuMat) + +static PyGetSetDef pyopencv_cuda_GpuMat_getseters[] = +{ + {(char*)"step", (getter)pyopencv_cuda_GpuMat_get_step, NULL, (char*)"step", NULL}, + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_GpuMat_methods[] = +{ + {"adjustROI", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_adjustROI, 0), "adjustROI(dtop, dbottom, dleft, dright) -> retval\n."}, + {"assignTo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_assignTo, 0), "assignTo(m[, type]) -> None\n."}, + {"channels", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_channels, 0), "channels() -> retval\n."}, + {"clone", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_clone, 0), "clone() -> retval\n."}, + {"col", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_col, 0), "col(x) -> retval\n."}, + {"colRange", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_colRange, 0), "colRange(startcol, endcol) -> retval\n. \n\n\n\ncolRange(r) -> retval\n."}, + {"convertTo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_convertTo, 0), "convertTo(rtype[, dst]) -> dst\n. \n\n\n\nconvertTo(rtype, stream[, dst]) -> dst\n. \n\n\n\nconvertTo(rtype, alpha[, dst[, beta]]) -> dst\n. \n\n\n\nconvertTo(rtype, alpha, stream[, dst]) -> dst\n. \n\n\n\nconvertTo(rtype, alpha, beta, stream[, dst]) -> dst\n."}, + {"copyTo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_copyTo, 0), "copyTo([, dst]) -> dst\n. \n\n\n\ncopyTo(stream[, dst]) -> dst\n. \n\n\n\ncopyTo(mask[, dst]) -> dst\n. \n\n\n\ncopyTo(mask, stream[, dst]) -> dst\n."}, + {"create", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_create, 0), "create(rows, cols, type) -> None\n. 
\n\n\n\ncreate(size, type) -> None\n."}, + {"cudaPtr", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_cudaPtr, 0), "cudaPtr() -> retval\n."}, + {"defaultAllocator", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_defaultAllocator_static, METH_STATIC), "defaultAllocator() -> retval\n."}, + {"depth", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_depth, 0), "depth() -> retval\n."}, + {"download", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_download, 0), "download([, dst]) -> dst\n. @brief Performs data download from GpuMat (Blocking call)\n. \n. This function copies data from device memory to host memory. As being a blocking call, it is\n. guaranteed that the copy operation is finished when this function returns.\n\n\n\ndownload(stream[, dst]) -> dst\n. @brief Performs data download from GpuMat (Non-Blocking call)\n. \n. This function copies data from device memory to host memory. As being a non-blocking call, this\n. function may return even if the copy operation is not finished.\n. \n. The copy operation may be overlapped with operations in other non-default streams if \\p stream is\n. not the default stream and \\p dst is HostMem allocated with HostMem::PAGE_LOCKED option."}, + {"elemSize", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_elemSize, 0), "elemSize() -> retval\n."}, + {"elemSize1", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_elemSize1, 0), "elemSize1() -> retval\n."}, + {"empty", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_empty, 0), "empty() -> retval\n."}, + {"isContinuous", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_isContinuous, 0), "isContinuous() -> retval\n."}, + {"locateROI", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_locateROI, 0), "locateROI(wholeSize, ofs) -> None\n."}, + {"reshape", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_reshape, 0), "reshape(cn[, rows]) -> retval\n."}, + {"row", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_row, 0), "row(y) -> retval\n."}, + {"rowRange", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_rowRange, 0), "rowRange(startrow, endrow) -> retval\n. \n\n\n\nrowRange(r) -> retval\n."}, + {"setDefaultAllocator", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_setDefaultAllocator_static, METH_STATIC), "setDefaultAllocator(allocator) -> None\n."}, + {"setTo", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_setTo, 0), "setTo(s) -> retval\n. \n\n\n\nsetTo(s, stream) -> retval\n. \n\n\n\nsetTo(s, mask) -> retval\n. \n\n\n\nsetTo(s, mask, stream) -> retval\n."}, + {"size", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_size, 0), "size() -> retval\n."}, + {"step1", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_step1, 0), "step1() -> retval\n."}, + {"swap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_swap, 0), "swap(mat) -> None\n."}, + {"type", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_type, 0), "type() -> retval\n."}, + {"updateContinuityFlag", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_updateContinuityFlag, 0), "updateContinuityFlag() -> None\n."}, + {"upload", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_GpuMat_upload, 0), "upload(arr) -> None\n. @brief Performs data upload to GpuMat (Blocking call)\n. \n. This function copies data from host memory to device memory. As being a blocking call, it is\n. guaranteed that the copy operation is finished when this function returns.\n\n\n\nupload(arr, stream) -> None\n. @brief Performs data upload to GpuMat (Non-Blocking call)\n. \n. This function copies data from host memory to device memory. As being a non-blocking call, this\n. 
function may return even if the copy operation is not finished.\n. \n. The copy operation may be overlapped with operations in other non-default streams if \\p stream is\n. not the default stream and \\p dst is HostMem allocated with HostMem::PAGE_LOCKED option."}, + + {NULL, NULL} +}; + +// Converter (cuda_GpuMat) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::GpuMat> > +{ + static PyObject* from(const Ptr<cv::cuda::GpuMat>& r) + { + return pyopencv_cuda_GpuMat_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::GpuMat>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::GpuMat> * dst_; + if (pyopencv_cuda_GpuMat_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::GpuMat> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_GpuMatND (Generic) +//================================================================================ + +// GetSet (cuda_GpuMatND) + + + +// Methods (cuda_GpuMatND) + + + +// Tables (cuda_GpuMatND) + +static PyGetSetDef pyopencv_cuda_GpuMatND_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_GpuMatND_methods[] = +{ + + {NULL, NULL} +}; + +// Converter (cuda_GpuMatND) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::GpuMatND> > +{ + static PyObject* from(const Ptr<cv::cuda::GpuMatND>& r) + { + return pyopencv_cuda_GpuMatND_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::GpuMatND>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::GpuMatND> * dst_; + if (pyopencv_cuda_GpuMatND_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::GpuMatND> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_GpuMat_Allocator (Generic) +//================================================================================ + +// GetSet (cuda_GpuMat_Allocator) + + + +// Methods (cuda_GpuMat_Allocator) + + + +// Tables (cuda_GpuMat_Allocator) + +static PyGetSetDef pyopencv_cuda_GpuMat_Allocator_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_GpuMat_Allocator_methods[] = +{ + + {NULL, NULL} +}; + +// Converter (cuda_GpuMat_Allocator) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::GpuMat::Allocator> > +{ + static PyObject* from(const Ptr<cv::cuda::GpuMat::Allocator>& r) + { + return pyopencv_cuda_GpuMat_Allocator_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::GpuMat::Allocator>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::GpuMat::Allocator> * dst_; + if (pyopencv_cuda_GpuMat_Allocator_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::GpuMat::Allocator> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_HostMem (Generic) +//================================================================================ + +// GetSet (cuda_HostMem) + + +static PyObject* pyopencv_cuda_HostMem_get_step(pyopencv_cuda_HostMem_t* p, void *closure) +{ + return pyopencv_from(p->v->step); +} + + +// Methods (cuda_HostMem) + +static int pyopencv_cv_cuda_cuda_HostMem_HostMem(pyopencv_cuda_HostMem_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(6); + + { + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:HostMem", (char**)keywords, &pyobj_alloc_type) && +
pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "rows", "cols", "type", "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:HostMem", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type, &pyobj_alloc_type) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(rows, cols, type, alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_size = NULL; + Size size; + PyObject* pyobj_type = NULL; + int type=0; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "size", "type", "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|O:HostMem", (char**)keywords, &pyobj_size, &pyobj_type, &pyobj_alloc_type) && + pyopencv_to_safe(pyobj_size, size, ArgInfo("size", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) && + pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(size, type, alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + Mat arr; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "arr", "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:HostMem", (char**)keywords, &pyobj_arr, &pyobj_alloc_type) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(arr, alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + cuda::GpuMat arr; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "arr", "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:HostMem", (char**)keywords, &pyobj_arr, &pyobj_alloc_type) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(arr, alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_arr = NULL; + UMat arr; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "arr", "alloc_type", NULL }; + if( 
+ + { + PyObject* pyobj_arr = NULL; + UMat arr; + PyObject* pyobj_alloc_type = NULL; + HostMem_AllocType alloc_type=HostMem::AllocType::PAGE_LOCKED; + + const char* keywords[] = { "arr", "alloc_type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:HostMem", (char**)keywords, &pyobj_arr, &pyobj_alloc_type) && + pyopencv_to_safe(pyobj_arr, arr, ArgInfo("arr", 0)) && + pyopencv_to_safe(pyobj_alloc_type, alloc_type, ArgInfo("alloc_type", 0)) ) + { + new (&(self->v)) Ptr<cv::cuda::HostMem>(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::HostMem(arr, alloc_type))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("HostMem"); + + return -1; +} +
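+/* Illustrative only -- how the overloads above surface in Python once the module is built with CUDA support (cv2 exposes the class both as cv2.cuda_HostMem and cv2.cuda.HostMem): + * + *     import cv2 + *     mem = cv2.cuda_HostMem(480, 640, cv2.CV_8UC3)   # (rows, cols, type) overload + *     print(mem.size(), mem.channels()) + */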
+static PyObject* pyopencv_cv_cuda_cuda_HostMem_channels(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->channels()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_clone(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + HostMem retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->clone()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_create(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + PyObject* pyobj_rows = NULL; + int rows=0; + PyObject* pyobj_cols = NULL; + int cols=0; + PyObject* pyobj_type = NULL; + int type=0; + + const char* keywords[] = { "rows", "cols", "type", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:cuda_HostMem.create", (char**)keywords, &pyobj_rows, &pyobj_cols, &pyobj_type) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) && + pyopencv_to_safe(pyobj_cols, cols, ArgInfo("cols", 0)) && + pyopencv_to_safe(pyobj_type, type, ArgInfo("type", 0)) ) + { + ERRWRAP2(_self_->create(rows, cols, type)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_createMatHeader(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + Mat retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->createMatHeader()); + return pyopencv_from(retval); + } + + return NULL; +} +
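+/* createMatHeader() above wraps the page-locked allocation in a cv::Mat header without copying on the C++ side (the header carries no reference counting for the HostMem data); the usual Mat return-value conversion then applies on the Python side. */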
+static PyObject* pyopencv_cv_cuda_cuda_HostMem_depth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->depth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_elemSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->elemSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_elemSize1(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->elemSize1()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_empty(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->empty()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_isContinuous(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isContinuous()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_reshape(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + PyObject* pyobj_cn = NULL; + int cn=0; + PyObject* pyobj_rows = NULL; + int rows=0; + HostMem retval; + + const char* keywords[] = { "cn", "rows", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|O:cuda_HostMem.reshape", (char**)keywords, &pyobj_cn, &pyobj_rows) && + pyopencv_to_safe(pyobj_cn, cn, ArgInfo("cn", 0)) && + pyopencv_to_safe(pyobj_rows, rows, ArgInfo("rows", 0)) ) + { + ERRWRAP2(retval = _self_->reshape(cn, rows)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_size(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + Size retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->size()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_step1(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->step1()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_swap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + PyObject* pyobj_b = NULL; + HostMem b; + + const char* keywords[] = { "b", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_HostMem.swap", (char**)keywords, &pyobj_b) && + pyopencv_to_safe(pyobj_b, b, ArgInfo("b", 0)) ) + { + ERRWRAP2(_self_->swap(b)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_HostMem_type(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::HostMem> * self1 = 0; + if (!pyopencv_cuda_HostMem_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_HostMem' or its derivative)"); + Ptr<cv::cuda::HostMem> _self_ = *(self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->type()); + return pyopencv_from(retval); + } + + return NULL; +}
+ + +// Tables (cuda_HostMem) + +static PyGetSetDef pyopencv_cuda_HostMem_getseters[] = +{ + {(char*)"step", (getter)pyopencv_cuda_HostMem_get_step, NULL, (char*)"step", NULL}, + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_HostMem_methods[] = +{ + {"channels", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_channels, 0), "channels() -> retval\n."}, + {"clone", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_clone, 0), "clone() -> retval\n."}, + {"create", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_create, 0), "create(rows, cols, type) -> None\n."}, + {"createMatHeader", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_createMatHeader, 0), "createMatHeader() -> retval\n."}, + {"depth", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_depth, 0), "depth() -> retval\n."}, + {"elemSize", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_elemSize, 0), "elemSize() -> retval\n."}, + {"elemSize1", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_elemSize1, 0), "elemSize1() -> retval\n."}, + {"empty", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_empty, 0), "empty() -> retval\n."}, + {"isContinuous", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_isContinuous, 0), "isContinuous() -> retval\n. @brief Maps CPU memory to GPU address space and creates the cuda::GpuMat header without reference counting\n. for it.\n. \n. This can be done only if memory was allocated with the SHARED flag and if it is supported by the\n. hardware. Laptops often share video and CPU memory, so address spaces can be mapped, which\n. eliminates an extra copy."}, + {"reshape", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_reshape, 0), "reshape(cn[, rows]) -> retval\n."}, + {"size", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_size, 0), "size() -> retval\n."}, + {"step1", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_step1, 0), "step1() -> retval\n."}, + {"swap", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_swap, 0), "swap(b) -> None\n."}, + {"type", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_HostMem_type, 0), "type() -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (cuda_HostMem) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::HostMem> > +{ + static PyObject* from(const Ptr<cv::cuda::HostMem>& r) + { + return pyopencv_cuda_HostMem_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::HostMem>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::HostMem> * dst_; + if (pyopencv_cuda_HostMem_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::HostMem> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// cuda_Stream (Generic) +//================================================================================ + +// GetSet (cuda_Stream) + + + +// Methods (cuda_Stream) +
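+/* The wrappers below expose cv::cuda::Stream, the per-queue handle used for asynchronous uploads/downloads and kernel launches. A sketch of intended use from Python (illustrative, assuming a CUDA-enabled build; host_img is any numpy image): + * + *     stream = cv2.cuda_Stream() + *     gpu = cv2.cuda_GpuMat() + *     gpu.upload(host_img, stream)   # enqueue async host->device copy + *     stream.waitForCompletion()     # block until the queue drains + */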
+static int pyopencv_cv_cuda_cuda_Stream_Stream(pyopencv_cuda_Stream_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + pyPrepareArgumentConversionErrorsStorage(3); + + { + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + new (&(self->v)) Ptr<cv::cuda::Stream>(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::Stream())); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_allocator = NULL; + Ptr<GpuMat::Allocator> allocator; + + const char* keywords[] = { "allocator", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Stream", (char**)keywords, &pyobj_allocator) && + pyopencv_to_safe(pyobj_allocator, allocator, ArgInfo("allocator", 0)) ) + { + new (&(self->v)) Ptr<cv::cuda::Stream>(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::Stream(allocator))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_cudaFlags = NULL; + size_t cudaFlags=0; + + const char* keywords[] = { "cudaFlags", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:Stream", (char**)keywords, &pyobj_cudaFlags) && + pyopencv_to_safe(pyobj_cudaFlags, cudaFlags, ArgInfo("cudaFlags", 0)) ) + { + new (&(self->v)) Ptr<cv::cuda::Stream>(); // init Ptr with placement new + if(self) ERRWRAP2(self->v.reset(new cv::cuda::Stream(cudaFlags))); + return 0; + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("Stream"); + + return -1; +} + +static PyObject* pyopencv_cv_cuda_cuda_Stream_Null_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + Stream retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::cuda::Stream::Null()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Stream_cudaPtr(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Stream> * self1 = 0; + if (!pyopencv_cuda_Stream_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Stream' or its derivative)"); + Ptr<cv::cuda::Stream> _self_ = *(self1); + void* retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->cudaPtr()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Stream_queryIfComplete(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Stream> * self1 = 0; + if (!pyopencv_cuda_Stream_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Stream' or its derivative)"); + Ptr<cv::cuda::Stream> _self_ = *(self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->queryIfComplete()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Stream_waitEvent(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Stream> * self1 = 0; + if (!pyopencv_cuda_Stream_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Stream' or its derivative)"); + Ptr<cv::cuda::Stream> _self_ = *(self1); + PyObject* pyobj_event = NULL; + Event event; + + const char* keywords[] = { "event", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:cuda_Stream.waitEvent", (char**)keywords, &pyobj_event) && + pyopencv_to_safe(pyobj_event, event, ArgInfo("event", 0)) ) + { + ERRWRAP2(_self_->waitEvent(event)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_Stream_waitForCompletion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + + Ptr<cv::cuda::Stream> * self1 = 0; + if (!pyopencv_cuda_Stream_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'cuda_Stream' or its derivative)"); + Ptr<cv::cuda::Stream> _self_ = *(self1); + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(_self_->waitForCompletion()); + Py_RETURN_NONE; + } + + return NULL; +} + + + +// Tables (cuda_Stream) + +static PyGetSetDef pyopencv_cuda_Stream_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_Stream_methods[] = +{ + {"Null", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Stream_Null_static, METH_STATIC), "Null() -> retval\n. @brief Adds a callback to be called on the host after all currently enqueued items in the stream have\n. completed.\n. \n. @note Callbacks must not make any CUDA API calls. Callbacks must not perform any synchronization\n. that may depend on outstanding device work or other callbacks that are not mandated to run earlier.\n. Callbacks without a mandated order (in independent streams) execute in undefined order and may be\n. serialized."}, + {"cudaPtr", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Stream_cudaPtr, 0), "cudaPtr() -> retval\n."}, + {"queryIfComplete", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Stream_queryIfComplete, 0), "queryIfComplete() -> retval\n. @brief Returns true if the current stream queue is finished. Otherwise, it returns false."}, + {"waitEvent", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Stream_waitEvent, 0), "waitEvent(event) -> None\n. @brief Makes a compute stream wait on an event."}, + {"waitForCompletion", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_Stream_waitForCompletion, 0), "waitForCompletion() -> None\n. @brief Blocks the current CPU thread until all operations in the stream are complete."}, + + {NULL, NULL} +}; + +// Converter (cuda_Stream) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::Stream> > +{ + static PyObject* from(const Ptr<cv::cuda::Stream>& r) + { + return pyopencv_cuda_Stream_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::Stream>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::Stream> * dst_; + if (pyopencv_cuda_Stream_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::Stream> for argument '%s'", info.name); + return false; + } +}; +
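+/* cuda_TargetArchs below wraps only static build-introspection queries: has()/hasPtx()/hasBin() and the *EqualOrGreater*/*EqualOrLess* variants report whether this OpenCV build ships PTX (JIT-compilable) or binary (cubin) code for a given compute capability, e.g. (7, 5) for sm_75. */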
+//================================================================================ +// cuda_TargetArchs (Generic) +//================================================================================ + +// GetSet (cuda_TargetArchs) + + + +// Methods (cuda_TargetArchs) + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_has_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.has", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::has(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasBin_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasBin", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasBin(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreater_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasEqualOrGreater", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreater(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreaterBin_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasEqualOrGreaterBin", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + 
ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreaterBin(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreaterPtx_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasEqualOrGreaterPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrGreaterPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrLessPtx_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasEqualOrLessPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasEqualOrLessPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_cuda_cuda_TargetArchs_hasPtx_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::cuda; + + PyObject* pyobj_major = NULL; + int major=0; + PyObject* pyobj_minor = NULL; + int minor=0; + bool retval; + + const char* keywords[] = { "major", "minor", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO:cuda_TargetArchs.hasPtx", (char**)keywords, &pyobj_major, &pyobj_minor) && + pyopencv_to_safe(pyobj_major, major, ArgInfo("major", 0)) && + pyopencv_to_safe(pyobj_minor, minor, ArgInfo("minor", 0)) ) + { + ERRWRAP2(retval = cv::cuda::TargetArchs::hasPtx(major, minor)); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (cuda_TargetArchs) + +static PyGetSetDef pyopencv_cuda_TargetArchs_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_cuda_TargetArchs_methods[] = +{ + {"has", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_has_static, METH_STATIC), "has(major, minor) -> retval\n. @brief There is a set of methods to check whether the module contains intermediate (PTX) or binary CUDA\n. code for the given architecture(s):\n. \n. @param major Major compute capability version.\n. 
@param minor Minor compute capability version."}, + {"hasBin", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasBin_static, METH_STATIC), "hasBin(major, minor) -> retval\n."}, + {"hasEqualOrGreater", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreater_static, METH_STATIC), "hasEqualOrGreater(major, minor) -> retval\n."}, + {"hasEqualOrGreaterBin", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreaterBin_static, METH_STATIC), "hasEqualOrGreaterBin(major, minor) -> retval\n."}, + {"hasEqualOrGreaterPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrGreaterPtx_static, METH_STATIC), "hasEqualOrGreaterPtx(major, minor) -> retval\n."}, + {"hasEqualOrLessPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasEqualOrLessPtx_static, METH_STATIC), "hasEqualOrLessPtx(major, minor) -> retval\n."}, + {"hasPtx", CV_PY_FN_WITH_KW_(pyopencv_cv_cuda_cuda_TargetArchs_hasPtx_static, METH_STATIC), "hasPtx(major, minor) -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (cuda_TargetArchs) + +template<> +struct PyOpenCV_Converter< Ptr<cv::cuda::TargetArchs> > +{ + static PyObject* from(const Ptr<cv::cuda::TargetArchs>& r) + { + return pyopencv_cuda_TargetArchs_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::cuda::TargetArchs>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::cuda::TargetArchs> * dst_; + if (pyopencv_cuda_TargetArchs_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::cuda::TargetArchs> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// ocl_Device (Generic) +//================================================================================ + +// GetSet (ocl_Device) + + + +// Methods (ocl_Device) + +static int pyopencv_cv_ocl_ocl_Device_Device(pyopencv_ocl_Device_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + if(self) ERRWRAP2(new (&(self->v)) cv::ocl::Device()); + return 0; + } + + return -1; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_OpenCLVersion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->OpenCLVersion()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_OpenCL_C_Version(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->OpenCL_C_Version()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_addressBits(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 
0)) + { + ERRWRAP2(retval = _self_->addressBits()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_available(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->available()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_compilerAvailable(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->compilerAvailable()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_deviceVersionMajor(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->deviceVersionMajor()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_deviceVersionMinor(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->deviceVersionMinor()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_doubleFPConfig(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->doubleFPConfig()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_driverVersion(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->driverVersion()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_endianLittle(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if 
(!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->endianLittle()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_errorCorrectionSupport(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->errorCorrectionSupport()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_executionCapabilities(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->executionCapabilities()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_extensions(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->extensions()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_getDefault_static(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + Device retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = cv::ocl::Device::getDefault()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_globalMemCacheLineSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->globalMemCacheLineSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_globalMemCacheSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->globalMemCacheSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_globalMemCacheType(PyObject* self, PyObject* py_args, PyObject* kw) 
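+/* Every ocl_Device accessor in this block follows the same shape: validate that self wraps a cv::ocl::Device, call the zero-argument C++ getter under ERRWRAP2, and convert the result with pyopencv_from. Illustrative use from Python (assuming an OpenCL runtime is available at run time): + * + *     if cv2.ocl.haveOpenCL(): + *         dev = cv2.ocl.Device_getDefault() + *         print(dev.name(), dev.globalMemSize()) + */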
+{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->globalMemCacheType()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_globalMemSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->globalMemSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_halfFPConfig(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->halfFPConfig()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_hostUnifiedMemory(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->hostUnifiedMemory()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_image2DMaxHeight(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->image2DMaxHeight()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_image2DMaxWidth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->image2DMaxWidth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_image3DMaxDepth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || 
PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->image3DMaxDepth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_image3DMaxHeight(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->image3DMaxHeight()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_image3DMaxWidth(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->image3DMaxWidth()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_imageFromBufferSupport(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->imageFromBufferSupport()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_imageMaxArraySize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->imageMaxArraySize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_imageMaxBufferSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->imageMaxBufferSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_imageSupport(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->imageSupport()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_intelSubgroupsSupport(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + 
cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->intelSubgroupsSupport()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_isAMD(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isAMD()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_isExtensionSupported(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + PyObject* pyobj_extensionName = NULL; + String extensionName; + bool retval; + + const char* keywords[] = { "extensionName", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:ocl_Device.isExtensionSupported", (char**)keywords, &pyobj_extensionName) && + pyopencv_to_safe(pyobj_extensionName, extensionName, ArgInfo("extensionName", 0)) ) + { + ERRWRAP2(retval = _self_->isExtensionSupported(extensionName)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_isIntel(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isIntel()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_isNVidia(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->isNVidia()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_linkerAvailable(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + bool retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->linkerAvailable()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_localMemSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + 
return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->localMemSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_localMemType(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->localMemType()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxClockFrequency(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxClockFrequency()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxComputeUnits(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxComputeUnits()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxConstantArgs(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxConstantArgs()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxConstantBufferSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxConstantBufferSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxMemAllocSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxMemAllocSize()); + return pyopencv_from(retval); + } + + return 
NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxParameterSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxParameterSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxReadImageArgs(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxReadImageArgs()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxSamplers(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxSamplers()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxWorkGroupSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxWorkGroupSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxWorkItemDims(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxWorkItemDims()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_maxWriteImageArgs(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->maxWriteImageArgs()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_memBaseAddrAlign(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its 
derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->memBaseAddrAlign()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_name(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->name()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthChar(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthChar()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthDouble(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthDouble()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthFloat(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthFloat()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthHalf(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthHalf()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthInt(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthInt()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* 
pyopencv_cv_ocl_ocl_Device_nativeVectorWidthLong(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthLong()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_nativeVectorWidthShort(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->nativeVectorWidthShort()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthChar(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthChar()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthDouble(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthDouble()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthFloat(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthFloat()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthHalf(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthHalf()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthInt(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + 
return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthInt()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthLong(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthLong()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_preferredVectorWidthShort(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->preferredVectorWidthShort()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_printfBufferSize(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->printfBufferSize()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_profilingTimerResolution(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + size_t retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->profilingTimerResolution()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_singleFPConfig(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->singleFPConfig()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_type(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->type()); + return 
pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_vendorID(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + int retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->vendorID()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_vendorName(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->vendorName()); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_ocl_ocl_Device_version(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::ocl; + + + cv::ocl::Device * self1 = 0; + if (!pyopencv_ocl_Device_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'ocl_Device' or its derivative)"); + cv::ocl::Device* _self_ = (self1); + String retval; + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + ERRWRAP2(retval = _self_->version()); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (ocl_Device) + +static PyGetSetDef pyopencv_ocl_Device_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_ocl_Device_methods[] = +{ + {"OpenCLVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_OpenCLVersion, 0), "OpenCLVersion() -> retval\n."}, + {"OpenCL_C_Version", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_OpenCL_C_Version, 0), "OpenCL_C_Version() -> retval\n."}, + {"addressBits", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_addressBits, 0), "addressBits() -> retval\n."}, + {"available", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_available, 0), "available() -> retval\n."}, + {"compilerAvailable", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_compilerAvailable, 0), "compilerAvailable() -> retval\n."}, + {"deviceVersionMajor", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_deviceVersionMajor, 0), "deviceVersionMajor() -> retval\n."}, + {"deviceVersionMinor", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_deviceVersionMinor, 0), "deviceVersionMinor() -> retval\n."}, + {"doubleFPConfig", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_doubleFPConfig, 0), "doubleFPConfig() -> retval\n."}, + {"driverVersion", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_driverVersion, 0), "driverVersion() -> retval\n."}, + {"endianLittle", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_endianLittle, 0), "endianLittle() -> retval\n."}, + {"errorCorrectionSupport", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_errorCorrectionSupport, 0), "errorCorrectionSupport() -> retval\n."}, + {"executionCapabilities", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_executionCapabilities, 0), "executionCapabilities() -> retval\n."}, + {"extensions", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_extensions, 0), "extensions() -> retval\n."}, + {"getDefault", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_getDefault_static, METH_STATIC), "getDefault() -> retval\n."}, + {"globalMemCacheLineSize", 
CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_globalMemCacheLineSize, 0), "globalMemCacheLineSize() -> retval\n."}, + {"globalMemCacheSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_globalMemCacheSize, 0), "globalMemCacheSize() -> retval\n."}, + {"globalMemCacheType", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_globalMemCacheType, 0), "globalMemCacheType() -> retval\n."}, + {"globalMemSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_globalMemSize, 0), "globalMemSize() -> retval\n."}, + {"halfFPConfig", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_halfFPConfig, 0), "halfFPConfig() -> retval\n."}, + {"hostUnifiedMemory", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_hostUnifiedMemory, 0), "hostUnifiedMemory() -> retval\n."}, + {"image2DMaxHeight", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_image2DMaxHeight, 0), "image2DMaxHeight() -> retval\n."}, + {"image2DMaxWidth", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_image2DMaxWidth, 0), "image2DMaxWidth() -> retval\n."}, + {"image3DMaxDepth", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_image3DMaxDepth, 0), "image3DMaxDepth() -> retval\n."}, + {"image3DMaxHeight", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_image3DMaxHeight, 0), "image3DMaxHeight() -> retval\n."}, + {"image3DMaxWidth", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_image3DMaxWidth, 0), "image3DMaxWidth() -> retval\n."}, + {"imageFromBufferSupport", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_imageFromBufferSupport, 0), "imageFromBufferSupport() -> retval\n."}, + {"imageMaxArraySize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_imageMaxArraySize, 0), "imageMaxArraySize() -> retval\n."}, + {"imageMaxBufferSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_imageMaxBufferSize, 0), "imageMaxBufferSize() -> retval\n."}, + {"imageSupport", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_imageSupport, 0), "imageSupport() -> retval\n."}, + {"intelSubgroupsSupport", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_intelSubgroupsSupport, 0), "intelSubgroupsSupport() -> retval\n."}, + {"isAMD", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_isAMD, 0), "isAMD() -> retval\n."}, + {"isExtensionSupported", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_isExtensionSupported, 0), "isExtensionSupported(extensionName) -> retval\n."}, + {"isIntel", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_isIntel, 0), "isIntel() -> retval\n."}, + {"isNVidia", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_isNVidia, 0), "isNVidia() -> retval\n."}, + {"linkerAvailable", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_linkerAvailable, 0), "linkerAvailable() -> retval\n."}, + {"localMemSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_localMemSize, 0), "localMemSize() -> retval\n."}, + {"localMemType", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_localMemType, 0), "localMemType() -> retval\n."}, + {"maxClockFrequency", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxClockFrequency, 0), "maxClockFrequency() -> retval\n."}, + {"maxComputeUnits", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxComputeUnits, 0), "maxComputeUnits() -> retval\n."}, + {"maxConstantArgs", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxConstantArgs, 0), "maxConstantArgs() -> retval\n."}, + {"maxConstantBufferSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxConstantBufferSize, 0), "maxConstantBufferSize() -> retval\n."}, + {"maxMemAllocSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxMemAllocSize, 0), "maxMemAllocSize() -> retval\n."}, + {"maxParameterSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxParameterSize, 0), 
"maxParameterSize() -> retval\n."}, + {"maxReadImageArgs", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxReadImageArgs, 0), "maxReadImageArgs() -> retval\n."}, + {"maxSamplers", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxSamplers, 0), "maxSamplers() -> retval\n."}, + {"maxWorkGroupSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxWorkGroupSize, 0), "maxWorkGroupSize() -> retval\n."}, + {"maxWorkItemDims", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxWorkItemDims, 0), "maxWorkItemDims() -> retval\n."}, + {"maxWriteImageArgs", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_maxWriteImageArgs, 0), "maxWriteImageArgs() -> retval\n."}, + {"memBaseAddrAlign", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_memBaseAddrAlign, 0), "memBaseAddrAlign() -> retval\n."}, + {"name", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_name, 0), "name() -> retval\n."}, + {"nativeVectorWidthChar", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthChar, 0), "nativeVectorWidthChar() -> retval\n."}, + {"nativeVectorWidthDouble", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthDouble, 0), "nativeVectorWidthDouble() -> retval\n."}, + {"nativeVectorWidthFloat", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthFloat, 0), "nativeVectorWidthFloat() -> retval\n."}, + {"nativeVectorWidthHalf", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthHalf, 0), "nativeVectorWidthHalf() -> retval\n."}, + {"nativeVectorWidthInt", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthInt, 0), "nativeVectorWidthInt() -> retval\n."}, + {"nativeVectorWidthLong", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthLong, 0), "nativeVectorWidthLong() -> retval\n."}, + {"nativeVectorWidthShort", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_nativeVectorWidthShort, 0), "nativeVectorWidthShort() -> retval\n."}, + {"preferredVectorWidthChar", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthChar, 0), "preferredVectorWidthChar() -> retval\n."}, + {"preferredVectorWidthDouble", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthDouble, 0), "preferredVectorWidthDouble() -> retval\n."}, + {"preferredVectorWidthFloat", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthFloat, 0), "preferredVectorWidthFloat() -> retval\n."}, + {"preferredVectorWidthHalf", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthHalf, 0), "preferredVectorWidthHalf() -> retval\n."}, + {"preferredVectorWidthInt", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthInt, 0), "preferredVectorWidthInt() -> retval\n."}, + {"preferredVectorWidthLong", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthLong, 0), "preferredVectorWidthLong() -> retval\n."}, + {"preferredVectorWidthShort", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_preferredVectorWidthShort, 0), "preferredVectorWidthShort() -> retval\n."}, + {"printfBufferSize", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_printfBufferSize, 0), "printfBufferSize() -> retval\n."}, + {"profilingTimerResolution", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_profilingTimerResolution, 0), "profilingTimerResolution() -> retval\n."}, + {"singleFPConfig", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_singleFPConfig, 0), "singleFPConfig() -> retval\n."}, + {"type", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_type, 0), "type() -> retval\n."}, + {"vendorID", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_vendorID, 0), "vendorID() -> retval\n."}, + {"vendorName", 
CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_vendorName, 0), "vendorName() -> retval\n."}, + {"version", CV_PY_FN_WITH_KW_(pyopencv_cv_ocl_ocl_Device_version, 0), "version() -> retval\n."}, + + {NULL, NULL} +}; + +// Converter (ocl_Device) + +template<> +struct PyOpenCV_Converter< cv::ocl::Device > +{ + static PyObject* from(const cv::ocl::Device& r) + { + return pyopencv_ocl_Device_Instance(r); + } + static bool to(PyObject* src, cv::ocl::Device& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + cv::ocl::Device * dst_; + if (pyopencv_ocl_Device_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected cv::ocl::Device for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// ocl_OpenCLExecutionContext (Generic) +//================================================================================ + +// GetSet (ocl_OpenCLExecutionContext) + + + +// Methods (ocl_OpenCLExecutionContext) + + + +// Tables (ocl_OpenCLExecutionContext) + +static PyGetSetDef pyopencv_ocl_OpenCLExecutionContext_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_ocl_OpenCLExecutionContext_methods[] = +{ + + {NULL, NULL} +}; + +// Converter (ocl_OpenCLExecutionContext) + +template<> +struct PyOpenCV_Converter< Ptr<cv::ocl::OpenCLExecutionContext> > +{ + static PyObject* from(const Ptr<cv::ocl::OpenCLExecutionContext>& r) + { + return pyopencv_ocl_OpenCLExecutionContext_Instance(r); + } + static bool to(PyObject* src, Ptr<cv::ocl::OpenCLExecutionContext>& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + Ptr<cv::ocl::OpenCLExecutionContext> * dst_; + if (pyopencv_ocl_OpenCLExecutionContext_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected Ptr<cv::ocl::OpenCLExecutionContext> for argument '%s'", info.name); + return false; + } +}; + +//================================================================================ +// segmentation_IntelligentScissorsMB (Generic) +//================================================================================ + +// GetSet (segmentation_IntelligentScissorsMB) + + + +// Methods (segmentation_IntelligentScissorsMB) + +static int pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_IntelligentScissorsMB(pyopencv_segmentation_IntelligentScissorsMB_t* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + if(PyObject_Size(py_args) == 0 && (!kw || PyObject_Size(kw) == 0)) + { + if(self) ERRWRAP2(new (&(self->v)) cv::segmentation::IntelligentScissorsMB()); + return 0; + } + + return -1; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_applyImage(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_image = NULL; + Mat image; + IntelligentScissorsMB retval; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:segmentation_IntelligentScissorsMB.applyImage", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(retval = _self_->applyImage(image)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_image = NULL; + UMat image; + 
IntelligentScissorsMB retval; + + const char* keywords[] = { "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:segmentation_IntelligentScissorsMB.applyImage", (char**)keywords, &pyobj_image) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(retval = _self_->applyImage(image)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("applyImage"); + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_applyImageFeatures(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_non_edge = NULL; + Mat non_edge; + PyObject* pyobj_gradient_direction = NULL; + Mat gradient_direction; + PyObject* pyobj_gradient_magnitude = NULL; + Mat gradient_magnitude; + PyObject* pyobj_image = NULL; + Mat image; + IntelligentScissorsMB retval; + + const char* keywords[] = { "non_edge", "gradient_direction", "gradient_magnitude", "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:segmentation_IntelligentScissorsMB.applyImageFeatures", (char**)keywords, &pyobj_non_edge, &pyobj_gradient_direction, &pyobj_gradient_magnitude, &pyobj_image) && + pyopencv_to_safe(pyobj_non_edge, non_edge, ArgInfo("non_edge", 0)) && + pyopencv_to_safe(pyobj_gradient_direction, gradient_direction, ArgInfo("gradient_direction", 0)) && + pyopencv_to_safe(pyobj_gradient_magnitude, gradient_magnitude, ArgInfo("gradient_magnitude", 0)) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(retval = _self_->applyImageFeatures(non_edge, gradient_direction, gradient_magnitude, image)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_non_edge = NULL; + UMat non_edge; + PyObject* pyobj_gradient_direction = NULL; + UMat gradient_direction; + PyObject* pyobj_gradient_magnitude = NULL; + UMat gradient_magnitude; + PyObject* pyobj_image = NULL; + UMat image; + IntelligentScissorsMB retval; + + const char* keywords[] = { "non_edge", "gradient_direction", "gradient_magnitude", "image", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO|O:segmentation_IntelligentScissorsMB.applyImageFeatures", (char**)keywords, &pyobj_non_edge, &pyobj_gradient_direction, &pyobj_gradient_magnitude, &pyobj_image) && + pyopencv_to_safe(pyobj_non_edge, non_edge, ArgInfo("non_edge", 0)) && + pyopencv_to_safe(pyobj_gradient_direction, gradient_direction, ArgInfo("gradient_direction", 0)) && + pyopencv_to_safe(pyobj_gradient_magnitude, gradient_magnitude, ArgInfo("gradient_magnitude", 0)) && + pyopencv_to_safe(pyobj_image, image, ArgInfo("image", 0)) ) + { + ERRWRAP2(retval = _self_->applyImageFeatures(non_edge, gradient_direction, gradient_magnitude, image)); + return pyopencv_from(retval); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("applyImageFeatures"); + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_buildMap(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + 
cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + PyObject* pyobj_sourcePt = NULL; + Point sourcePt; + + const char* keywords[] = { "sourcePt", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O:segmentation_IntelligentScissorsMB.buildMap", (char**)keywords, &pyobj_sourcePt) && + pyopencv_to_safe(pyobj_sourcePt, sourcePt, ArgInfo("sourcePt", 0)) ) + { + ERRWRAP2(_self_->buildMap(sourcePt)); + Py_RETURN_NONE; + } + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_getContour(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + pyPrepareArgumentConversionErrorsStorage(2); + + { + PyObject* pyobj_targetPt = NULL; + Point targetPt; + PyObject* pyobj_contour = NULL; + Mat contour; + PyObject* pyobj_backward = NULL; + bool backward=false; + + const char* keywords[] = { "targetPt", "contour", "backward", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:segmentation_IntelligentScissorsMB.getContour", (char**)keywords, &pyobj_targetPt, &pyobj_contour, &pyobj_backward) && + pyopencv_to_safe(pyobj_targetPt, targetPt, ArgInfo("targetPt", 0)) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 1)) && + pyopencv_to_safe(pyobj_backward, backward, ArgInfo("backward", 0)) ) + { + ERRWRAP2(_self_->getContour(targetPt, contour, backward)); + return pyopencv_from(contour); + } + + + pyPopulateArgumentConversionErrors(); + } + + + { + PyObject* pyobj_targetPt = NULL; + Point targetPt; + PyObject* pyobj_contour = NULL; + UMat contour; + PyObject* pyobj_backward = NULL; + bool backward=false; + + const char* keywords[] = { "targetPt", "contour", "backward", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "O|OO:segmentation_IntelligentScissorsMB.getContour", (char**)keywords, &pyobj_targetPt, &pyobj_contour, &pyobj_backward) && + pyopencv_to_safe(pyobj_targetPt, targetPt, ArgInfo("targetPt", 0)) && + pyopencv_to_safe(pyobj_contour, contour, ArgInfo("contour", 1)) && + pyopencv_to_safe(pyobj_backward, backward, ArgInfo("backward", 0)) ) + { + ERRWRAP2(_self_->getContour(targetPt, contour, backward)); + return pyopencv_from(contour); + } + + + pyPopulateArgumentConversionErrors(); + } + pyRaiseCVOverloadException("getContour"); + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setEdgeFeatureCannyParameters(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + PyObject* pyobj_threshold1 = NULL; + double threshold1=0; + PyObject* pyobj_threshold2 = NULL; + double threshold2=0; + PyObject* pyobj_apertureSize = NULL; + int apertureSize=3; + PyObject* pyobj_L2gradient = NULL; + bool 
L2gradient=false; + IntelligentScissorsMB retval; + + const char* keywords[] = { "threshold1", "threshold2", "apertureSize", "L2gradient", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OO|OO:segmentation_IntelligentScissorsMB.setEdgeFeatureCannyParameters", (char**)keywords, &pyobj_threshold1, &pyobj_threshold2, &pyobj_apertureSize, &pyobj_L2gradient) && + pyopencv_to_safe(pyobj_threshold1, threshold1, ArgInfo("threshold1", 0)) && + pyopencv_to_safe(pyobj_threshold2, threshold2, ArgInfo("threshold2", 0)) && + pyopencv_to_safe(pyobj_apertureSize, apertureSize, ArgInfo("apertureSize", 0)) && + pyopencv_to_safe(pyobj_L2gradient, L2gradient, ArgInfo("L2gradient", 0)) ) + { + ERRWRAP2(retval = _self_->setEdgeFeatureCannyParameters(threshold1, threshold2, apertureSize, L2gradient)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setEdgeFeatureZeroCrossingParameters(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + PyObject* pyobj_gradient_magnitude_min_value = NULL; + float gradient_magnitude_min_value=0.0f; + IntelligentScissorsMB retval; + + const char* keywords[] = { "gradient_magnitude_min_value", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:segmentation_IntelligentScissorsMB.setEdgeFeatureZeroCrossingParameters", (char**)keywords, &pyobj_gradient_magnitude_min_value) && + pyopencv_to_safe(pyobj_gradient_magnitude_min_value, gradient_magnitude_min_value, ArgInfo("gradient_magnitude_min_value", 0)) ) + { + ERRWRAP2(retval = _self_->setEdgeFeatureZeroCrossingParameters(gradient_magnitude_min_value)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setGradientMagnitudeMaxLimit(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + PyObject* pyobj_gradient_magnitude_threshold_max = NULL; + float gradient_magnitude_threshold_max=0.0f; + IntelligentScissorsMB retval; + + const char* keywords[] = { "gradient_magnitude_threshold_max", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "|O:segmentation_IntelligentScissorsMB.setGradientMagnitudeMaxLimit", (char**)keywords, &pyobj_gradient_magnitude_threshold_max) && + pyopencv_to_safe(pyobj_gradient_magnitude_threshold_max, gradient_magnitude_threshold_max, ArgInfo("gradient_magnitude_threshold_max", 0)) ) + { + ERRWRAP2(retval = _self_->setGradientMagnitudeMaxLimit(gradient_magnitude_threshold_max)); + return pyopencv_from(retval); + } + + return NULL; +} + +static PyObject* pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setWeights(PyObject* self, PyObject* py_args, PyObject* kw) +{ + using namespace cv::segmentation; + + + cv::segmentation::IntelligentScissorsMB * self1 = 0; + if (!pyopencv_segmentation_IntelligentScissorsMB_getp(self, self1)) + return failmsgp("Incorrect 
type of self (must be 'segmentation_IntelligentScissorsMB' or its derivative)"); + cv::segmentation::IntelligentScissorsMB* _self_ = (self1); + PyObject* pyobj_weight_non_edge = NULL; + float weight_non_edge=0.f; + PyObject* pyobj_weight_gradient_direction = NULL; + float weight_gradient_direction=0.f; + PyObject* pyobj_weight_gradient_magnitude = NULL; + float weight_gradient_magnitude=0.f; + IntelligentScissorsMB retval; + + const char* keywords[] = { "weight_non_edge", "weight_gradient_direction", "weight_gradient_magnitude", NULL }; + if( PyArg_ParseTupleAndKeywords(py_args, kw, "OOO:segmentation_IntelligentScissorsMB.setWeights", (char**)keywords, &pyobj_weight_non_edge, &pyobj_weight_gradient_direction, &pyobj_weight_gradient_magnitude) && + pyopencv_to_safe(pyobj_weight_non_edge, weight_non_edge, ArgInfo("weight_non_edge", 0)) && + pyopencv_to_safe(pyobj_weight_gradient_direction, weight_gradient_direction, ArgInfo("weight_gradient_direction", 0)) && + pyopencv_to_safe(pyobj_weight_gradient_magnitude, weight_gradient_magnitude, ArgInfo("weight_gradient_magnitude", 0)) ) + { + ERRWRAP2(retval = _self_->setWeights(weight_non_edge, weight_gradient_direction, weight_gradient_magnitude)); + return pyopencv_from(retval); + } + + return NULL; +} + + + +// Tables (segmentation_IntelligentScissorsMB) + +static PyGetSetDef pyopencv_segmentation_IntelligentScissorsMB_getseters[] = +{ + {NULL} /* Sentinel */ +}; + +static PyMethodDef pyopencv_segmentation_IntelligentScissorsMB_methods[] = +{ + {"applyImage", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_applyImage, 0), "applyImage(image) -> retval\n. @brief Specify input image and extract image features\n. *\n. * @param image input image. Type is #CV_8UC1 / #CV_8UC3"}, + {"applyImageFeatures", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_applyImageFeatures, 0), "applyImageFeatures(non_edge, gradient_direction, gradient_magnitude[, image]) -> retval\n. @brief Specify custom features of input image\n. *\n. * Customized advanced variant of applyImage() call.\n. *\n. * @param non_edge Specify cost of non-edge pixels. Type is CV_8UC1. Expected values are `{0, 1}`.\n. * @param gradient_direction Specify gradient direction feature. Type is CV_32FC2. Values are expected to be normalized: `x^2 + y^2 == 1`\n. * @param gradient_magnitude Specify cost of gradient magnitude function. Type is CV_32FC1. Values should be in range `[0, 1]`.\n. * @param image **Optional parameter**. Must be specified if a subset of features is specified (non-specified features are calculated internally)"}, + {"buildMap", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_buildMap, 0), "buildMap(sourcePt) -> None\n. @brief Prepares a map of optimal paths for the given source point on the image\n. *\n. * @note applyImage() / applyImageFeatures() must be called before this call\n. *\n. * @param sourcePt The source point used to find the paths"}, + {"getContour", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_getContour, 0), "getContour(targetPt[, contour[, backward]]) -> contour\n. @brief Extracts optimal contour for the given target point on the image\n. *\n. * @note buildMap() must be called before this call\n. *\n. * @param targetPt The target point\n. * @param[out] contour The list of pixels which contains optimal path between the source and the target points of the image. Type is CV_32SC2 (compatible with `std::vector<Point>`)\n. 
* @param backward Flag to indicate reverse order of retrieved pixels (use \"true\" value to fetch points from the target to the source point)"}, + {"setEdgeFeatureCannyParameters", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setEdgeFeatureCannyParameters, 0), "setEdgeFeatureCannyParameters(threshold1, threshold2[, apertureSize[, L2gradient]]) -> retval\n. @brief Switch edge feature extractor to use Canny edge detector\n. *\n. * @note \"Laplacian Zero-Crossing\" feature extractor is used by default (following the original article)\n. *\n. * @sa Canny"}, + {"setEdgeFeatureZeroCrossingParameters", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setEdgeFeatureZeroCrossingParameters, 0), "setEdgeFeatureZeroCrossingParameters([, gradient_magnitude_min_value]) -> retval\n. @brief Switch to \"Laplacian Zero-Crossing\" edge feature extractor and specify its parameters\n. *\n. * This feature extractor is used by default according to the article.\n. *\n. * The implementation has additional filtering for regions with low-amplitude noise.\n. * This filtering is enabled through the minimal gradient amplitude parameter (use a small value such as 4, 8, or 16).\n. *\n. * @note The current implementation of this feature extractor is based on processing of grayscale images (a color image is converted to grayscale first).\n. *\n. * @note The Canny edge detector is a bit slower, but provides better results (especially on color images): use setEdgeFeatureCannyParameters().\n. *\n. * @param gradient_magnitude_min_value Minimal gradient magnitude value for edge pixels (default: 0, check is disabled)"}, + {"setGradientMagnitudeMaxLimit", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setGradientMagnitudeMaxLimit, 0), "setGradientMagnitudeMaxLimit([, gradient_magnitude_threshold_max]) -> retval\n. @brief Specify gradient magnitude max value threshold\n. *\n. * A zero limit value is used to disable gradient magnitude thresholding (default behavior, as described in the original article).\n. * Otherwise pixels with `gradient magnitude >= threshold` have zero cost.\n. *\n. * @note Thresholding should be used for images with irregular regions (to avoid getting stuck on parameters from high-contrast areas, like embedded logos).\n. *\n. * @param gradient_magnitude_threshold_max Specify gradient magnitude max value threshold (default: 0, disabled)"}, + {"setWeights", CV_PY_FN_WITH_KW_(pyopencv_cv_segmentation_segmentation_IntelligentScissorsMB_setWeights, 0), "setWeights(weight_non_edge, weight_gradient_direction, weight_gradient_magnitude) -> retval\n. @brief Specify weights of feature functions\n. *\n. * Consider keeping weights normalized (sum of weights equals 1.0).\n. * The discrete dynamic programming (DP) goal is minimization of costs between pixels.\n. *\n. * @param weight_non_edge Specify cost of non-edge pixels (default: 0.43f)\n. * @param weight_gradient_direction Specify cost of gradient direction function (default: 0.43f)\n. 
* @param weight_gradient_magnitude Specify cost of gradient magnitude function (default: 0.14f)"}, + + {NULL, NULL} +}; + +// Converter (segmentation_IntelligentScissorsMB) + +template<> +struct PyOpenCV_Converter< cv::segmentation::IntelligentScissorsMB > +{ + static PyObject* from(const cv::segmentation::IntelligentScissorsMB& r) + { + return pyopencv_segmentation_IntelligentScissorsMB_Instance(r); + } + static bool to(PyObject* src, cv::segmentation::IntelligentScissorsMB& dst, const ArgInfo& info) + { + if(!src || src == Py_None) + return true; + cv::segmentation::IntelligentScissorsMB * dst_; + if (pyopencv_segmentation_IntelligentScissorsMB_getp(src, dst_)) + { + dst = *dst_; + return true; + } + + failmsg("Expected cv::segmentation::IntelligentScissorsMB for argument '%s'", info.name); + return false; + } +}; + diff --git a/generated/opencv_data_config.hpp b/generated/opencv_data_config.hpp new file mode 100644 index 0000000..d09500e --- /dev/null +++ b/generated/opencv_data_config.hpp @@ -0,0 +1,11 @@ + +#define OPENCV_INSTALL_PREFIX "/usr/local" + +#define OPENCV_DATA_INSTALL_PATH "share/opencv4" + +#define OPENCV_BUILD_DIR "/root/opencv/build" + +#define OPENCV_DATA_BUILD_DIR_SEARCH_PATHS \ + "..//" + +#define OPENCV_INSTALL_DATA_DIR_RELATIVE "../share/opencv4" diff --git a/generated/opencv_tests_config.hpp b/generated/opencv_tests_config.hpp new file mode 100644 index 0000000..bb5fa5c --- /dev/null +++ b/generated/opencv_tests_config.hpp @@ -0,0 +1,4 @@ + +#define OPENCV_INSTALL_PREFIX "/usr/local" + +#define OPENCV_TEST_DATA_INSTALL_PATH "share/opencv4/testdata" diff --git a/importfix/cv2/cv2.py b/importfix/cv2/cv2.py new file mode 100644 index 0000000..cb28288 --- /dev/null +++ b/importfix/cv2/cv2.py @@ -0,0 +1 @@ +from cv2 import * -- Gitee
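For reviewers, a minimal smoke test of the bindings in this patch; it is a sketch, not part of the change. It assumes the image was built with the Python bindings linked in and NumPy available, and that the importfix shim (cv2.py re-exporting the statically linked cv2 extension) is on sys.path. Every call below dispatches into one of the wrappers shown in this hunk: the ocl_Device accessors and the segmentation_IntelligentScissorsMB methods, following their docstrings above.

    import cv2
    import numpy as np

    # ocl_Device accessors: each call lands in a generated
    # pyopencv_cv_ocl_ocl_Device_* wrapper.
    dev = cv2.ocl.Device_getDefault()
    print(dev.name(), dev.vendorName(), dev.version())
    print("preferred float vector width:", dev.preferredVectorWidthFloat())

    # segmentation_IntelligentScissorsMB driven end to end on a dummy
    # CV_8UC1 image; setters return retval as stated in the docstrings.
    tool = cv2.segmentation.IntelligentScissorsMB()
    tool.setEdgeFeatureCannyParameters(32, 100)
    tool.applyImage(np.zeros((64, 64), dtype=np.uint8))
    tool.buildMap((0, 0))                 # must precede getContour()
    contour = tool.getContour((63, 63))   # CV_32SC2 path from source to target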