70 const intg32* sptr = env_img_pixels(src);
71 intg32* dptr = env_img_pixelsw(result);
100 const intg32* sptr = env_img_pixels(src);
101 intg32* dptr = env_img_pixelsw(result);
117 if (src->dims.h <= 1)
128 const intg32* sptr = env_img_pixels(src);
129 intg32* dptr = env_img_pixelsw(result);
134 for (env_size_t i = 0; i < dims2.w; ++i) dptr[i] = sptr[i];
220 const intg32 hf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
223 const intg32* src = env_img_pixels(source);
224 intg32* dst = env_img_pixelsw(result);
236 if (i + k < hfs2 || i + k >= w + hfs2) continue;
240 sum += hf_flipped[k];
273 const intg32 vf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
276 const intg32* src = env_img_pixels(source);
277 intg32* dst = env_img_pixelsw(result);
289 if (j + k < vfs2 || j + k >= h + vfs2) continue;
293 sum += vf_flipped[k];
322 ENV_ASSERT(env_dims_equal(img1->dims, img2->dims));
323 ENV_ASSERT(env_dims_equal(img1->dims, result->dims));
325 const intg32* s1ptr = env_img_pixels(img1);
326 const intg32* s2ptr = env_img_pixels(img2);
327 intg32* dptr = env_img_pixelsw(result);
342 dptr[i] = (s1 > s2) ? (s1 + (s2 >> 1)) : ((s1 >> 1) + s2);
354 const intg32* sptr = env_img_pixels(src);
355 intg32* reptr = env_img_pixelsw(&re);
356 intg32* imptr = env_img_pixelsw(&im);
378 const intg32 arg = (i * kxnumer + j * kynumer) >> kdenombits;
383 const intg32 sval = *sptr++;
410 if (size * 2 > dims.w) size = dims.w / 2;
411 if (size * 2 > dims.h) size = dims.h / 2;
412 if (size < 1) return;
418 intg32* aptr = env_img_pixelsw(a);
423 *aptr = (*aptr / size_plus_1) * coeff;
429 aptr = env_img_pixelsw(a);
435 *(aptr + dims.w - 1 - x * 2) = (*(aptr + dims.w - 1 - x * 2) / size_plus_1) * coeff;
437 *aptr = (*aptr / size_plus_1) * coeff;
441 aptr += dims.w - size;
444 aptr = env_img_pixelsw(a) + (dims.h - size) * dims.w;
450 *aptr = (*aptr / size_plus_1) * coeff;
465 const env_size_t depth = env_pyr_depth(result);
467 if (depth == 0) return;
480 env_img_pixelsw(env_pyr_imgw(result, 0)));
491 if (lev >= firstlevel)
495 env_img_pixelsw(env_pyr_imgw(result, lev)));
510 const env_size_t depth = env_pyr_depth(hipass);
518 if (!env_img_initialized(env_pyr_img(hipass, lev))) continue;
522 env_steerable_filter(env_pyr_img(hipass, lev), kxnumer, kynumer, kdenombits, imath, env_pyr_imgw(&result, lev));
549 const struct env_image* prev = lev == 1 ? image : env_pyr_img(result, lev-1);
597 if (new_w == orig_w && new_h == orig_h)
603 intg32* dptr = env_img_pixelsw(result);
604 const intg32* const sptr = env_img_pixels(src);
624 const env_ssize_t fy_numer = y_numer - y0 * y_denom;
639 const env_ssize_t fx_numer = x_numer - x0 * x_denom;
643 const intg32 d00 = sptr[x0 + wy0];
644 const intg32 d10 = sptr[x1 + wy0];
646 const intg32 d01 = sptr[x0 + wy1];
647 const intg32 d11 = sptr[x1 + wy1];
649 const intg32 dx0 = d00 + ((d10 - d00) / fx_denom) * fx_numer;
650 const intg32 dx1 = d01 + ((d11 - d01) / fx_denom) * fx_numer;
653 *dptr++ = dx0 + ((dx1 - dx0) / fy_denom) * fy_numer;
667 default: ENV_ASSERT2(0, "Invalid normalization type");
680 if (mi != 0 || ma != 0)
689 if (!env_img_initialized(src)) return;
697 if (nmi != 0 || nma != 0)
706 const intg32 thresh = mi + (ma - mi) / 10;
709 const intg32* const dptr = env_img_pixels(src);
716 const intg32 val = dptr[index];
718 val >= dptr[index - w] &&
719 val >= dptr[index + w] &&
720 val >= dptr[index - 1] &&
721 val >= dptr[index + 1])
730 if (numlm > 0) lm_mean /= numlm;
742 factor = ((ma - lm_mean) * (ma - lm_mean)) / ma;
755 intg32* const itr = env_img_pixelsw(src);
764 const int absol, struct env_image* result)
772 ENV_ASSERT2(lw >= sw && lh >= sh, "center must be larger than surround");
774 const env_size_t scalex = lw / sw, remx = lw - 1 - (lw % sw);
775 const env_size_t scaley = lh / sh, remy = lh - 1 - (lh % sh);
779 const intg32* lptr = env_img_pixels(center);
780 const intg32* sptr = env_img_pixels(surround);
781 intg32* dptr = env_img_pixelsw(result);
789 if (*lptr > *sptr) *dptr++ = (*lptr++ - *sptr);
790 else *dptr++ = (*sptr - *lptr++);
792 if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
794 if (ci) { ci = 0; ++sptr; }
795 if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
804 if (*lptr > *sptr) *dptr++ = (*lptr++ - *sptr);
805 else { *dptr++ = 0; lptr++; }
807 if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
809 if (ci) { ci = 0; ++sptr; }
810 if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
830 intg32* rgptr = env_img_pixelsw(rg);
831 intg32* byptr = env_img_pixelsw(by);
843 const intg32 lum = r + g + b;
847 rgptr[i] = byptr[i] = 0;
852 intg32 red = (2*r - g - b);
853 intg32 green = (2*g - r - b);
854 intg32 blue = (2*b - r - g);
857 if (red < 0) red = 0;
858 if (green < 0) green = 0;
859 if (blue < 0) blue = 0;
860 if (yellow < 0) yellow=0;
865 rgptr[i] = (3*(red - green) << lshift) / lum;
866 byptr[i] = (3*(blue - yellow) << lshift) / lum;
870 rgptr[i] = ((3*(red - green)) / lum) >> (-lshift);
871 byptr[i] = ((3*(blue - yellow)) / lum) >> (-lshift);
875 rgptr[i] = (3*(red - green)) / lum;
876 byptr[i] = (3*(blue - yellow)) / lum;
885 if (src == 0) return;
887 const intg32* sptr = env_img_pixels(src);
892 if (sptr[0] < *mi) *mi = sptr[0];
893 if (sptr[0] > *ma) *ma = sptr[0];
897 if (sptr[i] < *mi) *mi = sptr[i];
898 else if (sptr[i] > *ma) *ma = sptr[i];
905 if (src == 0) return;
907 intg32* sptr = env_img_pixelsw(src);
912 const intg32 scale = ma - mi;
916 for (env_size_t i = 0; i < sz; ++i) sptr[i] = 0;
920 for (env_size_t i = 0; i < sz; ++i) sptr[i] = ((sptr[i] - mi) * 255) / scale;
924 const intg32 div = scale / 255;
926 for (env_size_t i = 0; i < sz; ++i) sptr[i] = (sptr[i] - mi) / div;
934 intg32 const * sptr = env_img_pixels(src);
944 intg32 const * p = sptr + ((srch * j) / ny) * srcw;
949 intg32 const * pp = p + (srcw * i) / nx;
954 for (env_size_t x = 0; x < tw; ++x) sum += *pp++;
959 if (sum < 0) sum = 0;
961 if (sum > 255) sum = 255;
963 *dest++ = (unsigned char)sum;
972 if (!env_img_initialized(srcImg))
998 const intg32* src = env_img_pixels(srcImg);
999 intg32* dst = env_img_pixelsw(result);
1001 src += startx + starty * w;
1002 dst += (startx + dx) + (starty + dy) * w;
1009 for (env_ssize_t i = startx; i < endx; ++i) *dst++ = *src++;
1012 src += skip; dst += skip;
1020 if (!env_img_initialized(srcImg))
1036 env_ssize_t xt = dxnumer >= 0 ? (dxnumer >> denombits) : - ((-dxnumer + denom-1) >> denombits);
1037 env_ssize_t xfrac_numer = dxnumer - (xt << denombits);
1042 env_ssize_t yt = dynumer >= 0 ? (dynumer >> denombits) : - ((-dynumer + denom-1) >> denombits);
1043 env_ssize_t yfrac_numer = dynumer - (yt << denombits);
1050 intg32* const rptr = env_img_pixelsw(result);
1051 for (env_size_t i = 0; i < sz; ++i) rptr[i] = 0;
1055 if (xfrac_numer == 0 && yfrac_numer == 0)
1061 if (xfrac_numer > 0)
1063 xfrac_numer = denom - xfrac_numer;
1067 if (yfrac_numer > 0)
1069 yfrac_numer = denom - yfrac_numer;
1074 const intg32* src2 = env_img_pixels(srcImg);
1075 intg32* ret2 = env_img_pixelsw(result);
1076 if (xt > 0) ret2 += xt; else if (xt < 0) src2 -= xt;
1077 if (yt > 0) ret2 += yt * w; else if (yt < 0) src2 -= yt * w;
1082 const intg32* src = src2;
1086 *ret = (((src[0] >> denombits) * (denom - xfrac_numer)) >> denombits) * (denom - yfrac_numer);
1087 *ret += (((src[1] >> denombits) * xfrac_numer) >> denombits) * (denom - yfrac_numer);
1088 *ret += (((src[w] >> denombits) * (denom - xfrac_numer)) >> denombits) * yfrac_numer;
1089 *ret += (((src[w+1] >> denombits) * xfrac_numer) >> denombits) * yfrac_numer;
1092 src2 += w; ret2 += w;
void env_c_lowpass_9_y_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst)
Like env_c_lowpass_9_y_fewbits() but uses optimized filter coefficients.
void env_c_lowpass_9_x_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst)
Like env_c_lowpass_9_x_fewbits() but uses optimized filter coefficients.
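The filter coefficients excerpted above at lines 220 and 273 ({1, 8, 28, 56, 70, 56, 28, 8, 1}, the 8th row of Pascal's triangle, summing to 256) suggest how these lowpass kernels work. Below is a minimal, non-optimized sketch of a 9-tap binomial lowpass in X with border renormalization, matching the skip-and-renormalize logic visible around line 236; the function name and typedef stand-ins are assumptions, and the library's _fewbits_optim variants use different, faster arithmetic.

#include <stddef.h>
typedef int intg32;            /* stand-in for the library's 32-bit type */
typedef size_t env_size_t;     /* stand-in for env_size_t                */
typedef ptrdiff_t env_ssize_t; /* stand-in for env_ssize_t               */

/* Illustrative 9-tap binomial lowpass along X.  Assumes "fewbits" input
   (small pixel values) so the weighted sum fits in 32 bits. */
static void lowpass9_x_sketch(const intg32* src, const env_size_t w,
                              const env_size_t h, intg32* dst)
{
        static const intg32 hf[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
        const env_ssize_t hfs2 = 4;                       /* half filter size */
        for (env_size_t j = 0; j < h; ++j)
                for (env_size_t i = 0; i < w; ++i)
                {
                        intg32 val = 0, sum = 0;
                        for (env_ssize_t k = 0; k < 9; ++k)
                        {
                                const env_ssize_t x = (env_ssize_t)i + k - hfs2;
                                if (x < 0 || x >= (env_ssize_t)w)
                                        continue;         /* skip taps outside the row */
                                val += src[j * w + (env_size_t)x] * hf[k];
                                sum += hf[k];             /* weights actually used */
                        }
                        dst[j * w + i] = val / sum;       /* renormalize at the borders */
                }
}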
void env_c_inplace_normalize(intg32 *dst, const env_size_t sz, const intg32 nmin, const intg32 nmax, intg32 *actualmin, intg32 *actualmax, intg32 rangeThresh)
void env_c_lowpass_5_x_dec_x_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst, const env_size_t w2)
void env_c_inplace_rectify(intg32 *dst, const env_size_t sz)
Saturate values < 0 to zero.
void env_c_lowpass_5_y_dec_y_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst, const env_size_t h2)
void env_c_image_minus_image(const intg32 *const a, const intg32 *const b, const env_size_t sz, intg32 *const dst)
result = a - b
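For the two simplest C kernels above, env_c_inplace_rectify ("saturate values < 0 to zero") and env_c_image_minus_image ("result = a - b"), the documented contracts suggest sketches along the following lines; these are illustrations only, and the typedef stand-ins are assumptions rather than the library's definitions.

#include <stddef.h>
typedef int intg32;        /* stand-in for the library's 32-bit type */
typedef size_t env_size_t; /* stand-in for env_size_t                */

/* Clamp negative values to zero, in place. */
static void rectify_sketch(intg32* dst, const env_size_t sz)
{
        for (env_size_t i = 0; i < sz; ++i)
                if (dst[i] < 0) dst[i] = 0;
}

/* Pointwise dst = a - b over sz pixels. */
static void minus_sketch(const intg32* const a, const intg32* const b,
                         const env_size_t sz, intg32* const dst)
{
        for (env_size_t i = 0; i < sz; ++i)
                dst[i] = a[i] - b[i];
}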
void env_img_swap(struct env_image *img1, struct env_image *img2)
void env_img_resize_dims(struct env_image *img, const struct env_dims d)
#define env_img_initializer
void env_img_make_empty(struct env_image *img)
void env_img_init(struct env_image *img, const struct env_dims d)
void env_img_copy_src_dst(const struct env_image *src, struct env_image *dst)
void env_max_normalize_std_inplace(struct env_image *src, const intg32 nmi, const intg32 nma, const intg32 rangeThresh)
void env_lowpass_9_y(const struct env_image *source, const struct env_math *imath, struct env_image *result)
void env_max_normalize_none_inplace(struct env_image *src, const intg32 nmi, const intg32 nma, const intg32 rangeThresh)
void env_dec_x(const struct env_image *src, struct env_image *result)
Decimate in X (take one every 'factor' pixels).
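A decimation of this kind can be sketched as follows. Note that the library function itself takes only src and result, so its decimation factor is presumably fixed or derived from the image dimensions; the explicit factor parameter below is purely illustrative.

#include <stddef.h>
typedef int intg32;        /* stand-in for the library's 32-bit type */
typedef size_t env_size_t; /* stand-in for env_size_t                */

/* Keep one pixel out of every 'factor' along X in each row.
   Output width is assumed to be w / factor (rounding ignored here). */
static void dec_x_sketch(const intg32* sptr, const env_size_t w, const env_size_t h,
                         const env_size_t factor, intg32* dptr)
{
        const env_size_t w2 = w / factor;
        for (env_size_t j = 0; j < h; ++j)
                for (env_size_t i = 0; i < w2; ++i)
                        dptr[j * w2 + i] = sptr[j * w + i * factor];
}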
void env_get_rgby(const struct env_rgb_pixel *const src, const env_size_t sz, struct env_image *rg, struct env_image *by, const intg32 thresh, const env_size_t inputbits)
Compute R-G and B-Y opponent color maps.
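The source excerpt around lines 843-876 shows the per-pixel computation: luminance gating, rectified double-opponent components, and division by luminance with a fixed-point shift. A per-pixel sketch of that logic follows; the yellow component and the handling of the lshift scaling are not visible in the excerpt and are assumptions here.

typedef int intg32;   /* stand-in for the library's 32-bit type */

/* One pixel of the R-G / B-Y opponency, following the excerpted logic. */
static void rgby_pixel_sketch(const intg32 r, const intg32 g, const intg32 b,
                              const intg32 thresh, intg32* rg, intg32* by)
{
        const intg32 lum = r + g + b;
        if (lum < thresh) { *rg = *by = 0; return; }   /* too dark: no opponency */

        intg32 red    = 2*r - g - b;
        intg32 green  = 2*g - r - b;
        intg32 blue   = 2*b - r - g;
        intg32 yellow = r + g - 2*b;                   /* assumed formula */
        if (red    < 0) red    = 0;
        if (green  < 0) green  = 0;
        if (blue   < 0) blue   = 0;
        if (yellow < 0) yellow = 0;

        /* Fixed-point scaling by lshift (see lines 865-871) omitted here. */
        *rg = (3 * (red  - green )) / lum;
        *by = (3 * (blue - yellow)) / lum;
}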
void env_shift_clean(const struct env_image *srcImg, const env_ssize_t dx, const env_ssize_t dy, struct env_image *result)
Shift an image by (dx, dy), without wraparound.
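Based on the excerpt around lines 998-1012, a non-wrapping shift copies only the overlapping region into a result that has already been cleared. The sketch below shows the idea on raw pixel buffers; shifts larger than the image simply produce an empty copy loop.

#include <stddef.h>
typedef int intg32;            /* stand-in for the library's 32-bit type */
typedef ptrdiff_t env_ssize_t; /* stand-in for env_ssize_t               */

/* Copy src shifted by (dx, dy) into a pre-zeroed dst of the same w x h. */
static void shift_clean_sketch(const intg32* src, const env_ssize_t w, const env_ssize_t h,
                               const env_ssize_t dx, const env_ssize_t dy, intg32* dst)
{
        const env_ssize_t startx = dx < 0 ? -dx : 0, endx = dx < 0 ? w : w - dx;
        const env_ssize_t starty = dy < 0 ? -dy : 0, endy = dy < 0 ? h : h - dy;
        for (env_ssize_t j = starty; j < endy; ++j)
                for (env_ssize_t i = startx; i < endx; ++i)
                        dst[(j + dy) * w + (i + dx)] = src[j * w + i];
}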
void env_rescale(const struct env_image *src, struct env_image *result)
void env_lowpass_9_x(const struct env_image *source, const struct env_math *imath, struct env_image *result)
void env_rescale_range_inplace(struct env_image *src, const intg32 mi, const intg32 ma)
Rescale the src image in place to the [0..255] range.
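The excerpt around lines 912-926 suggests the shape of this routine: compute scale = ma - mi, then either multiply by 255 before dividing (narrow ranges) or divide by scale/255 (wide ranges, to avoid 32-bit overflow). A sketch under those assumptions, with a hypothetical branch condition and stand-in types:

#include <stdint.h>
#include <stddef.h>
typedef int32_t intg32;    /* stand-in for the library's 32-bit type */
typedef size_t env_size_t; /* stand-in for env_size_t                */

/* Map values from [mi..ma] into [0..255], in place. */
static void rescale_range_sketch(intg32* p, const env_size_t sz,
                                 const intg32 mi, const intg32 ma)
{
        const intg32 scale = ma - mi;
        if (scale < 1)                              /* degenerate range: all zeros */
        {
                for (env_size_t i = 0; i < sz; ++i) p[i] = 0;
        }
        else if (scale <= INT32_MAX / 255)          /* hypothetical overflow guard */
        {
                for (env_size_t i = 0; i < sz; ++i) p[i] = ((p[i] - mi) * 255) / scale;
        }
        else
        {
                const intg32 div = scale / 255;
                for (env_size_t i = 0; i < sz; ++i) p[i] = (p[i] - mi) / div;
        }
}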
void env_merge_range(const struct env_image *src, intg32 *mi, intg32 *ma)
Update the range [mi,ma] to include the range of values in src.
void env_grid_average(const struct env_image *src, unsigned char *dest, unsigned int bitshift, env_size_t nx, env_size_t ny)
Compute average values in each tile of a grid.
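The excerpt around lines 944-963 shows per-tile accumulation followed by clamping to [0..255] and a cast to unsigned char. The sketch below averages each tile explicitly; exactly how the library applies the bitshift argument is not visible in the excerpt, so its use here as a plain down-shift is an assumption.

#include <stddef.h>
typedef int intg32;        /* stand-in for the library's 32-bit type */
typedef size_t env_size_t; /* stand-in for env_size_t                */

/* Average the pixels of each tile of an nx-by-ny grid into dest (row-major). */
static void grid_average_sketch(const intg32* src, const env_size_t srcw, const env_size_t srch,
                                unsigned char* dest, const unsigned int bitshift,
                                const env_size_t nx, const env_size_t ny)
{
        for (env_size_t j = 0; j < ny; ++j)
                for (env_size_t i = 0; i < nx; ++i)
                {
                        const env_size_t x0 = (srcw * i) / nx, x1 = (srcw * (i + 1)) / nx;
                        const env_size_t y0 = (srch * j) / ny, y1 = (srch * (j + 1)) / ny;
                        const env_size_t npix = (x1 - x0) * (y1 - y0);
                        long long sum = 0;
                        for (env_size_t y = y0; y < y1; ++y)
                                for (env_size_t x = x0; x < x1; ++x)
                                        sum += src[y * srcw + x];
                        long long avg = npix ? sum / (long long)npix : 0;
                        if (avg < 0) avg = 0;
                        avg >>= bitshift;              /* assumed use of 'bitshift' */
                        if (avg > 255) avg = 255;
                        *dest++ = (unsigned char)avg;
                }
}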
void env_attenuate_borders_inplace(struct env_image *a, env_size_t size)
void env_lowpass_5_y_dec_y(const struct env_image *src, const struct env_math *imath, struct env_image *result)
void env_max_normalize_inplace(struct env_image *src, const intg32 mi, const intg32 ma, const enum env_maxnorm_type normtyp, const intg32 rangeThresh)
void env_lowpass_9(const struct env_image *src, const struct env_math *imath, struct env_image *result)
void env_pyr_build_steerable_from_hipass_9(const struct env_pyr *hipass, const intg32 kxnumer, const intg32 kynumer, const env_size_t kdenombits, const struct env_math *imath, struct env_pyr *out)
void env_pyr_build_hipass_9(const struct env_image *image, env_size_t firstlevel, const struct env_math *imath, struct env_pyr *result)
void env_steerable_filter(const struct env_image *src, const intg32 kxnumer, const intg32 kynumer, const env_size_t kdenombits, const struct env_math *imath, struct env_image *result)
void env_lowpass_5_x_dec_x(const struct env_image *src, const struct env_math *imath, struct env_image *result)
void env_quad_energy(const struct env_image *img1, const struct env_image *img2, struct env_image *result)
void env_center_surround(const struct env_image *center, const struct env_image *surround, const int absol, struct env_image *result)
void env_downsize_9_inplace(struct env_image *src, const env_size_t depth, const struct env_math *imath)
void env_pyr_build_lowpass_5(const struct env_image *image, env_size_t firstlevel, const struct env_math *imath, struct env_pyr *result)
Wrapper for _cpu or _cuda version.
void env_dec_xy(const struct env_image *src, struct env_image *result)
Decimate in X and Y (take one every 'factor' pixels).
void env_dec_y(const struct env_image *src, struct env_image *result)
Decimate in Y (take one every 'factor' pixels).
void env_shift_image(const struct env_image *srcImg, const env_ssize_t dxnumer, const env_ssize_t dynumer, const env_size_t denombits, struct env_image *result)
#define ENV_ASSERT2(expr, msg)
void env_pyr_init(struct env_pyr *pyr, const env_size_t n)
Construct with a given number of empty images.
void env_pyr_swap(struct env_pyr *pyr1, struct env_pyr *pyr2)
Swap contents with another env_pyr.
void env_pyr_make_empty(struct env_pyr *dst)
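Putting the pyramid entries above together, a typical call sequence might look like the following fragment. It assumes an already-initialized input image and env_math structure, and the depth and firstlevel values are arbitrary.

/* Hypothetical usage of the env_pyr API listed above; 'input' and 'imath'
   are assumed to be set up elsewhere. */
static void build_and_consume_pyramid(const struct env_image* input,
                                      const struct env_math* imath)
{
        struct env_pyr pyr;
        env_pyr_init(&pyr, 9);                              /* nine empty levels */
        env_pyr_build_lowpass_5(input, 0, imath, &pyr);     /* fill from the input image */

        for (env_size_t lev = 0; lev < env_pyr_depth(&pyr); ++lev)
        {
                const struct env_image* im = env_pyr_img(&pyr, lev);
                if (!env_img_initialized(im))
                        continue;                           /* level below firstlevel */
                /* ... consume level 'lev' ... */
        }

        env_pyr_make_empty(&pyr);                           /* release the images */
}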
ENV_INTG32_TYPE intg32
32-bit signed integer
env_maxnorm_type
Types of normalization.
@ ENV_VCXNORM_MAXNORM
non-iterative maxnorm (sketched below)
@ ENV_VCXNORM_NONE
no max-normalization, but may change range
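The ENV_VCXNORM_MAXNORM path can be reconstructed, in outline, from the excerpt around lines 689-742: normalize the map into a standard range, threshold at mi + (ma - mi)/10, average the local maxima that exceed the threshold (4-neighbor test), and scale the whole map by (ma - lm_mean)^2 / ma. The sketch below follows that outline on a raw buffer; the border treatment and the final scaling step are assumptions, since they are not shown in the excerpt.

#include <stddef.h>
typedef int intg32;        /* stand-in for the library's 32-bit type */
typedef size_t env_size_t; /* stand-in for env_size_t                */

/* Non-iterative maxnorm over a w x h map already scaled into [mi..ma]. */
static void maxnorm_sketch(intg32* p, const env_size_t w, const env_size_t h,
                           const intg32 mi, const intg32 ma)
{
        if (ma <= 0) return;                          /* avoid division by zero */
        const intg32 thresh = mi + (ma - mi) / 10;

        /* Mean of interior local maxima above the threshold. */
        long long lm_sum = 0, numlm = 0;
        for (env_size_t j = 1; j + 1 < h; ++j)
                for (env_size_t i = 1; i + 1 < w; ++i)
                {
                        const env_size_t idx = j * w + i;
                        const intg32 val = p[idx];
                        if (val >= thresh &&
                            val >= p[idx - w] && val >= p[idx + w] &&
                            val >= p[idx - 1] && val >= p[idx + 1])
                        { lm_sum += val; ++numlm; }
                }
        const intg32 lm_mean = numlm ? (intg32)(lm_sum / numlm) : 0;

        /* Promote maps with few strong peaks, suppress maps with many;
           the library's exact final scaling is not shown in the excerpt. */
        const intg32 factor = ((ma - lm_mean) * (ma - lm_mean)) / ma;
        for (env_size_t i = 0; i < w * h; ++i)
                p[i] *= factor;
}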
struct env_dims
A simple struct to hold a pair of width/height dimensions.
struct env_pyr
This struct implements a set of images, often used as a dyadic pyramid.
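The code excerpts above read dims.w and dims.h directly, so env_dims presumably amounts to a plain width/height pair; the exact field types are an assumption here.

/* Assumed shape of env_dims, inferred from the dims.w / dims.h accesses above. */
struct env_dims
{
        env_size_t w;   /* width in pixels  */
        env_size_t h;   /* height in pixels */
};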