JeVoisBase  1.3
JeVois Smart Embedded Machine Vision Toolkit Base Modules
Share this page:
env_image_ops.c
Go to the documentation of this file.
1 /*!@file Envision/env_image_ops.c Fixed-point integer math versions of some of our floating-point image functions */
2 
3 // //////////////////////////////////////////////////////////////////// //
4 // The iLab Neuromorphic Vision C++ Toolkit - Copyright (C) 2000-2005 //
5 // by the University of Southern California (USC) and the iLab at USC. //
6 // See http://iLab.usc.edu for information about this project. //
7 // //////////////////////////////////////////////////////////////////// //
8 // Major portions of the iLab Neuromorphic Vision Toolkit are protected //
9 // under the U.S. patent ``Computation of Intrinsic Perceptual Saliency //
10 // in Visual Environments, and Applications'' by Christof Koch and //
11 // Laurent Itti, California Institute of Technology, 2001 (patent //
12 // pending; application number 09/912,225 filed July 23, 2001; see //
13 // http://pair.uspto.gov/cgi-bin/final/home.pl for current status). //
14 // //////////////////////////////////////////////////////////////////// //
15 // This file is part of the iLab Neuromorphic Vision C++ Toolkit. //
16 // //
17 // The iLab Neuromorphic Vision C++ Toolkit is free software; you can //
18 // redistribute it and/or modify it under the terms of the GNU General //
19 // Public License as published by the Free Software Foundation; either //
20 // version 2 of the License, or (at your option) any later version. //
21 // //
22 // The iLab Neuromorphic Vision C++ Toolkit is distributed in the hope //
23 // that it will be useful, but WITHOUT ANY WARRANTY; without even the //
24 // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR //
25 // PURPOSE. See the GNU General Public License for more details. //
26 // //
27 // You should have received a copy of the GNU General Public License //
28 // along with the iLab Neuromorphic Vision C++ Toolkit; if not, write //
29 // to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, //
30 // Boston, MA 02111-1307 USA. //
31 // //////////////////////////////////////////////////////////////////// //
32 //
33 // Primary maintainer for this file: Rob Peters <rjpeters at usc dot edu>
34 // $HeadURL: svn://isvn.usc.edu/software/invt/trunk/saliency/src/Envision/env_image_ops.c $
35 // $Id: env_image_ops.c 11874 2009-10-21 01:03:31Z dparks $
36 //
37 
39 
42 
43 
44 // ######################################################################
45 void env_dec_xy(const struct env_image* src, struct env_image* result)
46 {
47  // do not go smaller than 1x1:
48  if (src->dims.w <= 1 && src->dims.h <= 1)
49  {
50  env_img_copy_src_dst(src, result);
51  return;
52  }
53 
54  if (src->dims.w == 1) // only thinout vertic
55  {
56  env_dec_y(src, result);
57  return;
58  }
59 
60  if (src->dims.h == 1)
61  {
62  env_dec_x(src, result);
63  return;
64  }
65 
66  const struct env_dims dims2 = { src->dims.w / 2, src->dims.h / 2 };
67 
68  env_img_resize_dims(result, dims2);
69 
70  const intg32* sptr = env_img_pixels(src);
71  intg32* dptr = env_img_pixelsw(result);
72  const env_size_t skip = src->dims.w % 2 + src->dims.w;
73 
74  for (env_size_t j = 0; j < dims2.h; ++j)
75  {
76  for (env_size_t i = 0; i < dims2.w; ++i)
77  {
78  *dptr++ = *sptr; // copy one pixel
79  sptr += 2; // skip some pixels
80  }
81  sptr += skip; // skip to start of next line
82  }
83 }
84 
85 // ######################################################################
86 void env_dec_x(const struct env_image* src, struct env_image* result)
87 {
88  if (src->dims.w <= 1) // do not go smaller than 1 pixel wide
89  {
90  env_img_copy_src_dst(src, result);
91  return;
92  }
93 
94  const struct env_dims dims2 = { src->dims.w / 2, src->dims.h };
95  const env_size_t skip = src->dims.w % 2;
96  ENV_ASSERT(dims2.w > 0);
97 
98  env_img_resize_dims(result, dims2);
99 
100  const intg32* sptr = env_img_pixels(src);
101  intg32* dptr = env_img_pixelsw(result);
102 
103  for (env_size_t j = 0; j < dims2.h; ++j)
104  {
105  for (env_size_t i = 0; i < dims2.w; ++i)
106  {
107  *dptr++ = *sptr; // copy one point
108  sptr += 2; // skip a few points
109  }
110  sptr += skip;
111  }
112 }
113 
114 // ######################################################################
115 void env_dec_y(const struct env_image* src, struct env_image* result)
116 {
117  if (src->dims.h <= 1) // do not go smaller than 1 pixel high
118  {
119  env_img_copy_src_dst(src, result);
120  return;
121  }
122 
123  const struct env_dims dims2 = { src->dims.w, src->dims.h / 2 };
124  ENV_ASSERT(dims2.h > 0);
125 
126  env_img_resize_dims(result, dims2);
127 
128  const intg32* sptr = env_img_pixels(src);
129  intg32* dptr = env_img_pixelsw(result);
130  const env_size_t skip = dims2.w * 2;
131 
132  for (env_size_t j = 0; j < dims2.h; ++j)
133  {
134  for (env_size_t i = 0; i < dims2.w; ++i) dptr[i] = sptr[i];
135 
136  dptr += dims2.w;
137  sptr += skip;
138  }
139 }
140 
141 // ######################################################################
142 // Anderson's separable kernel: 1/16 * [1 4 6 4 1]
/*! Smooth 'src' horizontally with the 5-tap binomial kernel [1 4 6 4 1]/16
    and simultaneously decimate the width by two (both steps fused in the C
    helper). Images narrower than 2 pixels are copied through unchanged. */
void env_lowpass_5_x_dec_x(const struct env_image* src, const struct env_math* imath, struct env_image* result)
{
  const env_size_t w = src->dims.w;
  const env_size_t h = src->dims.h;

  if (w < 2) // nothing to smooth
  {
    env_img_copy_src_dst(src, result);
    return;
  }

  const struct env_dims dims2 = { w / 2, h }; // output is half as wide
  ENV_ASSERT(dims2.w > 0);

  env_img_resize_dims(result, dims2);

  // These bounds feed only the overflow assertion below, so they are
  // compiled out together with ENV_ASSERT when ENV_NO_DEBUG is defined:
#ifndef ENV_NO_DEBUG
  const env_size_t filterbits = 4; // log2(16)
  const env_size_t accumbits = 3; // ceil(log2(5))
#endif

  // worst-case accumulator must still fit in a signed 32-bit int:
  ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

  env_c_lowpass_5_x_dec_x_fewbits_optim(env_img_pixels(src), w, h, env_img_pixelsw(result), dims2.w);
}
168 
169 // ######################################################################
170 // Anderson's separable kernel: 1/16 * [1 4 6 4 1]
/*! Smooth 'src' vertically with the 5-tap binomial kernel [1 4 6 4 1]/16
    and simultaneously decimate the height by two (both steps fused in the
    C helper). Images shorter than 2 pixels are copied through unchanged. */
void env_lowpass_5_y_dec_y(const struct env_image* src, const struct env_math* imath, struct env_image* result)
{
  const env_size_t w = src->dims.w;
  const env_size_t h = src->dims.h;

  if (h < 2) // nothing to smooth
  {
    env_img_copy_src_dst(src, result);
    return;
  }

  const struct env_dims dims2 = { w, h / 2 }; // output is half as tall
  ENV_ASSERT(dims2.h > 0);

  env_img_resize_dims(result, dims2);

  // These bounds feed only the overflow assertion below, so they are
  // compiled out together with ENV_ASSERT when ENV_NO_DEBUG is defined:
#ifndef ENV_NO_DEBUG
  const env_size_t filterbits = 4; // log2(16)
  const env_size_t accumbits = 3; // ceil(log2(5))
#endif

  // worst-case accumulator must still fit in a signed 32-bit int:
  ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

  env_c_lowpass_5_y_dec_y_fewbits_optim(env_img_pixels(src), w, h, env_img_pixelsw(result), dims2.h);
}
196 
197 // ######################################################################
/*! Horizontally convolve 'source' with the 9-tap binomial kernel
    [1 8 28 56 70 56 28 8 1]/256 into 'result' (same dims as 'source',
    already allocated). Near the left/right borders the kernel is
    truncated and renormalized by the sum of the taps actually used. */
void env_lowpass_9_x(const struct env_image* source, const struct env_math* imath, struct env_image* result)
{
  ENV_ASSERT(env_dims_equal(result->dims, source->dims));

  // Only used by the overflow assertion; compiled out with ENV_NO_DEBUG:
#ifndef ENV_NO_DEBUG
  const env_size_t filterbits = 8; // log2(256)
  const env_size_t accumbits = 4; // ceil(log2(9))
#endif

  // worst-case accumulator must still fit in a signed 32-bit int:
  ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

  const env_size_t w = source->dims.w;
  const env_size_t h = source->dims.h;

  if (w < 2) // nothing to smooth
  {
    env_img_copy_src_dst(source, result);
    return;
  }

  if (w < 9) // use inefficient implementation for small images
  {
    const intg32 hf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
    const env_size_t hfs = 9;

    const intg32* src = env_img_pixels(source);
    intg32* dst = env_img_pixelsw(result);

    ENV_ASSERT(hfs & 1); // filter size must be odd
    const env_size_t hfs2 = (hfs - 1) / 2;

    for (env_size_t j = 0; j < h; ++j)
      for (env_size_t i = 0; i < w; ++i)
      {
        intg32 sum = 0; // sum of taps actually applied (for renormalization)
        intg32 val = 0; // weighted pixel accumulator
        for (env_size_t k = 0; k < hfs; ++k)
        {
          // skip taps that would read outside the current row:
          if (i + k < hfs2 || i + k >= w + hfs2) continue;

          // convert to signed integers to avoid wraparound when k<hfs2
          val += src[(env_ssize_t) k - (env_ssize_t) hfs2] * hf_flipped[k];
          sum += hf_flipped[k];
        }

        *dst++ = val / sum; // renormalize by the truncated kernel's weight
        ++src;
      }
    return;
  }

  env_c_lowpass_9_x_fewbits_optim(env_img_pixels(source), w, h, env_img_pixelsw(result));
}
251 
252 // ######################################################################
/*! Vertically convolve 'source' with the 9-tap binomial kernel
    [1 8 28 56 70 56 28 8 1]/256 into 'result' (same dims as 'source',
    already allocated). Near the top/bottom borders the kernel is
    truncated and renormalized. Requires h >= 2 (asserted); the caller
    must handle shorter images itself. */
void env_lowpass_9_y(const struct env_image* source, const struct env_math* imath, struct env_image* result)
{
  ENV_ASSERT(env_dims_equal(result->dims, source->dims));

  // Only used by the overflow assertion; compiled out with ENV_NO_DEBUG:
#ifndef ENV_NO_DEBUG
  const env_size_t filterbits = 8; // log2(256)
  const env_size_t accumbits = 4; // ceil(log2(9))
#endif

  // worst-case accumulator must still fit in a signed 32-bit int:
  ENV_ASSERT((imath->nbits + filterbits + accumbits + 1) < (8*sizeof(intg32)));

  const env_size_t w = source->dims.w;
  const env_size_t h = source->dims.h;

  // if the height is less than 2, then the caller should handle that condition differently since no smoothing need be
  // done (so the caller could either copy or swap the source into the result location)
  ENV_ASSERT(h >= 2);

  if (h < 9) // use inefficient implementation for small images
  {
    const intg32 vf_flipped[9] = { 1, 8, 28, 56, 70, 56, 28, 8, 1 };
    const env_size_t vfs = 9;

    const intg32* src = env_img_pixels(source);
    intg32* dst = env_img_pixelsw(result);

    ENV_ASSERT(vfs & 1); // filter size must be odd
    const env_size_t vfs2 = (vfs - 1) / 2;

    for (env_size_t j = 0; j < h; ++j)
      for (env_size_t i = 0; i < w; ++i)
      {
        intg32 sum = 0; // sum of taps actually applied (for renormalization)
        intg32 val = 0; // weighted pixel accumulator
        for (env_size_t k = 0; k < vfs; ++k)
        {
          // skip taps that would read outside the current column:
          if (j + k < vfs2 || j + k >= h + vfs2) continue;

          // convert to signed integers to avoid wraparound when k<vfs2
          val += src[w * ((env_ssize_t) k - (env_ssize_t) vfs2)] * vf_flipped[k];
          sum += vf_flipped[k];
        }

        *dst++ = val / sum; // renormalize by the truncated kernel's weight
        ++src;
      }

    return;
  }

  env_c_lowpass_9_y_fewbits_optim(env_img_pixels(source), w, h, env_img_pixelsw(result));
}
305 
306 // ######################################################################
307 void env_lowpass_9(const struct env_image* src, const struct env_math* imath, struct env_image* result)
308 {
309  ENV_ASSERT(env_dims_equal(result->dims, src->dims));
310 
311  struct env_image tmp1;
312  env_img_init(&tmp1, src->dims);
313  env_lowpass_9_x(src, imath, &tmp1);
314  if (tmp1.dims.h >= 2) env_lowpass_9_y(&tmp1, imath, result);
315  else env_img_swap(&tmp1, result);
316  env_img_make_empty(&tmp1);
317 }
318 
319 // ######################################################################
320 void env_quad_energy(const struct env_image* img1, const struct env_image* img2, struct env_image* result)
321 {
322  ENV_ASSERT(env_dims_equal(img1->dims, img2->dims));
323  ENV_ASSERT(env_dims_equal(img1->dims, result->dims));
324 
325  const intg32* s1ptr = env_img_pixels(img1);
326  const intg32* s2ptr = env_img_pixels(img2);
327  intg32* dptr = env_img_pixelsw(result);
328 
329  const env_size_t sz = env_img_size(img1);
330 
331  for (env_size_t i = 0; i < sz; ++i)
332  {
333  const intg32 s1 = ENV_ABS(s1ptr[i]);
334  const intg32 s2 = ENV_ABS(s2ptr[i]);
335 
336  /* "A Fast Approximation to the Hypotenuse" by Alan Paeth, from "Graphics Gems", Academic Press, 1990
337 
338  http://www.acm.org/pubs/tog/GraphicsGems/gems/HypotApprox.c
339 
340  gives approximate value of sqrt(s1*s1+s2*s2) with only overestimations, and then never by more than (9/8) + one
341  bit uncertainty */
342  dptr[i] = (s1 > s2) ? (s1 + (s2 >> 1)) : ((s1 >> 1) + s2);
343  }
344 }
345 
346 // ######################################################################
/*! Oriented bandpass filtering: modulate 'src' by a complex grating of
    spatial frequency (kxnumer, kynumer)/2^kdenombits, lowpass the real
    and imaginary products, and write their quadrature energy into
    'result' (already allocated with the dims of 'src'). */
void env_steerable_filter(const struct env_image* src, const intg32 kxnumer, const intg32 kynumer,
                          const env_size_t kdenombits, const struct env_math* imath, struct env_image* result)
{
  ENV_ASSERT(env_dims_equal(result->dims, src->dims));

  // real and imaginary parts of the grating-modulated image:
  struct env_image re; env_img_init(&re, src->dims);
  struct env_image im; env_img_init(&im, src->dims);
  const intg32* sptr = env_img_pixels(src);
  intg32* reptr = env_img_pixelsw(&re);
  intg32* imptr = env_img_pixelsw(&im);

  // (x,y) = (0,0) at center of image:
  const env_ssize_t w2l = ((env_ssize_t) src->dims.w) / 2; // left half-width
  const env_ssize_t w2r = ((env_ssize_t) src->dims.w) - w2l; // right half-width
  const env_ssize_t h2l = ((env_ssize_t) src->dims.h) / 2; // top half-height
  const env_ssize_t h2r = ((env_ssize_t) src->dims.h) - h2l; // bottom half-height

  // let's do a conservative check to make sure that we won't overflow when we compute "arg" later on -- as a very rough
  // estimate, kxnumer and kynumer are on the order of 2^16 (8 bits from kdenombits=8, 8 bits from ENV_TRIG_TABSIZ=256),
  // which gives room for w+h to be up to about 2^15
  ENV_ASSERT((INTG32_MAX / (ENV_ABS(kxnumer) + ENV_ABS(kynumer))) > (w2r + h2r));

  ENV_ASSERT((2 * ENV_TRIG_NBITS + 1) < 8*sizeof(intg32));

  // mdcutoff only feeds the per-pixel overflow assertion in the loop:
#ifndef ENV_NO_DEBUG
  const intg32 mdcutoff = INTG32_MAX >> (ENV_TRIG_NBITS+1);
#endif

  for (env_ssize_t j = -h2l; j < h2r; ++j)
    for (env_ssize_t i = -w2l; i < w2r; ++i)
    {
      // phase of the grating at (i,j), in trig-table units:
      const intg32 arg = (i * kxnumer + j * kynumer) >> kdenombits;

      // wrap the phase into [0, ENV_TRIG_TABSIZ):
      env_ssize_t idx = arg % ENV_TRIG_TABSIZ;
      if (idx < 0) idx += ENV_TRIG_TABSIZ;

      const intg32 sval = *sptr++;

      ENV_ASSERT(ENV_ABS(sval) < mdcutoff);

      // modulate by the cos/sin lookup tables, scaling back down:
      *reptr++ = (sval * imath->costab[idx]) >> (ENV_TRIG_NBITS+1);
      *imptr++ = (sval * imath->sintab[idx]) >> (ENV_TRIG_NBITS+1);
    }

  // lowpass each component, using 'result' as scratch and swapping the
  // smoothed pixels back into re/im:
  env_lowpass_9(&re, imath, result);
  env_img_swap(&re, result);

  env_lowpass_9(&im, imath, result);
  env_img_swap(&im, result);

  // final output is the quadrature energy of the two components:
  env_quad_energy(&re, &im, result);

  env_img_make_empty(&re);
  env_img_make_empty(&im);
}
402 
403 // ######################################################################
405 {
406  ENV_ASSERT(env_img_initialized(a));
407 
408  struct env_dims dims = a->dims;
409 
410  if (size * 2 > dims.w) size = dims.w / 2;
411  if (size * 2 > dims.h) size = dims.h / 2;
412  if (size < 1) return; // forget it
413 
414  const intg32 size_plus_1 = (intg32) (size+1);
415 
416  // top lines:
417  intg32 coeff = 1;
418  intg32* aptr = env_img_pixelsw(a);
419  for (env_size_t y = 0; y < size; y ++)
420  {
421  for (env_size_t x = 0; x < dims.w; x ++)
422  {
423  *aptr = (*aptr / size_plus_1) * coeff;
424  ++aptr;
425  }
426  ++coeff;
427  }
428  // normal lines: start again from beginning to attenuate corners twice:
429  aptr = env_img_pixelsw(a);
430  for (env_size_t y = 0; y < dims.h; y ++)
431  {
432  coeff = 1;
433  for (env_size_t x = 0; x < size; x ++)
434  {
435  *(aptr + dims.w - 1 - x * 2) = (*(aptr + dims.w - 1 - x * 2) / size_plus_1) * coeff;
436 
437  *aptr = (*aptr / size_plus_1) * coeff;
438  ++aptr;
439  ++coeff;
440  }
441  aptr += dims.w - size;
442  }
443  // bottom lines
444  aptr = env_img_pixelsw(a) + (dims.h - size) * dims.w;
445  coeff = size;
446  for (env_size_t y = dims.h - size; y < dims.h; y ++)
447  {
448  for (env_size_t x = 0; x < dims.w; ++x)
449  {
450  *aptr = (*aptr / size_plus_1) * coeff;
451  ++aptr;
452  }
453  --coeff;
454  }
455 }
456 
457 // ######################################################################
/*! Build a highpass (Laplacian-style) pyramid of 'image' into 'result':
    each level holds image - lowpass9(image) at successively decimated
    resolutions. Levels below 'firstlevel' are left untouched (not
    filled in). The depth of 'result' must already be set by the caller. */
void env_pyr_build_hipass_9(const struct env_image* image, env_size_t firstlevel, const struct env_math* imath,
                            struct env_pyr* result)
{
  ENV_ASSERT(env_img_initialized(image));

  // compute hipass as image - lowpass(image)

  const env_size_t depth = env_pyr_depth(result);

  if (depth == 0) return;

  struct env_image lpfima = env_img_initializer;

  // special case for the zero'th pyramid level so that we don't have to make an extra copy of the input image at its
  // largest resolution
  env_img_resize_dims(&lpfima, image->dims);
  env_lowpass_9(image, imath, &lpfima);

  if (0 == firstlevel)
  {
    env_img_resize_dims(env_pyr_imgw(result, 0), image->dims);
    env_c_image_minus_image(env_img_pixels(image), env_img_pixels(&lpfima), env_img_size(image),
                            env_img_pixelsw(env_pyr_imgw(result, 0)));
  }

  // now do the rest of the pyramid levels starting from level 1:
  for (env_size_t lev = 1; lev < depth; ++lev)
  {
    // decimate the previous level's lowpass image, then lowpass again:
    struct env_image dec = env_img_initializer;
    env_dec_xy(&lpfima, &dec);
    env_img_resize_dims(&lpfima, dec.dims);
    env_lowpass_9(&dec, imath, &lpfima);

    if (lev >= firstlevel)
    {
      // store dec - lowpass(dec) at this level:
      env_img_resize_dims(env_pyr_imgw(result, lev), dec.dims);
      env_c_image_minus_image(env_img_pixels(&dec), env_img_pixels(&lpfima), env_img_size(&dec),
                              env_img_pixelsw(env_pyr_imgw(result, lev)));
    }

    env_img_make_empty(&dec);
  }

  env_img_make_empty(&lpfima);
}
503 
504 // ######################################################################
505 void env_pyr_build_steerable_from_hipass_9(const struct env_pyr* hipass, const intg32 kxnumer, const intg32 kynumer,
506  const env_size_t kdenombits, const struct env_math* imath,
507  struct env_pyr* out)
508 {
509  const env_size_t attenuation_width = 5;
510  const env_size_t depth = env_pyr_depth(hipass);
511 
512  struct env_pyr result;
513  env_pyr_init(&result, depth);
514 
515  for (env_size_t lev = 0; lev < depth; ++lev)
516  {
517  // if the hipass is empty at a given level, then just leave the output empty at that level, too
518  if (!env_img_initialized(env_pyr_img(hipass, lev))) continue;
519 
520  env_img_resize_dims(env_pyr_imgw(&result, lev), env_pyr_img(hipass, lev)->dims);
521 
522  env_steerable_filter(env_pyr_img(hipass, lev), kxnumer, kynumer, kdenombits, imath, env_pyr_imgw(&result, lev));
523 
524  // attenuate borders that are overestimated due to filter trunctation:
525  env_attenuate_borders_inplace(env_pyr_imgw(&result, lev), attenuation_width);
526  }
527 
528  env_pyr_swap(out, &result);
529  env_pyr_make_empty(&result);
530 }
531 
532 
533 // ######################################################################
/*! Build a lowpass (Gaussian-style) pyramid of 'image' into 'result'
    using the 5-tap kernel, halving both dimensions at each level.
    Levels below 'firstlevel' are emptied after use to save memory.
    The depth of 'result' must already be set and be > 0. */
void env_pyr_build_lowpass_5(const struct env_image* image, env_size_t firstlevel, const struct env_math* imath,
                             struct env_pyr* result)
{
  ENV_ASSERT(env_img_initialized(image));
  ENV_ASSERT(env_pyr_depth(result) > 0);

  // level 0 is just a copy of the input, kept only if the caller wants it:
  if (firstlevel == 0)
    env_img_copy_src_dst(image, env_pyr_imgw(result, 0));

  const env_size_t depth = env_pyr_depth(result);

  for (env_size_t lev = 1; lev < depth; ++lev)
  {
    struct env_image tmp1 = env_img_initializer;

    // level 1 is built straight from the input image, since level 0 may
    // not have been stored in the pyramid at all:
    const struct env_image* prev = lev == 1 ? image : env_pyr_img(result, lev-1);

    // separable lowpass+decimate: x pass into tmp1, then y pass into the level:
    env_lowpass_5_x_dec_x(prev, imath, &tmp1);
    env_lowpass_5_y_dec_y(&tmp1, imath, env_pyr_imgw(result, lev));

    // the previous level has now served its purpose; drop it if unwanted:
    if ((lev - 1) < firstlevel) env_img_make_empty(env_pyr_imgw(result, lev-1));

    env_img_make_empty(&tmp1);
  }
}
559 
560 // ######################################################################
561 void env_downsize_9_inplace(struct env_image* src, const env_size_t depth,
562  const struct env_math* imath)
563 {
564  for (env_size_t i = 0; i < depth; ++i)
565  {
566  {
567  struct env_image tmp1;
568  env_img_init(&tmp1, src->dims);
569  env_lowpass_9_x(src, imath, &tmp1);
570  env_dec_x(&tmp1, src);
571  env_img_make_empty(&tmp1);
572  }
573  {
574  struct env_image tmp2;
575  env_img_init(&tmp2, src->dims);
576  if (src->dims.h >= 2) env_lowpass_9_y(src, imath, &tmp2);
577  else env_img_swap(src, &tmp2);
578  env_dec_y(&tmp2, src);
579  env_img_make_empty(&tmp2);
580  }
581  }
582 }
583 
584 // ######################################################################
/*! Resize 'src' to the dims of 'result' (which must be initialized with
    the desired output size) using integer bilinear interpolation. If the
    sizes already match, the pixels are simply copied. */
void env_rescale(const struct env_image* src, struct env_image* result)
{
  const env_ssize_t new_w = (env_ssize_t) result->dims.w;
  const env_ssize_t new_h = (env_ssize_t) result->dims.h;

  ENV_ASSERT(env_img_initialized(src));
  ENV_ASSERT(new_w > 0 && new_h > 0);

  const env_ssize_t orig_w = (env_ssize_t) src->dims.w;
  const env_ssize_t orig_h = (env_ssize_t) src->dims.h;

  // check if same size already
  if (new_w == orig_w && new_h == orig_h)
  {
    env_img_copy_src_dst(src, result);
    return;
  }

  intg32* dptr = env_img_pixelsw(result);
  const intg32* const sptr = env_img_pixels(src);

  // code inspired from one of the Graphics Gems book:
  /*
    (1) (x,y) are the original coords corresponding to scaled coords (i,j)
    (2) (x0,y0) are the greatest lower bound integral coords from (x,y)
    (3) (x1,y1) are the least upper bound integral coords from (x,y)
    (4) d00, d10, d01, d11 are the values of the original image at the corners
        of the rect (x0,y0),(x1,y1)
    (5) the value in the scaled image is computed from bilinear interpolation
        among d00,d10,d01,d11
  */
  for (env_ssize_t j = 0; j < new_h; ++j)
  {
    // source y coordinate of output row j, kept as an exact fraction
    // y_numer/y_denom (clamped at 0 near the top edge):
    const env_ssize_t y_numer = ENV_MAX(((env_ssize_t) 0), j*2*orig_h+orig_h-new_h);
    const env_ssize_t y_denom = 2*new_h;

    const env_ssize_t y0 = y_numer / y_denom; // floor of the source y
    const env_ssize_t y1 = ENV_MIN(y0 + 1, orig_h - 1); // next row, clamped at bottom

    // fractional part of the source y, as fy_numer/fy_denom:
    const env_ssize_t fy_numer = y_numer - y0 * y_denom;
    const env_ssize_t fy_denom = y_denom;
    ENV_ASSERT(fy_numer == (y_numer % y_denom));

    // row offsets of the two source rows involved:
    const env_ssize_t wy0 = orig_w * y0;
    const env_ssize_t wy1 = orig_w * y1;

    for (env_ssize_t i = 0; i < new_w; ++i)
    {
      // same exact-fraction construction for the source x coordinate:
      const env_ssize_t x_numer = ENV_MAX(((env_ssize_t) 0), i*2*orig_w+orig_w-new_w);
      const env_ssize_t x_denom = 2*new_w;

      const env_ssize_t x0 = x_numer / x_denom;
      const env_ssize_t x1 = ENV_MIN(x0 + 1, orig_w - 1);

      const env_ssize_t fx_numer = x_numer - x0 * x_denom;
      const env_ssize_t fx_denom = x_denom;
      ENV_ASSERT(fx_numer == (x_numer % x_denom));

      // the four corner samples:
      const intg32 d00 = sptr[x0 + wy0];
      const intg32 d10 = sptr[x1 + wy0];

      const intg32 d01 = sptr[x0 + wy1];
      const intg32 d11 = sptr[x1 + wy1];

      // interpolate horizontally along each of the two rows:
      const intg32 dx0 = d00 + ((d10 - d00) / fx_denom) * fx_numer;
      const intg32 dx1 = d01 + ((d11 - d01) / fx_denom) * fx_numer;

      // no need to clamp
      *dptr++ = dx0 + ((dx1 - dx0) / fy_denom) * fy_numer;
    }
  }
}
657 
658 // ######################################################################
659 void env_max_normalize_inplace(struct env_image* src, const intg32 mi, const intg32 ma,
660  const enum env_maxnorm_type normtyp, const intg32 rangeThresh)
661 {
662  // do normalization depending on desired type:
663  switch(normtyp)
664  {
665  case ENV_VCXNORM_NONE: env_max_normalize_none_inplace(src, mi, ma, rangeThresh); break;
666  case ENV_VCXNORM_MAXNORM: env_max_normalize_std_inplace(src, mi, ma, rangeThresh); break;
667  default: ENV_ASSERT2(0, "Invalid normalization type");
668  }
669 }
670 
671 // ######################################################################
672 void env_max_normalize_none_inplace(struct env_image* src, const intg32 nmi, const intg32 nma, const intg32 rangeThresh)
673 {
674  // first clamp negative values to zero
675  env_c_inplace_rectify(env_img_pixelsw(src), env_img_size(src));
676 
677  // then, normalize between mi and ma if not zero
678  intg32 mi = nmi;
679  intg32 ma = nma;
680  if (mi != 0 || ma != 0)
681  env_c_inplace_normalize(env_img_pixelsw(src), env_img_size(src), nmi, nma, &mi, &ma, rangeThresh);
682 }
683 
684 // ######################################################################
686  const intg32 nmi, const intg32 nma,
687  const intg32 rangeThresh)
688 {
689  if (!env_img_initialized(src)) return;
690 
691  // first clamp negative values to zero
692  env_c_inplace_rectify(env_img_pixelsw(src), env_img_size(src));
693 
694  // then, normalize between mi and ma if not zero
695  intg32 mi = nmi;
696  intg32 ma = nma;
697  if (nmi != 0 || nma != 0)
698  env_c_inplace_normalize(env_img_pixelsw(src), env_img_size(src), nmi, nma, &mi, &ma, rangeThresh);
699 
700  const env_size_t w = src->dims.w;
701  const env_size_t h = src->dims.h;
702 
703  // normalize between mi and ma and multiply by (max - mean)^2
704 
705  // we want to detect quickly local maxes, but avoid getting local mins
706  const intg32 thresh = mi + (ma - mi) / 10;
707 
708  // then get the mean value of the local maxima:
709  const intg32* const dptr = env_img_pixels(src);
710  intg32 lm_mean = 0;
711  env_size_t numlm = 0;
712  for (env_size_t j = 1; j+1 < h; ++j)
713  for (env_size_t i = 1; i+1 < w; ++i)
714  {
715  const env_size_t index = i + w * j;
716  const intg32 val = dptr[index];
717  if (val >= thresh &&
718  val >= dptr[index - w] &&
719  val >= dptr[index + w] &&
720  val >= dptr[index - 1] &&
721  val >= dptr[index + 1]) // local max
722  {
723  ++numlm;
724  ENV_ASSERT2(INTG32_MAX - val >= lm_mean,
725  "integer overflow");
726  lm_mean += val;
727  }
728  }
729 
730  if (numlm > 0) lm_mean /= numlm;
731 
732  ENV_ASSERT(ma >= lm_mean);
733 
734  intg32 factor = 1;
735 
736  // scale factor is (max - mean_local_max)^2:
737  if (numlm > 1)
738  {
739  // make sure that (ma - lm_mean)^2 won't overflow:
740  ENV_ASSERT((ma == lm_mean) || ((INTG32_MAX / (ma - lm_mean)) > (ma - lm_mean)));
741 
742  factor = ((ma - lm_mean) * (ma - lm_mean)) / ma;
743  }
744  else if (numlm == 1) // a single narrow peak
745  {
746  factor = ma;
747  }
748  else
749  {
750  /* LERROR("No local maxes found !!"); */
751  }
752 
753  if (factor != 1)
754  {
755  intg32* const itr = env_img_pixelsw(src);
756  const env_size_t sz = env_img_size(src);
757  for (env_size_t i = 0; i < sz; ++i)
758  itr[i] *= factor;
759  }
760 }
761 
762 // ######################################################################
/*! Across-scale difference: subtract the coarse 'surround' image from the
    fine 'center' image, replicating each surround pixel over the block of
    center pixels it covers. If 'absol' is non-zero the absolute difference
    is stored; otherwise differences are clamped at zero. The borders of
    'result' are attenuated afterwards. 'result' must have center's dims. */
void env_center_surround(const struct env_image* center, const struct env_image* surround,
                         const int absol, struct env_image* result)
{
  // result has the size of the larger image:
  ENV_ASSERT(env_dims_equal(result->dims, center->dims));

  const env_size_t lw = center->dims.w, lh = center->dims.h;
  const env_size_t sw = surround->dims.w, sh = surround->dims.h;

  ENV_ASSERT2(lw >= sw && lh >= sh, "center must be larger than surround");

  // integral zoom factors; remx/remy are the column/row at which the
  // surround pointer is NOT advanced, presumably so it cannot run past the
  // last surround pixel when the ratio is not integral (see the "not
  // round" fixups below) -- NOTE(review): confirm exact edge intent.
  const env_size_t scalex = lw / sw, remx = lw - 1 - (lw % sw);
  const env_size_t scaley = lh / sh, remy = lh - 1 - (lh % sh);

  // scan large image and subtract corresponding pixel from small image:
  env_size_t ci = 0, cj = 0; // column/row phase counters within one surround pixel
  const intg32* lptr = env_img_pixels(center);
  const intg32* sptr = env_img_pixels(surround);
  intg32* dptr = env_img_pixelsw(result);

  if (absol) // compute abs(hires - lowres):
  {
    for (env_size_t j = 0; j < lh; ++j)
    {
      for (env_size_t i = 0; i < lw; ++i)
      {
        if (*lptr > *sptr) *dptr++ = (*lptr++ - *sptr);
        else *dptr++ = (*sptr - *lptr++);

        // advance the surround pointer once every scalex center pixels:
        if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
      }
      if (ci) { ci = 0; ++sptr; } // in case the reduction is not round
      // rewind to re-read the same surround row until scaley center rows are done:
      if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
    }
  }
  else // compute hires - lowres, clamped to 0:
  {
    for (env_size_t j = 0; j < lh; ++j)
    {
      for (env_size_t i = 0; i < lw; ++i)
      {
        if (*lptr > *sptr) *dptr++ = (*lptr++ - *sptr);
        else { *dptr++ = 0; lptr++; }

        // advance the surround pointer once every scalex center pixels:
        if ((++ci) == scalex && i != remx) { ci = 0; ++sptr; }
      }
      if (ci) { ci = 0; ++sptr; } // in case the reduction is not round
      // rewind to re-read the same surround row until scaley center rows are done:
      if ((++cj) == scaley && j != remy) cj = 0; else sptr -= sw;
    }
  }

  // attenuate borders:
  env_attenuate_borders_inplace(result, ENV_MAX(result->dims.w, result->dims.h) / 20);
}
817 
818 // ######################################################################
819 void env_get_rgby(const struct env_rgb_pixel* const src, const env_size_t sz, struct env_image* rg,
820  struct env_image* by, const intg32 thresh, const env_size_t inputbits)
821 {
822  // red = [r - (g+b)/2] [.] = clamp between 0 and 255
823  // green = [g - (r+b)/2]
824  // blue = [b - (r+g)/2]
825  // yellow = [2*((r+g)/2 - |r-g| - b)]
826 
827  ENV_ASSERT(env_img_size(rg) == sz);
828  ENV_ASSERT(env_img_size(by) == sz);
829 
830  intg32* rgptr = env_img_pixelsw(rg);
831  intg32* byptr = env_img_pixelsw(by);
832 
833  const env_ssize_t lshift = ((env_ssize_t)inputbits) - 3;
834 
835  for (env_size_t i = 0; i < sz; ++i)
836  {
837  intg32 r, g, b;
838  r = (intg32) src[i].p[0];
839  g = (intg32) src[i].p[1];
840  b = (intg32) src[i].p[2];
841 
842  // first do the luminanceNormalization:
843  const intg32 lum = r + g + b;
844 
845  if (lum < thresh) // too dark... no response from anybody
846  {
847  rgptr[i] = byptr[i] = 0;
848  }
849  else
850  {
851  // now compute color opponencies:
852  intg32 red = (2*r - g - b);
853  intg32 green = (2*g - r - b);
854  intg32 blue = (2*b - r - g);
855  intg32 yellow = (-2*blue - 4*ENV_ABS(r-g));
856 
857  if (red < 0) red = 0;
858  if (green < 0) green = 0;
859  if (blue < 0) blue = 0;
860  if (yellow < 0) yellow=0;
861 
862  // compute differences and normalize chroma by luminance:
863  if (lshift > 0)
864  {
865  rgptr[i] = (3*(red - green) << lshift) / lum;
866  byptr[i] = (3*(blue - yellow) << lshift) / lum;
867  }
868  else if (lshift < 0)
869  {
870  rgptr[i] = ((3*(red - green)) / lum) >> (-lshift);
871  byptr[i] = ((3*(blue - yellow)) / lum) >> (-lshift);
872  }
873  else // lshift == 0
874  {
875  rgptr[i] = (3*(red - green)) / lum;
876  byptr[i] = (3*(blue - yellow)) / lum;
877  }
878  }
879  }
880 }
881 
882 // ######################################################################
883 void env_merge_range(const struct env_image* src, intg32* mi, intg32* ma)
884 {
885  if (src == 0) return;
886 
887  const intg32* sptr = env_img_pixels(src);
888  const env_size_t sz = env_img_size(src);
889 
890  if (sz == 0) return;
891 
892  if (sptr[0] < *mi) *mi = sptr[0];
893  if (sptr[0] > *ma) *ma = sptr[0];
894 
895  for (env_size_t i = 1; i < sz; ++i)
896  {
897  if (sptr[i] < *mi) *mi = sptr[i];
898  else if (sptr[i] > *ma) *ma = sptr[i];
899  }
900 }
901 
902 // ######################################################################
903 void env_rescale_range_inplace(struct env_image* src, const intg32 mi, const intg32 ma)
904 {
905  if (src == 0) return;
906 
907  intg32* sptr = env_img_pixelsw(src);
908  const env_size_t sz = env_img_size(src);
909 
910  ENV_ASSERT(ma >= mi);
911 
912  const intg32 scale = ma - mi;
913 
914  if (scale < 1) // image is uniform
915  {
916  for (env_size_t i = 0; i < sz; ++i) sptr[i] = 0;
917  }
918  else if ((INTG32_MAX / 255) > scale)
919  {
920  for (env_size_t i = 0; i < sz; ++i) sptr[i] = ((sptr[i] - mi) * 255) / scale;
921  }
922  else
923  {
924  const intg32 div = scale / 255;
925  ENV_ASSERT(div > 0);
926  for (env_size_t i = 0; i < sz; ++i) sptr[i] = (sptr[i] - mi) / div;
927  }
928 }
929 
930 // ######################################################################
/*! Average 'src' over an nx-by-ny grid of tiles and store one byte per
    tile into 'dest' (row-major, nx*ny bytes): each tile average is
    clamped at zero, right-shifted by 'bitshift', then clamped to 255. */
void env_grid_average(const struct env_image * src, unsigned char * dest, unsigned int bitshift,
                      env_size_t nx, env_size_t ny)
{
  intg32 const * sptr = env_img_pixels(src);
  env_size_t const srcw = src->dims.w; env_size_t const tw = srcw / nx; // tile width
  env_size_t const srch = src->dims.h; env_size_t const th = srch / ny; // tile height
  env_size_t const stride = srcw - tw; // jump from end of one tile row to start of the next
  env_size_t const ts = tw * th; // number of pixels per tile
  ENV_ASSERT(tw > 0); ENV_ASSERT(th > 0);

  for (env_size_t j = 0; j < ny; ++j)
  {
    // Divide height de novo to avoid rounding errors if srch not multiple of ny:
    intg32 const * p = sptr + ((srch * j) / ny) * srcw;

    for (env_size_t i = 0; i < nx; ++i)
    {
      // Divide width de novo to avoid rounding errors if srcw not multiple of nx:
      intg32 const * pp = p + (srcw * i) / nx;

      // accumulate all pixels of the tile:
      intg32 sum = 0;
      for (env_size_t y = 0; y < th; ++y)
      {
        for (env_size_t x = 0; x < tw; ++x) sum += *pp++;
        pp += stride;
      }

      sum /= ts; // tile average
      if (sum < 0) sum = 0; // clamp negatives before shifting
      sum >>= bitshift; // scale down to the destination bit depth
      if (sum > 255) sum = 255; // clamp to byte range

      *dest++ = (unsigned char)sum;
    }
  }
}
967 
968 // ######################################################################
//! Shift srcImg by integer offsets (dx, dy) into result, without wraparound.
/*! Only the region where shifted source pixels land inside result is
    written; all other pixels of result are left UNTOUCHED (not zeroed), so
    callers that need zeroed borders must clear result first (as
    env_shift_image() does before dispatching here). result must be
    pre-allocated with the same dims as srcImg; if srcImg is uninitialized,
    result is made empty instead. */
void env_shift_clean(const struct env_image* srcImg, const env_ssize_t dx, const env_ssize_t dy,
                     struct env_image* result)
{
  if (!env_img_initialized(srcImg))
  {
    env_img_make_empty(result);
    return;
  }

  ENV_ASSERT(env_dims_equal(result->dims, srcImg->dims));

  const env_ssize_t w = (env_ssize_t) srcImg->dims.w;
  const env_ssize_t h = (env_ssize_t) srcImg->dims.h;

  if (ENV_ABS(dx) >= w || ENV_ABS(dy) >= h)
    // the shifts are so large that no source pixel lands inside result.
    // NOTE(review): result is returned unmodified here, not cleared --
    // callers that need a blank image must zero it themselves.
    return;

  // find range of source pixels to copy (clipped so both the source read
  // window [startx,endx) x [starty,endy) and its shifted destination stay
  // inside the image):
  const env_ssize_t startx = ENV_MAX(((env_ssize_t) 0), -dx);
  const env_ssize_t endx = ENV_MIN(w, w - dx);
  ENV_ASSERT(startx < w);
  ENV_ASSERT(endx > 0);
  const env_ssize_t starty = ENV_MAX(((env_ssize_t) 0), -dy);
  const env_ssize_t endy = ENV_MIN(h, h - dy);
  ENV_ASSERT(starty < h);
  ENV_ASSERT(endy > 0);

  // create the source and destination pointers, offset to the first pixel
  // of the copy window and its shifted destination:
  const intg32* src = env_img_pixels(srcImg);
  intg32* dst = env_img_pixelsw(result);

  src += startx + starty * w;
  dst += (startx + dx) + (starty + dy) * w;

  // pixels to skip at the end of each row to reach the next row's window:
  const env_ssize_t skip = w - endx + startx;

  // do the copy:
  for (env_ssize_t j = starty; j < endy; ++j)
  {
    for (env_ssize_t i = startx; i < endx; ++i) *dst++ = *src++;

    // ready for next row of pixels:
    src += skip; dst += skip;
  }
}
1015 
1016 // ######################################################################
//! Shift srcImg by a fractional offset (dxnumer/2^denombits, dynumer/2^denombits).
/*! Fixed-point fractional shift: the displacement along each axis is the
    rational dnumer / 2^denombits. The result is first zeroed, then filled
    by bilinear interpolation of the four source pixels surrounding each
    destination sample; borders that no source pixel reaches stay zero.
    result must be pre-allocated with the same dims as srcImg. Integer
    displacements (both fractional parts zero) are dispatched to the faster
    env_shift_clean(). */
void env_shift_image(const struct env_image* srcImg, const env_ssize_t dxnumer, const env_ssize_t dynumer,
                     const env_size_t denombits, struct env_image* result)
{
  if (!env_img_initialized(srcImg))
  {
    env_img_make_empty(result);
    return;
  }

  ENV_ASSERT(env_dims_equal(result->dims, srcImg->dims));

  // NOTE(review): the assert admits denombits == 31, for which
  // (1 << denombits) left-shifts into the sign bit of a signed int --
  // undefined behavior; in practice denombits is expected to be small.
  ENV_ASSERT(denombits < 8*sizeof(intg32));

  const env_ssize_t denom = (1 << denombits);

  const env_ssize_t w = (env_ssize_t) srcImg->dims.w;
  const env_ssize_t h = (env_ssize_t) srcImg->dims.h;

  // split the x displacement into integer part xt (rounded toward zero for
  // negative numerators) and non-negative fractional numerator, and clip
  // the destination column range accordingly:
  env_ssize_t xt = dxnumer >= 0 ? (dxnumer >> denombits) : - ((-dxnumer + denom-1) >> denombits);
  env_ssize_t xfrac_numer = dxnumer - (xt << denombits);
  const env_ssize_t startx = ENV_MAX(((env_ssize_t) 0),xt);
  const env_ssize_t endx = ENV_MIN(((env_ssize_t) 0),xt) + w - 1;

  // same split for the y direction:
  env_ssize_t yt = dynumer >= 0 ? (dynumer >> denombits) : - ((-dynumer + denom-1) >> denombits);
  env_ssize_t yfrac_numer = dynumer - (yt << denombits);
  const env_ssize_t starty = ENV_MAX(((env_ssize_t) 0),yt);
  const env_ssize_t endy = ENV_MIN(((env_ssize_t) 0),yt) + h - 1;

  // clear the return image (pixels not reached below remain zero):
  {
    const env_size_t sz = w * h;
    intg32* const rptr = env_img_pixelsw(result);
    for (env_size_t i = 0; i < sz; ++i) rptr[i] = 0;
  }

  // dispatch to faster env_shift_clean() if displacements are roughly integer:
  if (xfrac_numer == 0 && yfrac_numer == 0)
  {
    env_shift_clean(srcImg, xt, yt, result);
    return;
  }

  // fold a positive fractional part into the next integer step so the
  // interpolation below always reads the 2x2 neighborhood at src[0],
  // src[1], src[w], src[w+1]:
  if (xfrac_numer > 0)
  {
    xfrac_numer = denom - xfrac_numer;
    ++xt;
  }

  if (yfrac_numer > 0)
  {
    yfrac_numer = denom - yfrac_numer;
    ++yt;
  }

  // prepare the pointers: shift the destination for positive integer
  // offsets, the source for negative ones:
  const intg32* src2 = env_img_pixels(srcImg);
  intg32* ret2 = env_img_pixelsw(result);
  if (xt > 0) ret2 += xt; else if (xt < 0) src2 -= xt;
  if (yt > 0) ret2 += yt * w; else if (yt < 0) src2 -= yt * w;

  // now loop over the images, bilinearly blending each 2x2 source
  // neighborhood; pixels are pre-scaled down by denombits to keep the
  // weighted products within intg32 range:
  for (env_ssize_t y = starty; y < endy; ++y)
  {
    const intg32* src = src2;
    intg32* ret = ret2;
    for (env_ssize_t x = startx; x < endx; ++x)
    {
      *ret = (((src[0] >> denombits) * (denom - xfrac_numer)) >> denombits) * (denom - yfrac_numer);
      *ret += (((src[1] >> denombits) * xfrac_numer) >> denombits) * (denom - yfrac_numer);
      *ret += (((src[w] >> denombits) * (denom - xfrac_numer)) >> denombits) * yfrac_numer;
      *ret += (((src[w+1] >> denombits) * xfrac_numer) >> denombits) * yfrac_numer;
      ++src; ++ret;
    }
    src2 += w; ret2 += w;
  }
}
1095 
#define ENV_MAX(a, b)
Definition: env_types.h:93
void env_dec_x(const struct env_image *src, struct env_image *result)
Decimate in X (take one every 'factor' pixels).
Definition: env_image_ops.c:86
void env_shift_clean(const struct env_image *srcImg, const env_ssize_t dx, const env_ssize_t dy, struct env_image *result)
Shift an image by (dx, dy), without wraparound.
void env_img_copy_src_dst(const struct env_image *src, struct env_image *dst)
Definition: env_image.c:76
void env_rescale_range_inplace(struct env_image *src, const intg32 mi, const intg32 ma)
rescale the src image to a [0..255] result
void env_rescale(const struct env_image *src, struct env_image *result)
#define INTG32_MAX
Definition: env_types.h:54
void env_steerable_filter(const struct env_image *src, const intg32 kxnumer, const intg32 kynumer, const env_size_t kdenombits, const struct env_math *imath, struct env_image *result)
void env_quad_energy(const struct env_image *img1, const struct env_image *img2, struct env_image *result)
env_size_t w
The width.
Definition: env_types.h:82
void env_dec_xy(const struct env_image *src, struct env_image *result)
Decimate in X and Y (take one every 'factor' pixels).
Definition: env_image_ops.c:45
const intg16 * sintab
Definition: env_math.h:51
void env_attenuate_borders_inplace(struct env_image *a, env_size_t size)
void env_c_lowpass_9_y_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst)
Like env_c_lowpass_9_y_fewbits() but uses optimized filter coefficients.
void env_pyr_build_hipass_9(const struct env_image *image, env_size_t firstlevel, const struct env_math *imath, struct env_pyr *result)
no max-normalization, but may change range
Definition: env_types.h:89
#define ENV_MIN(a, b)
Definition: env_types.h:94
void env_img_make_empty(struct env_image *img)
Definition: env_image.c:56
void env_img_swap(struct env_image *img1, struct env_image *img2)
Definition: env_image.c:48
env_maxnorm_type
Types of normalization.
Definition: env_types.h:87
void env_dec_y(const struct env_image *src, struct env_image *result)
Decimate in Y (take one every 'factor' pixels).
Basic image class.
Definition: env_image.h:43
void env_lowpass_9(const struct env_image *src, const struct env_math *imath, struct env_image *result)
#define ENV_TRIG_TABSIZ
Definition: env_math.h:44
#define ENV_TRIG_NBITS
Definition: env_math.h:45
#define ENV_ABS(a)
Definition: env_types.h:95
#define env_img_initializer
Definition: env_image.h:49
void env_max_normalize_inplace(struct env_image *src, const intg32 mi, const intg32 ma, const enum env_maxnorm_type normtyp, const intg32 rangeThresh)
void env_c_inplace_normalize(intg32 *const dst, const env_size_t sz, const intg32 nmin, const intg32 nmax, intg32 *const actualmin_p, intg32 *const actualmax_p, const intg32 rangeThresh)
void env_pyr_init(struct env_pyr *pyr, const env_size_t n)
Construct with a given number of empty images.
Definition: env_pyr.c:41
void env_img_init(struct env_image *img, const struct env_dims d)
Definition: env_image.c:41
void env_c_inplace_rectify(intg32 *dst, const env_size_t sz)
Saturate values < 0.
#define ENV_ASSERT2(expr, msg)
Definition: env_log.h:64
void env_get_rgby(const struct env_rgb_pixel *const src, const env_size_t sz, struct env_image *rg, struct env_image *by, const intg32 thresh, const env_size_t inputbits)
Compute R-G and B-Y opponent color maps.
void env_c_lowpass_5_x_dec_x_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst, const env_size_t w2)
void env_c_image_minus_image(const intg32 *const a, const intg32 *const b, const env_size_t sz, intg32 *const dst)
result = a - b
const intg16 * costab
Definition: env_math.h:52
struct env_dims dims
Definition: env_image.h:45
env_size_t depth
Definition: env_pyr.h:48
void env_lowpass_9_x(const struct env_image *source, const struct env_math *imath, struct env_image *result)
env_size_t h
The height.
Definition: env_types.h:83
void env_shift_image(const struct env_image *srcImg, const env_ssize_t dxnumer, const env_ssize_t dynumer, const env_size_t denombits, struct env_image *result)
unsigned long env_size_t
Definition: env_types.h:71
void env_pyr_make_empty(struct env_pyr *dst)
Definition: env_pyr.c:51
RGB pixel class.
Definition: env_types.h:74
void env_grid_average(const struct env_image *src, unsigned char *dest, unsigned int bitshift, env_size_t nx, env_size_t ny)
Compute average values in each tile of a grid.
void env_img_resize_dims(struct env_image *img, const struct env_dims d)
Definition: env_image.c:64
ENV_INTG32_TYPE intg32
32-bit signed integer
Definition: env_types.h:52
void env_merge_range(const struct env_image *src, intg32 *mi, intg32 *ma)
Update the range [mi,ma] to include the range of values in src.
void env_center_surround(const struct env_image *center, const struct env_image *surround, const int absol, struct env_image *result)
void env_max_normalize_none_inplace(struct env_image *src, const intg32 nmi, const intg32 nma, const intg32 rangeThresh)
void env_pyr_build_steerable_from_hipass_9(const struct env_pyr *hipass, const intg32 kxnumer, const intg32 kynumer, const env_size_t kdenombits, const struct env_math *imath, struct env_pyr *out)
void env_lowpass_5_x_dec_x(const struct env_image *src, const struct env_math *imath, struct env_image *result)
void env_lowpass_9_y(const struct env_image *source, const struct env_math *imath, struct env_image *result)
A simple struct to hold a pair of width/height dimensions.
Definition: env_types.h:80
This class implements a set of images, often used as a dyadic pyramid.
Definition: env_pyr.h:45
void env_lowpass_5_y_dec_y(const struct env_image *src, const struct env_math *imath, struct env_image *result)
void env_pyr_build_lowpass_5(const struct env_image *image, env_size_t firstlevel, const struct env_math *imath, struct env_pyr *result)
Wrapper for _cpu or _cuda version.
void env_c_lowpass_9_x_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst)
Like env_c_lowpass_9_x_fewbits() but uses optimized filter coefficients.
long env_ssize_t
Definition: env_types.h:70
void env_pyr_swap(struct env_pyr *pyr1, struct env_pyr *pyr2)
Swap contents with another env_pyr.
Definition: env_pyr.c:60
void env_c_lowpass_5_y_dec_y_fewbits_optim(const intg32 *src, const env_size_t w, const env_size_t h, intg32 *dst, const env_size_t h2)
env_size_t nbits
Definition: env_math.h:49
void env_max_normalize_std_inplace(struct env_image *src, const intg32 nmi, const intg32 nma, const intg32 rangeThresh)
#define ENV_ASSERT(expr)
Definition: env_log.h:63
non-iterative maxnorm
Definition: env_types.h:90
void env_downsize_9_inplace(struct env_image *src, const env_size_t depth, const struct env_math *imath)