/*
 * This file is part of McIDAS-V
 *
 * Copyright 2007-2015
 * Space Science and Engineering Center (SSEC)
 * University of Wisconsin - Madison
 * 1225 W. Dayton Street, Madison, WI 53706, USA
 * https://www.ssec.wisc.edu/mcidas
 *
 * All Rights Reserved
 *
 * McIDAS-V is built on Unidata's IDV and SSEC's VisAD libraries, and
 * some McIDAS-V source code is based on IDV and VisAD source code.
 *
 * McIDAS-V is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser Public License as published by
 * the Free Software Foundation; either version 3 of the License, or
 * (at your option) any later version.
 *
 * McIDAS-V is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser Public License for more details.
 *
 * You should have received a copy of the GNU Lesser Public License
 * along with this program. If not, see http://www.gnu.org/licenses.
 */

package edu.wisc.ssec.mcidasv.data.hydra;

import java.util.HashMap;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import edu.wisc.ssec.mcidasv.data.QualityFlag;

import visad.util.Util;

/**
 * Converts raw packed data arrays (byte/short/float/double) into calibrated
 * {@code float}/{@code double} values: applies the dataset's scale/offset,
 * replaces fill values with {@code NaN}, and filters against the valid range
 * (either before or after scaling, per the dataset metadata).
 */
public class RangeProcessor {

    private static final Logger logger = LoggerFactory
            .getLogger(RangeProcessor.class);

    /**
     * Factory that picks the appropriate {@code RangeProcessor} subclass
     * based on the reader type and product metadata.
     *
     * @param reader source of array attributes (scale, offset, fill value).
     * @param metadata dataset metadata; keys such as {@code "scale_name"}
     *        and the product name select the processor.
     *
     * @return a product-specific processor, a generic one, or {@code null}
     *         when no scaling metadata exists and the product is unknown.
     *
     * @throws Exception if processor construction fails.
     */
    static RangeProcessor createRangeProcessor(MultiDimensionReader reader,
            HashMap metadata) throws Exception {
        if (reader instanceof GranuleAggregation) {
            return new AggregationRangeProcessor((GranuleAggregation) reader,
                    metadata);
        }

        if (metadata.get("scale_name") == null) {
            String product_name = (String) metadata
                    .get(SwathAdapter.product_name);
            // Compare String content with equals(), never ==; constant-first
            // form is also safe when product_name is null.
            if ("IASI_L1C_xxx".equals(product_name)) {
                return new IASI_RangeProcessor();
            }
            return null;
        } else {
            String product_name = (String) metadata
                    .get(ProfileAlongTrack.product_name);
            if ("2B-GEOPROF".equals(product_name)) {
                return new CloudSat_2B_GEOPROF_RangeProcessor(reader, metadata);
            } else {
                return new RangeProcessor(reader, metadata);
            }
        }
    }

    MultiDimensionReader reader;
    HashMap metadata;

    // Per-channel calibration arrays read from the dataset's attributes.
    float[] scale = null;
    float[] offset = null;
    float[] missing = null;
    float[] valid_range = null;
    float valid_low = -Float.MAX_VALUE;
    float valid_high = Float.MAX_VALUE;
    float[] low = new float[] { -Float.MAX_VALUE };
    float[] high = new float[] { Float.MAX_VALUE };

    // unpack: value*scale+offset; otherwise scale*(value-offset).
    boolean unpack = false;
    boolean unsigned = false;
    boolean rangeCheckBeforeScaling = true;

    int scaleOffsetLen = 1;

    String multiScaleDimName = SpectrumAdapter.channelIndex_name;
    boolean hasMultiDimensionScale = false;

    int multiScaleDimensionIndex = 0;

    // Index into scale/offset selected from the subset's multi-scale dim.
    int soIndex = 0;

    public RangeProcessor() {
    }

    /**
     * Builds a processor with explicit single-channel calibration, bypassing
     * metadata lookup.
     *
     * @param scale multiplicative scale factor.
     * @param offset additive offset.
     * @param valid_low lowest valid (unscaled) value.
     * @param valid_high highest valid (unscaled) value.
     * @param missing fill value to map to {@code NaN}.
     */
    public RangeProcessor(float scale, float offset, float valid_low,
            float valid_high, float missing) {
        this.scale = new float[] { scale };
        this.offset = new float[] { offset };
        this.missing = new float[] { missing };
        this.valid_low = valid_low;
        this.valid_high = valid_high;
    }

    /**
     * Same as {@link #RangeProcessor(MultiDimensionReader, HashMap)} but with
     * an explicit multi-scale dimension name.
     */
    public RangeProcessor(MultiDimensionReader reader, HashMap metadata,
            String multiScaleDimName) throws Exception {
        this(reader, metadata);
        this.multiScaleDimName = multiScaleDimName;
    }

    /**
     * Builds a processor by reading scale/offset/fill/valid-range attributes
     * named in {@code metadata} from the reader.
     *
     * @param reader source of the named array attributes.
     * @param metadata maps conventional keys ({@code "scale_name"},
     *        {@code "offset_name"}, {@code "fill_value_name"},
     *        {@code "valid_range"}, {@code "unpack"}, {@code "unsigned"},
     *        {@code "range_check_after_scaling"},
     *        {@code "multiScaleDimensionIndex"}) to attribute names/values.
     *
     * @throws Exception if scale and offset attribute lengths differ, or an
     *         attribute read fails.
     */
    public RangeProcessor(MultiDimensionReader reader, HashMap metadata)
            throws Exception {
        this.reader = reader;
        this.metadata = metadata;

        // Presence of the key (any value) enables the flag.
        if (metadata.get("unpack") != null) {
            unpack = true;
        }

        if (metadata.get("unsigned") != null) {
            unsigned = true;
        }

        if (metadata.get("range_check_after_scaling") != null) {
            String s = (String) metadata.get("range_check_after_scaling");
            logger.debug("range_check_after_scaling: {}", s);
            rangeCheckBeforeScaling = false;
        }

        String array_name = (String) metadata.get("array_name");

        scale = getAttributeAsFloatArray(array_name,
                (String) metadata.get("scale_name"));

        offset = getAttributeAsFloatArray(array_name,
                (String) metadata.get("offset_name"));

        if (scale != null) {
            scaleOffsetLen = scale.length;

            if (offset != null) {
                if (scale.length != offset.length) {
                    throw new Exception(
                            "RangeProcessor: scale and offset array lengths must be equal");
                }
            } else {
                // No offset attribute: default every channel offset to zero.
                offset = new float[scaleOffsetLen];
                for (int i = 0; i < offset.length; i++) {
                    offset[i] = 0f;
                }
            }
        }

        missing = getAttributeAsFloatArray(array_name,
                (String) metadata.get("fill_value_name"));

        String metaStr = (String) metadata.get("valid_range");
        // attr name not supplied, so try the convention default
        if (metaStr == null) {
            metaStr = "valid_range";
        }

        valid_range = getAttributeAsFloatArray(array_name, metaStr);
        if (valid_range != null) {
            valid_low = valid_range[0];
            valid_high = valid_range[1];

            // Some products store the range reversed; normalize ordering.
            if (valid_range[0] > valid_range[1]) {
                valid_low = valid_range[1];
                valid_high = valid_range[0];
            }
        }

        String str = (String) metadata.get("multiScaleDimensionIndex");
        hasMultiDimensionScale = (str != null);
        multiScaleDimensionIndex = (str != null) ? Integer.parseInt(str) : 0;
    }

    /**
     * Reads a named attribute of {@code arrayName} and widens it to a
     * {@code float[]} regardless of its stored primitive type.
     *
     * @param arrayName array whose attribute is read.
     * @param attrName attribute name; attribute may be absent.
     *
     * @return the attribute values as floats, or {@code null} when the
     *         attribute does not exist (or has an unsupported type).
     *
     * @throws Exception if the underlying attribute read fails.
     */
    public float[] getAttributeAsFloatArray(String arrayName, String attrName)
            throws Exception {
        float[] fltArray = null;
        HDFArray arrayAttr = reader.getArrayAttribute(arrayName, attrName);

        if (arrayAttr != null) {
            if (arrayAttr.getType().equals(Float.TYPE)) {
                float[] attr = (float[]) arrayAttr.getArray();
                fltArray = new float[attr.length];
                for (int k = 0; k < attr.length; k++) {
                    fltArray[k] = attr[k];
                }
            } else if (arrayAttr.getType().equals(Short.TYPE)) {
                short[] attr = (short[]) arrayAttr.getArray();
                fltArray = new float[attr.length];
                for (int k = 0; k < attr.length; k++) {
                    fltArray[k] = (float) attr[k];
                }
            } else if (arrayAttr.getType().equals(Integer.TYPE)) {
                int[] attr = (int[]) arrayAttr.getArray();
                fltArray = new float[attr.length];
                for (int k = 0; k < attr.length; k++) {
                    fltArray[k] = (float) attr[k];
                }
            } else if (arrayAttr.getType().equals(Double.TYPE)) {
                double[] attr = (double[]) arrayAttr.getArray();
                fltArray = new float[attr.length];
                for (int k = 0; k < attr.length; k++) {
                    fltArray[k] = (float) attr[k];
                }
            }
        }

        return fltArray;
    }

    /**
     * Process a range of data from an array of {@code byte} values where
     * bytes are packed bit or multi-bit fields of quality flags. Based on
     * info in a {@link QualityFlag} object passed in, we extract and return
     * values for that flag.
     *
     * @param values Input byte values. Cannot be {@code null}.
     * @param subset Optional subset.
     * @param qf Quality flag.
     *
     * @return Processed range.
     */
    public float[] processRangeQualityFlag(byte[] values, HashMap subset,
            QualityFlag qf) {

        if (subset != null) {
            if (subset.get(multiScaleDimName) != null) {
                soIndex = (int) ((double[]) subset.get(multiScaleDimName))[0];
            }
        }

        float[] newValues = new float[values.length];

        // Divisor shifts the field down to bit 0 (2^bitOffset for offsets
        // 1..7; anything else leaves the byte unshifted, as before).
        int bitOffset = qf.getBitOffset();
        int divisor = ((bitOffset >= 1) && (bitOffset <= 7))
                ? (1 << bitOffset) : 1;

        // Mask keeps the field's width (2^numBits - 1 for widths 1..7;
        // anything else masks to zero, as before).
        int numBits = qf.getNumBits();
        int mask = ((numBits >= 1) && (numBits <= 7))
                ? ((1 << numBits) - 1) : 0;

        for (int k = 0; k < values.length; k++) {
            int i = Util.unsignedByteToInt(values[k]);
            newValues[k] = (float) ((i / divisor) & mask);
        }

        return newValues;
    }

    /**
     * Process a range of data from an array of {@code byte} values.
     *
     * @param values Input {@code byte} values. Cannot be {@code null}.
     * @param subset Optional subset.
     *
     * @return Processed range.
     */
    public float[] processRange(byte[] values, HashMap subset) {

        if (subset != null) {
            if (subset.get(multiScaleDimName) != null) {
                soIndex = (int) ((double[]) subset.get(multiScaleDimName))[0];
            }
        }

        float[] new_values = new float[values.length];

        // if we are working with unsigned data, need to convert missing vals
        // to unsigned too (idempotent: re-converting an already-converted
        // value yields the same result)
        if (unsigned) {
            if (missing != null) {
                for (int i = 0; i < missing.length; i++) {
                    missing[i] = (float) Util.unsignedByteToInt((byte) missing[i]);
                }
            }
        }

        float val = 0f;
        int i = 0;
        boolean isMissing = false;

        for (int k = 0; k < values.length; k++) {

            val = (float) values[k];
            if (unsigned) {
                i = Util.unsignedByteToInt(values[k]);
                val = (float) i;
            }

            // first, check the (possibly multiple) missing values
            isMissing = false;
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        isMissing = true;
                        break;
                    }
                }
            }

            if (isMissing) {
                new_values[k] = Float.NaN;
                continue;
            }

            if (rangeCheckBeforeScaling) {
                if ((val < valid_low) || (val > valid_high)) {
                    new_values[k] = Float.NaN;
                    continue;
                }
            }

            if (scale != null) {
                if (unpack) {
                    new_values[k] = scale[soIndex] * (val) + offset[soIndex];
                } else {
                    new_values[k] = scale[soIndex] * (val - offset[soIndex]);
                }
            } else {
                new_values[k] = val;
            }

            // do valid range check AFTER scaling?
            if (!rangeCheckBeforeScaling) {
                if ((new_values[k] < valid_low) || (new_values[k] > valid_high)) {
                    new_values[k] = Float.NaN;
                }
            }
        }
        return new_values;
    }

    /**
     * Process a range of data from an array of {@code short} values.
     *
     * @param values Input {@code short} values. Cannot be {@code null}.
     * @param subset Optional subset.
     *
     * @return Processed range.
     */
    public float[] processRange(short[] values, HashMap subset) {

        if (subset != null) {
            if (subset.get(multiScaleDimName) != null) {
                soIndex = (int) ((double[]) subset.get(multiScaleDimName))[0];
            }
        }

        float[] new_values = new float[values.length];

        // if we are working with unsigned data, need to convert missing vals
        // to unsigned too (idempotent, see processRange(byte[], HashMap))
        if (unsigned) {
            if (missing != null) {
                for (int i = 0; i < missing.length; i++) {
                    missing[i] = (float) Util.unsignedShortToInt((short) missing[i]);
                }
            }
        }

        float val = 0f;
        int i = 0;
        boolean isMissing = false;

        for (int k = 0; k < values.length; k++) {

            val = (float) values[k];
            if (unsigned) {
                i = Util.unsignedShortToInt(values[k]);
                val = (float) i;
            }

            // first, check the (possibly multiple) missing values
            isMissing = false;
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        isMissing = true;
                        break;
                    }
                }
            }

            if (isMissing) {
                new_values[k] = Float.NaN;
                continue;
            }

            if (rangeCheckBeforeScaling) {
                if ((val < valid_low) || (val > valid_high)) {
                    new_values[k] = Float.NaN;
                    continue;
                }
            }

            if (scale != null) {
                if (unpack) {
                    new_values[k] = (scale[soIndex] * val) + offset[soIndex];
                } else {
                    new_values[k] = scale[soIndex] * (val - offset[soIndex]);
                }
            } else {
                new_values[k] = val;
            }

            // do valid range check AFTER scaling?
            if (!rangeCheckBeforeScaling) {
                if ((new_values[k] < valid_low) || (new_values[k] > valid_high)) {
                    new_values[k] = Float.NaN;
                }
            }
        }
        return new_values;
    }

    /**
     * Process a range of data from an array of {@code float} values:
     * no scaling is applied, only missing-value and valid-range filtering.
     *
     * @param values Input {@code float} values. Cannot be {@code null}.
     * @param subset Optional subset (unused here).
     *
     * @return Processed array; the input array itself when there is nothing
     *         to filter.
     */
    public float[] processRange(float[] values, HashMap subset) {

        float[] new_values = null;

        if ((missing != null) || (valid_range != null)) {
            new_values = new float[values.length];
        } else {
            // Nothing to filter; avoid an unnecessary copy.
            return values;
        }

        float val;

        for (int k = 0; k < values.length; k++) {
            val = values[k];
            new_values[k] = val;

            // first, check the (possibly multiple) missing values
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        new_values[k] = Float.NaN;
                        break;
                    }
                }
            }

            if ((valid_range != null)
                    && ((val < valid_low) || (val > valid_high))) {
                new_values[k] = Float.NaN;
            }
        }

        return new_values;
    }

    /**
     * Process a range of data from an array of {@code double} values:
     * no scaling is applied, only missing-value and valid-range filtering.
     *
     * @param values Input {@code double} values. Cannot be {@code null}.
     * @param subset Optional subset (unused here).
     *
     * @return Processed array; the input array itself when there is nothing
     *         to filter.
     */
    public double[] processRange(double[] values, HashMap subset) {

        double[] new_values = null;

        if ((missing != null) || (valid_range != null)) {
            new_values = new double[values.length];
        } else {
            // Nothing to filter; avoid an unnecessary copy.
            return values;
        }

        double val;

        for (int k = 0; k < values.length; k++) {
            val = values[k];
            new_values[k] = val;

            // first, check the (possibly multiple) missing values
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        // Use the double constant in a double array
                        // (was Float.NaN, which widened to the same NaN).
                        new_values[k] = Double.NaN;
                        break;
                    }
                }
            }

            if ((valid_range != null)
                    && ((val < valid_low) || (val > valid_high))) {
                new_values[k] = Double.NaN;
            }
        }

        return new_values;
    }

    /**
     * Process a range of data from an array of byte values, applying a
     * per-element scale/offset (element k uses scale[k]/offset[k]) for data
     * whose calibration varies along the multi-scale dimension.
     */
    public float[] processAlongMultiScaleDim(byte[] values) {

        float[] new_values = new float[values.length];

        // if we are working with unsigned data, need to convert missing vals
        // to unsigned too (idempotent, see processRange(byte[], HashMap))
        if (unsigned) {
            if (missing != null) {
                for (int i = 0; i < missing.length; i++) {
                    missing[i] = (float) Util.unsignedByteToInt((byte) missing[i]);
                }
            }
        }

        float val = 0f;
        int i = 0;
        boolean isMissing = false;

        for (int k = 0; k < values.length; k++) {

            val = (float) values[k];
            if (unsigned) {
                i = Util.unsignedByteToInt(values[k]);
                val = (float) i;
            }

            // first, check the (possibly multiple) missing values
            isMissing = false;
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        isMissing = true;
                        break;
                    }
                }
            }

            if (isMissing) {
                new_values[k] = Float.NaN;
                continue;
            }

            if (rangeCheckBeforeScaling) {
                if ((val < valid_low) || (val > valid_high)) {
                    new_values[k] = Float.NaN;
                    continue;
                }
            }

            if (unpack) {
                new_values[k] = scale[k] * val + offset[k];
            } else {
                new_values[k] = scale[k] * (val - offset[k]);
            }

            // do valid range check AFTER scaling?
            if (!rangeCheckBeforeScaling) {
                if ((new_values[k] < valid_low) || (new_values[k] > valid_high)) {
                    new_values[k] = Float.NaN;
                }
            }
        }
        return new_values;
    }

    /**
     * Process a range of data from an array of short values, applying a
     * per-element scale/offset (element k uses scale[k]/offset[k]) for data
     * whose calibration varies along the multi-scale dimension.
     */
    public float[] processAlongMultiScaleDim(short[] values) {

        float[] new_values = new float[values.length];

        // if we are working with unsigned data, need to convert missing vals
        // to unsigned too (idempotent, see processRange(short[], HashMap))
        if (unsigned) {
            if (missing != null) {
                for (int i = 0; i < missing.length; i++) {
                    missing[i] = (float) Util.unsignedShortToInt((short) missing[i]);
                }
            }
        }

        float val = 0f;
        int i = 0;
        boolean isMissing = false;

        for (int k = 0; k < values.length; k++) {

            val = (float) values[k];
            if (unsigned) {
                i = Util.unsignedShortToInt(values[k]);
                val = (float) i;
            }

            // first, check the (possibly multiple) missing values
            isMissing = false;
            if (missing != null) {
                for (int mvIdx = 0; mvIdx < missing.length; mvIdx++) {
                    if (val == missing[mvIdx]) {
                        isMissing = true;
                        break;
                    }
                }
            }

            if (isMissing) {
                new_values[k] = Float.NaN;
                continue;
            }

            if (rangeCheckBeforeScaling) {
                if ((val < valid_low) || (val > valid_high)) {
                    new_values[k] = Float.NaN;
                    continue;
                }
            }

            if (unpack) {
                new_values[k] = scale[k] * val + offset[k];
            } else {
                new_values[k] = scale[k] * (val - offset[k]);
            }

            // do valid range check AFTER scaling?
            if (!rangeCheckBeforeScaling) {
                if ((new_values[k] < valid_low) || (new_values[k] > valid_high)) {
                    new_values[k] = Float.NaN;
                }
            }
        }
        return new_values;
    }

    public void setMultiScaleDimName(String multiScaleDimName) {
        this.multiScaleDimName = multiScaleDimName;
    }

    public int getMultiScaleDimensionIndex() {
        return multiScaleDimensionIndex;
    }

    public boolean hasMultiDimensionScale() {
        return hasMultiDimensionScale;
    }

    public void setHasMultiDimensionScale(boolean yesno) {
        hasMultiDimensionScale = yesno;
    }

    public void setMultiScaleIndex(int idx) {
        this.soIndex = idx;
    }

}