/*
 * Copyright (C) 1998 by Southwest Research Institute (SwRI)
 *
 * All rights reserved under U.S. Copyright Law and International Conventions.
 *
 * The development of this Software was supported by contracts NAG5-3148,
 * NAG5-6855, NAS8-36840, NAG5-2323, and NAG5-7043 issued on behalf of
 * the United States Government by its National Aeronautics and Space
 * Administration. Southwest Research Institute grants to the Government,
 * and others acting on its behalf, a paid-up nonexclusive, irrevocable,
 * worldwide license to reproduce, prepare derivative works, and perform
 * publicly and display publicly, by or on behalf of the Government.
 * Other than those rights granted to the United States Government, no part
 * of this Software may be reproduced in any form or by any means, electronic
 * or mechanical, including photocopying, without permission in writing from
 * Southwest Research Institute. All inquiries should be addressed to:
 *
 *       Director of Contracts
 *       Southwest Research Institute
 *       P. O. Drawer 28510
 *       San Antonio, Texas 78228-0510
 *
 * Use of this Software is governed by the terms of the end user license
 * agreement, if any, which accompanies or is included with the Software
 * (the "License Agreement"). An end user will be unable to install any
 * Software that is accompanied by or includes a License Agreement, unless
 * the end user first agrees to the terms of the License Agreement. Except
 * as set forth in the applicable License Agreement, any further copying,
 * reproduction or distribution of this Software is expressly prohibited.
 * Installation assistance, product support and maintenance, if any, of the
 * Software is available from SwRI and/or the Third Party Providers, as the
 * case may be.
 *
 * Disclaimer of Warranty
 *
 * SOFTWARE IS WARRANTED, IF AT ALL, IN ACCORDANCE WITH THESE TERMS OF THE
 * LICENSE AGREEMENT. UNLESS OTHERWISE EXPLICITLY STATED, THIS SOFTWARE IS
 * PROVIDED "AS IS", IS EXPERIMENTAL, AND IS FOR NON-COMMERCIAL USE ONLY,
 * AND ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
 * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT
 * SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 *
 * Limitation of Liability
 *
 * SwRI SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED AS A RESULT OF USING,
 * MODIFYING, CONTRIBUTING, COPYING, DISTRIBUTING, OR DOWNLOADING THIS
 * SOFTWARE. IN NO EVENT SHALL SwRI BE LIABLE FOR ANY INDIRECT, PUNITIVE,
 * SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGE (INCLUDING LOSS OF BUSINESS,
 * REVENUE, PROFITS, USE, DATA OR OTHER ECONOMIC ADVANTAGE) HOWEVER IT ARISES,
 * WHETHER FOR BREACH OR IN TORT, EVEN IF SwRI HAS BEEN PREVIOUSLY ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE. YOU HAVE SOLE RESPONSIBILITY FOR ADEQUATE
 * PROTECTION AND BACKUP OF DATA AND/OR EQUIPMENT USED IN CONNECTION WITH THE
 * SOFTWARE AND WILL NOT MAKE A CLAIM AGAINST SwRI FOR LOST DATA, RE-RUN TIME,
 * INACCURATE OUTPUT, WORK DELAYS OR LOST PROFITS RESULTING FROM THE USE OF
 * THIS SOFTWARE. YOU AGREE TO HOLD SwRI HARMLESS FROM, AND YOU COVENANT NOT
 * TO SUE SwRI FOR, ANY CLAIMS BASED ON USING THE SOFTWARE.
 *
 * Local Laws: Export Control
 *
 * You acknowledge and agree this Software is subject to the U.S. Export
 * Administration Laws and Regulations. Diversion of such Software contrary
 * to U.S. law is prohibited.
 * You agree that none of the Software, nor any direct product therefrom,
 * is being or will be acquired for, shipped, transferred, or reexported,
 * directly or indirectly, to proscribed or embargoed countries or their
 * nationals, nor be used for nuclear activities, chemical biological weapons,
 * or missile projects unless authorized by the U.S. Government. Proscribed
 * countries are set forth in the U.S. Export Administration Regulations.
 * Countries subject to U.S. embargo are: Cuba, Iran, Iraq, Libya, North
 * Korea, Syria, and the Sudan. This list is subject to change without
 * further notice from SwRI, and you must comply with the list as it exists
 * in fact. You certify that you are not on the U.S. Department of Commerce's
 * Denied Persons List or affiliated lists or on the U.S. Department of
 * Treasury's Specially Designated Nationals List. You agree to comply
 * strictly with all U.S. export laws and assume sole responsibility for
 * obtaining licenses to export or reexport as may be required.
 *
 * General
 *
 * These Terms represent the entire understanding relating to the use of the
 * Software and prevail over any prior or contemporaneous, conflicting or
 * additional, communications. SwRI can revise these Terms at any time
 * without notice by updating this posting.
 *
 * Trademarks
 *
 * The SwRI logo is a trademark of SwRI in the United States and other
 * countries.
 *
 */

#ident "@(#) chk_dep_thdr.c 1.1 05/08/19 SwRI"

#include <stdlib.h>            /* for malloc() and realloc() */
#include "ret_codes.h"
#include "gen_defs.h"
#include "libbase_idfs.h"
#include "libVIDF.h"           /* for header format comparison */

/*******************************************************************************
 *
 *  IR_CHECK_DEPENDENT_ON_TENSOR_HEADER   SUBROUTINE
 *
 *  DESCRIPTION
 *    This routine is called to determine if the arrays for the sensor data
 *    and mode data are large enough to accommodate the data based upon the
 *    header sizes.  If not, the space is enlarged to the newly defined size.
 *
 *  INPUT VARIABLES
 *    void *tensor_data_ptr        ptr to the memory location for the structure
 *                                 that holds returned tensor data values
 *
 *  USAGE
 *    x = ir_check_dependent_on_tensor_header (tensor_data_ptr)
 *
 *  NECESSARY SUBPROGRAMS
 *    malloc ()                    allocates memory
 *    realloc ()                   reallocates previously allocated memory
 *    sizeof ()                    the size of the specified object in bytes
 *
 *  EXTERNAL VARIABLES
 *    struct general_info ginfo    structure that holds information concerning
 *                                 the experiment that is being processed
 *
 *  INTERNAL VARIABLES
 *    struct tensor_data           structure that holds all the currently
 *       *TENSOR_DATA              returned data values to be processed
 *    struct experiment_info *ex   a pointer to the structure that holds
 *                                 specific experiment information
 *    struct ptr_rec *ptr          a pointer to the structure which holds all
 *                                 pointers to the header and data for the
 *                                 experiment of interest
 *    void *tmp_ptr                pointer which holds address passed back by
 *                                 the call to the MALLOC routine
 *    size_t bytes                 the number of bytes to allocate
 *    size_t num_bytes_slong       the number of bytes needed for a SDDAS_LONG
 *
 *  SUBSYSTEM
 *    Display Level
 *
 ******************************************************************************/
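/*******************************************************************************
 *
 *  ILLUSTRATIVE USAGE (hypothetical sketch, not part of the original source)
 *
 *    Assuming the caller owns a struct tensor_data whose header_format field
 *    has been set to TENSOR_SINGLE_HEADER and whose base_* pointers start out
 *    as NO_MEMORY, the routine would typically be invoked as shown below.
 *    The variable names tptr and status are hypothetical.
 *
 *        struct tensor_data *tptr;          (prepared elsewhere by the caller)
 *        SDDAS_SHORT status;
 *
 *        status = ir_check_dependent_on_tensor_header ((void *) tptr);
 *        if (status != ALL_OKAY)
 *           ... handle TENSOR_DATA_MALLOC, TENSOR_MODE_MALLOC, etc. ...
 *
 ******************************************************************************/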
SDDAS_SHORT ir_check_dependent_on_tensor_header (void *tensor_data_ptr)
{
   extern struct general_info ginfo;
   struct tensor_data *TENSOR_DATA;
   struct experiment_info *ex;
   struct ptr_rec *ptr;
   void *tmp_ptr;
   size_t bytes, num_bytes_slong;

   /***********************************************************************/
   /* Set a pointer to the structure which holds all pointers for header */
   /* and data information for the experiment currently being processed. */
   /***********************************************************************/
   ex = ginfo.expt;
   ptr = ex->info_ptr;
   if (ex->header_format != TENSOR_SINGLE_HEADER)
      return (WRONG_HEADER_FORMAT);

   /**************************************************************************/
   /* Since void pointer, make sure correct type of data structure being */
   /* used.  Data structures define the same elements at the beginning of */
   /* the structure definition. */
   /**************************************************************************/
   TENSOR_DATA = (struct tensor_data *) tensor_data_ptr;
   if (TENSOR_DATA->header_format != TENSOR_SINGLE_HEADER)
      return (WRONG_DATA_STRUCTURE);

   num_bytes_slong = sizeof (SDDAS_LONG);
   bytes = ex->tensor_elements * num_bytes_slong;

   /**************************************************************************/
   /* Allocate space to hold the sensor data that is to be returned to the */
   /* user if no memory has been allocated yet. */
   /**************************************************************************/
   if (TENSOR_DATA->base_data == NO_MEMORY)
   {
      if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
         return (TENSOR_DATA_MALLOC);
      TENSOR_DATA->base_data = tmp_ptr;
      TENSOR_DATA->sen_data = (SDDAS_LONG *) TENSOR_DATA->base_data;
      TENSOR_DATA->data_size = (SDDAS_UINT) bytes;
   }

   /*************************************************************************/
   /* If the space for the data is not large enough to handle this virtual */
   /* instrument, enlarge the memory size.  This array will end up being */
   /* maxed out to the largest number of elements for ALL virtual inst. */
   /* being plotted. */
   /*************************************************************************/
   else if (bytes > TENSOR_DATA->data_size)
   {
      if ((tmp_ptr = realloc (TENSOR_DATA->base_data, bytes)) == NO_MEMORY)
         return (TENSOR_DATA_REALLOC);
      TENSOR_DATA->base_data = tmp_ptr;
      TENSOR_DATA->sen_data = (SDDAS_LONG *) TENSOR_DATA->base_data;
      TENSOR_DATA->data_size = (SDDAS_UINT) bytes;
   }
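   /*************************************************************************/
   /* The same allocate-or-grow logic used above for the sensor data is */
   /* applied below to the converted tensor values, the mode flags, and */
   /* the data quality flags; an illustrative sketch of the common pattern */
   /* appears at the end of this file. */
   /*************************************************************************/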
   /**************************************************************************/
   /* Allocate space to hold the tensor that is to be returned to the user, */
   /* converted according to d_type, if no memory has been allocated yet. */
   /**************************************************************************/
   bytes = ex->tensor_elements * sizeof (SDDAS_FLOAT);
   if (TENSOR_DATA->base_tdata == NO_MEMORY)
   {
      if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
         return (TENSOR_DATA_MALLOC);
      TENSOR_DATA->base_tdata = tmp_ptr;
      TENSOR_DATA->tdata = (SDDAS_FLOAT *) TENSOR_DATA->base_tdata;
      TENSOR_DATA->tensor_bytes = (SDDAS_UINT) bytes;
   }

   /*************************************************************************/
   /* If the space for the data is not large enough to handle this virtual */
   /* instrument, enlarge the memory size.  This array will end up being */
   /* maxed out to the largest number of elements for ALL virtual inst. */
   /* being plotted. */
   /*************************************************************************/
   else if (bytes > TENSOR_DATA->tensor_bytes)
   {
      if ((tmp_ptr = realloc (TENSOR_DATA->base_tdata, bytes)) == NO_MEMORY)
         return (TENSOR_DATA_REALLOC);
      TENSOR_DATA->base_tdata = tmp_ptr;
      TENSOR_DATA->tdata = (SDDAS_FLOAT *) TENSOR_DATA->base_tdata;
      TENSOR_DATA->tensor_bytes = (SDDAS_UINT) bytes;
   }

   /*************************************************************************/
   /* Allocate space to hold mode flags that are to be returned to the */
   /* user, returned as SDDAS_LONGs, if modes are defined for the data set. */
   /*************************************************************************/
   if (*ptr->I_MODE != 0)
   {
      bytes = *(ptr->I_MODE) * num_bytes_slong;
      if (TENSOR_DATA->base_mode == NO_MEMORY)
      {
         if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
            return (TENSOR_MODE_MALLOC);
         TENSOR_DATA->base_mode = tmp_ptr;
         TENSOR_DATA->mode = (SDDAS_LONG *) TENSOR_DATA->base_mode;
         TENSOR_DATA->mode_size = (SDDAS_UINT) bytes;
         TENSOR_DATA->mode_len = bytes / num_bytes_slong;
      }

      /**********************************************************************/
      /* If the space for the modes is not large enough to handle this */
      /* virtual instrument, enlarge the memory size.  This array will end */
      /* up being maxed out to the largest number of elements for ALL */
      /* virtual inst. using this data structure. */
      /**********************************************************************/
      else if (bytes > TENSOR_DATA->mode_size)
      {
         if ((tmp_ptr = realloc (TENSOR_DATA->base_mode, bytes)) == NO_MEMORY)
            return (TENSOR_MODE_REALLOC);
         TENSOR_DATA->base_mode = tmp_ptr;
         TENSOR_DATA->mode = (SDDAS_LONG *) TENSOR_DATA->base_mode;
         TENSOR_DATA->mode_size = (SDDAS_UINT) bytes;
         TENSOR_DATA->mode_len = bytes / num_bytes_slong;
      }
   }
   /*************************************************************************/
   /* Allocate space to hold data quality flags that are to be returned to */
   /* the user for a single sensor, returned as SDDAS_LONGs. */
   /*************************************************************************/
   bytes = *ptr->hdr_fmt2_ptr->DQUAL_SIZE * num_bytes_slong;
   if (TENSOR_DATA->base_dqual == NO_MEMORY)
   {
      if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
         return (TENSOR_DQUAL_MALLOC);
      TENSOR_DATA->base_dqual = tmp_ptr;
      TENSOR_DATA->d_qual = (SDDAS_LONG *) TENSOR_DATA->base_dqual;
      TENSOR_DATA->dqual_size = (SDDAS_UINT) bytes;
      TENSOR_DATA->num_dqual = bytes / num_bytes_slong;
   }

   /*************************************************************************/
   /* If the space for the data quality flags is not large enough to handle */
   /* this virtual instrument, enlarge the memory size.  This array will */
   /* end up being maxed out to the largest number of elements for ALL */
   /* virtual inst. using this data structure. */
   /*************************************************************************/
   else if (bytes > TENSOR_DATA->dqual_size)
   {
      if ((tmp_ptr = realloc (TENSOR_DATA->base_dqual, bytes)) == NO_MEMORY)
         return (TENSOR_DQUAL_REALLOC);
      TENSOR_DATA->base_dqual = tmp_ptr;
      TENSOR_DATA->d_qual = (SDDAS_LONG *) TENSOR_DATA->base_dqual;
      TENSOR_DATA->dqual_size = (SDDAS_UINT) bytes;
      TENSOR_DATA->num_dqual = bytes / num_bytes_slong;
   }

   /**************************************************************************/
   /* CARRIE - Not sure if this is needed or not.  Put in code just in case. */
   /* Allocate space to hold the number of elements in each cal. set. */
   /**************************************************************************/
/* CARRIE -
   if (ex->cal_sets != 0)
   {
CARRIE      if (TENSOR_DATA->base_cset == NO_MEMORY)
CARRIE      {
CARRIE         bytes = ex->cal_sets * sizeof (SDDAS_ULONG);
CARRIE         if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
CARRIE            return (TENSOR_CSET_MALLOC);
CARRIE         TENSOR_DATA->base_cset = tmp_ptr;
CARRIE         TENSOR_DATA->cset_num = (SDDAS_ULONG *) TENSOR_DATA->base_cset;
CARRIE      }
   }
*/

   return (ALL_OKAY);
}
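/*****************************************************************************/
/* Illustrative sketch (hypothetical, not part of the original source): the */
/* four allocate-or-grow sections above share one pattern, which could be */
/* factored into a helper such as ir_ensure_capacity() below.  The typed */
/* aliases (sen_data, tdata, mode, d_qual) and the derived element counts */
/* would still be set by the caller.  The block is guarded by #if 0 so it */
/* does not change what is compiled. */
/*****************************************************************************/
#if 0
static SDDAS_SHORT ir_ensure_capacity (void **base, SDDAS_UINT *cur_size,
                                       size_t bytes, SDDAS_SHORT malloc_err,
                                       SDDAS_SHORT realloc_err)
{
   void *tmp_ptr;

   /* First use of this buffer: allocate it. */
   if (*base == NO_MEMORY)
   {
      if ((tmp_ptr = malloc (bytes)) == NO_MEMORY)
         return (malloc_err);
      *base = tmp_ptr;
      *cur_size = (SDDAS_UINT) bytes;
   }
   /* Existing buffer is too small for this virtual instrument: enlarge it. */
   else if (bytes > *cur_size)
   {
      if ((tmp_ptr = realloc (*base, bytes)) == NO_MEMORY)
         return (realloc_err);
      *base = tmp_ptr;
      *cur_size = (SDDAS_UINT) bytes;
   }
   return (ALL_OKAY);
}
#endif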