/* * Copyright (C) 1998 by Southwest Research Institute (SwRI) * * All rights reserved under U.S. Copyright Law and International Conventions. * * The development of this Software was supported by contracts NAG5-3148, * NAG5-6855, NAS8-36840, NAG5-2323, and NAG5-7043 issued on behalf of * the United States Government by its National Aeronautics and Space * Administration. Southwest Research Institute grants to the Government, * and others acting on its behalf, a paid-up nonexclusive, irrevocable, * worldwide license to reproduce, prepare derivative works, and perform * publicly and display publicly, by or on behalf of the Government. * Other than those rights granted to the United States Government, no part * of this Software may be reproduced in any form or by any means, electronic * or mechanical, including photocopying, without permission in writing from * Southwest Research Institute. All inquiries should be addressed to: * * Director of Contracts * Southwest Research Institute * P. O. Drawer 28510 * San Antonio, Texas 78228-0510 * * * Use of this Software is governed by the terms of the end user license * agreement, if any, which accompanies or is included with the Software * (the "License Agreement"). An end user will be unable to install any * Software that is accompanied by or includes a License Agreement, unless * the end user first agrees to the terms of the License Agreement. Except * as set forth in the applicable License Agreement, any further copying, * reproduction or distribution of this Software is expressly prohibited. * Installation assistance, product support and maintenance, if any, of the * Software is available from SwRI and/or the Third Party Providers, as the * case may be. * * Disclaimer of Warranty * * SOFTWARE IS WARRANTED, IF AT ALL, IN ACCORDANCE WITH THESE TERMS OF THE * LICENSE AGREEMENT. 
UNLESS OTHERWISE EXPLICITLY STATED, THIS SOFTWARE IS * PROVIDED "AS IS", IS EXPERIMENTAL, AND IS FOR NON-COMMERCIAL USE ONLY, * AND ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR * PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT * SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. * * Limitation of Liability * * SwRI SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED AS A RESULT OF USING, * MODIFYING, CONTRIBUTING, COPYING, DISTRIBUTING, OR DOWNLOADING THIS * SOFTWARE. IN NO EVENT SHALL SwRI BE LIABLE FOR ANY INDIRECT, PUNITIVE, * SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGE (INCLUDING LOSS OF BUSINESS, * REVENUE, PROFITS, USE, DATA OR OTHER ECONOMIC ADVANTAGE) HOWEVER IT ARISES, * WHETHER FOR BREACH OF IN TORT, EVEN IF SwRI HAS BEEN PREVIOUSLY ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. YOU HAVE SOLE RESPONSIBILITY FOR ADEQUATE * PROTECTION AND BACKUP OF DATA AND/OR EQUIPMENT USED IN CONNECTION WITH THE * SOFTWARE AND WILL NOT MAKE A CLAIM AGAINST SwRI FOR LOST DATA, RE-RUN TIME, * INACCURATE OUTPUT, WORK DELAYS OR LOST PROFITS RESULTING FROM THE USE OF * THIS SOFTWARE. YOU AGREE TO HOLD SwRI HARMLESS FROM, AND YOU COVENANT NOT * TO SUE SwRI FOR, ANY CLAIMS BASED ON USING THE SOFTWARE. * * Local Laws: Export Control * * You acknowledge and agree this Software is subject to the U.S. Export * Administration Laws and Regulations. Diversion of such Software contrary * to U.S. law is prohibited. You agree that none of the Software, nor any * direct product therefrom, is being or will be acquired for, shipped, * transferred, or reexported, directly or indirectly, to proscribed or * embargoed countries or their nationals, nor be used for nuclear activities, * chemical biological weapons, or missile projects unless authorized by U.S. * Government. Proscribed countries are set forth in the U.S. Export * Administration Regulations. 
Countries subject to U.S embargo are: Cuba, * Iran, Iraq, Libya, North Korea, Syria, and the Sudan. This list is subject * to change without further notice from SwRI, and you must comply with the * list as it exists in fact. You certify that you are not on the U.S. * Department of Commerce's Denied Persons List or affiliated lists or on the * U.S. Department of Treasury's Specially Designated Nationals List. You agree * to comply strictly with all U.S. export laws and assume sole responsibilities * for obtaining licenses to export or reexport as may be required. * * General * * These Terms represent the entire understanding relating to the use of the * Software and prevail over any prior or contemporaneous, conflicting or * additional, communications. SwRI can revise these Terms at any time * without notice by updating this posting. * * Trademarks * * The SwRI logo is a trademark of SwRI in the United States and other countries. * */ #ident "@(#) $Id: extract9D.c 20998 2011-01-25 22:05:37Z carrie $ SwRI" #include "libIDFSTensor.h" /******************************************************************************* * * * EXTRACT_FROM_9DTENSOR SUBROUTINE * * * * DESCRIPTION * * This routine is called to extract elements from a tensor with a rank of * * nine. A single element extraction from the tensor is handled by the * * calling routine. However, for resultants of rank 1, 2, 3, 4, 5, 6, 7, 8 * * and 9 this routine is called. The starting position (index) within each * * dimension of the tensor is provided, along with the stop position (index), * * which taken together, define the subset of data to be extracted along each * * dimension. If the start and stop index values are the same, all data * * along that specific index are extracted from that particular dimension; * * otherwise, the index values represent a subset (range) of data values to * * be extracted from that particular dimension. 
The rank of the resultant is * * based upon the start/stop index values provided. For each pair of start/ * * stop index values that are the same, the rank of the resultant should be * * decremented by one. For example, the start/stop index values defined as: * * start_ind[9] = {0, 0, 0, 2, 0, 3, 2, 1, 1} * * stop_ind[9] = {3, 0, 2, 2, 4, 3, 5, 1, 1} * * should be inferred to result in a 4-D tensor that is 4 x 3 x 5 x 4 in size.* * The second dimension is held constant at index value 0, the fourth * * dimension is held constant at index value 2, the sixth dimension is held * * constant at index value 3 and the eighth and ninth dimensions are held * * constant at index value 1 so that data values with index values of * * [0-3][0][0-2][2][0-4][3][2-5][1][1] are extracted. * * * * INPUT VARIABLES * * void *tensorA pointer to the input tensor being processed * * void *res_ptr ptr to memory allocated for resultant value(s) * * SDDAS_ULONG *next_dimen ptr to an array that holds no. of data values * * to bypass in order to get to the next index for* * a given dimension ([0] = first dimension or * * slowest varying dimension) * * void *start_ind start index position for each dimension defined* * for the given tensor argument * * void *stop_ind stop index position for each dimension defined * * for the given tensor argument * * SDDAS_BOOL double_precision flag indicating if arguments are double- * * precision values * * * * USAGE * * extract_from_9Dtensor (&tensorA, &res_ptr, &next_dimen, &start_ind, * * &stop_ind, double_precision) * * * * NECESSARY SUBPROGRAMS * * None * * * * EXTERNAL VARIABLES * * None * * * * INTERNAL VARIABLES * * reg SDDAS_LONG loop0, looping variables for each dimension of tensor * * loop1, loop2, loop3, * * loop4, loop5, loop6, * * loop7, loop8 * * SDDAS_LONG loop0_end, loop termination variables * * loop1_end, loop2_end, * * loop3_end, loop4_end, * * loop5_end, loop6_end, * * loop7_end, loop8_end * * SDDAS_DOUBLE *tensor_double pointer to the 
input tensor being processed * * SDDAS_DOUBLE *result_double ptr to memory allocated for resultant * * SDDAS_DOUBLE *start_double start index position for each dimension * * defined for the given tensor argument * * SDDAS_DOUBLE *stop_double stop index position for each dimension * * defined for the given tensor argument * * SDDAS_FLOAT *tensor_float pointer to the input tensor being processed * * SDDAS_FLOAT *result_float ptr to memory allocated for resultant * * SDDAS_ULONG next_dimen0 no. of data values to bypass in order to get * * to the next index for the first dimension * * SDDAS_ULONG next_dimen1 no. of data values to bypass in order to get * * to the next index for the second dimension * * SDDAS_ULONG next_dimen2 no. of data values to bypass in order to get * * to the next index for the third dimension * * SDDAS_ULONG next_dimen3 no. of data values to bypass in order to get * * to the next index for the fourth dimension * * SDDAS_ULONG next_dimen4 no. of data values to bypass in order to get * * to the next index for the fifth dimension * * SDDAS_ULONG next_dimen5 no. of data values to bypass in order to get * * to the next index for the sixth dimension * * SDDAS_ULONG next_dimen6 no. of data values to bypass in order to get * * to the next index for the seventh dimension * * SDDAS_ULONG next_dimen7 no. 
of data values to bypass in order to get   *
 *                                 to the next index for the eighth dimension *
 *  SDDAS_ULONG offset0            offset for index location along dimension  *
 *                                 one                                        *
 *  SDDAS_ULONG offset1            offset for index location along dimension  *
 *                                 two                                        *
 *  SDDAS_ULONG offset2            offset for index location along dimension  *
 *                                 three                                      *
 *  SDDAS_ULONG offset3            offset for index location along dimension  *
 *                                 four                                       *
 *  SDDAS_ULONG offset4            offset for index location along dimension  *
 *                                 five                                       *
 *  SDDAS_ULONG offset5            offset for index location along dimension  *
 *                                 six                                        *
 *  SDDAS_ULONG offset6            offset for index location along dimension  *
 *                                 seven                                      *
 *  SDDAS_ULONG offset7            offset for index location along dimension  *
 *                                 eight                                      *
 *  SDDAS_ULONG ind                index to get to specific data element      *
 *                                 within the tensor                          *
 *  SDDAS_LONG *start_long         start index position for each dimension    *
 *                                 defined for the given tensor argument      *
 *  SDDAS_LONG *stop_long          stop index position for each dimension     *
 *                                 defined for the given tensor argument      *
 *  SDDAS_LONG which_val           index into the resultant                   *
 *  SDDAS_BOOL fdimen_range        flag indicating if the fastest changing    *
 *                                 index (dimension) represents a range or a  *
 *                                 fixed location                             *
 *                                                                            *
 *  SUBSYSTEM                                                                 *
 *    Display Level                                                           *
 *                                                                            *
 ******************************************************************************/

void extract_from_9Dtensor (void *tensorA, void *res_ptr,
                            SDDAS_ULONG *next_dimen, void *start_ind,
                            void *stop_ind, SDDAS_BOOL double_precision)
{
   register SDDAS_LONG loop0, loop1, loop2, loop3, loop4, loop5, loop6, loop7;
   register SDDAS_LONG loop8;
   SDDAS_DOUBLE *tensor_double, *result_double, *start_double, *stop_double;
   SDDAS_FLOAT *tensor_float, *result_float;
   SDDAS_ULONG next_dimen0, next_dimen1, next_dimen2, next_dimen3;
   SDDAS_ULONG next_dimen4, next_dimen5, next_dimen6, next_dimen7;
   SDDAS_ULONG offset0, offset1, offset2, offset3, offset4;
   SDDAS_ULONG offset5, offset6, offset7, ind;
   SDDAS_LONG loop0_end, loop1_end, loop2_end, loop3_end, loop4_end, loop5_end;
   SDDAS_LONG loop6_end, loop7_end, loop8_end, which_val;
   SDDAS_LONG *start_long, *stop_long;
   SDDAS_BOOL fdimen_range;

   /* The tensor from which we are extracting is a 9-D tensor.  Go over   */
   /* the desired sub-area, extracting the requested elements.  Since the */
   /* size of the resultant is reflected by the range of the start/stop   */
   /* indices, the index for the resultant is simply the next value as we */
   /* proceed through the dimensions of the tensor.                       */

   /* This code is shared by libbase_idfs and SCF code; SCF deals with    */
   /* double-precision data and IDFS deals with single-precision floats.  */
   /* MUST maintain double-precision even for indices; otherwise, code    */
   /* fails since incorrect values are extracted.                         */
   if (double_precision == sTrue)
     {
        /* SCF path: both the data and the start/stop indices arrive as doubles. */
        tensor_double = (SDDAS_DOUBLE *) tensorA;
        result_double = (SDDAS_DOUBLE *) res_ptr;
        start_double = (SDDAS_DOUBLE *) start_ind;
        stop_double = (SDDAS_DOUBLE *) stop_ind;

        /* If start == stop for the fastest-varying (ninth) dimension, that */
        /* dimension is held at a single fixed index; otherwise a range of  */
        /* indices is extracted along it.                                   */
        fdimen_range = (*(start_double + 8) == *(stop_double + 8)) ?
                        sFalse : sTrue;
        which_val = 0;
        next_dimen0 = *next_dimen;
        next_dimen1 = *(next_dimen + 1);
        next_dimen2 = *(next_dimen + 2);
        next_dimen3 = *(next_dimen + 3);
        next_dimen4 = *(next_dimen + 4);
        next_dimen5 = *(next_dimen + 5);
        next_dimen6 = *(next_dimen + 6);
        next_dimen7 = *(next_dimen + 7);

        /* Set FOR loop termination variables.  NOTE(review): the doubles   */
        /* are truncated to SDDAS_LONG here for the loop counters; only the */
        /* offset/ind arithmetic below stays in double precision.           */
        loop0_end = *stop_double;
        loop1_end = *(stop_double + 1);
        loop2_end = *(stop_double + 2);
        loop3_end = *(stop_double + 3);
        loop4_end = *(stop_double + 4);
        loop5_end = *(stop_double + 5);
        loop6_end = *(stop_double + 6);
        loop7_end = *(stop_double + 7);
        loop8_end = *(stop_double + 8);

        /* It is faster to have a conditional check OUTSIDE a for          */
        /* loop than to check the condition every time through the loop.   */
        if (fdimen_range == sFalse)
          {
             /* Last dimension fixed: one element is copied per innermost pass. */
             offset0 = next_dimen0 * *start_double;
             for (loop0 = *start_double; loop0 <= loop0_end;
                  ++loop0, offset0 += next_dimen0)
               {
                  offset1 = next_dimen1 * *(start_double + 1);
                  for (loop1 = *(start_double + 1); loop1 <= loop1_end;
                       ++loop1, offset1 += next_dimen1)
                    {
                       offset2 = next_dimen2 * *(start_double + 2);
                       for (loop2 = *(start_double + 2); loop2 <= loop2_end;
                            ++loop2, offset2 += next_dimen2)
                         {
                            offset3 = next_dimen3 * *(start_double + 3);
                            for (loop3 = *(start_double + 3); loop3 <= loop3_end;
                                 ++loop3, offset3 += next_dimen3)
                              {
                                 offset4 = next_dimen4 * *(start_double + 4);
                                 for (loop4 = *(start_double + 4); loop4 <= loop4_end;
                                      ++loop4, offset4 += next_dimen4)
                                   {
                                      offset5 = next_dimen5 * *(start_double + 5);
                                      for (loop5 = *(start_double + 5); loop5 <= loop5_end;
                                           ++loop5, offset5 += next_dimen5)
                                        {
                                           offset6 = next_dimen6 * *(start_double + 6);
                                           for (loop6 = *(start_double + 6); loop6 <= loop6_end;
                                                ++loop6, offset6 += next_dimen6)
                                             {
                                                offset7 = next_dimen7 * *(start_double + 7);
                                                for (loop7 = *(start_double + 7); loop7 <= loop7_end;
                                                     ++loop7, offset7 += next_dimen7)
                                                  {
                                                     /* Stay along a single index location for last */
                                                     /* dimension.                                  */
                                                     ind = offset0 + offset1 + offset2 + offset3 +
                                                           offset4 + offset5 + offset6 + offset7 +
                                                           *(start_double + 8);
                                                     *(result_double + which_val) = *(tensor_double + ind);
                                                     ++which_val;
                                                  }
                                             }
                                        }
                                   }
                              }
                         }
                    }
               }
          }   /* if condition */
        else
          {
             /* Last dimension is a range: innermost loop walks consecutive */
             /* elements along the fastest-varying dimension.               */
             offset0 = next_dimen0 * *start_double;
             for (loop0 = *start_double; loop0 <= loop0_end;
                  ++loop0, offset0 += next_dimen0)
               {
                  offset1 = next_dimen1 * *(start_double + 1);
                  for (loop1 = *(start_double + 1); loop1 <= loop1_end;
                       ++loop1, offset1 += next_dimen1)
                    {
                       offset2 = next_dimen2 * *(start_double + 2);
                       for (loop2 = *(start_double + 2); loop2 <= loop2_end;
                            ++loop2, offset2 += next_dimen2)
                         {
                            offset3 = next_dimen3 * *(start_double + 3);
                            for (loop3 = *(start_double + 3); loop3 <= loop3_end;
                                 ++loop3, offset3 += next_dimen3)
                              {
                                 offset4 = next_dimen4 * *(start_double + 4);
                                 for (loop4 = *(start_double + 4); loop4 <= loop4_end;
                                      ++loop4, offset4 += next_dimen4)
                                   {
                                      offset5 = next_dimen5 * *(start_double + 5);
                                      for (loop5 = *(start_double + 5); loop5 <= loop5_end;
                                           ++loop5, offset5 += next_dimen5)
                                        {
                                           offset6 = next_dimen6 * *(start_double + 6);
                                           for (loop6 = *(start_double + 6); loop6 <= loop6_end;
                                                ++loop6, offset6 += next_dimen6)
                                             {
                                                offset7 = next_dimen7 * *(start_double + 7);
                                                for (loop7 = *(start_double + 7); loop7 <= loop7_end;
                                                     ++loop7, offset7 += next_dimen7)
                                                  {
                                                     for (loop8 = *(start_double + 8); loop8 <= loop8_end;
                                                          ++loop8)
                                                       {
                                                          ind = offset0 + offset1 + offset2 + offset3 +
                                                                offset4 + offset5 + offset6 + offset7 +
                                                                loop8;
                                                          *(result_double + which_val) = *(tensor_double + ind);
                                                          ++which_val;
                                                       }
                                                  }
                                             }
                                        }
                                   }
                              }
                         }
                    }
               }
          }   /* else condition */
     }   /* if (double_precision == sTrue) */
   else
     {
        /* IDFS path: single-precision data with SDDAS_LONG start/stop indices. */
        tensor_float = (SDDAS_FLOAT *) tensorA;
        result_float = (SDDAS_FLOAT *) res_ptr;
        start_long = (SDDAS_LONG *) start_ind;
        stop_long = (SDDAS_LONG *) stop_ind;
        fdimen_range = (*(start_long + 8) == *(stop_long + 8)) ?
                        sFalse : sTrue;
        which_val = 0;
        next_dimen0 = *next_dimen;
        next_dimen1 = *(next_dimen + 1);
        next_dimen2 = *(next_dimen + 2);
        next_dimen3 = *(next_dimen + 3);
        next_dimen4 = *(next_dimen + 4);
        next_dimen5 = *(next_dimen + 5);
        next_dimen6 = *(next_dimen + 6);
        next_dimen7 = *(next_dimen + 7);

        /* Set FOR loop termination variables. */
        loop0_end = *stop_long;
        loop1_end = *(stop_long + 1);
        loop2_end = *(stop_long + 2);
        loop3_end = *(stop_long + 3);
        loop4_end = *(stop_long + 4);
        loop5_end = *(stop_long + 5);
        loop6_end = *(stop_long + 6);
        loop7_end = *(stop_long + 7);
        loop8_end = *(stop_long + 8);

        /* It is faster to have a conditional check OUTSIDE a for          */
        /* loop than to check the condition every time through the loop.   */
        if (fdimen_range == sFalse)
          {
             /* Last dimension fixed: one element is copied per innermost pass. */
             offset0 = next_dimen0 * *start_long;
             for (loop0 = *start_long; loop0 <= loop0_end;
                  ++loop0, offset0 += next_dimen0)
               {
                  offset1 = next_dimen1 * *(start_long + 1);
                  for (loop1 = *(start_long + 1); loop1 <= loop1_end;
                       ++loop1, offset1 += next_dimen1)
                    {
                       offset2 = next_dimen2 * *(start_long + 2);
                       for (loop2 = *(start_long + 2); loop2 <= loop2_end;
                            ++loop2, offset2 += next_dimen2)
                         {
                            offset3 = next_dimen3 * *(start_long + 3);
                            for (loop3 = *(start_long + 3); loop3 <= loop3_end;
                                 ++loop3, offset3 += next_dimen3)
                              {
                                 offset4 = next_dimen4 * *(start_long + 4);
                                 for (loop4 = *(start_long + 4); loop4 <= loop4_end;
                                      ++loop4, offset4 += next_dimen4)
                                   {
                                      offset5 = next_dimen5 * *(start_long + 5);
                                      for (loop5 = *(start_long + 5); loop5 <= loop5_end;
                                           ++loop5, offset5 += next_dimen5)
                                        {
                                           offset6 = next_dimen6 * *(start_long + 6);
                                           for (loop6 = *(start_long + 6); loop6 <= loop6_end;
                                                ++loop6, offset6 += next_dimen6)
                                             {
                                                offset7 = next_dimen7 * *(start_long + 7);
                                                for (loop7 = *(start_long + 7); loop7 <= loop7_end;
                                                     ++loop7, offset7 += next_dimen7)
                                                  {
                                                     /* Stay along a single index location for last */
                                                     /* dimension.                                  */
                                                     ind = offset0 + offset1 + offset2 + offset3 +
                                                           offset4 + offset5 + offset6 + offset7 +
                                                           *(start_long + 8);
                                                     *(result_float + which_val) = *(tensor_float + ind);
                                                     ++which_val;
                                                  }
                                             }
                                        }
                                   }
                              }
                         }
                    }
               }
          }   /* if condition */
        else
          {
             /* Last dimension is a range: innermost loop walks consecutive */
             /* elements along the fastest-varying dimension.               */
             offset0 = next_dimen0 * *start_long;
             for (loop0 = *start_long; loop0 <= loop0_end;
                  ++loop0, offset0 += next_dimen0)
               {
                  offset1 = next_dimen1 * *(start_long + 1);
                  for (loop1 = *(start_long + 1); loop1 <= loop1_end;
                       ++loop1, offset1 += next_dimen1)
                    {
                       offset2 = next_dimen2 * *(start_long + 2);
                       for (loop2 = *(start_long + 2); loop2 <= loop2_end;
                            ++loop2, offset2 += next_dimen2)
                         {
                            offset3 = next_dimen3 * *(start_long + 3);
                            for (loop3 = *(start_long + 3); loop3 <= loop3_end;
                                 ++loop3, offset3 += next_dimen3)
                              {
                                 offset4 = next_dimen4 * *(start_long + 4);
                                 for (loop4 = *(start_long + 4); loop4 <= loop4_end;
                                      ++loop4, offset4 += next_dimen4)
                                   {
                                      offset5 = next_dimen5 * *(start_long + 5);
                                      for (loop5 = *(start_long + 5); loop5 <= loop5_end;
                                           ++loop5, offset5 += next_dimen5)
                                        {
                                           offset6 = next_dimen6 * *(start_long + 6);
                                           for (loop6 = *(start_long + 6); loop6 <= loop6_end;
                                                ++loop6, offset6 += next_dimen6)
                                             {
                                                offset7 = next_dimen7 * *(start_long + 7);
                                                for (loop7 = *(start_long + 7); loop7 <= loop7_end;
                                                     ++loop7, offset7 += next_dimen7)
                                                  {
                                                     for (loop8 = *(start_long + 8); loop8 <= loop8_end;
                                                          ++loop8)
                                                       {
                                                          ind = offset0 + offset1 + offset2 + offset3 +
                                                                offset4 + offset5 + offset6 + offset7 +
                                                                loop8;
                                                          *(result_float + which_val) = *(tensor_float + ind);
                                                          ++which_val;
                                                       }
                                                  }
                                             }
                                        }
                                   }
                              }
                         }
                    }
               }
          }   /* else condition */
     }   /* else */
}