1 // Created on: 1992-06-24
2 // Created by: Gilles DEBARBOUILLE
3 // Copyright (c) 1992-1999 Matra Datavision
4 // Copyright (c) 1999-2014 OPEN CASCADE SAS
6 // This file is part of Open CASCADE Technology software library.
8 // This library is free software; you can redistribute it and/or modify it under
9 // the terms of the GNU Lesser General Public License version 2.1 as published
10 // by the Free Software Foundation, with special exception defined in the file
11 // OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
12 // distribution for complete text of the license and disclaimer of any warranty.
14 // Alternatively, this file may be used under the terms of Open CASCADE
15 // commercial license or contractual agreement.
17 #include <Units_Lexicon.hxx>
19 #include <Standard_Type.hxx>
20 #include <TCollection_AsciiString.hxx>
21 #include <TCollection_HAsciiString.hxx>
22 #include <Units_Token.hxx>
24 IMPLEMENT_STANDARD_RTTIEXT(Units_Lexicon,Standard_Transient)
// NOTE(review): these appear to be the data members of the LexiconItem struct
// used by THE_LEXICON below — the struct header is not visible in this chunk;
// confirm against the full file.
32 char Prefix[10]; //!< prefix or symbol (e.g. "k" for kilo)
33 char Operation[2]; //!< operation
34 double Value; //!< numeric parameter (e.g. multiplier)
39 //! Original table (UnitsAPI/Lexi_Expr.dat) used symbols from extended ASCII,
40 //! which should not be used within UTF-8 text.
42 //! This table preserves these codes for compatibility.
43 //! UTF-8 items might be uncommented after updating UnitsAPI/Units.dat
44 //! and analysis of further consequences.
45 static const LexiconItem THE_LEXICON[] =
// NOTE(review): the opening brace and the entries from original lines 46-57
// (presumably operators/exponent words) are not visible in this chunk.
// -- exponent symbols: superscript two/three map to power ("P") tokens --
58 { "\xB2", "P", 2.0 }, // ISO 8859-1/ISO Latin-1 (extended ASCII)
59 //{ "\xC2\xB2", "P", 2.0 }, // UTF-8
63 { "\xB3", "P", 3.0 }, // ISO 8859-1/ISO Latin-1 (extended ASCII)
64 //{ "\xC2\xB3", "P", 3.0 }, // UTF-8
// -- SI decimal prefixes; Value is the corresponding multiplier --
67 { "y", "M", 1.E-24 }, // yocto
68 { "z", "M", 1.E-21 }, // zepto
69 { "a", "M", 1.E-18 }, // atto
70 { "f", "M", 1.E-15 }, // femto
71 { "p", "M", 1.E-12 }, // pico
72 { "n", "M", 1.E-09 }, // nano
73 { "\xB5", "M", 1.E-06 }, // micro, ISO 8859-1/ISO Latin-1 (extended ASCII)
74 //{ "\xC2\xB5", "M", 1.E-06 }, // micro, UTF-8
75 { "m", "M", 1.E-03 }, // milli
76 { "c", "M", 1.E-02 }, // centi
77 { "d", "M", 1.E-01 }, // deci
78 { "da", "M", 1.E+01 }, // deca
79 { "h", "M", 1.E+02 }, // hecto
80 { "k", "M", 1.E+03 }, // kilo
81 { "M", "M", 1.E+06 }, // mega
82 { "G", "M", 1.E+09 }, // giga
83 { "T", "M", 1.E+12 }, // tera
84 { "P", "M", 1.E+15 }, // peta
85 { "E", "M", 1.E+18 }, // exa
86 { "Z", "M", 1.E+21 }, // zetta
87 { "Y", "M", 1.E+24 }, // yotta
// -- mathematical constants: pilcrow symbol stands for pi --
89 { "\xB6", "", M_PI }, // Pilcrow sign, ISO 8859-1/ISO Latin-1 (extended ASCII)
90 //{ "\xCF\x80", "", M_PI }, // UTF-8
96 //=======================================================================
97 //function : Units_Lexicon
99 //=======================================================================
101 Units_Lexicon::Units_Lexicon()
//=======================================================================
//function : Creates
//=======================================================================
111 void Units_Lexicon::Creates()
113 thesequenceoftokens = new Units_TokensSequence();
115 const Standard_Integer aNbLexiItems = sizeof(THE_LEXICON) / sizeof(LexiconItem);
116 for (Standard_Integer anItemIter = 0; anItemIter < aNbLexiItems; ++anItemIter)
118 const LexiconItem& anItem = THE_LEXICON[anItemIter];
119 if (thesequenceoftokens->IsEmpty())
121 Handle(Units_Token) aToken = new Units_Token (anItem.Prefix, anItem.Operation, anItem.Value);
122 thesequenceoftokens->Prepend (aToken);
126 AddToken (anItem.Prefix, anItem.Operation, anItem.Value);
131 //=======================================================================
132 //function : AddToken
134 //=======================================================================
136 void Units_Lexicon::AddToken(const Standard_CString aword,
137 const Standard_CString amean,
138 const Standard_Real avalue)
140 Handle(Units_Token) token;
141 Handle(Units_Token) referencetoken;
142 Standard_Boolean found = Standard_False;
143 Standard_Integer index;
145 for(index=1;index<=thesequenceoftokens->Length();index++) {
146 referencetoken = thesequenceoftokens->Value(index);
147 if( referencetoken->Word() == aword ) {
148 referencetoken->Update(amean);
149 found = Standard_True;
152 else if( !( referencetoken->Word()>aword ) ) {
153 token = new Units_Token(aword,amean,avalue);
154 thesequenceoftokens->InsertBefore(index,token);
155 found = Standard_True;
160 token = new Units_Token(aword,amean,avalue);
161 thesequenceoftokens->Append(token);