path: root/khtml/html/htmltokenizer.cpp
Diffstat (limited to 'khtml/html/htmltokenizer.cpp')
-rw-r--r--  khtml/html/htmltokenizer.cpp  42
1 file changed, 21 insertions, 21 deletions
diff --git a/khtml/html/htmltokenizer.cpp b/khtml/html/htmltokenizer.cpp
index 5da6edd14..ba8e99a50 100644
--- a/khtml/html/htmltokenizer.cpp
+++ b/khtml/html/htmltokenizer.cpp
@@ -89,7 +89,7 @@ static const char titleEnd [] = "</title";
#define fixUpChar(x)
#else
#define fixUpChar(x) \
- switch ((x).unicode()) \
+ switch ((x).tqunicode()) \
{ \
case 0x80: (x) = 0x20ac; break; \
case 0x82: (x) = 0x201a; break; \
@@ -347,7 +347,7 @@ void HTMLTokenizer::parseSpecial(TokenizerString &src)
// possible end of tagname, lets check.
if ( !scriptCodeResync && !escaped && !src.escaped() && ( ch == '>' || ch == '/' || ch <= ' ' ) && ch &&
scriptCodeSize >= searchStopperLen &&
- !TQConstString( scriptCode+scriptCodeSize-searchStopperLen, searchStopperLen ).string().find( searchStopper, 0, false )) {
+ !TQConstString( scriptCode+scriptCodeSize-searchStopperLen, searchStopperLen ).string().tqfind( searchStopper, 0, false )) {
scriptCodeResync = scriptCodeSize-searchStopperLen+1;
tquote = NoQuote;
continue;
@@ -471,7 +471,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
if (strict)
{
- if (src->unicode() == '-') {
+ if (src->tqunicode() == '-') {
delimiterCount++;
if (delimiterCount == 2) {
delimiterCount = 0;
@@ -482,7 +482,7 @@ void HTMLTokenizer::parseComment(TokenizerString &src)
delimiterCount = 0;
}
- if ((!strict || canClose) && src->unicode() == '>')
+ if ((!strict || canClose) && src->tqunicode() == '>')
{
bool handleBrokenComments = brokenComments && !( script || style );
bool scriptEnd=false;
@@ -521,7 +521,7 @@ void HTMLTokenizer::parseServer(TokenizerString &src)
checkScriptBuffer(src.length());
while ( !src.isEmpty() ) {
scriptCode[ scriptCodeSize++ ] = *src;
- if (src->unicode() == '>' &&
+ if (src->tqunicode() == '>' &&
scriptCodeSize > 1 && scriptCode[scriptCodeSize-2] == '%') {
++src;
server = false;
@@ -607,7 +607,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
while( !src.isEmpty() )
{
- ushort cc = src->unicode();
+ ushort cc = src->tqunicode();
switch(Entity) {
case NoEntity:
return;
@@ -639,7 +639,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
case Hexadecimal:
{
- int uc = EntityChar.unicode();
+ int uc = EntityChar.tqunicode();
int ll = kMin<uint>(src.length(), 8);
while(ll--) {
TQChar csrc(src->lower());
@@ -658,7 +658,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case Decimal:
{
- int uc = EntityChar.unicode();
+ int uc = EntityChar.tqunicode();
int ll = kMin(src.length(), 9-cBufferPos);
while(ll--) {
cc = src->cell();
@@ -695,7 +695,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
// be IE compatible and interpret even unterminated entities
// outside tags. like "foo &nbspstuff bla".
if ( tag == NoTag ) {
- const entity* e = kde_findEntity(cBuffer, cBufferPos);
+ const entity* e = kde_tqfindEntity(cBuffer, cBufferPos);
if ( e && e->code < 256 ) {
EntityChar = e->code;
entityLen = cBufferPos;
@@ -705,7 +705,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
if(cBufferPos == 9) Entity = SearchSemicolon;
if(Entity == SearchSemicolon) {
if(cBufferPos > 1) {
- const entity *e = kde_findEntity(cBuffer, cBufferPos);
+ const entity *e = kde_tqfindEntity(cBuffer, cBufferPos);
// IE only accepts unterminated entities < 256,
// Gecko accepts them all, but only outside tags
if(e && ( tag == NoTag || e->code < 256 || *src == ';' )) {
@@ -718,7 +718,7 @@ void HTMLTokenizer::parseEntity(TokenizerString &src, TQChar *&dest, bool start)
}
case SearchSemicolon:
#ifdef TOKEN_DEBUG
- kdDebug( 6036 ) << "ENTITY " << EntityChar.unicode() << endl;
+ kdDebug( 6036 ) << "ENTITY " << EntityChar.tqunicode() << endl;
#endif
fixUpChar(EntityChar);
@@ -813,7 +813,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
break;
}
// this is a nasty performance trick. will work for the A-Z
- // characters, but not for others. if it contains one,
+ // characters, but not for others. if it tqcontains one,
// we fail anyway
char cc = curchar;
cBuffer[cBufferPos++] = cc | 0x20;
@@ -919,7 +919,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
a = khtml::getAttrID(cBuffer, cBufferPos-1);
}
if (!a)
- attrName = TQString::fromLatin1(TQCString(cBuffer, cBufferPos+1).data());
+ attrName = TQString::tqfromLatin1(TQCString(cBuffer, cBufferPos+1).data());
}
dest = buffer;
@@ -941,7 +941,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
}
if ( cBufferPos == CBUFLEN ) {
cBuffer[cBufferPos] = '\0';
- attrName = TQString::fromLatin1(TQCString(cBuffer, cBufferPos+1).data());
+ attrName = TQString::tqfromLatin1(TQCString(cBuffer, cBufferPos+1).data());
dest = buffer;
*dest++ = 0;
tag = SearchEqual;
@@ -956,7 +956,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
bool atespace = false;
while(!src.isEmpty()) {
- curchar = src->unicode();
+ curchar = src->tqunicode();
if(curchar > ' ') {
if(curchar == '=') {
#ifdef TOKEN_DEBUG
@@ -988,7 +988,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
{
ushort curchar;
while(!src.isEmpty()) {
- curchar = src->unicode();
+ curchar = src->tqunicode();
if(curchar > ' ') {
if(( curchar == '\'' || curchar == '\"' )) {
tquote = curchar == '\"' ? DoubleQuote : SingleQuote;
@@ -1012,7 +1012,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->unicode();
+ curchar = src->tqunicode();
if(curchar <= '\'' && !src.escaped()) {
// ### attributes like '&{blaa....};' are supposed to be treated as jscript.
if ( curchar == '&' )
@@ -1050,7 +1050,7 @@ void HTMLTokenizer::parseTag(TokenizerString &src)
ushort curchar;
while(!src.isEmpty()) {
checkBuffer();
- curchar = src->unicode();
+ curchar = src->tqunicode();
if(curchar <= '>' && !src.escaped()) {
// parse Entities
if ( curchar == '&' )
@@ -1351,7 +1351,7 @@ void HTMLTokenizer::write( const TokenizerString &str, bool appendData )
// do we need to enlarge the buffer?
checkBuffer();
- ushort cc = src->unicode();
+ ushort cc = src->tqunicode();
if (skipLF && (cc != '\n'))
skipLF = false;
@@ -1595,7 +1595,7 @@ void HTMLTokenizer::finish()
killTimer( m_autoCloseTimer );
m_autoCloseTimer = 0;
}
- // do this as long as we don't find matching comment ends
+ // do this as long as we don't tqfind matching comment ends
while((title || script || comment || server) && scriptCode && scriptCodeSize)
{
// we've found an unmatched comment start
@@ -1618,7 +1618,7 @@ void HTMLTokenizer::finish()
food += TQString(scriptCode, scriptCodeSize);
}
else {
- pos = TQConstString(scriptCode, scriptCodeSize).string().find('>');
+ pos = TQConstString(scriptCode, scriptCodeSize).string().tqfind('>');
food.setUnicode(scriptCode+pos+1, scriptCodeSize-pos-1); // deep copy
}
KHTML_DELETE_QCHAR_VEC(scriptCode);
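
For reference, the fixUpChar macro touched in the first hunk remaps Windows-1252 code points in the 0x80-0x9F range to the Unicode characters they are meant to represent (0x80 becomes U+20AC, 0x82 becomes U+201A, and so on); the hunk only changes the accessor from unicode() to tqunicode(). The standalone C++ sketch below illustrates the same mapping outside of TQt. It reproduces only the two cases visible in the hunk, and the function name fixUpCp1252 is invented for illustration, not part of the patch.

#include <cstdint>

// Illustrative sketch only: the real fixUpChar macro in htmltokenizer.cpp
// covers the full 0x80-0x9F range and rewrites a TQChar in place.
static uint16_t fixUpCp1252(uint16_t c)
{
    switch (c) {
    case 0x80: return 0x20AC; // EURO SIGN
    case 0x82: return 0x201A; // SINGLE LOW-9 QUOTATION MARK
    // ... remaining Windows-1252 cases omitted ...
    default:   return c;      // leave other code points untouched
    }
}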