Understanding wip_TCPClientCreate() and evh()


#1

This code is based on the given WIP TCP client over GPRS example.

/* Opens an asynchronous TCP connection to PEER_STRADDR:PEER_PORT.
 * All subsequent socket activity is reported to the evh() event
 * handler; this function only initiates the connection and logs
 * whether channel creation succeeded. */
void email_point() {
  wip_channel_t chan;

  TRACE (( 1, "email_point" ));
  wip_debug( "[SAMPLE]: connecting to client %s:%i...\n", PEER_STRADDR, PEER_PORT);
  chan = wip_TCPClientCreate( PEER_STRADDR, PEER_PORT, evh, NULL);
  if( chan == NULL) {
    wip_debug( "[SAMPLE] Can't connect\n");
    return;
  }
}

/* buffer to hold the data I want to send*/
static u8 snd_buffer[SND_BUFFER_SIZE];

/* How many bytes of [buffer] have already been sent. */
static int snd_offset = 0;

/* Accumulates the server's reply text across reads; rcv_offset is the
 * count of valid bytes, i.e. the index where the next read lands. */
static char rcv_buffer[RCV_BUFFER_SIZE];
static int  rcv_offset = 0;

/***************************************************************************/
/*  Function   : evh_data                                                  */
/*-------------------------------------------------------------------------*/
/*  Object     : Handling events happenning on the TCP client socket.      */
/*-------------------------------------------------------------------------*/
/*  Variable Name     |IN |OUT|GLB|  Utilisation                           */
/*--------------------+---+---+---+----------------------------------------*/
/*  ev                | X |   |   |  WIP event                             */
/*--------------------+---+---+---+----------------------------------------*/
/*  ctx               | X |   |   |  user data (unused)                    */
/*--------------------+---+---+---+----------------------------------------*/
/***************************************************************************/
/***************************************************************************/
/*  Function   : evh                                                       */
/*-------------------------------------------------------------------------*/
/*  Object     : Handling events happening on the TCP client socket.       */
/*               Drives a minimal SMTP dialogue as a state machine:        */
/*               rts_* states wait for a server status line, cts_* states  */
/*               send the next command.                                    */
/*-------------------------------------------------------------------------*/
/*  Variable Name     |IN |OUT|GLB|  Utilisation                           */
/*--------------------+---+---+---+----------------------------------------*/
/*  ev                | X |   |   |  WIP event                             */
/*--------------------+---+---+---+----------------------------------------*/
/*  ctx               | X |   |   |  user data (unused)                    */
/*--------------------+---+---+---+----------------------------------------*/
/***************************************************************************/
static void evh( wip_event_t *ev, void *ctx) {
    TRACE (( 1, "Email EVH" ));
    /* rts_* = "ready to receive server status", cts_* = "clear to send". */
    enum State {rts_Helo,
    rts_Mail_From,
    rts_RCPT_To,
    rts_DATA,
    rts_MSG,
    cts_Helo,
    cts_Mail_From,
    cts_RCPT_To,
    cts_DATA,
    cts_MSG,
    done,
    error
    };

  /* Persists across events: one SMTP exchange per connection. */
  static enum State state = rts_Helo;

  switch( ev->kind) {

  case WIP_CEV_OPEN: {
        wip_debug ("[SAMPLE] Connection established successfully\n");
        state = rts_Helo;
    break;
  }
  case WIP_CEV_READ: { /* data has arrived to be read */
    switch(state)
    {
        /* Each rts_* case reads whatever is on the socket and checks the
         * three-digit SMTP status at the start of the reply.  Replies may
         * arrive split across reads; rcv_offset accumulates until at
         * least a full status line ("NNN\r\n", 5 bytes) is present. */
        case rts_Helo:
            /* greeting must start with "220" */
            read(ev, ctx);
            if( rcv_offset > 4 && rcv_buffer[0] == '2' && rcv_buffer[1] == '2' && rcv_buffer[2] == '0')
            {
                rcv_offset = 0;
                state = cts_Helo;
            }
        break;
        case rts_Mail_From:
            /* reply to HELO must start with "250" */
            read(ev, ctx);
            if( rcv_offset > 4 && rcv_buffer[0] == '2' && rcv_buffer[1] == '5' && rcv_buffer[2] == '0')
            {
                rcv_offset = 0;
                state = cts_Mail_From;
            }
        break;
        case rts_RCPT_To:
            /* reply to MAIL FROM must start with "250" */
            read(ev, ctx);
            if( rcv_offset > 4 && rcv_buffer[0] == '2' && rcv_buffer[1] == '5' && rcv_buffer[2] == '0')
            {
                rcv_offset = 0;
                state = cts_RCPT_To;
            }
        break;
        case rts_DATA:
            /* reply to RCPT TO must start with "250" */
            read(ev, ctx);
            if( rcv_offset > 4 && rcv_buffer[0] == '2' && rcv_buffer[1] == '5' && rcv_buffer[2] == '0')
            {
                rcv_offset = 0;
                state = cts_DATA;
            }
        break;
        case rts_MSG:
            /* reply to DATA must start with "354" (start mail input) */
            read(ev, ctx);
            if( rcv_offset > 4 && rcv_buffer[0] == '3' && rcv_buffer[1] == '5' && rcv_buffer[2] == '4')
            {
                rcv_offset = 0;
                state = cts_MSG;
            }
        break;
        default:; /* do nothing */
    }
    /* BUG FIX: no "break" here -- deliberately fall through into the
     * WIP_CEV_WRITE logic below.  WIP emits CEV_WRITE only on the
     * transition from "cannot send" to "can send" (i.e. once just after
     * CEV_OPEN, and again only after a wip_write() has been refused for
     * lack of TCP buffer space).  These short SMTP commands never
     * saturate the buffers, so no further CEV_WRITE will ever arrive:
     * once a server reply moves us into a cts_* state we must send the
     * next command immediately rather than wait for an event that will
     * not come.  If the reply check above failed, state is still rts_*
     * and the switch below hits "default", sending nothing. */
    /* fall through */
  }
  case WIP_CEV_WRITE: {
    switch(state)
    {
        case cts_Helo:
        {
            TRACE (( 2, "Send Helo" ));
            JP_strncpy(snd_buffer, "Helo telus.net\r\n", SND_BUFFER_SIZE);
            if(write(ev, ctx))
            {
                snd_offset = 0;
                state = rts_Mail_From;
            }
        }
        break;
        case cts_Mail_From:
        {
            TRACE (( 2, "Send Mail from:" ));
            char * temp = "MAIL FROM: ";
            u16 temp_len = JP_strlen(temp);
            JP_strncpy(snd_buffer, temp, SND_BUFFER_SIZE);
            JP_strncpy(snd_buffer + temp_len, "do_not_reply@temp.com\r\n", SND_BUFFER_SIZE-temp_len);
            if(write(ev, ctx))
            {
                snd_offset = 0;
                state = rts_RCPT_To;
            }
        }
        break;
        case cts_RCPT_To:
        {
            TRACE (( 2, "Send RCPT TO:" ));
            char * temp = "RCPT TO: ";
            u16 temp_len= JP_strlen(temp);
            JP_strncpy(snd_buffer, temp, SND_BUFFER_SIZE);
            JP_strncpy(snd_buffer + temp_len, recipient_, SND_BUFFER_SIZE-temp_len);
            temp_len= JP_strlen(snd_buffer);
            JP_strncpy(snd_buffer + temp_len, "\r\n", SND_BUFFER_SIZE-temp_len);
            if(write(ev, ctx))
            {
                snd_offset = 0;
                state = rts_DATA;
            }
        }
        break;
        case cts_DATA:
        {
            JP_strncpy(snd_buffer, "DATA\r\n", SND_BUFFER_SIZE);
            if(write(ev, ctx))
            {
                snd_offset = 0;
                state = rts_MSG;
            }
        }
        break;
        case cts_MSG:
        {
            /* Message body: optional "Subject:" header, the message, then
             * the SMTP end-of-data marker and QUIT in one buffer. */
            char * temp;
            u16 temp_len = 0;
            if(subject_)
            {
                temp = "Subject:";
                JP_strncpy(snd_buffer, temp, SND_BUFFER_SIZE);
                temp_len= JP_strlen(snd_buffer);
                JP_strncpy(snd_buffer + temp_len, subject_, SND_BUFFER_SIZE-temp_len);
                temp_len= JP_strlen(snd_buffer);
                JP_strncpy(snd_buffer + temp_len, "\r\n\r\n", SND_BUFFER_SIZE-temp_len);
                temp_len= JP_strlen(snd_buffer);
            }
            JP_strncpy(snd_buffer + temp_len, msg_, SND_BUFFER_SIZE-temp_len);
            temp_len= JP_strlen(snd_buffer);
            JP_strncpy(snd_buffer + temp_len, "\r\n.\r\nQUIT\r\n", SND_BUFFER_SIZE-temp_len);
            if(write(ev, ctx))
            {
                snd_offset = 0;
                state = done;
                wip_close( ev->channel);
                done_sending();
            }
        }
        break;
        default:; /* rts_* state: nothing to send yet */
    }

    break;
  }
  case WIP_CEV_ERROR: {
    wip_debug( "[SAMPLE] Error %i on socket. Closing.\n",
               ev->content.error.errnum);
               state = error;
    wip_close( ev->channel);
    break;
  }
  case WIP_CEV_PEER_CLOSE: {
    wip_debug( "[SAMPLE] Connection closed by peer\n");
    wip_close( ev->channel);
    state = error;
    break;
  }
  default:
    break;
  }
}

/* Appends incoming socket data to rcv_buffer at rcv_offset.
 * On success rcv_offset is advanced and the buffer is kept
 * NUL-terminated so it can safely be passed to TRACE as a string.
 * On read error the buffer and offset are left untouched.
 * NOTE: the name shadows the POSIX read(); kept unchanged so existing
 * callers in evh() still resolve to this function. */
static void read(wip_event_t *ev, void *ctx)
{
    int nread;
    wip_debug ("[SAMPLE] Some data arrived\n");
    /* Reserve one byte for the NUL terminator. */
    nread = wip_read( ev->channel, rcv_buffer + rcv_offset,
                      sizeof( rcv_buffer) - rcv_offset - 1);
    if( nread < 0) { wip_debug( "[SAMPLE] read error %i\n", nread); return; }
    rcv_buffer[rcv_offset + nread] = '\0';
    /* BUG FIX: the original traced rcv_buffer + snd_offset (the *send*
     * offset) -- trace the chunk that was just received instead. */
    TRACE (( 3,  (rcv_buffer + rcv_offset) ));
    rcv_offset += nread;
    if( rcv_offset >= (int)sizeof( rcv_buffer) - 1) {
      wip_debug( "[SAMPLE] Reception capacity exceeded, won't read more\n");
    } else {
      wip_debug( "[SAMPLE] Wrote %i bytes of data from network to rcv_buffer. "
                 "%i bytes remain available in rcv_buffer\n",
                 nread, sizeof( rcv_buffer) - rcv_offset);
    }

}

/* Sends the NUL-terminated command currently held in snd_buffer,
 * resuming from snd_offset (so a partially-sent command continues where
 * it left off on the next CEV_WRITE event).
 * Returns TRUE once the whole string has been sent (snd_offset reset),
 * FALSE if bytes remain or the write failed.
 * NOTE: the name shadows the POSIX write(); kept unchanged so existing
 * callers in evh() still resolve to this function. */
static bool write(wip_event_t *ev, void *ctx)
{
    int nwrite;
    TRACE (( 3,  (snd_buffer + snd_offset) ));
    wip_debug ("[SAMPLE] Can send more data\n");
    nwrite = wip_write( ev->channel, snd_buffer + snd_offset,
                        JP_strlen(snd_buffer) - snd_offset);
    /* BUG FIX: the original had a bare "return;" here -- undefined
     * behaviour in a bool-returning function whose result every caller
     * tests.  Report failure explicitly. */
    if( nwrite < 0) { wip_debug( "[SAMPLE] write error %i\n", nwrite); return FALSE; }
    snd_offset += nwrite;
    if( snd_offset == JP_strlen( snd_buffer)) {
      wip_debug( "[SAMPLE] Everything has been sent, won't send more.\n");
      snd_offset = 0;
      return TRUE;
    } else {
      /* BUG FIX: report the bytes of the *string* still unsent
       * (strlen - offset), not the free space in the buffer. */
      wip_debug( "[SAMPLE] Wrote %i bytes. "
                 "%i bytes left to send in snd_buffer\n",
                 nwrite, JP_strlen( snd_buffer) - snd_offset);
    }
    return FALSE;
}

The Trace returns the following:

1188.182	Trace		1	internet_connection : Main
1188.698	Trace		1	(cfg_gprs) Enter.
1188.964	Trace		1	(evh_sim) Enter.
1218.977	Trace		1	(evh_sim) Enter.
1219.134	Trace		1	(poll_creg_callback) Enter.
1219.150	Trace		1	(poll_creg_callback) Registered on GPRS network.
1219.165	Trace		1	(open_and_start_bearer) Enter.
1221.680	Trace		1	Unable to find the string of the remote trace in the file (ID = 528)
1223.008	Trace		1	email_point
1250.881	Trace		1	Email EVH
1252.221	Trace		1	Email EVH
1254.222	Trace		1	Email EVH
1254.299	Trace		3	220 priv-edtnaa12.telusplanet.net ESMTP 
			
1353.643	Trace		1	Email EVH
1371.440	Trace		1	Email EVH

The ev->kind are : open, write, read. read(timeout message). error.

So I get the message that I’ve successfully connected, but then I never get another chance to send data, so the connection times out. If anyone knows what I’m doing wrong I’d greatly appreciate the help.

In particular I’d like to know what triggers the call of the evh( wip_event_t *ev, void *ctx); function.


#2

WIP_CEV_WRITE marks the transition from “cannot send more data” to “can send more data”. That is, you get a CEV_WRITE just after the CEV_OPEN, but then, you won’t get another one until:

  • you attempt to send data with wip_write(), and it fails, at least partially (either it sends no data, or it sends only part of the data you gave as a parameter), because TCP buffers are full.
  • then, TCP buffers are partially flushed and you can again send more data: that’s when CEV_WRITE is emitted.

As long as you haven’t saturated TCP buffers, keep wip_write()ing, you won’t get another CEV_WRITE as long as all your calls to wip_write() completely succeed.

Similarly, CEV_READ marks the transition from “cannot read data” to “can read some data”: as long as you haven’t completely emptied the TCP reception buffer with wip_read(), you won’t get another CEV_READ.

Although the solution of emitting CEV_READ/CEV_WRITE events every time data arrives/departs seems appealing at first, it would actually cause a nightmare of concurrency issues, and very tricky errors to find/reproduce in users’ applicative code, hence the solution described above.