/******************************************************************************/
/*                                                                            */
/*                       X r d X r o o t d A i o . c c                        */
/*                                                                            */
/* (c) 2004 by the Board of Trustees of the Leland Stanford, Jr., University  */
/*                            All Rights Reserved                             */
/*   Produced by Andrew Hanushevsky for Stanford University under contract    */
/*              DE-AC02-76-SFO0515 with the Department of Energy              */
/******************************************************************************/

//        $Id: XrdXrootdAio.cc 35287 2010-09-14 21:19:35Z ganis $

const char *XrdXrootdAioCVSID = "$Id: XrdXrootdAio.cc 35287 2010-09-14 21:19:35Z ganis $";

#include <unistd.h>

#include "Xrd/XrdBuffer.hh"
#include "Xrd/XrdLink.hh"
#include "XrdSys/XrdSysError.hh"
#include "XrdSys/XrdSysPthread.hh"
#include "XrdSfs/XrdSfsInterface.hh"
#include "XrdXrootd/XrdXrootdAio.hh"
#include "XrdXrootd/XrdXrootdFile.hh"
#include "XrdXrootd/XrdXrootdProtocol.hh"
#include "XrdXrootd/XrdXrootdStats.hh"
#include "XrdXrootd/XrdXrootdTrace.hh"

/******************************************************************************/
/*                        S t a t i c   O b j e c t s                         */
/******************************************************************************/

XrdBuffManager           *XrdXrootdAio::BPool;
XrdScheduler             *XrdXrootdAio::Sched;
XrdXrootdStats           *XrdXrootdAio::SI;

XrdSysMutex               XrdXrootdAio::fqMutex;
XrdXrootdAio             *XrdXrootdAio::fqFirst = 0;
const char               *XrdXrootdAio::TraceID = "Aio";

int                       XrdXrootdAio::maxAio;

XrdSysError              *XrdXrootdAioReq::eDest;
XrdSysMutex               XrdXrootdAioReq::rqMutex;
XrdXrootdAioReq          *XrdXrootdAioReq::rqFirst = 0;
const char               *XrdXrootdAioReq::TraceID = "AioReq";

int                       XrdXrootdAioReq::QuantumMin;
int                       XrdXrootdAioReq::Quantum;
int                       XrdXrootdAioReq::QuantumMax;
int                       XrdXrootdAioReq::maxAioPR  = 8;
int                       XrdXrootdAioReq::maxAioPR2 =16;

extern XrdOucTrace       *XrdXrootdTrace;

/******************************************************************************/
/*                   X r d X r o o t d A i o : : A l l o c                    */
/******************************************************************************/

XrdXrootdAio *XrdXrootdAio::Alloc(XrdXrootdAioReq *arp, int bsize)
{
   XrdXrootdAio *aiop;

// Obtain an aio object
//
   fqMutex.Lock();
   if ((aiop = fqFirst)) fqFirst = aiop->Next;
      else if (maxAio) aiop = addBlock();
   if (aiop && (SI->AsyncNow > SI->AsyncMax)) SI->AsyncMax = SI->AsyncNow;
   fqMutex.UnLock();

// Allocate a buffer for this object
//
   if (aiop)
      {if (bsize && (aiop->buffp = BPool->Obtain(bsize)))
          {aiop->sfsAio.aio_buf = (void *)(aiop->buffp->buff);
           aiop->aioReq = arp;
           aiop->TIdent = arp->Link->ID;
          }
          else {aiop->Recycle(); aiop = 0;}
      }

// Return what we have
//
   return aiop;
}

/******************************************************************************/
/*                X r d X r o o t d A i o : : d o n e R e a d                 */
/******************************************************************************/

// Aio read requests are double buffered. So, there is only one aiocb active
// at a time. This is done for two reasons:
// 1) Provide a serial stream to the client, and
// 2) avoid swamping the network adapter.
// Additionally, double buffering requires minimal locking and simplifies the
// redrive logic. While this knowledge violates OO design, it substantially
// speeds up async I/O handling. This method is called out of the async event
// handler so it does very little work.
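
// Rough sketch of how the read-side pieces in this file fit together (an
// overview, not authoritative documentation): XrdXrootdAioReq::Read() launches
// a single aio read from the request's free list; doneRead() queues the
// finished aio on aioDone and schedules the AioReq; endRead() then issues the
// next read for any remaining bytes before sending the completed buffer to the
// client, so at most one aiocb is in flight for a given read request.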

void XrdXrootdAio::doneRead()
{
// Place this aio request on the completed queue
//
   aioReq->aioDone = this;

// Extract out any error conditions (keep only the first one)
//
   if (Result >= 0) aioReq->aioTotal += Result;
      else if (!aioReq->aioError) aioReq->aioError = Result;

// Schedule the associated arp to redrive the I/O
//
   Sched->Schedule((XrdJob *)aioReq);
}

/******************************************************************************/
/*               X r d X r o o t d A i o : : d o n e W r i t e                */
/******************************************************************************/

// Writes are more complicated because there may be several in transit. This
// is done to keep the client from swamping the network adapter. We try
// to optimize the handling of the aio object for the common cases. This method
// is called out of the async event handler so it does very little work.

void XrdXrootdAio::doneWrite()
{
   char recycle = 0;

// Lock the aioreq object against competition
//
   aioReq->Lock();
   aioReq->numActive--;

// Extract out any error conditions (keep only the first one).
//
   if (Result >= 0) {aioReq->myIOLen  -= Result;
                     aioReq->aioTotal += Result;
                    }
      else if (!aioReq->aioError) aioReq->aioError = Result;

// Redrive the protocol if so requested. It is impossible to have a protocol
// redrive and have completed all of the I/O at the same time.
//
   if (aioReq->reDrive)
      {Sched->Schedule((XrdJob *)aioReq->Link);
       aioReq->reDrive = 0;
      }

// If more aio objects are needed, place this one on the free queue. Otherwise,
// schedule the AioReq object to complete handling the request if no more
// requests are outstanding. It is impossible to have a zero length with more
// requests outstanding.
//
   if (aioReq->myIOLen > 0)
      {Next = aioReq->aioFree; aioReq->aioFree = this;}
      else {if (!(aioReq->numActive)) Sched->Schedule((XrdJob *)aioReq);
            recycle = 1;
           }

// All done, perform early recycling if possible
//
   aioReq->UnLock();
   if (recycle) Recycle();
}

/******************************************************************************/
/*                 X r d X r o o t d A i o : : R e c y c l e                  */
/******************************************************************************/

void XrdXrootdAio::Recycle()
{

// Recycle the buffer
//
   if (buffp) {BPool->Release(buffp); buffp = 0;}

// Add this object to the free queue
//
   fqMutex.Lock();
   Next = fqFirst;
   fqFirst = this;
   if (--SI->AsyncNow < 0) SI->AsyncNow=0;
   fqMutex.UnLock();
}

/******************************************************************************/
/*                       P r i v a t e   M e t h o d s                        */
/******************************************************************************/
/******************************************************************************/
/*                X r d X r o o t d A i o : : a d d B l o c k                 */
/******************************************************************************/

XrdXrootdAio *XrdXrootdAio::addBlock()
{
   const int numalloc = 4096/sizeof(XrdXrootdAio);
   int i = (numalloc <= maxAio ? numalloc : maxAio);
   XrdXrootdAio *aiop;

   TRACE(DEBUG, "Adding " <<i <<" aio objects; " <<maxAio <<" pending.");

   if ((aiop = new XrdXrootdAio[i]()))
      {maxAio -= i;
       while(--i) {aiop->Next = fqFirst; fqFirst = aiop; aiop++;}
      }

   return aiop;
}

/******************************************************************************/
/*                       X r d X r o o t d A i o R e q                        */
/******************************************************************************/
/******************************************************************************/
/*                X r d X r o o t d A i o R e q : : A l l o c                 */
/******************************************************************************/

// Implicit Parameters: prot->myIOLen   // Length of i/o request
//                      prot->myOffset  // Starting offset
//                      prot->myFile    // Target file
//                      prot->Link      // Link object
//                      prot->Response  // Response object

XrdXrootdAioReq *XrdXrootdAioReq::Alloc(XrdXrootdProtocol *prot,
                                       char iotype, int numaio)
{
   int i, cntaio, myQuantum, iolen = prot->myIOLen;
   XrdXrootdAioReq *arp;
   XrdXrootdAio    *aiop;

// Obtain an aioreq object
//
   rqMutex.Lock();
   if ((arp = rqFirst)) rqFirst = arp->Next;
      else arp = addBlock();
   rqMutex.UnLock();

// Make sure we have one, fully reset it if we do
//
   if (!arp) return arp;
   arp->Clear(prot->Link);
   if (!numaio) numaio = maxAioPR;

// Compute the number of aio objects we should get and the Quantum size we
// should use. This is a delicate balancing act. We don't want too many
// segments but neither do we want too large an i/o size. So, if the i/o size
// is less than the quantum then use half a quantum. If the number of segments
// is greater than twice what we would like, then use a larger quantum size.
//
   if (iolen < Quantum) 
      {myQuantum = QuantumMin;
       if (!(cntaio = iolen / myQuantum)) cntaio = 1;
          else if (iolen % myQuantum) cntaio++;
      } else {cntaio = iolen / Quantum;
              if (cntaio <= maxAioPR2) myQuantum = Quantum;
                 else {myQuantum = QuantumMax;
                       cntaio = iolen / myQuantum;
                      }
              if (iolen % myQuantum) cntaio++;
             }

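// Illustrative numbers (assumed here, not taken from any actual config): if
// Init() were called with iosize=64K, then Quantum=64K, QuantumMin=32K and,
// buffer-size capping aside, QuantumMax=128K. A 20K request falls below
// Quantum, so myQuantum=32K and cntaio=1. A 2MB request gives 2MB/64K=32
// segments, more than the default maxAioPR2 of 16, so myQuantum becomes 128K
// and cntaio=16.
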
// Get appropriate number of aio objects
//
   i = (maxAioPR < cntaio ? maxAioPR : cntaio);
   while(i && (aiop = XrdXrootdAio::Alloc(arp, myQuantum)))
        {aiop->Next = arp->aioFree; arp->aioFree = aiop; i--;}

// Make sure we have at least the minimum number of aio objects
//
   if (i && (maxAioPR - i) < 2 && cntaio > 1)
      {arp->Recycle(0); return (XrdXrootdAioReq *)0;}

// Complete the request information
//
   if (iotype != 'w') prot->Link->setRef(1);
   arp->Instance   = prot->Link->Inst();
   arp->myIOLen    = iolen;  // Amount that is left to send
   arp->myOffset   = prot->myOffset;
   arp->myFile     = prot->myFile;
   arp->Response   = prot->Response;
   arp->aioType    = iotype;

// Return what we have
//
   return arp;
}

/******************************************************************************/
/*               X r d X r o o t d A i o R e q : : g e t A i o                */
/******************************************************************************/

XrdXrootdAio *XrdXrootdAioReq::getAio()
{
  XrdXrootdAio *aiop;

// Grab the next free aio object. If none, we return a null pointer. While this
// is a classic consumer/producer problem, normally handled by a semaphore,
// doing so would cause more threads to be tied up as the load increases. We
// want the opposite effect for scaling purposes. So, we use a redrive scheme.
//
   Lock();
   if ((aiop = aioFree)) {aioFree = aiop->Next; aiop->Next = 0;}
      else reDrive = 1;
   UnLock();
   return aiop;
}

/******************************************************************************/
/*                 X r d X r o o t d A i o R e q : : I n i t                  */
/******************************************************************************/

void XrdXrootdAioReq::Init(int iosize, int maxaiopr, int maxaio)
{
   XrdXrootdAio    *aiop;
   XrdXrootdAioReq *arp;

// Set the pointers to the buffer pool, scheduler, and statistical area; these
// are only used by the Aio object
//
   XrdXrootdAio::Sched = XrdXrootdProtocol::Sched;
   XrdXrootdAio::BPool = XrdXrootdProtocol::BPool;
   XrdXrootdAio::SI    = XrdXrootdProtocol::SI;

// Set the pointer to the error object and compute the limits
//
   eDest       = &XrdXrootdProtocol::eDest;
   Quantum     = static_cast<size_t>(iosize);
   QuantumMin  = Quantum / 2;
   QuantumMax  = Quantum * 2;
   if (QuantumMax > XrdXrootdProtocol::maxBuffsz)
       QuantumMax = XrdXrootdProtocol::maxBuffsz;

// Set the maximum number of aio objects we can have (used by Aio object only)
// Note that sysconf(_SC_AIO_MAX) usually provides an unreliable number if it
// provides a number at all.
//
   maxAioPR  = (maxaiopr < 1 ? 8 : maxaiopr);
   maxAioPR2 = maxAioPR * 2;
   XrdXrootdAio::maxAio = (maxaio < maxAioPR ? maxAioPR : maxaio);

// Do some debugging
//
   TRACE(DEBUG, "Max aio/req=" <<maxAioPR
                <<"; aio/srv=" <<XrdXrootdAio::maxAio
                <<"; Quantum=" <<Quantum);

// Preallocate a block of AIO request objects and AIO I/O objects
//
   if ((arp  =               addBlock())) {arp->Clear(0); arp->Recycle(0);}
   if ((aiop = XrdXrootdAio::addBlock())) aiop->Recycle();
}

/******************************************************************************/
/*                 X r d X r o o t d A i o R e q : : R e a d                  */
/******************************************************************************/

int XrdXrootdAioReq::Read()
{
   int rc;
   XrdXrootdAio *aiop;

// Get an aio object. No need to lock since we are simply double buffered.
// In fact, this interface is called only once to start the I/O. After the
// initial call, the I/O is propelled via aio redrive logic.
//
   if (!(aiop = aioFree)) return -ENOBUFS;
   aioFree = aiop->Next;
   aiop->Next = 0;

// Fill out the aiocb block
//
// aiop->sfsAio.aio_buf     = aiop->buffp->buff  (Filled in by Alloc())
   aiop->sfsAio.aio_offset  = myOffset;
   aiop->sfsAio.aio_nbytes  = (aiop->buffp->bsize>myIOLen ? myIOLen
                                                          : aiop->buffp->bsize);
// aiop->sfsAio.aio_reqprio = 0;                 (Filled in by XrdSfs Construct)
// aiop->sfsAio.aio_fildes  =                    (Filled in by XrdSfs aio read)

// Fire up the I/O (no need to lock this as it's simple double buffering)
//
   myIOLen  -= aiop->sfsAio.aio_nbytes;
   myOffset += aiop->sfsAio.aio_nbytes;
   numActive++;
   if ((rc = myFile->XrdSfsp->read((XrdSfsAio *)aiop))) 
      {numActive--; Recycle();} // Only 1!

// All done
//
   return rc;
}

/******************************************************************************/
/*              X r d X r o o t d A i o R e q : : R e c y c l e               */
/******************************************************************************/

void XrdXrootdAioReq::Recycle(int dref, XrdXrootdAio *oldp)
{
   XrdXrootdAio *aiop;

// Recycle any hanging aio object
//
// TRACE(DEBUG, "Recycling aioreq; dref=" <<dref <<" link=" <<Link);
   if (oldp) oldp->Recycle();

// When dref is <0, Recycle() was called to terminate an already started
// operation. Make sure that everything is drained prior to recycling.
// Warning: the caller may not have the aioReq lock held in this case.
//
   if (dref < 0)
      {Lock();
       if (numActive)
          {aioError = -1; respDone = 1;
           UnLock();
           return;
          }
       UnLock();
      }

// Get rid of any aio objects that we might have
//
   while((aiop = aioDone)) {aioDone = aiop->Next; aiop->Recycle();}
   while((aiop = aioFree)) {aioFree = aiop->Next; aiop->Recycle();}

// If we have a link and it should be dereferenced, do so now
//
   if (Link && dref && aioType != 'w') Link->setRef(-1);

// If this object is locked, remove the lock (the caller must have obtained it)
//
   if (isLocked) UnLock();

// Put ourselves on the free queue
//
   rqMutex.Lock();
   Next = rqFirst;
   rqFirst = this;
   rqMutex.UnLock();
}

/******************************************************************************/
/*                X r d X r o o t d A i o R e q : : W r i t e                 */
/******************************************************************************/

int XrdXrootdAioReq::Write(XrdXrootdAio *aiop)
{
   int rc;

// For write, the aiop should or will be filled in as follows:
//
// aiop->sfsAio.aio_buf     = aiop->buffp->buff  (Filled in by Alloc())
// aiop->sfsAio.aio_offset  = Filled in by caller
// aiop->sfsAio.aio_nbytes  = Filled in by caller
// aiop->sfsAio.aio_reqprio = 0                  (Filled in by XrdSfs Construct)
// aiop->sfsAio.aio_fildes  =                    (Filled in by XrdSfs aio write)

// Fire up the I/O. Be optimistic that this will succeed.
//
   Lock(); numActive++; UnLock();
   if ((rc = myFile->XrdSfsp->write((XrdSfsAio *)aiop))) 
      {Lock(); numActive--; UnLock(); Recycle(-1);}

// All done
//
   return rc;
}

/******************************************************************************/
/*                       P r i v a t e   M e t h o d s                        */
/******************************************************************************/
/******************************************************************************/
/*             X r d X r o o t d A i o R e q : : a d d B l o c k              */
/******************************************************************************/

XrdXrootdAioReq *XrdXrootdAioReq::addBlock()
{
   const int numalloc = 4096/sizeof(XrdXrootdAioReq);
   int i = numalloc;
   XrdXrootdAioReq *arp;

   if (!numalloc) return new XrdXrootdAioReq();
   TRACE(DEBUG, "Adding " <<numalloc <<" aioreq objects.");

   if ((arp = new XrdXrootdAioReq[numalloc]()))
      while(--i) {arp->Next = rqFirst; rqFirst = arp; arp++;}

   return arp;
}

/******************************************************************************/
/*                                 C l e a r                                  */
/******************************************************************************/

void XrdXrootdAioReq::Clear(XrdLink *lnkp)
{
Next      = 0;
myOffset  = 0;
myIOLen   = 0;
Instance  = 0;
Link      = lnkp;
myFile    = 0;
aioDone   = 0;
aioFree   = 0;
numActive = 0;
aioTotal  = 0;
aioError  = 0;
aioType   = 0;
respDone  = 0;
isLocked  = 0;
reDrive   = 0;
}

/******************************************************************************/
/*              X r d X r o o t d A i o R e q : : e n d R e a d               */
/******************************************************************************/

void XrdXrootdAioReq::endRead()
{
   XrdXrootdAio *aiop;
   int rc;

// For read requests, schedule the next read request and send the data we
// already have. Since we don't know if that read will complete before we
// can send the data of the just completed read, we must lock the AioReq.
// We do know that if we have the lock, absolutely nothing is in transit.
//
   Lock();
   numActive--;

// Do a sanity check. The link should not have changed hands but stranger
// things have happened.
//
   if (!(Link->isInstance(Instance))) {Scuttle("aio read"); return;}

// Dequeue the completed request (we know we're just double buffered but the
// queueing is structured so this works even if we're n-buffered).
//
   aiop = aioDone;
   aioDone = aiop->Next;

// If we encountered an error, send off the error message now and terminate
//
   if (aioError
   || (myIOLen > 0 && aiop->Result == aiop->buffp->bsize && (aioError=Read())))
      {sendError((char *)aiop->TIdent);
       Recycle(1, aiop);
       return;
      }

// We may or may not have an I/O request in flight. However, send off
// whatever data we have at this point.
//
   rc = (numActive ?
         Response.Send(kXR_oksofar, aiop->buffp->buff, aiop->Result) :
         Response.Send(             aiop->buffp->buff, aiop->Result));

// Stop the operation if no I/O is in flight. Make the request stop-pending if
// we could not send the data to the client.
//
   if (!numActive) 
      {myFile->readCnt += aioTotal;
       Recycle(1, aiop);
      }
      else {aiop->Next = aioFree, aioFree = aiop;
            if (rc < 0) {aioError = -1; respDone = 1;}
            UnLock();
           }
}

/******************************************************************************/
/*             X r d X r o o t d A i o R e q : : e n d W r i t e              */
/******************************************************************************/

void XrdXrootdAioReq::endWrite()
{

// For write requests, this method is called when all of the I/O has completed.
// There is no need to lock this object since nothing is pending. In any case,
// do a sanity check. The link should not have changed hands but stranger
// things have happened.
//
   if (!(Link->isInstance(Instance))) {Scuttle("aio write"); return;}

// If we encountered an error, send off the error message else indicate all OK
//
   if (aioError) sendError(Link->ID);
      else Response.Send();

// Add in the bytes written. This is approximate because it is done without
// obtaining any kind of lock. Fortunately, it is only statistical in nature.
//
   myFile->writeCnt += aioTotal;

// We are done, simply recycle ourselves.
//
   Recycle();
}

/******************************************************************************/
/*              X r d X r o o t d A i o R e q : : S c u t t l e               */
/******************************************************************************/

void XrdXrootdAioReq::Scuttle(const char *opname)
{

// Log this event. We can't trust much of anything at this point.
//
   eDest->Emsg("scuttle",opname,"failed; link reassigned to",Link->ID);

// We can just recycle ourselves at this point since we know we are in a
// transition window where nothing is active w.r.t. this request.
//
   Recycle(0);
}

/******************************************************************************/
/*            X r d X r o o t d A i o R e q : : s e n d E r r o r             */
/******************************************************************************/

// Warning! The caller must have appropriately serialized the use of this method

void XrdXrootdAioReq::sendError(char *tident)
{
   char mbuff[4096];
   int rc;

// If a response was sent, don't send one again
//
   if (respDone) return;
   respDone = 1;

// Generate message text. We can't rely on the sfs interface to do this since
// that interface is synchronous.
//
   snprintf(mbuff, sizeof(mbuff)-1, "XrdXrootdAio: Unable to %s %s; %s",
           (aioType == 'r' ? "read" : "write"), myFile->XrdSfsp->FName(),
           eDest->ec2text(aioError));

// Place the error message in the log
//
   eDest->Emsg("aio", tident, mbuff);

// Remap the error from the filesystem
//
   rc = XrdXrootdProtocol::mapError(aioError);

// Send the error back to the client (ignore any errors)
//
   Response.Send((XErrorCode)rc, mbuff);
}
