X-Git-Url: http://git.samba.org/samba.git/?a=blobdiff_plain;f=drivers%2Fstaging%2Fvc04_services%2Finterface%2Fvchi%2Fvchi_cfg.h;h=138c36151a22b312a1aad52fe50bd6ac17c958c9;hb=0dd09bc02c1bad55e92306ca83b38b3cf48b9f40;hp=89aa4e6122cdf83e9905a378820b58b7cd065f44;hpb=1c7c3237c0cc4ad3c7b0df458290c8e2a652f178;p=sfrench%2Fcifs-2.6.git
diff --git a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
index 89aa4e6122cd..138c36151a22 100644
--- a/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
+++ b/drivers/staging/vc04_services/interface/vchi/vchi_cfg.h
@@ -4,13 +4,17 @@
 #ifndef VCHI_CFG_H_
 #define VCHI_CFG_H_
 
-/****************************************************************************************
- * Defines in this first section are part of the VCHI API and may be examined by VCHI
- * services.
- ***************************************************************************************/
-
-/* Required alignment of base addresses for bulk transfer, if unaligned transfers are not enabled */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*******************************************************************************
+ * Defines in this first section are part of the VCHI API and may be examined by
+ * VCHI services.
+ ******************************************************************************/
+
+/*
+ * Required alignment of base addresses for bulk transfer, if unaligned
+ * transfers are not enabled
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
 #ifndef VCHI_BULK_ALIGN
 # if __VCCOREVER__ >= 0x04000000
 # define VCHI_BULK_ALIGN 32 // Allows for the need to do cache cleans
@@ -19,9 +23,13 @@
 # endif
 #endif
 
-/* Required length multiple for bulk transfers, if unaligned transfers are not enabled */
-/* May be less than or greater than VCHI_BULK_ALIGN */
-/* Really determined by the message driver, and should be available from a run-time call. */
+/*
+ * Required length multiple for bulk transfers, if unaligned transfers are
+ * not enabled
+ * May be less than or greater than VCHI_BULK_ALIGN
+ * Really determined by the message driver, and should be available from
+ * a run-time call.
+ */
 #ifndef VCHI_BULK_GRANULARITY
 # if __VCCOREVER__ >= 0x04000000
 # define VCHI_BULK_GRANULARITY 32 // Allows for the need to do cache cleans
@@ -39,19 +47,24 @@
 # endif
 #endif
 
-/******************************************************************************************
- * Defines below are system configuration options, and should not be used by VCHI services.
- *****************************************************************************************/
+/******************************************************************************
+ * Defines below are system configuration options, and should not be used by
+ * VCHI services.
+ ******************************************************************************/
 
-/* How many connections can we support? A localhost implementation uses 2 connections,
- * 1 for host-app, 1 for VMCS, and these are hooked together by a loopback MPHI VCFW
- * driver. */
+/*
+ * How many connections can we support? A localhost implementation uses
+ * 2 connections, 1 for host-app, 1 for VMCS, and these are hooked together
+ * by a loopback MPHI VCFW driver.
+ */
 #ifndef VCHI_MAX_NUM_CONNECTIONS
 # define VCHI_MAX_NUM_CONNECTIONS 3
 #endif
 
-/* How many services can we open per connection? Extending this doesn't cost processing time, just a small
- * amount of static memory. */
+/*
+ * How many services can we open per connection? Extending this doesn't cost
+ * processing time, just a small amount of static memory.
+ */
 #ifndef VCHI_MAX_SERVICES_PER_CONNECTION
 # define VCHI_MAX_SERVICES_PER_CONNECTION 36
 #endif
@@ -66,8 +79,10 @@
 # define VCHI_MAX_BULK_RX_CHANNELS_PER_CONNECTION 1 // 1 MPHI
 #endif
 
-/* How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the effective
- * receive queue space, less message headers. */
+/*
+ * How many receive slots do we use. This times VCHI_MAX_MSG_SIZE gives the
+ * effective receive queue space, less message headers.
+ */
 #ifndef VCHI_NUM_READ_SLOTS
 # if defined(VCHI_LOCAL_HOST_PORT)
 # define VCHI_NUM_READ_SLOTS 4
 # else
 # define VCHI_NUM_READ_SLOTS 48
 # endif
 #endif
@@ -76,112 +91,141 @@
 # endif
 #endif
 
-/* Do we utilise overrun facility for receive message slots? Can aid peer transmit
- * performance. Only define on VideoCore end, talking to host.
+/*
+ * Do we utilise overrun facility for receive message slots? Can aid peer
+ * transmit performance. Only define on VideoCore end, talking to host.
 */
 //#define VCHI_MSG_RX_OVERRUN
 
-/* How many transmit slots do we use. Generally don't need many, as the hardware driver
- * underneath VCHI will usually have its own buffering. */
+/*
+ * How many transmit slots do we use. Generally don't need many,
+ * as the hardware driver underneath VCHI will usually have its own buffering.
+ */
 #ifndef VCHI_NUM_WRITE_SLOTS
 # define VCHI_NUM_WRITE_SLOTS 4
 #endif
 
-/* If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or more slots,
- * then it's taking up too much buffer space, and the peer service will be told to stop
- * transmitting with an XOFF message. For this to be effective, the VCHI_NUM_READ_SLOTS
- * needs to be considerably bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency
- * is too high. */
+/*
+ * If a service has held or queued received messages in VCHI_XOFF_THRESHOLD or
+ * more slots, then it's taking up too much buffer space,
+ * and the peer service will be told to stop transmitting with an XOFF message.
+ * For this to be effective, the VCHI_NUM_READ_SLOTS needs to be considerably
+ * bigger than VCHI_NUM_WRITE_SLOTS, or the transmit latency is too high.
+ */
 #ifndef VCHI_XOFF_THRESHOLD
 # define VCHI_XOFF_THRESHOLD (VCHI_NUM_READ_SLOTS / 2)
 #endif
 
-/* After we've sent an XOFF, the peer will be told to resume transmission once the local
- * service has dequeued/released enough messages that it's now occupying
- * VCHI_XON_THRESHOLD slots or fewer. */
+/*
+ * After we've sent an XOFF, the peer will be told to resume transmission
+ * once the local service has dequeued/released enough messages that it's now
+ * occupying VCHI_XON_THRESHOLD slots or fewer.
+ */
 #ifndef VCHI_XON_THRESHOLD
 # define VCHI_XON_THRESHOLD (VCHI_NUM_READ_SLOTS / 4)
 #endif
 
-/* A size below which a bulk transfer omits the handshake completely and always goes
- * via the message channel, if bulk auxiliary is being sent on that service. (The user
- * can guarantee this by enabling unaligned transmits).
- * Not API. */
+/*
+ * A size below which a bulk transfer omits the handshake completely and always
+ * goes via the message channel, if bulk auxiliary is being sent on that
+ * service. (The user can guarantee this by enabling unaligned transmits).
+ * Not API.
+ */
 #ifndef VCHI_MIN_BULK_SIZE
 # define VCHI_MIN_BULK_SIZE (VCHI_MAX_MSG_SIZE / 2 < 4096 ? VCHI_MAX_MSG_SIZE / 2 : 4096)
 #endif
 
-/* Maximum size of bulk transmission chunks, for each interface type. A trade-off between
- * speed and latency; the smaller the chunk size the better change of messages and other
- * bulk transmissions getting in when big bulk transfers are happening. Set to 0 to not
- * break transmissions into chunks.
+/*
+ * Maximum size of bulk transmission chunks, for each interface type.
+ * A trade-off between speed and latency; the smaller the chunk size the better
+ * chance of messages and other bulk transmissions getting in when big bulk
+ * transfers are happening. Set to 0 to not break transmissions into chunks.
 */
 #ifndef VCHI_MAX_BULK_CHUNK_SIZE_MPHI
 # define VCHI_MAX_BULK_CHUNK_SIZE_MPHI (16 * 1024)
 #endif
 
-/* NB Chunked CCP2 transmissions violate the letter of the CCP2 spec by using "JPEG8" mode
- * with multiple-line frames. Only use if the receiver can cope. */
+/*
+ * NB Chunked CCP2 transmissions violate the letter of the CCP2 spec
+ * by using "JPEG8" mode with multiple-line frames. Only use if the receiver
+ * can cope.
+ */
 #ifndef VCHI_MAX_BULK_CHUNK_SIZE_CCP2
 # define VCHI_MAX_BULK_CHUNK_SIZE_CCP2 0
 #endif
 
-/* How many TX messages can we have pending in our transmit slots. Once exhausted,
- * vchi_msg_queue will be blocked. */
+/*
+ * How many TX messages can we have pending in our transmit slots.
+ * Once exhausted, vchi_msg_queue will be blocked.
+ */
 #ifndef VCHI_TX_MSG_QUEUE_SIZE
 # define VCHI_TX_MSG_QUEUE_SIZE 256
 #endif
 
-/* How many RX messages can we have parsed in the receive slots. Once exhausted, parsing
- * will be suspended until older messages are dequeued/released. */
+/*
+ * How many RX messages can we have parsed in the receive slots. Once exhausted,
+ * parsing will be suspended until older messages are dequeued/released.
+ */
 #ifndef VCHI_RX_MSG_QUEUE_SIZE
 # define VCHI_RX_MSG_QUEUE_SIZE 256
 #endif
 
-/* Really should be able to cope if we run out of received message descriptors, by
- * suspending parsing as the comment above says, but we don't. This sweeps the issue
- * under the carpet. */
-#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS
+/*
+ * Really should be able to cope if we run out of received message descriptors,
+ * by suspending parsing as the comment above says, but we don't.
+ * This sweeps the issue under the carpet.
+ */
+#if VCHI_RX_MSG_QUEUE_SIZE < (VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS
 # undef VCHI_RX_MSG_QUEUE_SIZE
-# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE/16 + 1) * VCHI_NUM_READ_SLOTS)
+# define VCHI_RX_MSG_QUEUE_SIZE ((VCHI_MAX_MSG_SIZE / 16 + 1) * VCHI_NUM_READ_SLOTS)
 #endif
 
-/* How many bulk transmits can we have pending. Once exhausted, vchi_bulk_queue_transmit
- * will be blocked. */
+/*
+ * How many bulk transmits can we have pending. Once exhausted,
+ * vchi_bulk_queue_transmit will be blocked.
+ */
 #ifndef VCHI_TX_BULK_QUEUE_SIZE
 # define VCHI_TX_BULK_QUEUE_SIZE 64
 #endif
 
-/* How many bulk receives can we have pending. Once exhausted, vchi_bulk_queue_receive
- * will be blocked. */
+/*
+ * How many bulk receives can we have pending. Once exhausted,
+ * vchi_bulk_queue_receive will be blocked.
+ */
 #ifndef VCHI_RX_BULK_QUEUE_SIZE
 # define VCHI_RX_BULK_QUEUE_SIZE 64
 #endif
 
-/* A limit on how many outstanding bulk requests we expect the peer to give us. If
- * the peer asks for more than this, VCHI will fail and assert. The number is determined
- * by the peer's hardware - it's the number of outstanding requests that can be queued
- * on all bulk channels. VC3's MPHI peripheral allows 16. */
+/*
+ * A limit on how many outstanding bulk requests we expect the peer to give us.
+ * If the peer asks for more than this, VCHI will fail and assert.
+ * The number is determined by the peer's hardware
+ * - it's the number of outstanding requests that can be queued
+ * on all bulk channels. VC3's MPHI peripheral allows 16. */
 #ifndef VCHI_MAX_PEER_BULK_REQUESTS
 # define VCHI_MAX_PEER_BULK_REQUESTS 32
 #endif
 
-/* Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
+/*
+ * Define VCHI_CCP2TX_MANUAL_POWER if the host tells us when to turn the CCP2
 * transmitter on and off.
 */
 /*#define VCHI_CCP2TX_MANUAL_POWER*/
 
 #ifndef VCHI_CCP2TX_MANUAL_POWER
-/* Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state. Set
- * negative for no IDLE.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into IDLE state.
+ * Set negative for no IDLE.
 */
 # ifndef VCHI_CCP2TX_IDLE_TIMEOUT
 # define VCHI_CCP2TX_IDLE_TIMEOUT 5
 # endif
-/* Timeout (in milliseconds) for putting the CCP2TX interface into OFF state. Set
- * negative for no OFF.
+/*
+ * Timeout (in milliseconds) for putting the CCP2TX interface into OFF state.
+ * Set negative for no OFF.
 */
 # ifndef VCHI_CCP2TX_OFF_TIMEOUT
 # define VCHI_CCP2TX_OFF_TIMEOUT 1000