3 Unix SMB/Netbios implementation.
5 read/write to a files_struct
6 Copyright (C) Andrew Tridgell 1992-1998
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 extern int DEBUGLEVEL;
27 static BOOL setup_write_cache(files_struct *, SMB_OFF_T);
29 /****************************************************************************
30 seek a file. Try to avoid the seek if possible
31 ****************************************************************************/
33 SMB_OFF_T seek_file(files_struct *fsp,SMB_OFF_T pos)
/* Seek fsp->fd to pos via the VFS lseek, updating fsp->pos on success.
 * NOTE(review): lines are elided in this extract — an 'offset' local is
 * added to pos, presumably set non-zero for postscript print files by the
 * missing branch below; confirm against the full source. */
38 if (fsp->print_file && lp_postscript(fsp->conn->service))
41 seek_ret = fsp->conn->vfs_ops.lseek(fsp,fsp->fd,pos+offset,SEEK_SET);
44 * We want to maintain the fiction that we can seek
45 * on a fifo for file system purposes. This allows
46 * people to set up UNIX fifo's that feed data to Windows
/* lseek on a FIFO fails with ESPIPE — pretend the seek succeeded. */
50 if((seek_ret == -1) && (errno == ESPIPE)) {
51 seek_ret = pos+offset;
/* Real failure or a short seek: log it (error path lines elided here). */
55 if((seek_ret == -1) || (seek_ret != pos+offset)) {
56 DEBUG(0,("seek_file: sys_lseek failed. Error was %s\n", strerror(errno) ));
/* Record the logical position (without the print-file offset) on the fsp. */
61 fsp->pos = seek_ret - offset;
63 DEBUG(10,("seek_file: requested pos = %.0f, new pos = %.0f\n",
64 (double)(pos+offset), (double)fsp->pos ));
69 /****************************************************************************
70 Read from write cache if we can.
71 ****************************************************************************/
74 BOOL read_from_write_cache(files_struct *fsp,char *data,SMB_OFF_T pos,size_t n)
/* Try to satisfy a read of n bytes at pos entirely from fsp's write cache.
 * Only a full hit counts: [pos, pos+n) must lie wholly inside the cached
 * range [wcp->offset, wcp->offset + wcp->data_size).
 * NOTE(review): the guard for wcp == NULL is elided in this extract —
 * confirm the full source returns False before dereferencing wcp. */
76 write_cache *wcp = fsp->wcp;
/* Miss when the request is larger than the cached data or falls outside it. */
81 if(n > wcp->data_size || pos < wcp->offset || pos + n > wcp->offset + wcp->data_size)
/* Full hit: copy straight out of the cache buffer. */
84 memcpy(data, wcp->data + (pos - wcp->offset), n);
87 INC_PROFILE_COUNT(writecache_read_hits);
95 ****************************************************************************/
97 ssize_t read_file(files_struct *fsp,char *data,SMB_OFF_T pos,size_t n)
/* Read n bytes at pos into data.  Serves the read from the write cache when
 * it is fully contained there; otherwise flushes the cache (READ_FLUSH),
 * seeks, and reads through the VFS.  Returns the byte count accumulated in
 * 'ret' (error-path lines are elided in this extract). */
99 ssize_t ret=0,readret;
101 /* you can't read from print files */
102 if (fsp->print_file) {
107 * Serve from write cache if we can.
109 if(read_from_write_cache(fsp, data, pos, n))
/* Cache miss: any cached-but-unwritten data must hit disk before we read. */
112 flush_write_cache(fsp, READ_FLUSH);
114 if (seek_file(fsp,pos) == -1) {
115 DEBUG(3,("read_file: Failed to seek to %.0f\n",(double)pos));
120 readret = fsp->conn->vfs_ops.read(fsp,fsp->fd,data,n);
/* NOTE(review): handling of readret <= 0 is elided here — confirm the
 * full source's error/EOF behavior. */
123 if (readret > 0) ret += readret;
129 /* how many write cache buffers have been allocated */
130 static unsigned int allocated_write_caches;
132 /****************************************************************************
133 *Really* write to a file.
134 ****************************************************************************/
136 static ssize_t real_write_file(files_struct *fsp,char *data,SMB_OFF_T pos, size_t n)
/* Write n bytes at pos straight to the fd, bypassing the write cache.
 * A pos of -1 means "write at the current file position" (no seek).
 * Returns what vfs_write_data returns; -1 on seek failure (elided line). */
138 if ((pos != -1) && (seek_file(fsp,pos) == -1))
141 return vfs_write_data(fsp,data,n);
144 /****************************************************************************
146 ****************************************************************************/
148 ssize_t write_file(files_struct *fsp, char *data, SMB_OFF_T pos, size_t n)
/* Write n bytes at pos through the write cache when one is attached to fsp,
 * falling back to real_write_file otherwise.  Also handles: print-file
 * spooling, setting the DOS archive bit on first modification, breaking
 * level II oplocks held by other opens, and write-cache profiling.
 * NOTE(review): this extract has many elided lines, so several control-flow
 * joins and error returns below are not visible — comments are hedged
 * accordingly. */
150 write_cache *wcp = fsp->wcp;
151 ssize_t total_written = 0;
/* Print files go straight to the print job; they are never cached. */
154 if (fsp->print_file) {
155 return print_job_write(fsp->print_jobid, data, n);
/* Read-only open: error path (elided) presumably sets errno/returns -1. */
158 if (!fsp->can_write) {
/* First modification of this open: mark it and, if archive mapping is on,
 * set the DOS archive bit via file_chmod. */
163 if (!fsp->modified) {
165 fsp->modified = True;
167 if (fsp->conn->vfs_ops.fstat(fsp,fsp->fd,&st) == 0) {
168 int dosmode = dos_mode(fsp->conn,fsp->fsp_name,&st);
169 if (MAP_ARCHIVE(fsp->conn) && !IS_DOS_ARCHIVE(dosmode)) {
170 file_chmod(fsp->conn,fsp->fsp_name,dosmode | aARCH,&st);
174 * If this is the first write and we have an exclusive oplock then setup
/* Only exclusive-oplocked files get a write cache: no other client can
 * see stale data while we hold the oplock. */
178 if (EXCLUSIVE_OPLOCK_TYPE(fsp->oplock_type) && !wcp) {
179 setup_write_cache(fsp, st.st_size);
186 INC_PROFILE_COUNT(writecache_total_writes);
187 if (!fsp->oplock_type) {
188 INC_PROFILE_COUNT(writecache_non_oplock_writes);
193 * If this file is level II oplocked then we need
194 * to grab the shared memory lock and inform all
195 * other files with a level II lock that they need
196 * to flush their read caches. We keep the lock over
197 * the shared memory area whilst doing this.
200 if (LEVEL_II_OPLOCK_TYPE(fsp->oplock_type)) {
201 share_mode_entry *share_list = NULL;
202 pid_t pid = sys_getpid();
204 int num_share_modes = 0;
207 if (lock_share_entry_fsp(fsp) == False) {
208 DEBUG(0,("write_file: failed to lock share mode entry for file %s.\n", fsp->fsp_name ));
211 num_share_modes = get_share_modes(fsp->conn, fsp->dev, fsp->inode, &share_list);
/* Walk every open of this dev/inode and break each level II oplock. */
213 for(i = 0; i < num_share_modes; i++) {
214 share_mode_entry *share_entry = &share_list[i];
217 * As there could have been multiple writes waiting at the lock_share_entry
218 * gate we may not be the first to enter. Hence the state of the op_types
219 * in the share mode entries may be partly NO_OPLOCK and partly LEVEL_II
220 * oplock. It will do no harm to re-send break messages to those smbd's
221 * that are still waiting their turn to remove their LEVEL_II state, and
222 * also no harm to ignore existing NO_OPLOCK states. JRA.
225 if (share_entry->op_type == NO_OPLOCK)
/* An exclusive oplock coexisting with our level II one is impossible —
 * treat it as corruption of the share-mode database. */
229 if (EXCLUSIVE_OPLOCK_TYPE(share_entry->op_type)) {
230 DEBUG(0,("write_file: PANIC. share mode entry %d is an exlusive oplock !\n", i ));
231 unlock_share_entry(fsp->conn, fsp->dev, fsp->inode);
236 * Check if this is a file we have open (including the
237 * file we've been called to do write_file on. If so
238 * then break it directly without releasing the lock.
241 if (pid == share_entry->pid) {
242 files_struct *new_fsp = file_find_dit(fsp->dev, fsp->inode, &share_entry->time);
244 /* Paranoia check... */
245 if(new_fsp == NULL) {
246 DEBUG(0,("write_file: PANIC. share mode entry %d is not a local file !\n", i ));
247 unlock_share_entry(fsp->conn, fsp->dev, fsp->inode);
250 oplock_break_level2(new_fsp, True, token);
255 * This is a remote file and so we send an asynchronous
259 request_oplock_break(share_entry, fsp->dev, fsp->inode);
263 free((char *)share_list);
264 unlock_share_entry_fsp(fsp);
267 /* Paranoia check... */
268 if (LEVEL_II_OPLOCK_TYPE(fsp->oplock_type)) {
269 DEBUG(0,("write_file: PANIC. File %s still has a level II oplock.\n", fsp->fsp_name));
/* Periodically (every 500 writes) dump write-cache statistics at level 3. */
274 if (profile_p && profile_p->writecache_total_writes % 500 == 0) {
275 DEBUG(3,("WRITECACHE: initwrites=%u abutted=%u total=%u \
276 nonop=%u allocated=%u active=%u direct=%u perfect=%u readhits=%u\n",
277 profile_p->writecache_init_writes,
278 profile_p->writecache_abutted_writes,
279 profile_p->writecache_total_writes,
280 profile_p->writecache_non_oplock_writes,
281 profile_p->writecache_allocated_write_caches,
282 profile_p->writecache_num_write_caches,
283 profile_p->writecache_direct_writes,
284 profile_p->writecache_num_perfect_writes,
285 profile_p->writecache_read_hits ));
287 DEBUG(3,("WRITECACHE: Flushes SEEK=%d, READ=%d, WRITE=%d, READRAW=%d, OPLOCK=%d, CLOSE=%d, SYNC=%d\n",
288 profile_p->writecache_flushed_writes[SEEK_FLUSH],
289 profile_p->writecache_flushed_writes[READ_FLUSH],
290 profile_p->writecache_flushed_writes[WRITE_FLUSH],
291 profile_p->writecache_flushed_writes[READRAW_FLUSH],
292 profile_p->writecache_flushed_writes[OPLOCK_RELEASE_FLUSH],
293 profile_p->writecache_flushed_writes[CLOSE_FLUSH],
294 profile_p->writecache_flushed_writes[SYNC_FLUSH] ));
/* Presumably the no-write-cache path (guard elided): write straight through. */
300 INC_PROFILE_COUNT(writecache_direct_writes);
302 return real_write_file(fsp, data, pos, n);
305 DEBUG(9,("write_file(fd=%d pos=%d size=%d) wofs=%d wsize=%d\n",
306 fsp->fd, (int)pos, (int)n, (int)wcp->offset, (int)wcp->data_size));
309 * If we have active cache and it isn't contiguous then we flush.
310 * NOTE: There is a small problem with running out of disk ....
/* Cache holds data: try to merge this write into the cached region. */
313 if (wcp->data_size) {
315 BOOL cache_flush_needed = False;
/* Case 1: write starts inside or immediately after the cached data. */
317 if ((pos >= wcp->offset) && (pos <= wcp->offset + wcp->data_size)) {
320 * Start of write overlaps or abutts the existing data.
323 size_t data_used = MIN((wcp->alloc_size - (pos - wcp->offset)), n);
325 memcpy(wcp->data + (pos - wcp->offset), data, data_used);
328 * Update the current buffer size with the new data.
331 if(pos + data_used > wcp->offset + wcp->data_size)
332 wcp->data_size = pos + data_used - wcp->offset;
335 * If we used all the data then
/* Partial fit: the remainder must be flushed/written (lines elided). */
342 cache_flush_needed = True;
345 * Move the start of data forward by the amount used,
346 * cut down the amount left by the same amount.
354 INC_PROFILE_COUNT(writecache_abutted_writes);
356 total_written = data_used;
/* Case 2: write ends inside the cache buffer but starts before it. */
360 } else if ((pos < wcp->offset) && (pos + n > wcp->offset) &&
361 (pos + n <= wcp->offset + wcp->alloc_size)) {
364 * End of write overlaps the existing data.
367 size_t data_used = pos + n - wcp->offset;
369 memcpy(wcp->data, data + n - data_used, data_used);
372 * Update the current buffer size with the new data.
375 if(pos + n > wcp->offset + wcp->data_size)
376 wcp->data_size = pos + n - wcp->offset;
379 * We don't need to move the start of data, but we
380 * cut down the amount left by the amount used.
386 * We cannot have used all the data here.
389 cache_flush_needed = True;
392 INC_PROFILE_COUNT(writecache_abutted_writes);
394 total_written = data_used;
/* Case 3: non-contiguous extending write that still fits in the buffer. */
398 } else if ( (pos >= wcp->file_size) &&
399 (pos > wcp->offset + wcp->data_size) &&
400 (pos < wcp->offset + wcp->alloc_size) ) {
403 * Non-contiguous write part of which fits within
404 * the cache buffer and is extending the file.
409 if(pos + n <= wcp->offset + wcp->alloc_size)
412 data_used = wcp->offset + wcp->alloc_size - pos;
415 * Fill in the non-continuous area with zeros.
418 memset(wcp->data + wcp->data_size, '\0',
419 pos - (wcp->offset + wcp->data_size) );
421 memcpy(wcp->data + (pos - wcp->offset), data, data_used);
424 * Update the current buffer size with the new data.
427 if(pos + data_used > wcp->offset + wcp->data_size)
428 wcp->data_size = pos + data_used - wcp->offset;
431 * Update the known file length.
434 wcp->file_size = wcp->offset + wcp->data_size;
/* Materialize the new length on disk so the zero-filled gap is real. */
437 if (set_filelen(fsp->fd, wcp->file_size) == -1) {
438 DEBUG(0,("write_file: error %s in setting file to length %.0f\n",
439 strerror(errno), (double)wcp->file_size ));
445 * If we used all the data then
452 cache_flush_needed = True;
455 * Move the start of data forward by the amount used,
456 * cut down the amount left by the same amount.
464 INC_PROFILE_COUNT(writecache_abutted_writes);
466 total_written = data_used;
/* Case 4: no usable overlap with the cache. */
473 * Write is bigger than buffer, or there is no overlap on the
477 DEBUG(9,("write_file: non cacheable write : fd = %d, pos = %.0f, len = %u, current cache pos = %.0f \
478 len = %u\n",fsp->fd, (double)pos, (unsigned int)n, (double)wcp->offset, (unsigned int)wcp->data_size ));
481 * Update the file size if needed.
484 if(pos + n > wcp->file_size)
485 wcp->file_size = pos + n;
488 * If write would fit in the cache, and is larger than
489 * the data already in the cache, flush the cache and
490 * preferentially copy the data new data into it. Otherwise
491 * just write the data directly.
494 if ( n <= wcp->alloc_size && n > wcp->data_size) {
495 cache_flush_needed = True;
/* Otherwise (elided else): bypass the cache entirely for this write. */
498 INC_PROFILE_COUNT(writecache_direct_writes);
500 return real_write_file(fsp, data, pos, n);
/* Keep the cached notion of file length consistent with the cache contents. */
507 if(wcp->data_size > wcp->file_size)
508 wcp->file_size = wcp->data_size;
510 if (cache_flush_needed) {
511 DEBUG(3,("WRITE_FLUSH:%d: due to noncontinuous write: fd = %d, size = %.0f, pos = %.0f, \
512 n = %u, wcp->offset=%.0f, wcp->data_size=%u\n",
513 write_path, fsp->fd, (double)wcp->file_size, (double)pos, (unsigned int)n,
514 (double)wcp->offset, (unsigned int)wcp->data_size ));
516 flush_write_cache(fsp, WRITE_FLUSH);
521 * If the write request is bigger than the cache
522 * size, write it all out.
525 if (n > wcp->alloc_size ) {
526 if(real_write_file(fsp, data, pos, n) == -1)
529 INC_PROFILE_COUNT(writecache_direct_writes);
531 return total_written + n;
535 * If there's any data left, cache it.
/* Append the (remaining) data to the cache; distinguish a fresh cache fill
 * (init) from growing existing contents (abutted) for profiling. */
540 if (wcp->data_size) {
541 INC_PROFILE_COUNT(writecache_abutted_writes);
543 INC_PROFILE_COUNT(writecache_init_writes);
546 memcpy(wcp->data+wcp->data_size, data, n);
547 if (wcp->data_size == 0) {
550 INC_PROFILE_COUNT(writecache_num_write_caches);
554 DEBUG(9,("cache return %u\n", (unsigned int)n));
556 return total_written; /* .... that's a write :) */
559 return total_written;
562 /****************************************************************************
563 Delete the write cache structure.
564 ****************************************************************************/
566 void delete_write_cache(files_struct *fsp)
/* Tear down fsp's write cache and decrement the global cache count.
 * The cache must already have been flushed: asserts data_size == 0.
 * No-op when fsp has no cache (the free() lines are elided here). */
573 if(!(wcp = fsp->wcp))
577 DEC_PROFILE_COUNT(writecache_allocated_write_caches);
579 allocated_write_caches--;
/* Caller must flush first — deleting a dirty cache would lose data. */
581 SMB_ASSERT(wcp->data_size == 0);
588 DEBUG(10,("delete_write_cache: File %s deleted write cache\n", fsp->fsp_name ));
592 /****************************************************************************
593 Setup the write cache structure.
594 ****************************************************************************/
596 static BOOL setup_write_cache(files_struct *fsp, SMB_OFF_T file_size)
/* Allocate a write cache for fsp sized by the per-share "write cache size"
 * parameter, recording the current file_size in it.  Returns False when
 * caching is disabled (size 0), a cache already exists, the global cap
 * MAX_WRITE_CACHES is reached, or either malloc fails. */
598 ssize_t alloc_size = lp_write_cache_size(SNUM(fsp->conn));
/* Bound the number of simultaneously cached files across this smbd. */
601 if (allocated_write_caches >= MAX_WRITE_CACHES)
604 if(alloc_size == 0 || fsp->wcp)
607 if((wcp = (write_cache *)malloc(sizeof(write_cache))) == NULL) {
608 DEBUG(0,("setup_write_cache: malloc fail.\n"));
612 wcp->file_size = file_size;
614 wcp->alloc_size = alloc_size;
/* Second allocation is the data buffer itself; on failure the wcp struct
 * is presumably freed by elided lines — confirm in the full source. */
616 if((wcp->data = malloc(wcp->alloc_size)) == NULL) {
617 DEBUG(0,("setup_write_cache: malloc fail for buffer size %u.\n",
618 (unsigned int)wcp->alloc_size ));
625 INC_PROFILE_COUNT(writecache_allocated_write_caches);
627 allocated_write_caches++;
629 DEBUG(10,("setup_write_cache: File %s allocated write cache size %u\n",
630 fsp->fsp_name, wcp->alloc_size ));
635 /****************************************************************************
636 Cope with a size change.
637 ****************************************************************************/
639 void set_filelen_write_cache(files_struct *fsp, SMB_OFF_T file_size)
/* The file's length changed out from under the cache: flush any cached
 * data (SIZECHANGE_FLUSH), then record the new length.
 * NOTE(review): the fsp->wcp NULL guard is elided in this extract —
 * confirm the full source checks it before dereferencing. */
642 flush_write_cache(fsp, SIZECHANGE_FLUSH);
643 fsp->wcp->file_size = file_size;
647 /*******************************************************************
648 Flush a write cache struct to disk.
649 ********************************************************************/
651 ssize_t flush_write_cache(files_struct *fsp, enum flush_reason_enum reason)
/* Write the cached data (if any) to disk at wcp->offset and mark the cache
 * empty.  'reason' indexes the per-reason flush profile counters.  Returns
 * the underlying write result; 0 when there is no cache or nothing cached. */
653 write_cache *wcp = fsp->wcp;
656 if(!wcp || !wcp->data_size)
/* Snapshot the size; data_size is presumably zeroed by an elided line
 * before the write below — confirm against the full source. */
659 data_size = wcp->data_size;
663 DEC_PROFILE_COUNT(writecache_num_write_caches);
664 INC_PROFILE_COUNT(writecache_flushed_writes[reason]);
667 DEBUG(9,("flushing write cache: fd = %d, off=%.0f, size=%u\n",
668 fsp->fd, (double)wcp->offset, (unsigned int)data_size));
/* A flush of exactly alloc_size bytes means the cache was perfectly used. */
671 if(data_size == wcp->alloc_size)
672 INC_PROFILE_COUNT(writecache_num_perfect_writes);
675 return real_write_file(fsp, wcp->data, wcp->offset, data_size);
678 /*******************************************************************
680 ********************************************************************/
682 void sync_file(connection_struct *conn, files_struct *fsp)
684 if(lp_strict_sync(SNUM(conn)) && fsp->fd != -1) {
685 flush_write_cache(fsp, SYNC_FLUSH);
686 conn->vfs_ops.fsync(fsp,fsp->fd);