/***************************************************************************
 * Copyright (c) 2024 Microsoft Corporation
 *
 * This program and the accompanying materials are made available under the
 * terms of the MIT License which is available at
 * https://opensource.org/licenses/MIT.
 *
 * SPDX-License-Identifier: MIT
 **************************************************************************/


/**************************************************************************/
/**************************************************************************/
/**                                                                       */
/** FileX Component                                                       */
/**                                                                       */
/**   Utility                                                             */
/**                                                                       */
/**************************************************************************/
/**************************************************************************/

#define FX_SOURCE_CODE


/* Include necessary system files. */

#include "fx_api.h"
#include "fx_system.h"
#include "fx_utility.h"


/**************************************************************************/
/*                                                                        */
/*  FUNCTION                                               RELEASE        */
/*                                                                        */
/*    _fx_utility_logical_sector_flush                    PORTABLE C      */
/*                                                           6.1.10       */
/*  AUTHOR                                                                */
/*                                                                        */
/*    William E. Lamie, Microsoft Corporation                             */
/*                                                                        */
/*  DESCRIPTION                                                           */
/*                                                                        */
/*    This function handles logical sector flush requests for all FileX   */
/*    components. It will process all dirty logical sectors in the        */
/*    logical sector cache within the range specified. This function      */
/*    optionally invalidates sectors.                                     */
/*                                                                        */
/*  INPUT                                                                 */
/*                                                                        */
/*    media_ptr                             Media control block pointer   */
/*    starting_sector                       Starting sector number        */
/*    sectors                               Number of sectors             */
/*    invalidate                            Invalidate flag               */
/*                                            (FX_TRUE -> invalidate)     */
/*                                                                        */
/*  OUTPUT                                                                */
/*                                                                        */
/*    return status                                                       */
/*                                                                        */
/*  CALLS                                                                 */
/*                                                                        */
/*    I/O Driver                                                          */
/*                                                                        */
/*  CALLED BY                                                             */
/*                                                                        */
/*    FileX System Functions                                              */
/*                                                                        */
/*  RELEASE HISTORY                                                       */
/*                                                                        */
/*    DATE              NAME                      DESCRIPTION             */
/*                                                                        */
/*  05-19-2020     William E. Lamie         Initial Version 6.0           */
/*  09-30-2020     William E. Lamie         Modified comment(s), and      */
/*                                            added conditional to        */
/*                                            disable cache,              */
/*                                            resulting in version 6.1    */
/*  01-31-2022     William E. Lamie         Modified comment(s), fixed    */
/*                                            errors without cache,       */
/*                                            resulting in version 6.1.10 */
/*                                                                        */
/**************************************************************************/
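/* Illustrative usage (editorial sketch, not taken from the FileX sources):
   a caller that wants every dirty cached sector in a range written back to
   the media, without discarding the cached copies, might issue

       status = _fx_utility_logical_sector_flush(media_ptr,
                                                 (ULONG64)1,
                                                 media_ptr -> fx_media_total_sectors,
                                                 FX_FALSE);

   Passing FX_TRUE as the final parameter additionally invalidates every
   cached sector in the range, which is useful before releasing the cache
   memory. The sector range shown here is an assumption; consult the actual
   FileX callers for the authoritative arguments. */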
UINT  _fx_utility_logical_sector_flush(FX_MEDIA *media_ptr, ULONG64 starting_sector, ULONG64 sectors, UINT invalidate)
{

#ifndef FX_DISABLE_CACHE
FX_CACHED_SECTOR *cache_entry;
UINT              cache_size;
UINT              i, bit_set, use_starting_sector;
ULONG             index;
ULONG             remaining_valid;
ULONG             remaining_dirty;
ULONG64           ending_sector;
ULONG             valid_bit_map;


    /* Extended port-specific processing macro, which by default is defined to whitespace. */
    FX_UTILITY_LOGICAL_SECTOR_FLUSH_EXTENSION

    /* Calculate the ending sector. */
    ending_sector = starting_sector + sectors - 1;
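
    /* Worked example (illustrative only): starting_sector = 100 and sectors = 8
       give ending_sector = 107, so the flush range is inclusive at both ends. */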

    /* Pickup the number of dirty sectors currently in the cache. */
    remaining_dirty = media_ptr -> fx_media_sector_cache_dirty_count;

    /* If trace is enabled, insert this event into the trace buffer. */
    FX_TRACE_IN_LINE_INSERT(FX_TRACE_INTERNAL_MEDIA_FLUSH, media_ptr, media_ptr -> fx_media_sector_cache_dirty_count, 0, 0, FX_TRACE_INTERNAL_EVENTS, 0, 0)

    /* Determine what type of cache configuration we have. */
    if (media_ptr -> fx_media_sector_cache_hashed == FX_FALSE)
    {

        /* A linear cache is present. Simply walk the search list until an unused
           cache entry (indicated by a zero sector number) is reached. */

        /* Flush and invalidate the internal logical sector cache. */
        cache_size = media_ptr -> fx_media_sector_cache_size;
        cache_entry = media_ptr -> fx_media_sector_cache_list_ptr;

        /* Look at the cache entries that have been written to. */
        while ((cache_size--) && (cache_entry -> fx_cached_sector))
        {

            /* Determine if invalidation is not required and there are no
               more dirty sectors. */
            if ((remaining_dirty == 0) && (invalidate == FX_FALSE))
            {

                /* Yes, nothing left to do. */
                break;
            }

            /* Determine if there are any more sectors to process. */
            if (sectors == 0)
            {

                /* No more sectors required to process. */
                break;
            }

            /* Determine if this cached sector is within the specified range and is valid. */
            if ((cache_entry -> fx_cached_sector_valid) &&
                (cache_entry -> fx_cached_sector >= starting_sector) &&
                (cache_entry -> fx_cached_sector <= ending_sector))
            {

                /* Yes, the cache entry is valid and within the specified range. Determine if
                   the requested sector has been written to. */
                if (cache_entry -> fx_cached_sector_buffer_dirty)
                {

                    /* Yes, write the cached sector out to the media. */

                    /* Check for write protect at the media level (set by driver). */
                    if (media_ptr -> fx_media_driver_write_protect == FX_FALSE)
                    {

#ifndef FX_MEDIA_STATISTICS_DISABLE

                        /* Increment the number of driver write sector(s) requests. */
                        media_ptr -> fx_media_driver_write_requests++;
#endif

                        /* Build write request to the driver. */
                        media_ptr -> fx_media_driver_request = FX_DRIVER_WRITE;
                        media_ptr -> fx_media_driver_status = FX_IO_ERROR;
                        media_ptr -> fx_media_driver_buffer = cache_entry -> fx_cached_sector_memory_buffer;
#ifdef FX_DRIVER_USE_64BIT_LBA
                        media_ptr -> fx_media_driver_logical_sector = cache_entry -> fx_cached_sector;
#else
                        media_ptr -> fx_media_driver_logical_sector = (ULONG)cache_entry -> fx_cached_sector;
#endif
                        media_ptr -> fx_media_driver_sectors = 1;
                        media_ptr -> fx_media_driver_sector_type = cache_entry -> fx_cached_sector_type;

                        /* Sectors other than FX_DATA_SECTOR will never be dirty when FX_FAULT_TOLERANT is defined. */
#ifndef FX_FAULT_TOLERANT
                        /* Determine if the system write flag needs to be set. */
                        if (cache_entry -> fx_cached_sector_type != FX_DATA_SECTOR)
                        {

                            /* Yes, a system sector write is present so set the flag. The driver
                               can use this flag to make extra safeguards in writing the sector
                               out, yielding more fault tolerance. */
                            media_ptr -> fx_media_driver_system_write = FX_TRUE;
                        }
#endif /* FX_FAULT_TOLERANT */

                        /* If trace is enabled, insert this event into the trace buffer. */
                        FX_TRACE_IN_LINE_INSERT(FX_TRACE_INTERNAL_IO_DRIVER_WRITE, media_ptr, cache_entry -> fx_cached_sector, 1, cache_entry -> fx_cached_sector_memory_buffer, FX_TRACE_INTERNAL_EVENTS, 0, 0)

                        /* Invoke the driver to write the sector. */
                        (media_ptr -> fx_media_driver_entry) (media_ptr);

                        /* Clear the system write flag. */
                        media_ptr -> fx_media_driver_system_write = FX_FALSE;

                        /* Check for successful completion. */
                        if (media_ptr -> fx_media_driver_status)
                        {

                            /* Error writing a cached sector out. Return the
                               error status. */
                            return(media_ptr -> fx_media_driver_status);
                        }

                        /* Clear the buffer dirty flag since it has been flushed
                           out. */
                        cache_entry -> fx_cached_sector_buffer_dirty = FX_FALSE;

                        /* Decrement the number of dirty sectors currently in the cache. */
                        media_ptr -> fx_media_sector_cache_dirty_count--;
                        remaining_dirty--;
                    }
                }

                /* Determine if the invalidate option is specified. */
                if (invalidate)
                {

                    /* Invalidate the cache entry. */
                    cache_entry -> fx_cached_sector_valid = FX_FALSE;

                    /* Place all ones in the sector number. */
                    cache_entry -> fx_cached_sector = (~(ULONG64)0);
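
                    /* Editorial note: an all-ones sector number cannot equal any real
                       logical sector, so this invalidated entry can never be mistaken
                       for a cache hit. */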

                    /* Determine if this sector is still dirty; this could be the case if
                       write protection was turned on. */
                    if (cache_entry -> fx_cached_sector_buffer_dirty)
                    {

                        /* Yes, clear the dirty flag. */
                        cache_entry -> fx_cached_sector_buffer_dirty = FX_FALSE;

                        /* Decrement the number of dirty sectors currently in the cache. */
                        media_ptr -> fx_media_sector_cache_dirty_count--;
                        remaining_dirty--;
                    }
                }

                /* Decrement the number of sectors in the range that have been processed. */
                sectors--;
            }

            /* Move to the next entry in the sector cache. */
            cache_entry = cache_entry -> fx_cached_sector_next_used;
        }
    }
    else
    {

        /* Hashed cache is present. Pickup the cache size. */
        cache_size = media_ptr -> fx_media_sector_cache_size;

        /* Initialize the loop control parameters. */
        bit_set = 0;
        valid_bit_map = media_ptr -> fx_media_sector_cache_hashed_sector_valid;
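
        /* Editorial note: each bit of this map covers one hash group of
           FX_SECTOR_CACHE_DEPTH cache entries; a set bit means that group may
           still hold valid cached sectors. */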

        /* Determine how to process the hashed cache based on the number of sectors
           to process. If the sequential sector range is less than the bit map size,
           simply use the starting sector to derive the index into the cache. */
        if (sectors < 32)
        {
            use_starting_sector = FX_TRUE;
        }
        else
        {
            use_starting_sector = FX_FALSE;
        }
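
        /* Editorial rationale (illustrative): for a small range, e.g. 4 sectors, hashing
           each sector of the range directly is cheaper, whereas a large or media-wide
           range is better served by walking the 32-bit valid map once instead. */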

        /* Determine if there is anything valid in the cache. */
        while (valid_bit_map)
        {

            /* Determine if invalidation is not required and there are no
               more dirty sectors. */
            if ((remaining_dirty == 0) && (invalidate == FX_FALSE))
            {

                /* Yes, nothing left to do. */
                break;
            }

            /* Determine if there are any more sectors to process. */
            if ((sectors == 0) || (starting_sector > ending_sector))
            {

                /* No more sectors required to process. */
                break;
            }

            /* Determine how to compute the hash index. */
            if (use_starting_sector)
            {

                /* Calculate the hash value of this sector using the lower bits. */
                index = (ULONG)(starting_sector & media_ptr -> fx_media_sector_cache_hash_mask);

                /* Calculate the position of the bit in the valid map that indicates one or
                   more valid sectors at this cache index. */
                bit_set = (index % 32);

                /* Compute the actual array index by multiplying by the cache depth. */
                index = (bit_set * FX_SECTOR_CACHE_DEPTH);
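
                /* Worked example (editorial; assuming a 128-entry cache, i.e. a hash mask of
                   0x1F, and FX_SECTOR_CACHE_DEPTH of 4): starting_sector 70 gives
                   index = 70 & 0x1F = 6, bit_set = 6, and an array index of 6 * 4 = 24. */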
            }
            else
            {

                /* Walk the bit map to find the next valid entry. */

                /* Find the next set bit. */
                while ((valid_bit_map & 1) == 0)
                {

                    /* Otherwise, shift down the bit in the bit map. */
                    valid_bit_map = valid_bit_map >> 1;

                    /* Increment the set bit marker. */
                    bit_set++;
                }

                /* Compute the first actual index into the hashed cache. */
                index = (bit_set * FX_SECTOR_CACHE_DEPTH);
            }

            /* At this point, bit_set represents the next group of hashed sectors that could
               have valid cache entries and index represents the index into the sector cache
               of that sector group. */

            /* Clear the remaining valid sectors for this entry in the bit map. */
            remaining_valid = 0;

            /* Loop to check the corresponding hash entries. */
            do
            {

                /* Setup pointer to the cache entry. */
                cache_entry = &(media_ptr -> fx_media_sector_cache[index]);

                /* Loop to examine the full depth of the hashed cache. */
                for (i = 0; i < 4; i++)
                {

                    /* Determine if this cached sector is within the specified range and is valid. */
                    if ((cache_entry -> fx_cached_sector_valid) &&
                        (cache_entry -> fx_cached_sector >= starting_sector) &&
                        (cache_entry -> fx_cached_sector <= ending_sector))
                    {

                        /* Determine if the requested sector has been written to. */
                        if (cache_entry -> fx_cached_sector_buffer_dirty)
                        {

                            /* Yes, write the cached sector out to the media. */

                            /* Check for write protect at the media level (set by driver). */
                            if (media_ptr -> fx_media_driver_write_protect == FX_FALSE)
                            {

#ifndef FX_MEDIA_STATISTICS_DISABLE

                                /* Increment the number of driver write sector(s) requests. */
                                media_ptr -> fx_media_driver_write_requests++;
#endif

                                /* Build write request to the driver. */
                                media_ptr -> fx_media_driver_request = FX_DRIVER_WRITE;
                                media_ptr -> fx_media_driver_status = FX_IO_ERROR;
                                media_ptr -> fx_media_driver_buffer = cache_entry -> fx_cached_sector_memory_buffer;
#ifdef FX_DRIVER_USE_64BIT_LBA
                                media_ptr -> fx_media_driver_logical_sector = cache_entry -> fx_cached_sector;
#else
                                media_ptr -> fx_media_driver_logical_sector = (ULONG)cache_entry -> fx_cached_sector;
#endif
                                media_ptr -> fx_media_driver_sectors = 1;
                                media_ptr -> fx_media_driver_sector_type = cache_entry -> fx_cached_sector_type;

                                /* Sectors other than FX_DATA_SECTOR will never be dirty when FX_FAULT_TOLERANT is defined. */
#ifndef FX_FAULT_TOLERANT
                                /* Determine if the system write flag needs to be set. */
                                if (cache_entry -> fx_cached_sector_type != FX_DATA_SECTOR)
                                {

                                    /* Yes, a system sector write is present so set the flag. The driver
                                       can use this flag to make extra safeguards in writing the sector
                                       out, yielding more fault tolerance. */
                                    media_ptr -> fx_media_driver_system_write = FX_TRUE;
                                }
#endif /* FX_FAULT_TOLERANT */

                                /* If trace is enabled, insert this event into the trace buffer. */
                                FX_TRACE_IN_LINE_INSERT(FX_TRACE_INTERNAL_IO_DRIVER_WRITE, media_ptr, cache_entry -> fx_cached_sector, 1, cache_entry -> fx_cached_sector_memory_buffer, FX_TRACE_INTERNAL_EVENTS, 0, 0)

                                /* Invoke the driver to write the sector. */
                                (media_ptr -> fx_media_driver_entry) (media_ptr);

                                /* Clear the system write flag. */
                                media_ptr -> fx_media_driver_system_write = FX_FALSE;

                                /* Check for successful completion. */
                                if (media_ptr -> fx_media_driver_status)
                                {

                                    /* Error writing a cached sector out. Return the
                                       error status. */
                                    return(media_ptr -> fx_media_driver_status);
                                }

                                /* Clear the buffer dirty flag since it has been flushed
                                   out. */
                                cache_entry -> fx_cached_sector_buffer_dirty = FX_FALSE;

                                /* Decrement the number of dirty sectors currently in the cache. */
                                media_ptr -> fx_media_sector_cache_dirty_count--;
                                remaining_dirty--;
                            }
                        }

                        /* Determine if the invalidate option is specified. */
                        if (invalidate)
                        {

                            /* Invalidate the cache entry. */
                            cache_entry -> fx_cached_sector_valid = FX_FALSE;

                            /* Place all ones in the sector number. */
                            cache_entry -> fx_cached_sector = (~(ULONG64)0);

                            /* Determine if this sector is still dirty; this could be the case if
                               write protection was turned on. */
                            if (cache_entry -> fx_cached_sector_buffer_dirty)
                            {

                                /* Yes, clear the dirty flag. */
                                cache_entry -> fx_cached_sector_buffer_dirty = FX_FALSE;

                                /* Decrement the number of dirty sectors currently in the cache. */
                                media_ptr -> fx_media_sector_cache_dirty_count--;
                                remaining_dirty--;
                            }
                        }

                        /* Decrement the number of sectors in the range that have been processed. */
                        sectors--;
                    }
                    else
                    {

                        /* Determine if the sector is valid. */
                        if (cache_entry -> fx_cached_sector_valid)
                        {

                            /* Increment the number of still remaining but out of range sectors. */
                            remaining_valid++;
                        }
                    }

                    /* Determine if invalidation is not required and there are no
                       more dirty sectors. */
                    if ((remaining_dirty == 0) && (invalidate == FX_FALSE))
                    {

                        /* Yes, nothing left to do. */
                        break;
                    }

                    /* Determine if there are any more sectors to process. */
                    if ((sectors == 0) && (invalidate == FX_FALSE))
                    {

                        /* No more sectors required to process. */
                        break;
                    }

                    /* Move to the next cache entry. */
                    cache_entry++;
                }

                /* Move the index to the next position covered by this bit, since the 32-bit
                   map can only distinguish 32 hash groups of cache entries. */
                index = index + (32 * FX_SECTOR_CACHE_DEPTH);
            } while (index < cache_size);

            /* Determine if invalidation was required and there are no more valid sectors
               associated with this bit position. */
            if ((invalidate) && (remaining_valid == 0))
            {

                /* Clear this bit position. */
                media_ptr -> fx_media_sector_cache_hashed_sector_valid &= ~(((ULONG)1) << bit_set);
            }

            /* Determine if the starting sector is being used for examination of the hash. */
            if (use_starting_sector)
            {

                /* Move to the next sector. */
                starting_sector++;
            }
            else
            {

                /* Move to next bit in the map. */
                valid_bit_map = valid_bit_map >> 1;

                /* Increment the set bit marker. */
                bit_set++;
            }
        }
    }
#else
    FX_PARAMETER_NOT_USED(media_ptr);
    FX_PARAMETER_NOT_USED(starting_sector);
    FX_PARAMETER_NOT_USED(sectors);
    FX_PARAMETER_NOT_USED(invalidate);
#endif /* FX_DISABLE_CACHE */

    /* If we get here, return successful status to the caller. */
    return(FX_SUCCESS);
}
