Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ACPI: ACPICA 20060623

Implemented a new acpi_spinlock type for the OSL lock
interfaces. This allows the type to be customized to
the host OS for improved efficiency (since a spinlock is
usually a very small object).

Implemented support for "ignored" bits in the ACPI
registers. According to the ACPI specification, these
bits should be preserved when writing the registers via
a read/modify/write cycle. There are 3 bits preserved
in this manner: PM1_CONTROL[0] (SCI_EN), PM1_CONTROL[9],
and PM1_STATUS[11].
http://bugzilla.kernel.org/show_bug.cgi?id=3691

Implemented the initial deployment of new OSL mutex
interfaces. Since some host operating systems have
separate mutex and semaphore objects, this feature was
requested. The base code now uses mutexes (and the new
mutex interfaces) wherever a binary semaphore was used
previously. However, for the current release, the mutex
interfaces are defined as macros to map them to the
existing semaphore interfaces.

Fixed several problems with the support for the control
method SyncLevel parameter. The SyncLevel now works
according to the ACPI specification and in concert with the
Mutex SyncLevel parameter, since the current SyncLevel is
a property of the executing thread. Mutual exclusion for
control methods is now implemented with a mutex instead
of a semaphore.

Fixed three instances of the use of the C shift operator
in the bitfield support code (exfldio.c) to avoid the use
of a shift value larger than the target data width. The
behavior of C compilers is undefined in this case and can
cause unpredictable results, and therefore the case must
be detected and avoided. (Fiodor Suietov)

Added an info message whenever an SSDT or OEM table
is loaded dynamically via the Load() or LoadTable()
ASL operators. This should improve debugging capability
since it will show exactly what tables have been loaded
(beyond the tables present in the RSDT/XSDT).

Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>

authored by

Bob Moore and committed by
Len Brown
967440e3 95b38b3f

+562 -469
-30
drivers/acpi/dispatcher/dsinit.c
··· 125 125 if (info->table_desc->pointer->revision == 1) { 126 126 node->flags |= ANOBJ_DATA_WIDTH_32; 127 127 } 128 - #ifdef ACPI_INIT_PARSE_METHODS 129 - /* 130 - * Note 11/2005: Removed this code to parse all methods during table 131 - * load because it causes problems if there are any errors during the 132 - * parse. Also, it seems like overkill and we probably don't want to 133 - * abort a table load because of an issue with a single method. 134 - */ 135 128 136 - /* 137 - * Print a dot for each method unless we are going to print 138 - * the entire pathname 139 - */ 140 - if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { 141 - ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, ".")); 142 - } 143 - 144 - /* 145 - * Always parse methods to detect errors, we will delete 146 - * the parse tree below 147 - */ 148 - status = acpi_ds_parse_method(obj_handle); 149 - if (ACPI_FAILURE(status)) { 150 - ACPI_ERROR((AE_INFO, 151 - "Method %p [%4.4s] - parse failure, %s", 152 - obj_handle, 153 - acpi_ut_get_node_name(obj_handle), 154 - acpi_format_exception(status))); 155 - 156 - /* This parse failed, but we will continue parsing more methods */ 157 - } 158 - #endif 159 129 info->method_count++; 160 130 break; 161 131
+138 -192
drivers/acpi/dispatcher/dsmethod.c
··· 52 52 #define _COMPONENT ACPI_DISPATCHER 53 53 ACPI_MODULE_NAME("dsmethod") 54 54 55 + /* Local prototypes */ 56 + static acpi_status 57 + acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); 58 + 55 59 /******************************************************************************* 56 60 * 57 61 * FUNCTION: acpi_ds_method_error ··· 71 67 * Note: Allows the exception handler to change the status code 72 68 * 73 69 ******************************************************************************/ 70 + 74 71 acpi_status 75 72 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) 76 73 { ··· 118 113 119 114 /******************************************************************************* 120 115 * 116 + * FUNCTION: acpi_ds_create_method_mutex 117 + * 118 + * PARAMETERS: obj_desc - The method object 119 + * 120 + * RETURN: Status 121 + * 122 + * DESCRIPTION: Create a mutex object for a serialized control method 123 + * 124 + ******************************************************************************/ 125 + 126 + static acpi_status 127 + acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) 128 + { 129 + union acpi_operand_object *mutex_desc; 130 + acpi_status status; 131 + 132 + ACPI_FUNCTION_NAME(ds_create_method_mutex); 133 + 134 + /* Create the new mutex object */ 135 + 136 + mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); 137 + if (!mutex_desc) { 138 + return_ACPI_STATUS(AE_NO_MEMORY); 139 + } 140 + 141 + /* Create the actual OS Mutex */ 142 + 143 + status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); 144 + if (ACPI_FAILURE(status)) { 145 + return_ACPI_STATUS(status); 146 + } 147 + 148 + mutex_desc->mutex.sync_level = method_desc->method.sync_level; 149 + method_desc->method.mutex = mutex_desc; 150 + return_ACPI_STATUS(AE_OK); 151 + } 152 + 153 + /******************************************************************************* 154 + * 121 155 * FUNCTION: acpi_ds_begin_method_execution 122 
156 * 123 157 * PARAMETERS: method_node - Node of the method 124 158 * obj_desc - The method object 125 - * calling_method_node - Caller of this method (if non-null) 159 + * walk_state - current state, NULL if not yet executing 160 + * a method. 126 161 * 127 162 * RETURN: Status 128 163 * ··· 173 128 ******************************************************************************/ 174 129 175 130 acpi_status 176 - acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node, 177 - union acpi_operand_object * obj_desc, 178 - struct acpi_namespace_node * calling_method_node) 131 + acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, 132 + union acpi_operand_object *obj_desc, 133 + struct acpi_walk_state *walk_state) 179 134 { 180 135 acpi_status status = AE_OK; 181 136 ··· 194 149 } 195 150 196 151 /* 197 - * If there is a concurrency limit on this method, we need to 198 - * obtain a unit from the method semaphore. 152 + * If this method is serialized, we need to acquire the method mutex. 199 153 */ 200 - if (obj_desc->method.semaphore) { 154 + if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) { 201 155 /* 202 - * Allow recursive method calls, up to the reentrancy/concurrency 203 - * limit imposed by the SERIALIZED rule and the sync_level method 204 - * parameter. 205 - * 206 - * The point of this code is to avoid permanently blocking a 207 - * thread that is making recursive method calls. 156 + * Create a mutex for the method if it is defined to be Serialized 157 + * and a mutex has not already been created. 
We defer the mutex creation 158 + * until a method is actually executed, to minimize the object count 208 159 */ 209 - if (method_node == calling_method_node) { 210 - if (obj_desc->method.thread_count >= 211 - obj_desc->method.concurrency) { 212 - return_ACPI_STATUS(AE_AML_METHOD_LIMIT); 160 + if (!obj_desc->method.mutex) { 161 + status = acpi_ds_create_method_mutex(obj_desc); 162 + if (ACPI_FAILURE(status)) { 163 + return_ACPI_STATUS(status); 213 164 } 214 165 } 215 166 216 167 /* 217 - * Get a unit from the method semaphore. This releases the 218 - * interpreter if we block (then reacquires it) 168 + * The current_sync_level (per-thread) must be less than or equal to 169 + * the sync level of the method. This mechanism provides some 170 + * deadlock prevention 171 + * 172 + * Top-level method invocation has no walk state at this point 219 173 */ 220 - status = 221 - acpi_ex_system_wait_semaphore(obj_desc->method.semaphore, 222 - ACPI_WAIT_FOREVER); 223 - if (ACPI_FAILURE(status)) { 224 - return_ACPI_STATUS(status); 174 + if (walk_state && 175 + (walk_state->thread->current_sync_level > 176 + obj_desc->method.mutex->mutex.sync_level)) { 177 + ACPI_ERROR((AE_INFO, 178 + "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)", 179 + acpi_ut_get_node_name(method_node), 180 + walk_state->thread->current_sync_level)); 181 + 182 + return_ACPI_STATUS(AE_AML_MUTEX_ORDER); 225 183 } 184 + 185 + /* 186 + * Obtain the method mutex if necessary. Do not acquire mutex for a 187 + * recursive call. 188 + */ 189 + if (!walk_state || 190 + !obj_desc->method.mutex->mutex.owner_thread || 191 + (walk_state->thread != 192 + obj_desc->method.mutex->mutex.owner_thread)) { 193 + /* 194 + * Acquire the method mutex. 
This releases the interpreter if we 195 + * block (and reacquires it before it returns) 196 + */ 197 + status = 198 + acpi_ex_system_wait_mutex(obj_desc->method.mutex-> 199 + mutex.os_mutex, 200 + ACPI_WAIT_FOREVER); 201 + if (ACPI_FAILURE(status)) { 202 + return_ACPI_STATUS(status); 203 + } 204 + 205 + /* Update the mutex and walk info and save the original sync_level */ 206 + 207 + if (walk_state) { 208 + obj_desc->method.mutex->mutex. 209 + original_sync_level = 210 + walk_state->thread->current_sync_level; 211 + 212 + obj_desc->method.mutex->mutex.owner_thread = 213 + walk_state->thread; 214 + walk_state->thread->current_sync_level = 215 + obj_desc->method.sync_level; 216 + } else { 217 + obj_desc->method.mutex->mutex. 218 + original_sync_level = 219 + obj_desc->method.mutex->mutex.sync_level; 220 + } 221 + } 222 + 223 + /* Always increase acquisition depth */ 224 + 225 + obj_desc->method.mutex->mutex.acquisition_depth++; 226 226 } 227 227 228 228 /* ··· 290 200 return_ACPI_STATUS(status); 291 201 292 202 cleanup: 293 - /* On error, must signal the method semaphore if present */ 203 + /* On error, must release the method mutex (if present) */ 294 204 295 - if (obj_desc->method.semaphore) { 296 - (void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1); 205 + if (obj_desc->method.mutex) { 206 + acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex); 297 207 } 298 208 return_ACPI_STATUS(status); 299 209 } ··· 343 253 return_ACPI_STATUS(AE_NULL_OBJECT); 344 254 } 345 255 346 - /* Init for new method, possibly wait on concurrency semaphore */ 256 + /* Init for new method, possibly wait on method mutex */ 347 257 348 258 status = acpi_ds_begin_method_execution(method_node, obj_desc, 349 - this_walk_state->method_node); 259 + this_walk_state); 350 260 if (ACPI_FAILURE(status)) { 351 261 return_ACPI_STATUS(status); 352 262 } ··· 568 478 * created, delete all locals and arguments, and delete the parse 569 479 * tree if requested. 
570 480 * 481 + * MUTEX: Interpreter is locked 482 + * 571 483 ******************************************************************************/ 572 484 573 485 void ··· 595 503 } 596 504 597 505 /* 598 - * Lock the parser while we terminate this method. 599 - * If this is the last thread executing the method, 600 - * we have additional cleanup to perform 506 + * If method is serialized, release the mutex and restore the 507 + * current sync level for this thread 601 508 */ 602 - status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD); 603 - if (ACPI_FAILURE(status)) { 604 - return_VOID; 605 - } 509 + if (method_desc->method.mutex) { 606 510 607 - /* Signal completion of the execution of this method if necessary */ 511 + /* Acquisition Depth handles recursive calls */ 608 512 609 - if (method_desc->method.semaphore) { 610 - status = 611 - acpi_os_signal_semaphore(method_desc->method.semaphore, 1); 612 - if (ACPI_FAILURE(status)) { 513 + method_desc->method.mutex->mutex.acquisition_depth--; 514 + if (!method_desc->method.mutex->mutex.acquisition_depth) { 515 + walk_state->thread->current_sync_level = 516 + method_desc->method.mutex->mutex. 517 + original_sync_level; 613 518 614 - /* Ignore error and continue */ 615 - 616 - ACPI_EXCEPTION((AE_INFO, status, 617 - "Could not signal method semaphore")); 519 + acpi_os_release_mutex(method_desc->method.mutex->mutex. 520 + os_mutex); 618 521 } 619 522 } 620 523 ··· 624 537 625 538 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 626 539 if (ACPI_FAILURE(status)) { 627 - goto exit; 540 + return_VOID; 628 541 } 629 542 630 543 /* ··· 667 580 /* 668 581 * Support to dynamically change a method from not_serialized to 669 582 * Serialized if it appears that the method is incorrectly written and 670 - * does not support multiple thread execution. The best example of this 671 - * is if such a method creates namespace objects and blocks. A second 583 + * does not support multiple thread execution. 
The best example of this 584 + * is if such a method creates namespace objects and blocks. A second 672 585 * thread will fail with an AE_ALREADY_EXISTS exception 673 586 * 674 587 * This code is here because we must wait until the last thread exits 675 588 * before creating the synchronization semaphore. 676 589 */ 677 - if ((method_desc->method.concurrency == 1) && 678 - (!method_desc->method.semaphore)) { 679 - status = acpi_os_create_semaphore(1, 1, 680 - &method_desc->method. 681 - semaphore); 590 + if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED) 591 + && (!method_desc->method.mutex)) { 592 + status = acpi_ds_create_method_mutex(method_desc); 682 593 } 683 594 684 595 /* No more threads, we can free the owner_id */ ··· 684 599 acpi_ut_release_owner_id(&method_desc->method.owner_id); 685 600 } 686 601 687 - exit: 688 - (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD); 689 602 return_VOID; 690 603 } 691 - 692 - #ifdef ACPI_INIT_PARSE_METHODS 693 - /* 694 - * Note 11/2005: Removed this code to parse all methods during table 695 - * load because it causes problems if there are any errors during the 696 - * parse. Also, it seems like overkill and we probably don't want to 697 - * abort a table load because of an issue with a single method. 698 - */ 699 - 700 - /******************************************************************************* 701 - * 702 - * FUNCTION: acpi_ds_parse_method 703 - * 704 - * PARAMETERS: Node - Method node 705 - * 706 - * RETURN: Status 707 - * 708 - * DESCRIPTION: Parse the AML that is associated with the method. 
709 - * 710 - * MUTEX: Assumes parser is locked 711 - * 712 - ******************************************************************************/ 713 - 714 - acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) 715 - { 716 - acpi_status status; 717 - union acpi_operand_object *obj_desc; 718 - union acpi_parse_object *op; 719 - struct acpi_walk_state *walk_state; 720 - 721 - ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node); 722 - 723 - /* Parameter Validation */ 724 - 725 - if (!node) { 726 - return_ACPI_STATUS(AE_NULL_ENTRY); 727 - } 728 - 729 - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 730 - "**** Parsing [%4.4s] **** NamedObj=%p\n", 731 - acpi_ut_get_node_name(node), node)); 732 - 733 - /* Extract the method object from the method Node */ 734 - 735 - obj_desc = acpi_ns_get_attached_object(node); 736 - if (!obj_desc) { 737 - return_ACPI_STATUS(AE_NULL_OBJECT); 738 - } 739 - 740 - /* Create a mutex for the method if there is a concurrency limit */ 741 - 742 - if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) && 743 - (!obj_desc->method.semaphore)) { 744 - status = acpi_os_create_semaphore(obj_desc->method.concurrency, 745 - obj_desc->method.concurrency, 746 - &obj_desc->method.semaphore); 747 - if (ACPI_FAILURE(status)) { 748 - return_ACPI_STATUS(status); 749 - } 750 - } 751 - 752 - /* 753 - * Allocate a new parser op to be the root of the parsed 754 - * method tree 755 - */ 756 - op = acpi_ps_alloc_op(AML_METHOD_OP); 757 - if (!op) { 758 - return_ACPI_STATUS(AE_NO_MEMORY); 759 - } 760 - 761 - /* Init new op with the method name and pointer back to the Node */ 762 - 763 - acpi_ps_set_name(op, node->name.integer); 764 - op->common.node = node; 765 - 766 - /* 767 - * Get a new owner_id for objects created by this method. Namespace 768 - * objects (such as Operation Regions) can be created during the 769 - * first pass parse. 
770 - */ 771 - status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); 772 - if (ACPI_FAILURE(status)) { 773 - goto cleanup; 774 - } 775 - 776 - /* Create and initialize a new walk state */ 777 - 778 - walk_state = 779 - acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL, 780 - NULL); 781 - if (!walk_state) { 782 - status = AE_NO_MEMORY; 783 - goto cleanup2; 784 - } 785 - 786 - status = acpi_ds_init_aml_walk(walk_state, op, node, 787 - obj_desc->method.aml_start, 788 - obj_desc->method.aml_length, NULL, 1); 789 - if (ACPI_FAILURE(status)) { 790 - acpi_ds_delete_walk_state(walk_state); 791 - goto cleanup2; 792 - } 793 - 794 - /* 795 - * Parse the method, first pass 796 - * 797 - * The first pass load is where newly declared named objects are added into 798 - * the namespace. Actual evaluation of the named objects (what would be 799 - * called a "second pass") happens during the actual execution of the 800 - * method so that operands to the named objects can take on dynamic 801 - * run-time values. 802 - */ 803 - status = acpi_ps_parse_aml(walk_state); 804 - if (ACPI_FAILURE(status)) { 805 - goto cleanup2; 806 - } 807 - 808 - ACPI_DEBUG_PRINT((ACPI_DB_PARSE, 809 - "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n", 810 - acpi_ut_get_node_name(node), node, op)); 811 - 812 - /* 813 - * Delete the parse tree. We simply re-parse the method for every 814 - * execution since there isn't much overhead (compared to keeping lots 815 - * of parse trees around) 816 - */ 817 - acpi_ns_delete_namespace_subtree(node); 818 - acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id); 819 - 820 - cleanup2: 821 - acpi_ut_release_owner_id(&obj_desc->method.owner_id); 822 - 823 - cleanup: 824 - acpi_ps_delete_parse_tree(op); 825 - return_ACPI_STATUS(status); 826 - } 827 - #endif
+1 -3
drivers/acpi/dispatcher/dswexec.c
··· 472 472 acpi_ds_result_push(walk_state->result_obj, 473 473 walk_state); 474 474 } 475 - 476 475 break; 477 476 478 477 default: ··· 509 510 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 510 511 "Method Reference in a Package, Op=%p\n", 511 512 op)); 513 + 512 514 op->common.node = 513 515 (struct acpi_namespace_node *)op->asl.value. 514 516 arg->asl.node->object; ··· 670 670 671 671 status = acpi_ds_result_stack_pop(walk_state); 672 672 } 673 - 674 673 break; 675 674 676 675 case AML_TYPE_UNDEFINED: ··· 707 708 * Check if we just completed the evaluation of a 708 709 * conditional predicate 709 710 */ 710 - 711 711 if ((ACPI_SUCCESS(status)) && 712 712 (walk_state->control_state) && 713 713 (walk_state->control_state->common.state ==
+23 -26
drivers/acpi/dispatcher/dswload.c
··· 175 175 if (status == AE_NOT_FOUND) { 176 176 /* 177 177 * Table disassembly: 178 - * Target of Scope() not found. Generate an External for it, and 178 + * Target of Scope() not found. Generate an External for it, and 179 179 * insert the name into the namespace. 180 180 */ 181 181 acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0); ··· 210 210 case ACPI_TYPE_BUFFER: 211 211 212 212 /* 213 - * These types we will allow, but we will change the type. This 213 + * These types we will allow, but we will change the type. This 214 214 * enables some existing code of the form: 215 215 * 216 216 * Name (DEB, 0) 217 217 * Scope (DEB) { ... } 218 218 * 219 - * Note: silently change the type here. On the second pass, we will report 219 + * Note: silently change the type here. On the second pass, we will report 220 220 * a warning 221 221 */ 222 - 223 222 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 224 223 "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", 225 224 path, ··· 241 242 break; 242 243 243 244 default: 244 - 245 245 /* 246 246 * For all other named opcodes, we will enter the name into 247 247 * the namespace. ··· 257 259 * buffer_field, or Package), the name of the object is already 258 260 * in the namespace. 259 261 */ 260 - 261 262 if (walk_state->deferred_node) { 262 263 263 264 /* This name is already in the namespace, get the node */ ··· 290 293 } 291 294 292 295 /* 293 - * Enter the named type into the internal namespace. We enter the name 294 - * as we go downward in the parse tree. Any necessary subobjects that 296 + * Enter the named type into the internal namespace. We enter the name 297 + * as we go downward in the parse tree. Any necessary subobjects that 295 298 * involve arguments to the opcode must be created as we go back up the 296 299 * parse tree later. 
297 300 */ ··· 324 327 (status); 325 328 } 326 329 } 330 + 327 331 status = AE_OK; 328 332 } 329 333 } 330 334 331 335 if (ACPI_FAILURE(status)) { 332 - 333 336 ACPI_ERROR_NAMESPACE(path, status); 334 337 return_ACPI_STATUS(status); 335 338 } ··· 431 434 status = 432 435 acpi_ex_create_region(op->named.data, 433 436 op->named.length, 434 - (acpi_adr_space_type) 435 - ((op->common.value.arg)-> 436 - common.value.integer), 437 + (acpi_adr_space_type) ((op-> 438 + common. 439 + value. 440 + arg)-> 441 + common. 442 + value. 443 + integer), 437 444 walk_state); 438 445 if (ACPI_FAILURE(status)) { 439 446 return_ACPI_STATUS(status); ··· 475 474 * method_op pkg_length name_string method_flags term_list 476 475 * 477 476 * Note: We must create the method node/object pair as soon as we 478 - * see the method declaration. This allows later pass1 parsing 477 + * see the method declaration. This allows later pass1 parsing 479 478 * of invocations of the method (need to know the number of 480 479 * arguments.) 481 480 */ ··· 500 499 length, 501 500 walk_state); 502 501 } 502 + 503 503 walk_state->operands[0] = NULL; 504 504 walk_state->num_operands = 0; 505 505 ··· 572 570 #ifdef ACPI_ENABLE_MODULE_LEVEL_CODE 573 571 if ((walk_state->op_info->class == AML_CLASS_EXECUTE) || 574 572 (walk_state->op_info->class == AML_CLASS_CONTROL)) { 575 - 576 573 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, 577 574 "Begin/EXEC: %s (fl %8.8X)\n", 578 575 walk_state->op_info->name, ··· 603 602 } else { 604 603 /* Get name from the op */ 605 604 606 - buffer_ptr = (char *)&op->named.name; 605 + buffer_ptr = ACPI_CAST_PTR(char, &op->named.name); 607 606 } 608 607 } else { 609 608 /* Get the namestring from the raw AML */ ··· 630 629 break; 631 630 632 631 case AML_INT_NAMEPATH_OP: 633 - 634 632 /* 635 633 * The name_path is an object reference to an existing object. 
636 634 * Don't enter the name into the namespace, but look it up ··· 642 642 break; 643 643 644 644 case AML_SCOPE_OP: 645 - 646 645 /* 647 646 * The Path is an object reference to an existing object. 648 647 * Don't enter the name into the namespace, but look it up ··· 663 664 #endif 664 665 return_ACPI_STATUS(status); 665 666 } 667 + 666 668 /* 667 669 * We must check to make sure that the target is 668 670 * one of the opcodes that actually opens a scope ··· 683 683 case ACPI_TYPE_BUFFER: 684 684 685 685 /* 686 - * These types we will allow, but we will change the type. This 686 + * These types we will allow, but we will change the type. This 687 687 * enables some existing code of the form: 688 688 * 689 689 * Name (DEB, 0) 690 690 * Scope (DEB) { ... } 691 691 */ 692 - 693 692 ACPI_WARNING((AE_INFO, 694 693 "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)", 695 694 buffer_ptr, ··· 728 729 if (ACPI_FAILURE(status)) { 729 730 return_ACPI_STATUS(status); 730 731 } 731 - 732 732 } 733 + 733 734 return_ACPI_STATUS(AE_OK); 734 735 } 735 736 736 737 /* 737 - * Enter the named type into the internal namespace. We enter the name 738 - * as we go downward in the parse tree. Any necessary subobjects that 738 + * Enter the named type into the internal namespace. We enter the name 739 + * as we go downward in the parse tree. Any necessary subobjects that 739 740 * involve arguments to the opcode must be created as we go back up the 740 741 * parse tree later. 
741 742 * ··· 786 787 * can get it again quickly when this scope is closed 787 788 */ 788 789 op->common.node = node; 789 - 790 790 return_ACPI_STATUS(status); 791 791 } 792 792 ··· 920 922 #ifndef ACPI_NO_METHOD_EXECUTION 921 923 922 924 case AML_TYPE_CREATE_FIELD: 923 - 924 925 /* 925 926 * Create the field object, but the field buffer and index must 926 927 * be evaluated later during the execution phase ··· 928 931 break; 929 932 930 933 case AML_TYPE_NAMED_FIELD: 931 - 932 934 /* 933 935 * If we are executing a method, initialize the field 934 936 */ ··· 1047 1051 * argument is the space_id. (We must save the address of the 1048 1052 * AML of the address and length operands) 1049 1053 */ 1054 + 1050 1055 /* 1051 1056 * If we have a valid region, initialize it 1052 1057 * Namespace is NOT locked at this point. ··· 1077 1080 * method_op pkg_length name_string method_flags term_list 1078 1081 * 1079 1082 * Note: We must create the method node/object pair as soon as we 1080 - * see the method declaration. This allows later pass1 parsing 1083 + * see the method declaration. This allows later pass1 parsing 1081 1084 * of invocations of the method (need to know the number of 1082 1085 * arguments.) 1083 1086 */
+5 -9
drivers/acpi/events/evgpe.c
··· 382 382 u32 status_reg; 383 383 u32 enable_reg; 384 384 acpi_cpu_flags flags; 385 - acpi_cpu_flags hw_flags; 386 385 acpi_native_uint i; 387 386 acpi_native_uint j; 388 387 ··· 393 394 return (int_status); 394 395 } 395 396 396 - /* We need to hold the GPE lock now, hardware lock in the loop */ 397 - 397 + /* 398 + * We need to obtain the GPE lock for both the data structs and registers 399 + * Note: Not necessary to obtain the hardware lock, since the GPE registers 400 + * are owned by the gpe_lock. 401 + */ 398 402 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 399 403 400 404 /* Examine all GPE blocks attached to this interrupt level */ ··· 415 413 416 414 gpe_register_info = &gpe_block->register_info[i]; 417 415 418 - hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); 419 - 420 416 /* Read the Status Register */ 421 417 422 418 status = ··· 423 423 &gpe_register_info-> 424 424 status_address); 425 425 if (ACPI_FAILURE(status)) { 426 - acpi_os_release_lock(acpi_gbl_hardware_lock, 427 - hw_flags); 428 426 goto unlock_and_exit; 429 427 } 430 428 ··· 433 435 &enable_reg, 434 436 &gpe_register_info-> 435 437 enable_address); 436 - acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags); 437 - 438 438 if (ACPI_FAILURE(status)) { 439 439 goto unlock_and_exit; 440 440 }
+8
drivers/acpi/executer/exconfig.c
··· 266 266 } 267 267 } 268 268 269 + ACPI_INFO((AE_INFO, 270 + "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", 271 + table->signature, table->oem_id, table->oem_table_id)); 272 + 269 273 *return_desc = ddb_handle; 270 274 return_ACPI_STATUS(status); 271 275 } ··· 449 445 450 446 return_ACPI_STATUS(status); 451 447 } 448 + 449 + ACPI_INFO((AE_INFO, 450 + "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]", 451 + table_ptr->oem_id, table_ptr->oem_table_id)); 452 452 453 453 cleanup: 454 454 if (ACPI_FAILURE(status)) {
+11 -16
drivers/acpi/executer/excreate.c
··· 177 177 * that the event is created in an unsignalled state 178 178 */ 179 179 status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, 180 - &obj_desc->event.semaphore); 180 + &obj_desc->event.os_semaphore); 181 181 if (ACPI_FAILURE(status)) { 182 182 goto cleanup; 183 183 } ··· 226 226 goto cleanup; 227 227 } 228 228 229 - /* 230 - * Create the actual OS semaphore. 231 - * One unit max to make it a mutex, with one initial unit to allow 232 - * the mutex to be acquired. 233 - */ 234 - status = acpi_os_create_semaphore(1, 1, &obj_desc->mutex.semaphore); 229 + /* Create the actual OS Mutex */ 230 + 231 + status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex); 235 232 if (ACPI_FAILURE(status)) { 236 233 goto cleanup; 237 234 } ··· 562 565 obj_desc->method.aml_length = aml_length; 563 566 564 567 /* 565 - * Disassemble the method flags. Split off the Arg Count 568 + * Disassemble the method flags. Split off the Arg Count 566 569 * for efficiency 567 570 */ 568 571 method_flags = (u8) operand[1]->integer.value; ··· 573 576 (u8) (method_flags & AML_METHOD_ARG_COUNT); 574 577 575 578 /* 576 - * Get the concurrency count. If required, a semaphore will be 579 + * Get the sync_level. If method is serialized, a mutex will be 577 580 * created for this method when it is parsed. 
578 581 */ 579 582 if (acpi_gbl_all_methods_serialized) { 580 - obj_desc->method.concurrency = 1; 583 + obj_desc->method.sync_level = 0; 581 584 obj_desc->method.method_flags |= AML_METHOD_SERIALIZED; 582 585 } else if (method_flags & AML_METHOD_SERIALIZED) { 583 586 /* 584 - * ACPI 1.0: Concurrency = 1 585 - * ACPI 2.0: Concurrency = (sync_level (in method declaration) + 1) 587 + * ACPI 1.0: sync_level = 0 588 + * ACPI 2.0: sync_level = sync_level in method declaration 586 589 */ 587 - obj_desc->method.concurrency = (u8) 588 - (((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4) + 1); 589 - } else { 590 - obj_desc->method.concurrency = ACPI_INFINITE_CONCURRENCY; 590 + obj_desc->method.sync_level = (u8) 591 + ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4); 591 592 } 592 593 593 594 /* Attach the new object to the method Node */
+4 -4
drivers/acpi/executer/exdump.c
··· 118 118 119 119 static struct acpi_exdump_info acpi_ex_dump_event[2] = { 120 120 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, 121 - {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.semaphore), "Semaphore"} 121 + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"} 122 122 }; 123 123 124 124 static struct acpi_exdump_info acpi_ex_dump_method[8] = { 125 125 {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, 126 126 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"}, 127 - {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"}, 128 - {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"}, 127 + {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, 128 + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"}, 129 129 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, 130 130 {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, 131 131 {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, ··· 138 138 {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, 139 139 {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), 140 140 "Acquire Depth"}, 141 - {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.semaphore), "Semaphore"} 141 + {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} 142 142 }; 143 143 144 144 static struct acpi_exdump_info acpi_ex_dump_region[7] = {
+55 -16
drivers/acpi/executer/exfldio.c
··· 727 727 return_ACPI_STATUS(status); 728 728 } 729 729 730 - /* Merge with previous datum if necessary */ 731 - 732 - merged_datum |= raw_datum << 733 - (obj_desc->common_field.access_bit_width - 734 - obj_desc->common_field.start_field_bit_offset); 730 + /* 731 + * Merge with previous datum if necessary. 732 + * 733 + * Note: Before the shift, check if the shift value will be larger than 734 + * the integer size. If so, there is no need to perform the operation. 735 + * This avoids the differences in behavior between different compilers 736 + * concerning shift values larger than the target data width. 737 + */ 738 + if ((obj_desc->common_field.access_bit_width - 739 + obj_desc->common_field.start_field_bit_offset) < 740 + ACPI_INTEGER_BIT_SIZE) { 741 + merged_datum |= 742 + raw_datum << (obj_desc->common_field. 743 + access_bit_width - 744 + obj_desc->common_field. 745 + start_field_bit_offset); 746 + } 735 747 736 748 if (i == datum_count) { 737 749 break; ··· 820 808 return_ACPI_STATUS(AE_BUFFER_OVERFLOW); 821 809 } 822 810 823 - /* Compute the number of datums (access width data items) */ 811 + /* 812 + * Create the bitmasks used for bit insertion. 813 + * Note: This if/else is used to bypass compiler differences with the 814 + * shift operator 815 + */ 816 + if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) { 817 + width_mask = ACPI_INTEGER_MAX; 818 + } else { 819 + width_mask = 820 + ACPI_MASK_BITS_ABOVE(obj_desc->common_field. 821 + access_bit_width); 822 + } 824 823 825 - width_mask = 826 - ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width); 827 - mask = 828 - width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field. 
829 - start_field_bit_offset); 824 + mask = width_mask & 825 + ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); 826 + 827 + /* Compute the number of datums (access width data items) */ 830 828 831 829 datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, 832 830 obj_desc->common_field.access_bit_width); ··· 870 848 return_ACPI_STATUS(status); 871 849 } 872 850 873 - /* Start new output datum by merging with previous input datum */ 874 - 875 851 field_offset += obj_desc->common_field.access_byte_width; 876 - merged_datum = raw_datum >> 877 - (obj_desc->common_field.access_bit_width - 878 - obj_desc->common_field.start_field_bit_offset); 852 + 853 + /* 854 + * Start new output datum by merging with previous input datum 855 + * if necessary. 856 + * 857 + * Note: Before the shift, check if the shift value will be larger than 858 + * the integer size. If so, there is no need to perform the operation. 859 + * This avoids the differences in behavior between different compilers 860 + * concerning shift values larger than the target data width. 861 + */ 862 + if ((obj_desc->common_field.access_bit_width - 863 + obj_desc->common_field.start_field_bit_offset) < 864 + ACPI_INTEGER_BIT_SIZE) { 865 + merged_datum = 866 + raw_datum >> (obj_desc->common_field. 867 + access_bit_width - 868 + obj_desc->common_field. 869 + start_field_bit_offset); 870 + } else { 871 + merged_datum = 0; 872 + } 873 + 879 874 mask = width_mask; 880 875 881 876 if (i == datum_count) {
+6 -6
drivers/acpi/executer/exmutex.c
··· 161 161 162 162 /* 163 163 * Current Sync must be less than or equal to the sync level of the 164 - * mutex. This mechanism provides some deadlock prevention 164 + * mutex. This mechanism provides some deadlock prevention 165 165 */ 166 166 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { 167 167 ACPI_ERROR((AE_INFO, 168 - "Cannot acquire Mutex [%4.4s], incorrect SyncLevel", 169 - acpi_ut_get_node_name(obj_desc->mutex.node))); 168 + "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)", 169 + acpi_ut_get_node_name(obj_desc->mutex.node), 170 + walk_state->thread->current_sync_level)); 170 171 return_ACPI_STATUS(AE_AML_MUTEX_ORDER); 171 172 } 172 173 ··· 179 178 180 179 if ((obj_desc->mutex.owner_thread->thread_id == 181 180 walk_state->thread->thread_id) || 182 - (obj_desc->mutex.semaphore == 183 - acpi_gbl_global_lock_semaphore)) { 181 + (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) { 184 182 /* 185 183 * The mutex is already owned by this thread, 186 184 * just increment the acquisition depth ··· 264 264 */ 265 265 if ((obj_desc->mutex.owner_thread->thread_id != 266 266 walk_state->thread->thread_id) 267 - && (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) { 267 + && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { 268 268 ACPI_ERROR((AE_INFO, 269 269 "Thread %X cannot release Mutex [%4.4s] acquired by thread %X", 270 270 walk_state->thread->thread_id,
+68 -14
drivers/acpi/executer/exsystem.c
··· 63 63 * interpreter is released. 64 64 * 65 65 ******************************************************************************/ 66 - acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout) 66 + acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) 67 67 { 68 68 acpi_status status; 69 69 acpi_status status2; 70 70 71 71 ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); 72 72 73 - status = acpi_os_wait_semaphore(semaphore, 1, 0); 73 + status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT); 74 74 if (ACPI_SUCCESS(status)) { 75 75 return_ACPI_STATUS(status); 76 76 } ··· 82 82 acpi_ex_exit_interpreter(); 83 83 84 84 status = acpi_os_wait_semaphore(semaphore, 1, timeout); 85 + 86 + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 87 + "*** Thread awake after blocking, %s\n", 88 + acpi_format_exception(status))); 89 + 90 + /* Reacquire the interpreter */ 91 + 92 + status2 = acpi_ex_enter_interpreter(); 93 + if (ACPI_FAILURE(status2)) { 94 + 95 + /* Report fatal error, could not acquire interpreter */ 96 + 97 + return_ACPI_STATUS(status2); 98 + } 99 + } 100 + 101 + return_ACPI_STATUS(status); 102 + } 103 + 104 + /******************************************************************************* 105 + * 106 + * FUNCTION: acpi_ex_system_wait_mutex 107 + * 108 + * PARAMETERS: Mutex - Mutex to wait on 109 + * Timeout - Max time to wait 110 + * 111 + * RETURN: Status 112 + * 113 + * DESCRIPTION: Implements a semaphore wait with a check to see if the 114 + * semaphore is available immediately. If it is not, the 115 + * interpreter is released. 
116 + * 117 + ******************************************************************************/ 118 + 119 + acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) 120 + { 121 + acpi_status status; 122 + acpi_status status2; 123 + 124 + ACPI_FUNCTION_TRACE(ex_system_wait_mutex); 125 + 126 + status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT); 127 + if (ACPI_SUCCESS(status)) { 128 + return_ACPI_STATUS(status); 129 + } 130 + 131 + if (status == AE_TIME) { 132 + 133 + /* We must wait, so unlock the interpreter */ 134 + 135 + acpi_ex_exit_interpreter(); 136 + 137 + status = acpi_os_acquire_mutex(mutex, timeout); 85 138 86 139 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 87 140 "*** Thread awake after blocking, %s\n", ··· 229 176 * 230 177 * FUNCTION: acpi_ex_system_acquire_mutex 231 178 * 232 - * PARAMETERS: time_desc - The 'time to delay' object descriptor 179 + * PARAMETERS: time_desc - Maximum time to wait for the mutex 233 180 * obj_desc - The object descriptor for this op 234 181 * 235 182 * RETURN: Status ··· 254 201 255 202 /* Support for the _GL_ Mutex object -- go get the global lock */ 256 203 257 - if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { 204 + if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) { 258 205 status = 259 206 acpi_ev_acquire_global_lock((u16) time_desc->integer.value); 260 207 return_ACPI_STATUS(status); 261 208 } 262 209 263 - status = acpi_ex_system_wait_semaphore(obj_desc->mutex.semaphore, 264 - (u16) time_desc->integer.value); 210 + status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, 211 + (u16) time_desc->integer.value); 265 212 return_ACPI_STATUS(status); 266 213 } 267 214 ··· 292 239 293 240 /* Support for the _GL_ Mutex object -- release the global lock */ 294 241 295 - if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { 242 + if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) { 296 243 status = acpi_ev_release_global_lock(); 297 244 return_ACPI_STATUS(status); 298 245 } 299 246 300 - 
status = acpi_os_signal_semaphore(obj_desc->mutex.semaphore, 1); 301 - return_ACPI_STATUS(status); 247 + acpi_os_release_mutex(obj_desc->mutex.os_mutex); 248 + return_ACPI_STATUS(AE_OK); 302 249 } 303 250 304 251 /******************************************************************************* ··· 321 268 ACPI_FUNCTION_TRACE(ex_system_signal_event); 322 269 323 270 if (obj_desc) { 324 - status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1); 271 + status = 272 + acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1); 325 273 } 326 274 327 275 return_ACPI_STATUS(status); ··· 353 299 354 300 if (obj_desc) { 355 301 status = 356 - acpi_ex_system_wait_semaphore(obj_desc->event.semaphore, 302 + acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore, 357 303 (u16) time_desc->integer. 358 304 value); 359 305 } ··· 376 322 acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc) 377 323 { 378 324 acpi_status status = AE_OK; 379 - void *temp_semaphore; 325 + acpi_semaphore temp_semaphore; 380 326 381 327 ACPI_FUNCTION_ENTRY(); 382 328 ··· 387 333 status = 388 334 acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore); 389 335 if (ACPI_SUCCESS(status)) { 390 - (void)acpi_os_delete_semaphore(obj_desc->event.semaphore); 391 - obj_desc->event.semaphore = temp_semaphore; 336 + (void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore); 337 + obj_desc->event.os_semaphore = temp_semaphore; 392 338 } 393 339 394 340 return (status);
+62 -15
drivers/acpi/hardware/hwregs.c
··· 172 172 } 173 173 174 174 /* 175 - * The package must have at least two elements. NOTE (March 2005): This 175 + * The package must have at least two elements. NOTE (March 2005): This 176 176 * goes against the current ACPI spec which defines this object as a 177 - * package with one encoded DWORD element. However, existing practice 177 + * package with one encoded DWORD element. However, existing practice 178 178 * by BIOS vendors seems to be to have 2 or more elements, at least 179 179 * one per sleep type (A/B). 180 180 */ ··· 255 255 * return_value - Value that was read from the register 256 256 * Flags - Lock the hardware or not 257 257 * 258 - * RETURN: Status and the value read from specified Register. Value 258 + * RETURN: Status and the value read from specified Register. Value 259 259 * returned is normalized to bit0 (is shifted all the way right) 260 260 * 261 261 * DESCRIPTION: ACPI bit_register read function. ··· 361 361 case ACPI_REGISTER_PM1_STATUS: 362 362 363 363 /* 364 - * Status Registers are different from the rest. Clear by 365 - * writing 1, and writing 0 has no effect. So, the only relevant 364 + * Status Registers are different from the rest. Clear by 365 + * writing 1, and writing 0 has no effect. So, the only relevant 366 366 * information is the single bit we're interested in, all others should 367 367 * be written as 0 so they will be left unchanged. 368 368 */ ··· 467 467 * 468 468 * FUNCTION: acpi_hw_register_read 469 469 * 470 - * PARAMETERS: use_lock - Mutex hw access 471 - * register_id - register_iD + Offset 470 + * PARAMETERS: use_lock - Lock hardware? True/False 471 + * register_id - ACPI Register ID 472 472 * return_value - Where the register value is returned 473 473 * 474 474 * RETURN: Status and the value read. 475 475 * 476 - * DESCRIPTION: Acpi register read function. Registers are read at the 477 - * given offset. 
476 + * DESCRIPTION: Read from the specified ACPI register 478 477 * 479 478 ******************************************************************************/ 480 479 acpi_status ··· 579 580 * 580 581 * FUNCTION: acpi_hw_register_write 581 582 * 582 - * PARAMETERS: use_lock - Mutex hw access 583 - * register_id - register_iD + Offset 583 + * PARAMETERS: use_lock - Lock hardware? True/False 584 + * register_id - ACPI Register ID 584 585 * Value - The value to write 585 586 * 586 587 * RETURN: Status 587 588 * 588 - * DESCRIPTION: Acpi register Write function. Registers are written at the 589 - * given offset. 589 + * DESCRIPTION: Write to the specified ACPI register 590 + * 591 + * NOTE: In accordance with the ACPI specification, this function automatically 592 + * preserves the value of the following bits, meaning that these bits cannot be 593 + * changed via this interface: 594 + * 595 + * PM1_CONTROL[0] = SCI_EN 596 + * PM1_CONTROL[9] 597 + * PM1_STATUS[11] 598 + * 599 + * ACPI References: 600 + * 1) Hardware Ignored Bits: When software writes to a register with ignored 601 + * bit fields, it preserves the ignored bit fields 602 + * 2) SCI_EN: OSPM always preserves this bit position 590 603 * 591 604 ******************************************************************************/ 592 605 ··· 606 595 { 607 596 acpi_status status; 608 597 acpi_cpu_flags lock_flags = 0; 598 + u32 read_value; 609 599 610 600 ACPI_FUNCTION_TRACE(hw_register_write); 611 601 ··· 616 604 617 605 switch (register_id) { 618 606 case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ 607 + 608 + /* Perform a read first to preserve certain bits (per ACPI spec) */ 609 + 610 + status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, 611 + ACPI_REGISTER_PM1_STATUS, 612 + &read_value); 613 + if (ACPI_FAILURE(status)) { 614 + goto unlock_and_exit; 615 + } 616 + 617 + /* Insert the bits to be preserved */ 618 + 619 + ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, 620 + read_value); 621 + 622 + /* Now 
we can write the data */ 619 623 620 624 status = 621 625 acpi_hw_low_level_write(16, value, ··· 662 634 break; 663 635 664 636 case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ 637 + 638 + /* 639 + * Perform a read first to preserve certain bits (per ACPI spec) 640 + * 641 + * Note: This includes SCI_EN, we never want to change this bit 642 + */ 643 + status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, 644 + ACPI_REGISTER_PM1_CONTROL, 645 + &read_value); 646 + if (ACPI_FAILURE(status)) { 647 + goto unlock_and_exit; 648 + } 649 + 650 + /* Insert the bits to be preserved */ 651 + 652 + ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, 653 + read_value); 654 + 655 + /* Now we can write the data */ 665 656 666 657 status = 667 658 acpi_hw_low_level_write(16, value, ··· 773 726 return (AE_OK); 774 727 } 775 728 776 - /* Get a local copy of the address. Handles possible alignment issues */ 729 + /* Get a local copy of the address. Handles possible alignment issues */ 777 730 778 731 ACPI_MOVE_64_TO_64(&address, &reg->address); 779 732 if (!address) { ··· 845 798 return (AE_OK); 846 799 } 847 800 848 - /* Get a local copy of the address. Handles possible alignment issues */ 801 + /* Get a local copy of the address. Handles possible alignment issues */ 849 802 850 803 ACPI_MOVE_64_TO_64(&address, &reg->address); 851 804 if (!address) {
+12 -15
drivers/acpi/namespace/nsaccess.c
··· 196 196 (u8) (ACPI_TO_INTEGER(val) - 1); 197 197 198 198 if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { 199 - /* 200 - * Create a counting semaphore for the 201 - * global lock 202 - */ 199 + 200 + /* Create a counting semaphore for the global lock */ 201 + 203 202 status = 204 203 acpi_os_create_semaphore 205 204 (ACPI_NO_UNIT_LIMIT, 1, 206 - &obj_desc->mutex.semaphore); 205 + &acpi_gbl_global_lock_semaphore); 207 206 if (ACPI_FAILURE(status)) { 208 207 acpi_ut_remove_reference 209 208 (obj_desc); 210 209 goto unlock_and_exit; 211 210 } 212 211 213 - /* 214 - * We just created the mutex for the 215 - * global lock, save it 216 - */ 217 - acpi_gbl_global_lock_semaphore = 218 - obj_desc->mutex.semaphore; 212 + /* Mark this mutex as very special */ 213 + 214 + obj_desc->mutex.os_mutex = 215 + ACPI_GLOBAL_LOCK; 219 216 } else { 220 217 /* Create a mutex */ 221 218 222 - status = acpi_os_create_semaphore(1, 1, 223 - &obj_desc-> 224 - mutex. 225 - semaphore); 219 + status = 220 + acpi_os_create_mutex(&obj_desc-> 221 + mutex. 222 + os_mutex); 226 223 if (ACPI_FAILURE(status)) { 227 224 acpi_ut_remove_reference 228 225 (obj_desc);
+7 -21
drivers/acpi/osl.c
··· 688 688 /* 689 689 * Allocate the memory for a spinlock and initialize it. 690 690 */ 691 - acpi_status acpi_os_create_lock(acpi_handle * out_handle) 691 + acpi_status acpi_os_create_lock(acpi_spinlock * handle) 692 692 { 693 - spinlock_t *lock_ptr; 694 - 695 - 696 - lock_ptr = acpi_os_allocate(sizeof(spinlock_t)); 697 - 698 - spin_lock_init(lock_ptr); 699 - 700 - ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr)); 701 - 702 - *out_handle = lock_ptr; 693 + spin_lock_init(*handle); 703 694 704 695 return AE_OK; 705 696 } ··· 698 707 /* 699 708 * Deallocate the memory for a spinlock. 700 709 */ 701 - void acpi_os_delete_lock(acpi_handle handle) 710 + void acpi_os_delete_lock(acpi_spinlock handle) 702 711 { 703 - 704 - ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle)); 705 - 706 - acpi_os_free(handle); 707 - 708 712 return; 709 713 } 710 714 ··· 1023 1037 * handle is a pointer to the spinlock_t. 1024 1038 */ 1025 1039 1026 - acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle) 1040 + acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) 1027 1041 { 1028 1042 acpi_cpu_flags flags; 1029 - spin_lock_irqsave((spinlock_t *) handle, flags); 1043 + spin_lock_irqsave(lockp, flags); 1030 1044 return flags; 1031 1045 } 1032 1046 ··· 1034 1048 * Release a spinlock. See above. 1035 1049 */ 1036 1050 1037 - void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags) 1051 + void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) 1038 1052 { 1039 - spin_unlock_irqrestore((spinlock_t *) handle, flags); 1053 + spin_unlock_irqrestore(lockp, flags); 1040 1054 } 1041 1055 1042 1056 #ifndef ACPI_USE_LOCAL_CACHE
+16 -2
drivers/acpi/parser/psparse.c
··· 469 469 } 470 470 471 471 walk_state->thread = thread; 472 + 473 + /* 474 + * If executing a method, the starting sync_level is this method's 475 + * sync_level 476 + */ 477 + if (walk_state->method_desc) { 478 + walk_state->thread->current_sync_level = 479 + walk_state->method_desc->method.sync_level; 480 + } 481 + 472 482 acpi_ds_push_walk_state(walk_state, thread); 473 483 474 484 /* ··· 515 505 status = 516 506 acpi_ds_call_control_method(thread, walk_state, 517 507 NULL); 508 + if (ACPI_FAILURE(status)) { 509 + status = 510 + acpi_ds_method_error(status, walk_state); 511 + } 518 512 519 513 /* 520 514 * If the transfer to the new method method call worked, a new walk ··· 539 525 /* Check for possible multi-thread reentrancy problem */ 540 526 541 527 if ((status == AE_ALREADY_EXISTS) && 542 - (!walk_state->method_desc->method.semaphore)) { 528 + (!walk_state->method_desc->method.mutex)) { 543 529 /* 544 530 * Method tried to create an object twice. The probable cause is 545 531 * that the method cannot handle reentrancy. ··· 551 537 */ 552 538 walk_state->method_desc->method.method_flags |= 553 539 AML_METHOD_SERIALIZED; 554 - walk_state->method_desc->method.concurrency = 1; 540 + walk_state->method_desc->method.sync_level = 0; 555 541 } 556 542 } 557 543
+23 -13
drivers/acpi/utilities/utdelete.c
··· 155 155 case ACPI_TYPE_MUTEX: 156 156 157 157 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 158 - "***** Mutex %p, Semaphore %p\n", 159 - object, object->mutex.semaphore)); 158 + "***** Mutex %p, OS Mutex %p\n", 159 + object, object->mutex.os_mutex)); 160 160 161 - acpi_ex_unlink_mutex(object); 162 - (void)acpi_os_delete_semaphore(object->mutex.semaphore); 161 + if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { 162 + acpi_ex_unlink_mutex(object); 163 + acpi_os_delete_mutex(object->mutex.os_mutex); 164 + } else { 165 + /* Global Lock "mutex" is actually a counting semaphore */ 166 + 167 + (void) 168 + acpi_os_delete_semaphore 169 + (acpi_gbl_global_lock_semaphore); 170 + acpi_gbl_global_lock_semaphore = NULL; 171 + } 163 172 break; 164 173 165 174 case ACPI_TYPE_EVENT: 166 175 167 176 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 168 - "***** Event %p, Semaphore %p\n", 169 - object, object->event.semaphore)); 177 + "***** Event %p, OS Semaphore %p\n", 178 + object, object->event.os_semaphore)); 170 179 171 - (void)acpi_os_delete_semaphore(object->event.semaphore); 172 - object->event.semaphore = NULL; 180 + (void)acpi_os_delete_semaphore(object->event.os_semaphore); 181 + object->event.os_semaphore = NULL; 173 182 break; 174 183 175 184 case ACPI_TYPE_METHOD: ··· 186 177 ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, 187 178 "***** Method %p\n", object)); 188 179 189 - /* Delete the method semaphore if it exists */ 180 + /* Delete the method mutex if it exists */ 190 181 191 - if (object->method.semaphore) { 192 - (void)acpi_os_delete_semaphore(object->method. 193 - semaphore); 194 - object->method.semaphore = NULL; 182 + if (object->method.mutex) { 183 + acpi_os_delete_mutex(object->method.mutex->mutex. 184 + os_mutex); 185 + acpi_ut_delete_object_desc(object->method.mutex); 186 + object->method.mutex = NULL; 195 187 } 196 188 break; 197 189
+1
drivers/acpi/utilities/utglobal.c
··· 794 794 795 795 /* Global Lock support */ 796 796 797 + acpi_gbl_global_lock_semaphore = NULL; 797 798 acpi_gbl_global_lock_acquired = FALSE; 798 799 acpi_gbl_global_lock_thread_count = 0; 799 800 acpi_gbl_global_lock_handle = 0;
+10 -29
drivers/acpi/utilities/utmutex.c
··· 82 82 83 83 /* Create the spinlocks for use at interrupt level */ 84 84 85 - status = acpi_os_create_lock(&acpi_gbl_gpe_lock); 86 - if (ACPI_FAILURE(status)) { 87 - return_ACPI_STATUS(status); 88 - } 85 + spin_lock_init(acpi_gbl_gpe_lock); 86 + spin_lock_init(acpi_gbl_hardware_lock); 89 87 90 - status = acpi_os_create_lock(&acpi_gbl_hardware_lock); 91 88 return_ACPI_STATUS(status); 92 89 } 93 90 ··· 143 146 } 144 147 145 148 if (!acpi_gbl_mutex_info[mutex_id].mutex) { 146 - status = acpi_os_create_semaphore(1, 1, 147 - &acpi_gbl_mutex_info 148 - [mutex_id].mutex); 149 + status = 150 + acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex); 149 151 acpi_gbl_mutex_info[mutex_id].thread_id = 150 152 ACPI_MUTEX_NOT_ACQUIRED; 151 153 acpi_gbl_mutex_info[mutex_id].use_count = 0; ··· 167 171 168 172 static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) 169 173 { 170 - acpi_status status; 171 174 172 175 ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id); 173 176 ··· 174 179 return_ACPI_STATUS(AE_BAD_PARAMETER); 175 180 } 176 181 177 - status = acpi_os_delete_semaphore(acpi_gbl_mutex_info[mutex_id].mutex); 182 + acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex); 178 183 179 184 acpi_gbl_mutex_info[mutex_id].mutex = NULL; 180 185 acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; 181 186 182 - return_ACPI_STATUS(status); 187 + return_ACPI_STATUS(AE_OK); 183 188 } 184 189 185 190 /******************************************************************************* ··· 246 251 "Thread %X attempting to acquire Mutex [%s]\n", 247 252 this_thread_id, acpi_ut_get_mutex_name(mutex_id))); 248 253 249 - status = acpi_os_wait_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, 250 - 1, ACPI_WAIT_FOREVER); 254 + status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, 255 + ACPI_WAIT_FOREVER); 251 256 if (ACPI_SUCCESS(status)) { 252 257 ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 253 258 "Thread %X acquired Mutex [%s]\n", ··· 279 284 280 285 
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) 281 286 { 282 - acpi_status status; 283 287 acpi_thread_id this_thread_id; 284 288 285 289 ACPI_FUNCTION_NAME(ut_release_mutex); ··· 334 340 335 341 acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; 336 342 337 - status = 338 - acpi_os_signal_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, 1); 339 - 340 - if (ACPI_FAILURE(status)) { 341 - ACPI_EXCEPTION((AE_INFO, status, 342 - "Thread %X could not release Mutex [%X]", 343 - this_thread_id, mutex_id)); 344 - } else { 345 - ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, 346 - "Thread %X released Mutex [%s]\n", 347 - this_thread_id, 348 - acpi_ut_get_mutex_name(mutex_id))); 349 - } 350 - 351 - return (status); 343 + acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex); 344 + return (AE_OK); 352 345 }
+1 -1
include/acpi/acconfig.h
··· 63 63 64 64 /* Current ACPICA subsystem version in YYYYMMDD format */ 65 65 66 - #define ACPI_CA_VERSION 0x20060608 66 + #define ACPI_CA_VERSION 0x20060623 67 67 68 68 /* 69 69 * OS name, used for the _OS object. The _OS object is essentially obsolete,
+1 -1
include/acpi/acdispat.h
··· 201 201 acpi_status 202 202 acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, 203 203 union acpi_operand_object *obj_desc, 204 - struct acpi_namespace_node *calling_method_node); 204 + struct acpi_walk_state *walk_state); 205 205 206 206 acpi_status 207 207 acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state);
+20 -6
include/acpi/acglobal.h
··· 181 181 extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1]; 182 182 extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; 183 183 184 + /***************************************************************************** 185 + * 186 + * Mutual exlusion within ACPICA subsystem 187 + * 188 + ****************************************************************************/ 189 + 184 190 /* 185 191 * Predefined mutex objects. This array contains the 186 192 * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. 187 193 * (The table maps local handles to the real OS handles) 188 194 */ 189 195 ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX]; 196 + 197 + /* 198 + * Global lock semaphore works in conjunction with the actual HW global lock 199 + */ 200 + ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; 201 + 202 + /* 203 + * Spinlocks are used for interfaces that can be possibly called at 204 + * interrupt level 205 + */ 206 + ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ 207 + ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ 208 + #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock 209 + #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock 190 210 191 211 /***************************************************************************** 192 212 * ··· 237 217 ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler; 238 218 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; 239 219 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; 240 - ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; 241 220 242 221 /* Misc */ 243 222 ··· 333 314 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 334 315 ACPI_EXTERN struct acpi_gpe_block_info 335 316 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 336 - 337 - /* Spinlocks */ 338 - 339 - ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; 340 - 
ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock; 341 317 342 318 /***************************************************************************** 343 319 *
+4 -1
include/acpi/acinterp.h
··· 287 287 288 288 acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc); 289 289 290 - acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout); 290 + acpi_status 291 + acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout); 292 + 293 + acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout); 291 294 292 295 /* 293 296 * exoparg1 - ACPI AML execution, 1 operand
+20 -14
include/acpi/aclocal.h
··· 47 47 /* acpisrc:struct_defs -- for acpisrc conversion */ 48 48 49 49 #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ 50 - #define ACPI_INFINITE_CONCURRENCY 0xFF 50 + #define ACPI_DO_NOT_WAIT 0 51 + #define ACPI_SERIALIZED 0xFF 51 52 52 - typedef void *acpi_mutex; 53 53 typedef u32 acpi_mutex_handle; 54 + #define ACPI_GLOBAL_LOCK (acpi_semaphore) (-1) 54 55 55 56 /* Total number of aml opcodes defined */ 56 57 ··· 80 79 * table below also! 81 80 */ 82 81 #define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */ 83 - #define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */ 84 - #define ACPI_MTX_TABLES 2 /* Data for ACPI tables */ 85 - #define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */ 86 - #define ACPI_MTX_EVENTS 4 /* Data for ACPI events */ 87 - #define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */ 88 - #define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */ 89 - #define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */ 90 - #define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */ 82 + #define ACPI_MTX_TABLES 1 /* Data for ACPI tables */ 83 + #define ACPI_MTX_NAMESPACE 2 /* ACPI Namespace */ 84 + #define ACPI_MTX_EVENTS 3 /* Data for ACPI events */ 85 + #define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */ 86 + #define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */ 87 + #define ACPI_MTX_DEBUG_CMD_COMPLETE 6 /* AML debugger */ 88 + #define ACPI_MTX_DEBUG_CMD_READY 7 /* AML debugger */ 91 89 92 - #define ACPI_MAX_MUTEX 8 90 + #define ACPI_MAX_MUTEX 7 93 91 #define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1 94 92 95 93 #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) ··· 98 98 99 99 static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { 100 100 "ACPI_MTX_Interpreter", 101 - "ACPI_MTX_Method", 102 101 "ACPI_MTX_Tables", 103 102 "ACPI_MTX_Namespace", 104 103 "ACPI_MTX_Events", 105 104 "ACPI_MTX_Caches", 106 105 "ACPI_MTX_Memory", 107 - "ACPI_MTX_DebugCmdComplete", 108 - 
"ACPI_MTX_DebugCmdReady" 106 + "ACPI_MTX_CommandComplete", 107 + "ACPI_MTX_CommandReady" 109 108 }; 110 109 111 110 #endif ··· 702 703 u8 bit_position; 703 704 u16 access_bit_mask; 704 705 }; 706 + 707 + /* 708 + * Some ACPI registers have bits that must be ignored -- meaning that they 709 + * must be preserved. 710 + */ 711 + #define ACPI_PM1_STATUS_PRESERVED_BITS 0x0800 /* Bit 11 */ 712 + #define ACPI_PM1_CONTROL_PRESERVED_BITS 0x0201 /* Bit 9, Bit 0 (SCI_EN) */ 705 713 706 714 /* 707 715 * Register IDs
+2
include/acpi/acmacros.h
··· 394 394 #define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask) 395 395 #define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) 396 396 397 + #define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask)) 398 + 397 399 /* Generate a UUID */ 398 400 399 401 #define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \
+4 -4
include/acpi/acobject.h
··· 140 140 *****************************************************************************/ 141 141 142 142 struct acpi_object_event { 143 - ACPI_OBJECT_COMMON_HEADER void *semaphore; 143 + ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */ 144 144 }; 145 145 146 146 struct acpi_object_mutex { 147 147 ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ 148 148 u16 acquisition_depth; /* Allow multiple Acquires, same thread */ 149 149 struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ 150 - void *semaphore; /* Actual OS synchronization object */ 150 + acpi_mutex os_mutex; /* Actual OS synchronization object */ 151 151 union acpi_operand_object *prev; /* Link for list of acquired mutexes */ 152 152 union acpi_operand_object *next; /* Link for list of acquired mutexes */ 153 153 struct acpi_namespace_node *node; /* Containing namespace node */ ··· 166 166 struct acpi_object_method { 167 167 ACPI_OBJECT_COMMON_HEADER u8 method_flags; 168 168 u8 param_count; 169 - u8 concurrency; 170 - void *semaphore; 169 + u8 sync_level; 170 + union acpi_operand_object *mutex; 171 171 u8 *aml_start; 172 172 ACPI_INTERNAL_METHOD implementation; 173 173 u32 aml_length;
+31 -9
include/acpi/acpiosxf.h
··· 96 96 struct acpi_table_header **new_table); 97 97 98 98 /* 99 - * Synchronization primitives 99 + * Spinlock primitives 100 + */ 101 + acpi_status acpi_os_create_lock(acpi_spinlock * out_handle); 102 + 103 + void acpi_os_delete_lock(acpi_spinlock handle); 104 + 105 + acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); 106 + 107 + void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags); 108 + 109 + /* 110 + * Semaphore primitives 100 111 */ 101 112 acpi_status 102 113 acpi_os_create_semaphore(u32 max_units, 103 - u32 initial_units, acpi_handle * out_handle); 114 + u32 initial_units, acpi_semaphore * out_handle); 104 115 105 - acpi_status acpi_os_delete_semaphore(acpi_handle handle); 116 + acpi_status acpi_os_delete_semaphore(acpi_semaphore handle); 106 117 107 - acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout); 118 + acpi_status 119 + acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); 108 120 109 - acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units); 121 + acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); 110 122 111 - acpi_status acpi_os_create_lock(acpi_handle * out_handle); 123 + /* 124 + * Mutex primitives 125 + */ 126 + acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); 112 127 113 - void acpi_os_delete_lock(acpi_handle handle); 128 + void acpi_os_delete_mutex(acpi_mutex handle); 114 129 115 - acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle); 130 + acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); 116 131 117 - void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags); 132 + void acpi_os_release_mutex(acpi_mutex handle); 133 + 134 + /* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */ 135 + 136 + #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) 137 + #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle)
138 + #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) 139 + #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) 118 140 119 141 /* 120 142 * Memory allocation and mapping
+27 -22
include/acpi/actypes.h
··· 241 241 242 242 /******************************************************************************* 243 243 * 244 - * OS- or compiler-dependent types 244 + * OS-dependent and compiler-dependent types 245 245 * 246 246 * If the defaults below are not appropriate for the host system, they can 247 247 * be defined in the compiler-specific or OS-specific header, and this will ··· 249 249 * 250 250 ******************************************************************************/ 251 251 252 - /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ 252 + /* Value returned by acpi_os_get_thread_id */ 253 253 254 - #ifndef acpi_uintptr_t 255 - #define acpi_uintptr_t void * 254 + #ifndef acpi_thread_id 255 + #define acpi_thread_id acpi_native_uint 256 256 #endif 257 257 258 - /* 259 - * If acpi_cache_t was not defined in the OS-dependent header, 260 - * define it now. This is typically the case where the local cache 261 - * manager implementation is to be used (ACPI_USE_LOCAL_CACHE) 262 - */ 258 + /* Object returned from acpi_os_create_lock */ 259 + 260 + #ifndef acpi_spinlock 261 + #define acpi_spinlock void * 262 + #endif 263 + 264 + /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ 265 + 266 + #ifndef acpi_cpu_flags 267 + #define acpi_cpu_flags acpi_native_uint 268 + #endif 269 + 270 + /* Object returned from acpi_os_create_cache */ 271 + 263 272 #ifndef acpi_cache_t 264 273 #define acpi_cache_t struct acpi_memory_list 265 274 #endif 266 275
267 - /* 268 - * Allow the CPU flags word to be defined per-OS to simplify the use of the 269 - * lock and unlock OSL interfaces. 270 - */ 271 - #ifndef acpi_cpu_flags 272 - #define acpi_cpu_flags acpi_native_uint 276 + /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ 277 + 278 + #ifndef acpi_uintptr_t 279 + #define acpi_uintptr_t void * 273 280 #endif 274 281 275 282 /* ··· 303 296 */ 304 297 #ifndef ACPI_EXPORT_SYMBOL 305 298 #define ACPI_EXPORT_SYMBOL(symbol) 306 - #endif 307 - 308 - /* 309 - * thread_id is returned by acpi_os_get_thread_id. 310 - */ 311 - #ifndef acpi_thread_id 312 - #define acpi_thread_id acpi_native_uint 313 299 #endif 314 300 315 301 /******************************************************************************* ··· 379 379 u32 lo; 380 380 u32 hi; 381 381 }; 382 + 383 + /* Synchronization objects */ 384 + 385 + #define acpi_mutex void * 386 + #define acpi_semaphore void * 382 387 383 388 /* 384 389 * Acpi integer width. In ACPI version 1, integers are
+2
include/acpi/platform/aclinux.h
··· 58 58 #include <asm/div64.h> 59 59 #include <asm/acpi.h> 60 60 #include <linux/slab.h> 61 + #include <linux/spinlock_types.h> 61 62 62 63 /* Host-dependent types and defines */ 63 64 64 65 #define ACPI_MACHINE_WIDTH BITS_PER_LONG 65 66 #define acpi_cache_t kmem_cache_t 67 + #define acpi_spinlock spinlock_t * 66 68 #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); 67 69 #define strtoul simple_strtoul 68 70