Changeset a7961271 in mainline for kernel/arch/sparc64/src/trap/trap_table.S
- Timestamp:
- 2006-08-26T18:42:11Z (18 years ago)
- Branches:
- lfn, master, serial, ticket/834-toolchain-update, topic/msim-upgrade, topic/simplify-dev-export
- Children:
- c8ea4a8b
- Parents:
- f47fd19
- Files:
- 1 edited (kernel/arch/sparc64/src/trap/trap_table.S)
Legend:
- Unmodified
- Added
- Removed
-
kernel/arch/sparc64/src/trap/trap_table.S
rf47fd19 ra7961271 33 33 .register %g2, #scratch 34 34 .register %g3, #scratch 35 .register %g6, #scratch36 .register %g7, #scratch37 35 38 36 .text … … 204 202 .global spill_0_normal 205 203 spill_0_normal: 206 SPILL_NORMAL_HANDLER 204 SPILL_NORMAL_HANDLER_KERNEL 207 205 208 206 /* TT = 0xc0, TL = 0, fill_0_normal handler */ … … 210 208 .global fill_0_normal 211 209 fill_0_normal: 212 FILL_NORMAL_HANDLER 210 FILL_NORMAL_HANDLER_KERNEL 213 211 214 212 /* … … 268 266 .global spill_0_normal_high 269 267 spill_0_normal_high: 270 SPILL_NORMAL_HANDLER 268 SPILL_NORMAL_HANDLER_KERNEL 271 269 272 270 /* TT = 0xc0, TL > 0, fill_0_normal handler */ … … 274 272 .global fill_0_normal_high 275 273 fill_0_normal_high: 276 FILL_NORMAL_HANDLER 274 FILL_NORMAL_HANDLER_KERNEL 277 275 278 276 … … 280 278 * 281 279 * This trap handler makes arrangements to make calling of scheduler() from 282 * within a trap context possible. It is guaranteed to function only when traps283 * are not nested (i.e. for TL=1).280 * within a trap context possible. It is called from several other trap 281 * handlers. 284 282 * 285 * Every trap handler on TL=1 that makes a call to the scheduler needs to 286 * be based on this function. The reason behind it is that the nested 287 * trap levels and the automatic saving of the interrupted context by hardware 288 * does not work well together with scheduling (i.e. a thread cannot be rescheduled 289 * with TL>0). Therefore it is necessary to eliminate the effect of trap levels 290 * by software and save the necessary state on the kernel stack. 291 * 292 * Note that for traps with TL>1, more state needs to be saved. This function 293 * is therefore not going to work when TL>1. 294 * 295 * The caller is responsible for doing SAVE and allocating 296 * PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack. 283 * This function can be entered either with interrupt globals or alternate globals. 
284 * Memory management trap handlers are obliged to switch to one of those global sets 285 * prior to calling this function. Register window management functions are not 286 * allowed to modify the alternate global registers. 297 287 * 298 288 * Input registers: 299 * %l0 Address of function to call. 300 * Output registers: 301 * %l1 - %l7 Copy of %g1 - %g7 289 * %g1 Address of function to call. 290 * %g2 Argument for the function. 291 * %g6 Pre-set as kernel stack base if trap from userspace. 292 * %g7 Reserved. 302 293 */ 303 294 .global preemptible_handler 304 295 preemptible_handler: 305 /* 306 * Save TSTATE, TPC, TNPC and PSTATE aside. 296 rdpr %tstate, %g3 297 andcc %g3, TSTATE_PRIV_BIT, %g0 ! if this trap came from the privileged mode... 298 bnz 0f ! ...skip setting of kernel stack and primary context 299 nop 300 301 /* 302 * Switch to kernel stack. The old stack is 303 * automatically saved in the old window's %sp 304 * and the new window's %fp. 305 */ 306 save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp 307 308 /* 309 * Mark the CANSAVE windows as OTHER windows. 310 * Set CLEANWIN to NWINDOW-1 so that clean_window traps do not occur. 311 */ 312 rdpr %cansave, %l0 313 wrpr %l0, %otherwin 314 wrpr %g0, %cansave 315 wrpr %g0, NWINDOW-1, %cleanwin 316 317 /* 318 * Switch to primary context 0. 319 */ 320 mov VA_PRIMARY_CONTEXT_REG, %l0 321 stxa %g0, [%l0] ASI_DMMU 322 set kernel_image_start, %l0 323 flush %l0 324 325 ba 1f 326 nop 327 328 0: 329 save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp 330 331 /* 332 * At this moment, we are using the kernel stack 333 * and have successfully allocated a register window. 334 */ 335 1: 336 337 /* 338 * Copy arguments. 339 */ 340 mov %g1, %l0 341 mov %g2, %o0 342 343 /* 344 * Save TSTATE, TPC and TNPC aside. 307 345 */ 308 346 rdpr %tstate, %g1 309 347 rdpr %tpc, %g2 310 348 rdpr %tnpc, %g3 311 rdpr %pstate, %g4312 349 313 350 /* … … 316 353 * the kernel stack of THREAD locked in DTLB. 
317 354 */ 318 stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE] 319 stx %g2, [%fp + STACK_BIAS + SAVED_TPC] 320 stx %g3, [%fp + STACK_BIAS + SAVED_TNPC] 321 stx %g4, [%fp + STACK_BIAS + SAVED_PSTATE] 355 stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE] 356 stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC] 357 stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC] 322 358 323 /*324 * Write 0 to TL.325 */326 359 wrpr %g0, 0, %tl 327 328 /* 329 * Alter PSTATE. 330 * - switch to normal globals. 331 */ 332 and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4 333 wrpr %g4, 0, %pstate 334 335 /* 336 * Save the normal globals. 337 */ 360 wrpr %g0, PSTATE_PRIV_BIT, %pstate 338 361 SAVE_GLOBALS 339 362 340 363 /* 341 * Call the higher-level handler. 342 */ 343 mov %fp, %o1 ! calculate istate address 364 * Call the higher-level handler and pass istate as second parameter. 365 */ 344 366 call %l0 345 add %o1, STACK_BIAS + SAVED_PSTATE, %o1 ! calculate istate address 346 347 /* 348 * Restore the normal global register set. 349 */ 367 add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1 368 350 369 RESTORE_GLOBALS 351 352 /* 353 * Restore PSTATE from saved copy. 354 * Alternate/Interrupt/MM globals become active. 355 */ 356 ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4 357 wrpr %l4, 0, %pstate 358 359 /* 360 * Write 1 to TL. 361 */ 370 wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate 362 371 wrpr %g0, 1, %tl 363 372 … … 365 374 * Read TSTATE, TPC and TNPC from saved copy. 366 375 */ 367 ldx [%fp + STACK_BIAS + SAVED_TSTATE], %g1 368 ldx [%fp + STACK_BIAS + SAVED_TPC], %g2 369 ldx [%fp + STACK_BIAS + SAVED_TNPC], %g3 370 371 /* 372 * Do restore to match the save instruction from the top-level handler. 373 */ 374 restore 375 376 /* 377 * On execution of the RETRY instruction, CWP will be restored from the TSTATE 378 * register. 
However, because of scheduling, it is possible that CWP in the saved 379 * TSTATE is different from the current CWP. The following chunk of code fixes 380 * CWP in the saved copy of TSTATE. 381 */ 382 rdpr %cwp, %g4 ! read current CWP 383 and %g1, ~0x1f, %g1 ! clear CWP field in saved TSTATE 384 or %g1, %g4, %g1 ! write current CWP to TSTATE 385 376 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1 377 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2 378 ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3 379 386 380 /* 387 381 * Restore TSTATE, TPC and TNPC from saved copies. … … 390 384 wrpr %g2, 0, %tpc 391 385 wrpr %g3, 0, %tnpc 392 393 /* 394 * Return from interrupt. 395 */ 386 387 /* 388 * If OTHERWIN is zero, then all the userspace windows have been 389 * spilled to kernel memory (i.e. register window buffer). If 390 * OTHERWIN is non-zero, then some userspace windows are still 391 * valid. Others might have been spilled. However, the CWP pointer 392 * needs no fixing because the scheduler had not been called. 393 */ 394 rdpr %otherwin, %l0 395 brnz %l0, 0f 396 nop 397 398 /* 399 * OTHERWIN == 0 400 */ 401 402 /* 403 * If TSTATE.CWP + 1 == CWP, then we still do not have to fix CWP. 404 */ 405 and %g1, TSTATE_CWP_MASK, %l0 406 inc %l0 407 and %l0, TSTATE_CWP_MASK, %l0 ! %l0 mod NWINDOW 408 rdpr %cwp, %l1 409 cmp %l0, %l1 410 bz 0f ! CWP is ok 411 nop 412 413 /* 414 * Fix CWP. 415 */ 416 mov %fp, %g1 417 flushw 418 wrpr %l0, 0, %cwp 419 mov %g1, %fp 420 421 /* 422 * OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case. 423 */ 424 0: 425 ! TODO: restore register windows from register window memory buffer 426 427 restore 396 428 retry
Note: See TracChangeset for help on using the changeset viewer.