Changes in kernel/arch/ia32/src/asm.S [1d3d2cf:8fb47ec0] in mainline
- File: kernel/arch/ia32/src/asm.S (1 edited)
Legend:
- lines prefixed with "-" appear only in r1d3d2cf (removed)
- lines prefixed with "+" appear only in r8fb47ec0 (added)
- unprefixed lines are unmodified in both revisions
- "…" marks unchanged context elided by the changeset viewer
kernel/arch/ia32/src/asm.S
--- r1d3d2cf
+++ r8fb47ec0
- /*
-  * Copyright (c) 2010 Jakub Jermar
-  * All rights reserved.
-  *
-  * Redistribution and use in source and binary forms, with or without
-  * modification, are permitted provided that the following conditions
-  * are met:
-  *
-  * - Redistributions of source code must retain the above copyright
-  *   notice, this list of conditions and the following disclaimer.
-  * - Redistributions in binary form must reproduce the above copyright
-  *   notice, this list of conditions and the following disclaimer in the
-  *   documentation and/or other materials provided with the distribution.
-  * - The name of the author may not be used to endorse or promote products
-  *   derived from this software without specific prior written permission.
-  *
-  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
-  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
-  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
-  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-  */
-
- /** Very low and hardware-level functions
-  *
-  */
-
- #include <arch/pm.h>
- #include <arch/cpu.h>
- #include <arch/mm/page.h>
+ #
+ # Copyright (c) 2001-2004 Jakub Jermar
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions
+ # are met:
+ #
+ # - Redistributions of source code must retain the above copyright
+ #   notice, this list of conditions and the following disclaimer.
+ # - Redistributions in binary form must reproduce the above copyright
+ #   notice, this list of conditions and the following disclaimer in the
+ #   documentation and/or other materials provided with the distribution.
+ # - The name of the author may not be used to endorse or promote products
+ #   derived from this software without specific prior written permission.
+ #
+ # THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ # OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ # IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ # THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ #
+
+ ## very low and hardware-level functions
+
+ # Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int has no error
+ # word and 1 means interrupt with error word
+ #define ERROR_WORD_INTERRUPT_LIST 0x00027d00

  .text
+
  .global paging_on
  .global enable_l_apic_in_msr
+ .global interrupt_handlers
  .global memsetb
  .global memsetw
…
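A note on the ERROR_WORD_INTERRUPT_LIST value added above: it is a bitmap over exception vectors 0 - 31, with bit n set when vector n pushes an error word. A minimal C sketch (only the mask value is taken from the diff; the loop is purely illustrative) that decodes it:

#include <stdio.h>

/* Value taken from the diff above: bit n set => vector n carries an error word. */
#define ERROR_WORD_INTERRUPT_LIST 0x00027d00

int main(void)
{
	for (unsigned vec = 0; vec < 32; vec++)
		if (ERROR_WORD_INTERRUPT_LIST & (1u << vec))
			printf("vector %u has an error word\n", vec);
	return 0;
}

The set bits are vectors 8, 10, 11, 12, 13, 14 and 17, which are exactly the ia32 exceptions that push an error code (#DF, #TS, #NP, #SS, #GP, #PF and #AC).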
  .global memcpy_to_uspace
  .global memcpy_to_uspace_failover_address
- .global early_putchar
-
- /* Wrapper for generic memsetb */
+
+
+ # Wrapper for generic memsetb
  memsetb:
  jmp _memsetb

- /* Wrapper for generic memsetw */
+ # Wrapper for generic memsetw
  memsetw:
  jmp _memsetw

- #define MEMCPY_DST 4
- #define MEMCPY_SRC 8
- #define MEMCPY_SIZE 12
+
+ #define MEMCPY_DST 4
+ #define MEMCPY_SRC 8
+ #define MEMCPY_SIZE 12

  /** Copy memory to/from userspace.
…
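The MEMCPY_DST/MEMCPY_SRC/MEMCPY_SIZE constants above are plain cdecl stack offsets of the routine's three arguments relative to %esp on entry. A short reference sketch (the prototype is implied by the assembly's argument order, not copied from the kernel headers):

#include <stddef.h>

/* Hypothetical prototype implied by the argument offsets above. */
extern void *memcpy(void *dst, const void *src, size_t size);

/*
 * ia32 cdecl stack layout on entry to memcpy / memcpy_{from,to}_uspace:
 *
 *    0(%esp)  return address
 *    4(%esp)  dst   (MEMCPY_DST)
 *    8(%esp)  src   (MEMCPY_SRC)
 *   12(%esp)  size  (MEMCPY_SIZE)
 *
 * The body then copies size / 4 whole words with rep movsl and the
 * remaining size % 4 bytes with rep movsb.
 */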
  * or copy_to_uspace().
  *
- * @param MEMCPY_DST(%esp)
- * @param MEMCPY_SRC(%esp)
- * @param MEMCPY_SIZE(%esp)
+ * @param MEMCPY_DST(%esp) Destination address.
+ * @param MEMCPY_SRC(%esp) Source address.
+ * @param MEMCPY_SIZE(%esp) Size.
  *
  * @return MEMCPY_DST(%esp) on success and 0 on failure.
- *
  */
  memcpy:
  memcpy_from_uspace:
  memcpy_to_uspace:
- movl %edi, %edx
- movl %esi, %eax
+ movl %edi, %edx /* save %edi */
+ movl %esi, %eax /* save %esi */

  movl MEMCPY_SIZE(%esp), %ecx
- shrl $2, %ecx
+ shrl $2, %ecx /* size / 4 */

  movl MEMCPY_DST(%esp), %edi
  movl MEMCPY_SRC(%esp), %esi

- /* Copy whole words */
- rep movsl
-
+ rep movsl /* copy whole words */
+
  movl MEMCPY_SIZE(%esp), %ecx
- andl $3, %ecx
+ andl $3, %ecx /* size % 4 */
  jz 0f

- /* Copy the rest byte by byte */
- rep movsb
-
- 0:
-
- movl %edx, %edi
- movl %eax, %esi
-
- /* MEMCPY_DST(%esp), success */
- movl MEMCPY_DST(%esp), %eax
- ret
-
+ rep movsb /* copy the rest byte by byte */
+
+ 0:
+ movl %edx, %edi
+ movl %eax, %esi
+ movl MEMCPY_DST(%esp), %eax /* MEMCPY_DST(%esp), success */
+ ret
+
  /*
  * We got here from as_page_fault() after the memory operations
…
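The truncated comment above refers to the failover convention: memcpy_from_uspace()/memcpy_to_uspace() return MEMCPY_DST on success, and if the copy faults on a bad userspace address, the page-fault path resumes execution at the corresponding memcpy_*_failover_address label, which restores the saved registers and returns 0. A hedged C sketch of a caller relying on that contract (the real copy_from_uspace() in the kernel does more, e.g. address range checks):

#include <stddef.h>

/* Signature assumed from the assembly's argument order above. */
extern void *memcpy_from_uspace(void *dst, const void *uspace_src, size_t size);

static int copy_from_uspace_sketch(void *dst, const void *uspace_src, size_t size)
{
	/*
	 * On an invalid userspace address the page fault handler redirects
	 * execution to memcpy_from_uspace_failover_address, so the call
	 * simply comes back with 0 instead of the destination pointer.
	 */
	return memcpy_from_uspace(dst, uspace_src, size) != NULL;
}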
  movl %edx, %edi
  movl %eax, %esi
-
- /* Return 0, failure */
- xorl %eax, %eax
+ xorl %eax, %eax /* return 0, failure */
  ret

- /** Turn paging on
-  *
-  * Enable paging and write-back caching in CR0.
-  *
-  */
+ ## Turn paging on
+ #
+ # Enable paging and write-back caching in CR0.
+ #
  paging_on:
  movl %cr0, %edx
- orl $(1 << 31), %edx /* paging on */
-
- /* Clear Cache Disable and not Write Though */
+ orl $(1 << 31), %edx # paging on
+ # clear Cache Disable and not Write Though
  andl $~((1 << 30) | (1 << 29)), %edx
- movl %edx, %cr0
+ movl %edx,%cr0
  jmp 0f
-
- 0:
- ret
-
- /** Enable local APIC
-  *
-  * Enable local APIC in MSR.
-  *
-  */
+ 0:
+ ret
+
+
+ ## Enable local APIC
+ #
+ # Enable local APIC in MSR.
+ #
  enable_l_apic_in_msr:
  movl $0x1b, %ecx
…
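paging_on manipulates the same three CR0 bits on both sides: it sets bit 31 (PG) and clears bits 30 and 29 (CD and NW), i.e. it enables paging with write-back caching. A small C restatement of that mask arithmetic (bit names per the Intel manuals; this is not kernel code):

#include <stdint.h>

/* CR0 bit positions used by paging_on above. */
#define CR0_PG  (UINT32_C(1) << 31)  /* paging enable */
#define CR0_CD  (UINT32_C(1) << 30)  /* cache disable */
#define CR0_NW  (UINT32_C(1) << 29)  /* not write-through */

static inline uint32_t cr0_for_paging(uint32_t cr0)
{
	/* Same arithmetic as the assembly: set PG, clear CD and NW. */
	return (cr0 | CR0_PG) & ~(CR0_CD | CR0_NW);
}

The enable_l_apic_in_msr routine that follows loads %ecx with 0x1b, which is the IA32_APIC_BASE MSR; the elided body presumably reads it with rdmsr, sets the APIC enable bit and writes it back with wrmsr.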
  ret

- #define ISTATE_OFFSET_EDX 0
- #define ISTATE_OFFSET_ECX 4
- #define ISTATE_OFFSET_EBX 8
- #define ISTATE_OFFSET_ESI 12
- #define ISTATE_OFFSET_EDI 16
- #define ISTATE_OFFSET_EBP 20
- #define ISTATE_OFFSET_EAX 24
- #define ISTATE_OFFSET_EBP_FRAME 28
- #define ISTATE_OFFSET_EIP_FRAME 32
- #define ISTATE_OFFSET_GS 36
- #define ISTATE_OFFSET_FS 40
- #define ISTATE_OFFSET_ES 44
- #define ISTATE_OFFSET_DS 48
- #define ISTATE_OFFSET_ERROR_WORD 52
- #define ISTATE_OFFSET_EIP 56
- #define ISTATE_OFFSET_CS 60
- #define ISTATE_OFFSET_EFLAGS 64
- #define ISTATE_OFFSET_ESP 68
- #define ISTATE_OFFSET_SS 72
-
- /*
- * Size of the istate structure without the hardware-saved part
- * and without the error word.
- */
- #define ISTATE_SOFT_SIZE 52
+ # Clear nested flag
+ # overwrites %ecx
+ .macro CLEAR_NT_FLAG
+ pushfl
+ pop %ecx
+ and $0xffffbfff, %ecx
+ push %ecx
+ popfl
+ .endm

  /*
…
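The ISTATE_OFFSET_* constants removed above fix the layout of the saved state that the handlers build on the stack. A hedged C sketch of the structure they imply (field names follow the offset macros; the authoritative istate_t lives in the kernel headers and may differ in detail):

#include <stdint.h>

/* Sketch reconstructed from the ISTATE_OFFSET_* values above (4-byte fields,
 * offsets 0 .. 72); not copied from the kernel headers. */
typedef struct istate {
	uint32_t edx;         /*  0 */
	uint32_t ecx;         /*  4 */
	uint32_t ebx;         /*  8 */
	uint32_t esi;         /* 12 */
	uint32_t edi;         /* 16 */
	uint32_t ebp;         /* 20 */
	uint32_t eax;         /* 24 */
	uint32_t ebp_frame;   /* 28: fake frame linkage for stack traces */
	uint32_t eip_frame;   /* 32 */
	uint32_t gs;          /* 36 */
	uint32_t fs;          /* 40 */
	uint32_t es;          /* 44 */
	uint32_t ds;          /* 48 */
	uint32_t error_word;  /* 52: pushed by the CPU or faked by the handler */
	uint32_t eip;         /* 56: hardware-saved from here on */
	uint32_t cs;          /* 60 */
	uint32_t eflags;      /* 64 */
	uint32_t esp;         /* 68: present only on a privilege change */
	uint32_t ss;          /* 72 */
} istate_t;

/* ISTATE_SOFT_SIZE == 52 is exactly the part below error_word, i.e. the
 * registers the handlers store themselves. */

On the r8fb47ec0 side, the added CLEAR_NT_FLAG macro masks EFLAGS with 0xffffbfff, i.e. it clears bit 14 (NT, the nested task flag), so that a later iret performs an ordinary return instead of a task return.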
  .global sysenter_handler
  sysenter_handler:
-
- /*
- * Note that the space needed for the istate structure has been
- * preallocated on the stack by before_thread_runs_arch().
- */
-
- /*
- * Save the return address and the userspace stack in the istate
- * structure on locations that would normally be taken by them.
- */
- movl %ebp, ISTATE_OFFSET_ESP(%esp)
- movl %edi, ISTATE_OFFSET_EIP(%esp)
-
- /*
- * Push syscall arguments onto the stack
- */
- movl %eax, ISTATE_OFFSET_EAX(%esp)
- movl %ebx, ISTATE_OFFSET_EBX(%esp)
- movl %ecx, ISTATE_OFFSET_ECX(%esp)
- movl %edx, ISTATE_OFFSET_EDX(%esp)
- movl %esi, ISTATE_OFFSET_ESI(%esp)
- movl %edi, ISTATE_OFFSET_EDI(%esp) /* observability; not needed */
- movl %ebp, ISTATE_OFFSET_EBP(%esp) /* observability; not needed */
-
- /*
- * Fake up the stack trace linkage.
- */
- movl %edi, ISTATE_OFFSET_EIP_FRAME(%esp)
- movl $0, ISTATE_OFFSET_EBP_FRAME(%esp)
- leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp
-
- /*
- * Save TLS.
- */
- movl %gs, %edx
- movl %edx, ISTATE_OFFSET_GS(%esp)
-
- /*
- * Switch to kernel selectors.
- */
- movw $(GDT_SELECTOR(KDATA_DES)), %ax
+ sti
+ pushl %ebp # remember user stack
+ pushl %edi # remember return user address
+
+ pushl %gs # remember TLS
+
+ pushl %eax # syscall number
+ subl $8, %esp # unused sixth and fifth argument
+ pushl %esi # fourth argument
+ pushl %ebx # third argument
+ pushl %ecx # second argument
+ pushl %edx # first argument
+
+ movw $16, %ax
  movw %ax, %ds
  movw %ax, %es

+ cld
+ call syscall_handler
+ addl $28, %esp # remove arguments from stack
+
+ pop %gs # restore TLS
+
+ pop %edx # prepare return EIP for SYSEXIT
+ pop %ecx # prepare userspace ESP for SYSEXIT
+
+ sysexit # return to userspace
+
+
+ ## Declare interrupt handlers
+ #
+ # Declare interrupt handlers for n interrupt
+ # vectors starting at vector i.
+ #
+ # The handlers setup data segment registers
+ # and call exc_dispatch().
+ #
+ #define INTERRUPT_ALIGN 64
+ .macro handler i n
+
+ .ifeq \i - 0x30 # Syscall handler
+ pushl %ds
+ pushl %es
+ pushl %fs
+ pushl %gs
+
+ #
+ # Push syscall arguments onto the stack
+ #
+ # NOTE: The idea behind the order of arguments passed in registers is to
+ # use all scratch registers first and preserved registers next.
+ # An optimized libc syscall wrapper can make use of this setup.
+ #
+ pushl %eax
+ pushl %ebp
+ pushl %edi
+ pushl %esi
+ pushl %ebx
+ pushl %ecx
+ pushl %edx
+
+ # we must fill the data segment registers
+ movw $16, %ax
+ movw %ax, %ds
+ movw %ax, %es
+
+ cld
+ sti
+ # syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax)
+ call syscall_handler
+ cli
+ addl $28, %esp # clean-up of parameters
+
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+
+ CLEAR_NT_FLAG
+ iret
+ .else
  /*
- * Sanitize EFLAGS.
- *
- * SYSENTER does not clear the NT flag, which could thus proliferate
- * from here to the IRET instruction via a context switch and result
- * in crash.
- *
- * SYSENTER does not clear DF, which the ABI assumes to be cleared.
- *
- * SYSENTER clears IF, which we would like to be set for syscalls.
- *
- */
- pushl $(EFLAGS_IF) /* specify EFLAGS bits that we want to set */
- popfl /* set bits from the mask, clear or ignore others */
-
- call syscall_handler
-
- /*
- * Restore TLS.
- */
- movl ISTATE_OFFSET_GS(%esp), %edx
- movl %edx, %gs
-
- /*
- * Prepare return address and userspace stack for SYSEXIT.
- */
- movl ISTATE_OFFSET_EIP(%esp), %edx
- movl ISTATE_OFFSET_ESP(%esp), %ecx
-
- sysexit /* return to userspace */
-
- /*
- * This is the legacy syscall handler using the interrupt mechanism.
- */
- .global int_syscall
- int_syscall:
- subl $(ISTATE_SOFT_SIZE + 4), %esp
-
- /*
- * Push syscall arguments onto the stack
- *
- * NOTE: The idea behind the order of arguments passed
- * in registers is to use all scratch registers
- * first and preserved registers next. An optimized
- * libc syscall wrapper can make use of this setup.
- * The istate structure is arranged in the way to support
- * this idea.
- *
- */
- movl %eax, ISTATE_OFFSET_EAX(%esp)
- movl %ebx, ISTATE_OFFSET_EBX(%esp)
- movl %ecx, ISTATE_OFFSET_ECX(%esp)
- movl %edx, ISTATE_OFFSET_EDX(%esp)
- movl %edi, ISTATE_OFFSET_EDI(%esp)
- movl %esi, ISTATE_OFFSET_ESI(%esp)
- movl %ebp, ISTATE_OFFSET_EBP(%esp)
-
- /*
- * Save the selector registers.
- */
- movl %gs, %ecx
- movl %fs, %edx
-
- movl %ecx, ISTATE_OFFSET_GS(%esp)
- movl %edx, ISTATE_OFFSET_FS(%esp)
-
- movl %es, %ecx
- movl %ds, %edx
-
- movl %ecx, ISTATE_OFFSET_ES(%esp)
- movl %edx, ISTATE_OFFSET_DS(%esp)
-
- /*
- * Switch to kernel selectors.
- */
- movl $(GDT_SELECTOR(KDATA_DES)), %eax
- movl %eax, %ds
- movl %eax, %es
-
- movl $0, ISTATE_OFFSET_EBP_FRAME(%esp)
- movl ISTATE_OFFSET_EIP(%esp), %eax
- movl %eax, ISTATE_OFFSET_EIP_FRAME(%esp)
- leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp
-
- cld
-
- /* Call syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax) */
- call syscall_handler
-
- /*
- * Restore the selector registers.
- */
- movl ISTATE_OFFSET_GS(%esp), %ecx
- movl ISTATE_OFFSET_FS(%esp), %edx
-
- movl %ecx, %gs
- movl %edx, %fs
-
- movl ISTATE_OFFSET_ES(%esp), %ecx
- movl ISTATE_OFFSET_DS(%esp), %edx
-
- movl %ecx, %es
- movl %edx, %ds
-
- /*
- * Restore the preserved registers the handler cloberred itself
- * (i.e. EBP).
- */
- movl ISTATE_OFFSET_EBP(%esp), %ebp
-
- addl $(ISTATE_SOFT_SIZE + 4), %esp
- iret
-
- /**
- * Mask for interrupts 0 - 31 (bits 0 - 31) where 0 means that int
- * has no error word and 1 means interrupt with error word
- *
- */
- #define ERROR_WORD_INTERRUPT_LIST 0x00027d00
-
- .macro handler i
- .global int_\i
- int_\i:
- /*
- * This macro distinguishes between two versions of ia32
- * exceptions. One version has error word and the other
- * does not have it. The latter version fakes the error
- * word on the stack so that the handlers and istate_t
- * can be the same for both types.
+ * This macro distinguishes between two versions of ia32 exceptions.
+ * One version has error word and the other does not have it.
+ * The latter version fakes the error word on the stack so that the
+ * handlers and istate_t can be the same for both types.
  */
  .iflt \i - 32
  .if (1 << \i) & ERROR_WORD_INTERRUPT_LIST
- /*
- * Exception with error word.
+ /*
+ * With error word, do nothing
  */
- subl $ISTATE_SOFT_SIZE, %esp
- .else
- /*
- * Exception without error word: fake up one
- */
- subl $(ISTATE_SOFT_SIZE + 4), %esp
- .endif
- .else
- /*
- * Interrupt: fake up an error word
- */
- subl $(ISTATE_SOFT_SIZE + 4), %esp
+ .else
+ /*
+ * Version without error word,
+ */
+ subl $4, %esp
+ .endif
+ .else
+ /*
+ * Version without error word,
+ */
+ subl $4, %esp
  .endif

- /*
- * Save the general purpose registers.
- */
- movl %eax, ISTATE_OFFSET_EAX(%esp)
- movl %ebx, ISTATE_OFFSET_EBX(%esp)
- movl %ecx, ISTATE_OFFSET_ECX(%esp)
- movl %edx, ISTATE_OFFSET_EDX(%esp)
- movl %edi, ISTATE_OFFSET_EDI(%esp)
- movl %esi, ISTATE_OFFSET_ESI(%esp)
- movl %ebp, ISTATE_OFFSET_EBP(%esp)
-
- /*
- * Save the selector registers.
- */
- movl %gs, %ecx
- movl %fs, %edx
-
- movl %ecx, ISTATE_OFFSET_GS(%esp)
- movl %edx, ISTATE_OFFSET_FS(%esp)
-
- movl %es, %ecx
- movl %ds, %edx
-
- movl %ecx, ISTATE_OFFSET_ES(%esp)
- movl %edx, ISTATE_OFFSET_DS(%esp)
-
- /*
- * Switch to kernel selectors.
- */
- movl $(GDT_SELECTOR(KDATA_DES)), %eax
- movl %eax, %ds
- movl %eax, %es
-
- /*
- * Imitate a regular stack frame linkage.
- * Stop stack traces here if we came from userspace.
- */
- xorl %eax, %eax
- cmpl $(GDT_SELECTOR(KTEXT_DES)), ISTATE_OFFSET_CS(%esp)
- cmovnzl %eax, %ebp
-
- movl %ebp, ISTATE_OFFSET_EBP_FRAME(%esp)
- movl ISTATE_OFFSET_EIP(%esp), %eax
- movl %eax, ISTATE_OFFSET_EIP_FRAME(%esp)
- leal ISTATE_OFFSET_EBP_FRAME(%esp), %ebp
-
- cld
-
- pushl %esp /* pass istate address */
- pushl $(\i) /* pass intnum */
-
- /* Call exc_dispatch(intnum, istate) */
- call exc_dispatch
-
- addl $8, %esp /* clear arguments from the stack */
-
- /*
- * Restore the selector registers.
- */
- movl ISTATE_OFFSET_GS(%esp), %ecx
- movl ISTATE_OFFSET_FS(%esp), %edx
-
- movl %ecx, %gs
- movl %edx, %fs
-
- movl ISTATE_OFFSET_ES(%esp), %ecx
- movl ISTATE_OFFSET_DS(%esp), %edx
-
- movl %ecx, %es
- movl %edx, %ds
-
- /*
- * Restore the scratch registers and the preserved
- * registers the handler cloberred itself
- * (i.e. EBP).
- */
- movl ISTATE_OFFSET_EAX(%esp), %eax
- movl ISTATE_OFFSET_ECX(%esp), %ecx
- movl ISTATE_OFFSET_EDX(%esp), %edx
- movl ISTATE_OFFSET_EBP(%esp), %ebp
-
- addl $(ISTATE_SOFT_SIZE + 4), %esp
+ pushl %ds
+ pushl %es
+ pushl %fs
+ pushl %gs
+
+ pushl %ebp
+ pushl %edx
+ pushl %ecx
+ pushl %eax
+
+ # we must fill the data segment registers
+ movw $16, %ax
+ movw %ax, %ds
+ movw %ax, %es
+
+ # stop stack traces here
+ xorl %ebp, %ebp
+
+ pushl %esp # *istate
+ pushl $(\i) # intnum
+ call exc_dispatch # excdispatch(intnum, *istate)
+ addl $8, %esp # Clear arguments from stack
+
+ CLEAR_NT_FLAG # Modifies %ecx
+
+ popl %eax
+ popl %ecx
+ popl %edx
+ popl %ebp
+
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+
+ addl $4, %esp # Skip error word, no matter whether real or fake.
  iret
+ .endif
+
+ .align INTERRUPT_ALIGN
+ .if (\n- \i) - 1
+ handler "(\i + 1)", \n
+ .endif
  .endm

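Both variants eventually call syscall_handler(edx, ecx, ebx, esi, edi, ebp, eax), and the comments above note that the register order (scratch registers first, preserved registers last) is chosen so that a userspace wrapper stays cheap. A hedged sketch of what a minimal wrapper for the int 0x30 path could look like (four arguments only, to keep the inline assembly simple; the real libc wrapper also loads %edi/%ebp and may use sysenter instead):

#include <stdint.h>

typedef uint32_t sysarg_t;

/* Hedged sketch, not the real libc wrapper: arguments go to the registers in
 * the order the handler expects (edx, ecx, ebx, esi, ...), the syscall id to
 * %eax, and the result comes back in %eax. Vector 0x30 is the one the handler
 * macro above singles out as the syscall gate. */
static inline sysarg_t syscall4(sysarg_t id, sysarg_t a1, sysarg_t a2,
    sysarg_t a3, sysarg_t a4)
{
	sysarg_t ret;
	asm volatile (
	    "int $0x30\n"
	    : "=a" (ret)
	    : "0" (id), "d" (a1), "c" (a2), "b" (a3), "S" (a4)
	    : "memory"
	);
	return ret;
}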
- #define LIST_0_63 \
- 0, 1, 2, 3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,\
- 28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,\
- 53,54,55,56,57,58,59,60,61,62,63
-
+ # keep in sync with pm.h !!!
+ IDT_ITEMS = 64
+ .align INTERRUPT_ALIGN
  interrupt_handlers:
- .irp cnt, LIST_0_63
- handler \cnt
- .endr
-
- /** Print Unicode character to EGA display.
- *
- * If CONFIG_EGA is undefined or CONFIG_FB is defined
- * then this function does nothing.
- *
- * Since the EGA can only display Extended ASCII (usually
- * ISO Latin 1) characters, some of the Unicode characters
- * can be displayed in a wrong way. Only newline and backspace
- * are interpreted, all other characters (even unprintable) are
- * printed verbatim.
- *
- * @param %ebp+0x08 Unicode character to be printed.
- *
- */
- early_putchar:
-
- #if ((defined(CONFIG_EGA)) && (!defined(CONFIG_FB)))
-
- /* Prologue, save preserved registers */
- pushl %ebp
- movl %esp, %ebp
- pushl %ebx
- pushl %esi
- pushl %edi
-
- movl $(PA2KA(0xb8000)), %edi /* base of EGA text mode memory */
- xorl %eax, %eax
-
- /* Read bits 8 - 15 of the cursor address */
- movw $0x3d4, %dx
- movb $0xe, %al
- outb %al, %dx
-
- movw $0x3d5, %dx
- inb %dx, %al
- shl $8, %ax
-
- /* Read bits 0 - 7 of the cursor address */
- movw $0x3d4, %dx
- movb $0xf, %al
- outb %al, %dx
-
- movw $0x3d5, %dx
- inb %dx, %al
-
- /* Sanity check for the cursor on screen */
- cmp $2000, %ax
- jb early_putchar_cursor_ok
-
- movw $1998, %ax
-
- early_putchar_cursor_ok:
-
- movw %ax, %bx
- shl $1, %eax
- addl %eax, %edi
-
- movl 0x08(%ebp), %eax
-
- cmp $0x0a, %al
- jne early_putchar_backspace
-
- /* Interpret newline */
-
- movw %bx, %ax /* %bx -> %dx:%ax */
- xorw %dx, %dx
-
- movw $80, %cx
- idivw %cx, %ax /* %dx = %bx % 80 */
-
- /* %bx <- %bx + 80 - (%bx % 80) */
- addw %cx, %bx
- subw %dx, %bx
-
- jmp early_putchar_skip
-
- early_putchar_backspace:
-
- cmp $0x08, %al
- jne early_putchar_print
-
- /* Interpret backspace */
-
- cmp $0x0000, %bx
- je early_putchar_skip
-
- dec %bx
- jmp early_putchar_skip
-
- early_putchar_print:
-
- /* Print character */
-
- movb $0x0e, %ah /* black background, yellow foreground */
- stosw
- inc %bx
-
- early_putchar_skip:
-
- /* Sanity check for the cursor on the last line */
- cmp $2000, %bx
- jb early_putchar_no_scroll
-
- /* Scroll the screen (24 rows) */
- movl $(PA2KA(0xb80a0)), %esi
- movl $(PA2KA(0xb8000)), %edi
- movl $960, %ecx
- rep movsl
-
- /* Clear the 24th row */
- xorl %eax, %eax
- movl $40, %ecx
- rep stosl
-
- /* Go to row 24 */
- movw $1920, %bx
-
- early_putchar_no_scroll:
-
- /* Write bits 8 - 15 of the cursor address */
- movw $0x3d4, %dx
- movb $0xe, %al
- outb %al, %dx
-
- movw $0x3d5, %dx
- movb %bh, %al
- outb %al, %dx
-
- /* Write bits 0 - 7 of the cursor address */
- movw $0x3d4, %dx
- movb $0xf, %al
- outb %al, %dx
-
- movw $0x3d5, %dx
- movb %bl, %al
- outb %al, %dx
-
- /* Epilogue, restore preserved registers */
- popl %edi
- popl %esi
- popl %ebx
- leave
-
- #endif
-
- ret
-
+ h_start:
+ handler 0 IDT_ITEMS
+ h_end:
+
+ .data
+ .global interrupt_handler_size
+
+ interrupt_handler_size: .long (h_end - h_start) / IDT_ITEMS
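On the r8fb47ec0 side the interrupt handlers are emitted by the recursive handler macro, each padded to INTERRUPT_ALIGN bytes, and interrupt_handler_size exports the resulting per-handler stride. A hedged C sketch of how IDT setup code could use the two exported symbols (the kernel's actual pm code may differ):

#include <stdint.h>

#define IDT_ITEMS  64   /* "keep in sync with pm.h !!!" per the comment above */

/* Symbols exported by asm.S on the r8fb47ec0 side. */
extern uint8_t interrupt_handlers[];
extern uint32_t interrupt_handler_size;

/* Hypothetical helper: address of the handler generated for vector i. */
static inline uintptr_t handler_address(unsigned i)
{
	/* The handlers are emitted back to back, each padded to the same
	 * aligned size, so a simple multiply selects the right one. */
	return (uintptr_t) interrupt_handlers + i * interrupt_handler_size;
}

The r1d3d2cf side instead expands the 64 handlers with .irp over LIST_0_63 and does not export a stride symbol.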
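The removed early_putchar routine drives the EGA text buffer directly: the hardware cursor index read through CRTC registers 0x3d4/0x3d5 equals row * 80 + column, each character cell is two bytes starting at 0xb8000, and only newline and backspace get special treatment. A hedged C restatement of that per-character logic (scrolling, the sanity checks and the cursor write-back are omitted; this is not the kernel's code):

#include <stdint.h>

#define EGA_COLS  80   /* characters per row, as used by the assembly */

static void ega_put_sketch(volatile uint16_t *ega_base, uint16_t *cursor, char c)
{
	if (c == '\n') {
		/* Advance to the start of the next row: bx += 80 - bx % 80. */
		*cursor += EGA_COLS - (*cursor % EGA_COLS);
	} else if (c == '\b') {
		/* Step back one cell unless already at the top-left corner. */
		if (*cursor != 0)
			(*cursor)--;
	} else {
		/* Attribute 0x0e: yellow on black, as in the assembly. */
		ega_base[*cursor] = (uint16_t) ((0x0e << 8) | (uint8_t) c);
		(*cursor)++;
	}
}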